file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–12.1k) | suffix (large_string, lengths 0–12k) | middle (large_string, lengths 0–7.51k) | fim_type (large_string, 4 classes)
---|---|---|---|---
io_export_arm.py | (aabb_center[1])) * 2, \
abs((bobject.bound_box[6][2] - bobject.bound_box[0][2]) / 2 + abs(aabb_center[2])) * 2 \
]
def export_mesh_data(self, exportMesh, bobject, o, has_armature=False):
exportMesh.calc_normals_split()
# exportMesh.calc_loop_triangles()
loops = exportMesh.loops
num_verts = len(loops)
num_uv_layers = len(exportMesh.uv_layers)
has_tex = num_uv_layers > 0
has_tex1 = num_uv_layers > 1
num_colors = len(exportMesh.vertex_colors)
has_col = num_colors > 0
has_tang = has_tex
pdata = np.empty(num_verts * 4, dtype='<f4') # p.xyz, n.z
ndata = np.empty(num_verts * 2, dtype='<f4') # n.xy
if has_tex:
t0map = 0 # Get active uvmap
t0data = np.empty(num_verts * 2, dtype='<f4')
uv_layers = exportMesh.uv_layers
            if uv_layers is not None:
if 'UVMap_baked' in uv_layers:
for i in range(0, len(uv_layers)):
if uv_layers[i].name == 'UVMap_baked':
t0map = i
break
else:
for i in range(0, len(uv_layers)):
if uv_layers[i].active_render:
t0map = i
break
if has_tex1:
t1map = 1 if t0map == 0 else 0
t1data = np.empty(num_verts * 2, dtype='<f4')
# Scale for packed coords
maxdim = 1.0
lay0 = uv_layers[t0map] # TODO: handle t1map
for v in lay0.data:
if abs(v.uv[0]) > maxdim:
maxdim = abs(v.uv[0])
if abs(v.uv[1]) > maxdim:
maxdim = abs(v.uv[1])
if maxdim > 1:
o['scale_tex'] = maxdim
invscale_tex = (1 / o['scale_tex']) * 32767
else:
invscale_tex = 1 * 32767
if has_tang:
exportMesh.calc_tangents(uvmap=lay0.name)
tangdata = np.empty(num_verts * 3, dtype='<f4')
if has_col:
cdata = np.empty(num_verts * 3, dtype='<f4')
# Scale for packed coords
maxdim = max(bobject.data.arm_aabb[0], max(bobject.data.arm_aabb[1], bobject.data.arm_aabb[2]))
if maxdim > 2:
o['scale_pos'] = maxdim / 2
else:
o['scale_pos'] = 1.0
if has_armature: # Allow up to 2x bigger bounds for skinned mesh
o['scale_pos'] *= 2.0
scale_pos = o['scale_pos']
invscale_pos = (1 / scale_pos) * 32767
verts = exportMesh.vertices
if has_tex:
lay0 = exportMesh.uv_layers[t0map]
if has_tex1:
lay1 = exportMesh.uv_layers[t1map]
for i, loop in enumerate(loops):
v = verts[loop.vertex_index]
co = v.co
normal = loop.normal
tang = loop.tangent
i4 = i * 4
i2 = i * 2
pdata[i4 ] = co[0]
pdata[i4 + 1] = co[1]
pdata[i4 + 2] = co[2]
pdata[i4 + 3] = normal[2] * scale_pos # Cancel scale
ndata[i2 ] = normal[0]
ndata[i2 + 1] = normal[1]
if has_tex:
uv = lay0.data[loop.index].uv
t0data[i2 ] = uv[0]
t0data[i2 + 1] = 1.0 - uv[1] # Reverse Y
if has_tex1:
uv = lay1.data[loop.index].uv
t1data[i2 ] = uv[0]
t1data[i2 + 1] = 1.0 - uv[1]
if has_tang:
i3 = i * 3
tangdata[i3 ] = tang[0]
tangdata[i3 + 1] = tang[1]
tangdata[i3 + 2] = tang[2]
if has_col:
i3 = i * 3
cdata[i3 ] = pow(v.col[0], 2.2)
cdata[i3 + 1] = pow(v.col[1], 2.2)
cdata[i3 + 2] = pow(v.col[2], 2.2)
mats = exportMesh.materials
poly_map = []
for i in range(max(len(mats), 1)):
poly_map.append([])
for poly in exportMesh.polygons:
poly_map[poly.material_index].append(poly)
o['index_arrays'] = []
for index, polys in enumerate(poly_map):
tris = 0
for poly in polys:
tris += poly.loop_total - 2
if tris == 0: # No face assigned
continue
prim = np.empty(tris * 3, dtype='<i4')
i = 0
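            # (Added note) The loop below fan-triangulates every polygon around
            # its last loop index: a quad with loops [a, b, c, d] emits the
            # triangles (d, a, b) and (d, b, c), i.e. loop_total - 2 tris per poly.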
for poly in polys:
first = poly.loop_start
total = poly.loop_total
if total == 3:
prim[i ] = loops[first ].index
prim[i + 1] = loops[first + 1].index
prim[i + 2] = loops[first + 2].index
i += 3
else:
for j in range(total - 2):
prim[i ] = loops[first + total - 1].index
prim[i + 1] = loops[first + j ].index
prim[i + 2] = loops[first + j + 1 ].index
i += 3
ia = {}
ia['values'] = prim
ia['material'] = 0
if len(mats) > 1:
for i in range(len(mats)): # Multi-mat mesh
if (mats[i] == mats[index]): # Default material for empty slots
ia['material'] = i
break
o['index_arrays'].append(ia)
# Pack
pdata *= invscale_pos
ndata *= 32767
pdata = np.array(pdata, dtype='<i2')
ndata = np.array(ndata, dtype='<i2')
if has_tex:
t0data *= invscale_tex
t0data = np.array(t0data, dtype='<i2')
if has_tex1:
t1data *= invscale_tex
t1data = np.array(t1data, dtype='<i2')
if has_col:
cdata *= 32767 | cdata = np.array(cdata, dtype='<i2')
if has_tang:
tangdata *= 32767
tangdata = np.array(tangdata, dtype='<i2')
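        # (Added note, illustrative) Attributes are quantised to signed 16-bit
        # integers: a coordinate x in [-scale_pos, scale_pos] is stored as
        # x * (32767 / scale_pos), so scale_pos = 2.0 maps x = 1.0 to 16383 and
        # a reader decodes with x = stored * scale_pos / 32767.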
# Output
o['vertex_arrays'] = []
o['vertex_arrays'].append({ 'attrib': 'pos', 'values': pdata })
o['vertex_arrays'].append({ 'attrib': 'nor', 'values': ndata })
if has_tex:
o['vertex_arrays'].append({ 'attrib': 'tex', 'values': t0data })
if has_tex1:
o['vertex_arrays'].append({ 'attrib': 'tex1', 'values': t1data })
if has_col:
o['vertex_arrays'].append({ 'attrib': 'col', 'values': cdata })
if has_tang:
o['vertex_arrays'].append({ 'attrib': 'tang', 'values': tangdata })
def export_mesh(self, bobject, scene):
# This function exports a single mesh object
print('Exporting mesh ' + bobject.data.name)
o = {}
o['name'] = bobject.name
mesh = bobject.data
armature = bobject.find_armature()
apply_modifiers = not armature
bobject_eval = bobject.evaluated_get(self.depsgraph) if apply_modifiers else bobject
exportMesh = bobject_eval.to_mesh()
self.calc_aabb(bobject)
        self.export_mesh_data(exportMesh, bobject, o, has_armature=armature is not None)
# if armature:
# self.export_skin(bobject, armature, exportMesh, o)
self.write_mesh(bobject, o)
bobject_eval.to_mesh_clear()
def export_objects(self, scene):
meshes = []
        self.output['mesh_datas'] = []
for o in scene | random_line_split |
|
geom_func.py | not invert the triangle being deformed.'''
dm1 = trim[1]-trim[0] #precompute in final algorithm
dm2 = trim[2]-trim[0] #precompute in final algorithm
Am = np.cross(dm1,dm2)/2 #precompute in final algorithm
ds1 = tris[1]-tris[0]
ds2 = tris[2]-tris[0]
As = np.cross(ds1,ds2)/2
    #Ra is a rotation_matrix that rotates Ashat onto Amhat
Ashat = As/np.linalg.norm(As)
Amhat = Am/np.linalg.norm(Am)
v2 = Ashat
v1 = Amhat
axisa = np.cross(v1,v2)
thetaa = np.arcsin(np.linalg.norm(axisa))
if thetaa == 0.:
Ra = np.eye(3)
else:
Ra = rotation_matrix(axisa, thetaa)
    if not np.isclose(np.abs(np.dot(np.dot(Ra, Amhat),Ashat)),1.).all(): #doesn't care if areas end up flipped
Ra = Ra.T
if testing:
assert(np.isclose(np.abs(np.dot(np.dot(Ra, Amhat),Ashat)),1.).all())
    #Rb is a rotation_matrix that rotates Ra*dm1 onto ds1 without unaligning the area vectors
v1 = np.dot(Ra,dm1/np.linalg.norm(dm1))
v2 = ds1/np.linalg.norm(ds1)
axisb = Ashat
v1v2 = np.dot(v1,v2)
if v1v2 >= 1.:
Rb = np.eye(3)
else:
thetab = np.arccos(v1v2)
Rb = rotation_matrix(axisb, thetab).T
if not np.isclose(np.dot(Rb, v1),v2).all():
Rb = Rb.T
if testing:
# test that Rb keeps the area vectors aligned
assert(np.isclose(np.dot(Rb, v1),v2).all())
assert(np.isclose(np.abs(np.dot(np.dot(Ra, Amhat),Ashat)),1.).all())
R = Rb.dot(Ra)
if testing:
        # test that R = Rb.dot(Ra) rotates Amhat onto Ashat
assert(np.isclose(np.abs(np.dot(R.dot(Amhat),Ashat)),1.).all())
        # test that R = Rb.dot(Ra) rotates dm1 onto ds1
assert(np.isclose(R.dot(dm1/np.linalg.norm(dm1)),ds1/np.linalg.norm(ds1)).all())
return R
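# Hedged usage sketch (added; the triangles are illustrative, not from the
# original file): get_R should recover a pure rotation between congruent
# triangles, e.g.
#   trim = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
#   tris = np.array([[0., 0., 0.], [0., 1., 0.], [-1., 0., 0.]])  # trim spun 90 deg about z
#   R = get_R(trim, tris)  # expect R ~ rotation_matrix([0, 0, 1], np.pi / 2)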
##########################################################################################
# Stretch and Shear Deformations
##########################################################################################
def align_triangles(trim,tris, testing=True):
    '''Return parameters for aligning trim to tris.
    Coplanar positions are returned for trim before any shear deformation,
    with the first edges contracted to match.'''
dm1 = trim[1] - trim[0] #precompute in final algorithm
dm2 = trim[2] - trim[0] #precompute in final algorithm
Am = np.cross(dm1, dm2) / 2 #precompute in final algorithm
ds1 = tris[1] - tris[0]
ds2 = tris[2] - tris[0]
As = np.cross(ds1, ds2) / 2
    #Ra is a rotation_matrix that rotates Ashat onto Amhat
Ashat = As / np.linalg.norm(As)
Amhat = Am / np.linalg.norm(Am)
R = get_R(trim, tris, testing=testing)
if testing:
assert (np.isclose(np.linalg.norm(R.dot(dm1)) / np.linalg.norm(dm1), 1.))
#test that the local "x" axis is aligned by R
xhat = ds1 / np.linalg.norm(ds1)
#yhat is not needed for energy calculation, but is needed for deformation gradient calculation via outer product
yhat = np.cross(As, xhat)
yhat /= np.linalg.norm(yhat)
#scale so "the first" edge matches between the two triangles
xi1 = np.linalg.norm(ds1) / np.linalg.norm(dm1)
if testing:
# Test that the first vectors match.
assert (np.isclose(xhat, R.dot(dm1) / np.linalg.norm(dm1)).all())
assert (np.isclose(np.linalg.norm(xi1 * R.dot(dm1)), np.linalg.norm(ds1)))
# project all edges onto the first vector and the second vector
xs1 = xhat.dot(ds1) # full length
ys1 = yhat.dot(ds1) # zero
xs2 = xhat.dot(ds2)
ys2 = yhat.dot(ds2)
# for each second vector, compute the orthogonal component
xm1 = xhat.dot(xi1 * R.dot(dm1))
ym1 = yhat.dot(xi1 * R.dot(dm1))
xm2 = xhat.dot(xi1 * R.dot(dm2))
ym2 = yhat.dot(xi1 * R.dot(dm2))
if testing:
        # test that nothing's left out of plane using the Pythagorean theorem
assert (np.isclose(np.sqrt(xm1**2 + ym1**2), np.linalg.norm(xi1 * dm1)))
assert (np.isclose(np.sqrt(xm2**2 + ym2**2), np.linalg.norm(xi1 * dm2)))
assert (np.isclose(np.sqrt(xs1**2 + ys1**2), np.linalg.norm(ds1)))
assert (np.isclose(np.sqrt(xs2**2 + ys2**2), np.linalg.norm(ds2)))
# scale the triangle heights to match
xi2 = ys2 / ym2
    #use a shear deformation from the Heisenberg group to make the triangles match
s = (xs2 - xm2) / ys2
return xm2, xm1, ym2, ym1, xs2, xs1, ys2, ys1, xhat, yhat, xi1, xi2, s
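# (Added intuition) If trim and tris are identical the decomposition is
# trivial: R is the identity, xi1 = xi2 = 1 and s = 0, so the strain builders
# below return identity matrices and the deformation gradient F is a pure
# rotation.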
# compute the 3D strain gradient
def make_S_3by3(xi1, xi2, s, xhat, yhat):
'''returns the strain gradient in the global basis of the dynamical space.
    s = ( xs2 - xm2 ) / ys2
xi2 = ys2 / ym2
xi1 = norm(ds1)/norm(dm1)
xm, ym have been rotated and scaled to be coplanar with ds1 and ds2. '''
projx = np.outer(xhat,xhat)
projy = np.outer(yhat,yhat)
projy_to_x = np.outer(xhat,yhat)
S = np.eye(3) + (xi2-1) * projy + xi2*s * projy_to_x
S *= xi1
return S
# compute the 2D strain gradient
def make_S_2by2(xi1, xi2, s):
'''returns the strain gradient in the local basis of the dynamical space.
s = ( xs2 - xm2 ) / ys2
xi2 = ys2 / ym2
xi1 = norm(ds1)/norm(dm1)
xm, ym have been rotated and scaled to be coplanar with ds1 and ds2. '''
return xi1 * np.array([[1., s*xi2],[0., xi2]])
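# Hedged sanity check (added, not in the original file): with no stretch or
# shear both builders reduce to the identity,
#   xhat, yhat = np.array([1., 0., 0.]), np.array([0., 1., 0.])
#   assert np.allclose(make_S_3by3(1., 1., 0., xhat, yhat), np.eye(3))
#   assert np.allclose(make_S_2by2(1., 1., 0.), np.eye(2))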
##########################################################################################
# Putting it all together and testing it
##########################################################################################
#TODO: njit this!!
# collect the operations into polar decomposition of deformation gradient matrix.
def get_SR(trim,tris, printing=True, testing=False):
R = get_R(trim,tris,testing=testing)
retval = align_triangles(trim,tris, testing=testing)
#TODO: make retval more compact
xm2, xm1, ym2, ym1, xs2, xs1, ys2, ys1, xhat, yhat, xi1, xi2, s = retval
S = make_S_3by3(xi1, xi2, s, xhat, yhat)
# if (xi2<0) and printing:
# xi2<0 is True if ym2<0 is True
# print('xi2 is negative.')
if (ym2<0) and printing:
S = make_S_3by3(xi1, xi2, s, xhat, yhat)
print(f'ym2 is negative. detS is {np.linalg.det(S):.3f}, and detR is {np.linalg.det(R):.3f}.\r')
return S, R
# collect the operations into one matrix. congrats! you can now measure the deformation gradient F!
def get_F(trim,tris, printing=True, testing=False):
S, R = get_SR(trim,tris, printing=printing, testing=testing)
F = S.dot(R)
return F
# the explicit deformation map
def phi(F,X,b):
return F.dot(X) + b
def get_phi(trim,tris):
| F = get_F(trim,tris)
b = tris[0] - F.dot(trim[0])
return lambda X: phi(F,X,b) | identifier_body |
|
geom_func.py | material/reference space that is
deformed to tris, which is a triangle in real space.
returns the 3x3 rotation matrix aligning both their area normals and their first shape vector.
get_R assumes the deformation is continuous and did not invert the triangle being deformed.'''
dm1 = trim[1]-trim[0] #precompute in final algorithm
dm2 = trim[2]-trim[0] #precompute in final algorithm
Am = np.cross(dm1,dm2)/2 #precompute in final algorithm
ds1 = tris[1]-tris[0]
ds2 = tris[2]-tris[0]
As = np.cross(ds1,ds2)/2
    #Ra is a rotation_matrix that rotates Ashat onto Amhat
Ashat = As/np.linalg.norm(As)
Amhat = Am/np.linalg.norm(Am)
v2 = Ashat
v1 = Amhat
axisa = np.cross(v1,v2)
thetaa = np.arcsin(np.linalg.norm(axisa))
if thetaa == 0.:
Ra = np.eye(3)
else:
Ra = rotation_matrix(axisa, thetaa)
    if not np.isclose(np.abs(np.dot(np.dot(Ra, Amhat),Ashat)),1.).all(): #doesn't care if areas end up flipped
Ra = Ra.T
if testing:
assert(np.isclose(np.abs(np.dot(np.dot(Ra, Amhat),Ashat)),1.).all())
    #Rb is a rotation_matrix that rotates Ra*dm1 onto ds1 without unaligning the area vectors
v1 = np.dot(Ra,dm1/np.linalg.norm(dm1))
v2 = ds1/np.linalg.norm(ds1)
axisb = Ashat
v1v2 = np.dot(v1,v2)
if v1v2 >= 1.:
Rb = np.eye(3)
else:
thetab = np.arccos(v1v2)
Rb = rotation_matrix(axisb, thetab).T
if not np.isclose(np.dot(Rb, v1),v2).all():
Rb = Rb.T
if testing:
# test that Rb keeps the area vectors aligned
assert(np.isclose(np.dot(Rb, v1),v2).all())
assert(np.isclose(np.abs(np.dot(np.dot(Ra, Amhat),Ashat)),1.).all())
R = Rb.dot(Ra)
if testing:
        # test that R = Rb.dot(Ra) rotates Amhat onto Ashat
assert(np.isclose(np.abs(np.dot(R.dot(Amhat),Ashat)),1.).all())
        # test that R = Rb.dot(Ra) rotates dm1 onto ds1
assert(np.isclose(R.dot(dm1/np.linalg.norm(dm1)),ds1/np.linalg.norm(ds1)).all())
return R
##########################################################################################
# Stretch and Shear Deformations
##########################################################################################
def align_triangles(trim,tris, testing=True):
    '''Return parameters for aligning trim to tris.
    Coplanar positions are returned for trim before any shear deformation,
    with the first edges contracted to match.'''
dm1 = trim[1] - trim[0] #precompute in final algorithm
dm2 = trim[2] - trim[0] #precompute in final algorithm
Am = np.cross(dm1, dm2) / 2 #precompute in final algorithm
ds1 = tris[1] - tris[0]
ds2 = tris[2] - tris[0]
As = np.cross(ds1, ds2) / 2
    #Ra is a rotation_matrix that rotates Ashat onto Amhat
Ashat = As / np.linalg.norm(As)
Amhat = Am / np.linalg.norm(Am)
R = get_R(trim, tris, testing=testing)
if testing:
assert (np.isclose(np.linalg.norm(R.dot(dm1)) / np.linalg.norm(dm1), 1.))
#test that the local "x" axis is aligned by R
xhat = ds1 / np.linalg.norm(ds1)
#yhat is not needed for energy calculation, but is needed for deformation gradient calculation via outer product
yhat = np.cross(As, xhat)
yhat /= np.linalg.norm(yhat)
#scale so "the first" edge matches between the two triangles
xi1 = np.linalg.norm(ds1) / np.linalg.norm(dm1)
if testing:
# Test that the first vectors match.
assert (np.isclose(xhat, R.dot(dm1) / np.linalg.norm(dm1)).all())
assert (np.isclose(np.linalg.norm(xi1 * R.dot(dm1)), np.linalg.norm(ds1)))
# project all edges onto the first vector and the second vector
xs1 = xhat.dot(ds1) # full length
ys1 = yhat.dot(ds1) # zero
xs2 = xhat.dot(ds2)
ys2 = yhat.dot(ds2)
# for each second vector, compute the orthogonal component
xm1 = xhat.dot(xi1 * R.dot(dm1))
ym1 = yhat.dot(xi1 * R.dot(dm1))
xm2 = xhat.dot(xi1 * R.dot(dm2))
ym2 = yhat.dot(xi1 * R.dot(dm2))
if testing:
        # test that nothing's left out of plane using the Pythagorean theorem
assert (np.isclose(np.sqrt(xm1**2 + ym1**2), np.linalg.norm(xi1 * dm1)))
assert (np.isclose(np.sqrt(xm2**2 + ym2**2), np.linalg.norm(xi1 * dm2)))
assert (np.isclose(np.sqrt(xs1**2 + ys1**2), np.linalg.norm(ds1)))
assert (np.isclose(np.sqrt(xs2**2 + ys2**2), np.linalg.norm(ds2)))
# scale the triangle heights to match
xi2 = ys2 / ym2
    #use a shear deformation from the Heisenberg group to make the triangles match
s = (xs2 - xm2) / ys2
return xm2, xm1, ym2, ym1, xs2, xs1, ys2, ys1, xhat, yhat, xi1, xi2, s
# compute the 3D strain gradient
def make_S_3by3(xi1, xi2, s, xhat, yhat):
'''returns the strain gradient in the global basis of the dynamical space.
    s = ( xs2 - xm2 ) / ys2
xi2 = ys2 / ym2
xi1 = norm(ds1)/norm(dm1)
xm, ym have been rotated and scaled to be coplanar with ds1 and ds2. '''
projx = np.outer(xhat,xhat)
projy = np.outer(yhat,yhat)
projy_to_x = np.outer(xhat,yhat)
S = np.eye(3) + (xi2-1) * projy + xi2*s * projy_to_x
S *= xi1
return S
# compute the 2D strain gradient
def make_S_2by2(xi1, xi2, s):
'''returns the strain gradient in the local basis of the dynamical space.
s = ( xs2 - xm2 ) / ys2
xi2 = ys2 / ym2
xi1 = norm(ds1)/norm(dm1)
xm, ym have been rotated and scaled to be coplanar with ds1 and ds2. '''
return xi1 * np.array([[1., s*xi2],[0., xi2]])
##########################################################################################
# Putting it all together and testing it
##########################################################################################
#TODO: njit this!!
# collect the operations into polar decomposition of deformation gradient matrix.
def get_SR(trim,tris, printing=True, testing=False):
R = get_R(trim,tris,testing=testing)
retval = align_triangles(trim,tris, testing=testing)
#TODO: make retval more compact
xm2, xm1, ym2, ym1, xs2, xs1, ys2, ys1, xhat, yhat, xi1, xi2, s = retval
S = make_S_3by3(xi1, xi2, s, xhat, yhat)
# if (xi2<0) and printing:
# xi2<0 is True if ym2<0 is True
# print('xi2 is negative.')
if (ym2<0) and printing:
S = make_S_3by3(xi1, xi2, s, xhat, yhat)
print(f'ym2 is negative. detS is {np.linalg.det(S):.3f}, and detR is {np.linalg.det(R):.3f}.\r')
return S, R
# collect the operations into one matrix. congrats! you can now measure the deformation gradient F!
def get_F(trim,tris, printing=True, testing=False):
S, R = get_SR(trim,tris, printing=printing, testing=testing)
F = S.dot(R)
return F
# the explicit deformation map
def | phi | identifier_name |
|
geom_func.py | # # test the explicit deformation map for a number of triangles
# tris = mesh.triangles[71]
# trim = mesh.triangles[30]
# mtos = get_phi(trim,tris)
# trim_mapped = np.array([mtos(trim[0]),mtos(trim[1]),mtos(trim[2])])
# print('tris is')
# print(tris)
# print('trim is mapped to')
# print(trim_mapped)
# print('difference after mapping is')
# print(tris - trim_mapped)
# assert(np.isclose(tris - trim_mapped,0.).all())
##########################################################################################
##########################################################################################
# Rotations
##########################################################################################
#TODO: optimize this function by removing the first two lines (and maybe precomputing a?), and then njiting it.
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
    the given axis by theta radians. Uses the Euler-Rodrigues formula.
"""
axis = np.asarray(axis)
axis = axis / np.sqrt(np.dot(axis, axis))
a = np.cos(theta / 2.0)
b, c, d = -axis * np.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
[2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
[2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
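# Worked example (added): a 90-degree rotation about z sends x-hat to y-hat,
#   Rz = rotation_matrix([0, 0, 1], np.pi / 2)
#   assert np.allclose(Rz.dot([1., 0., 0.]), [0., 1., 0.])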
def get_shape(triangle):
d1 = triangle[1]-triangle[0] #TODO: precompute in final algorithm for material space triangles
d2 = triangle[2]-triangle[0] #TODO: precompute in final algorithm for material space triangles
A = np.cross(d1,d2)/2 #TODO: precompute in final algorithm for material space triangles
return (d1, d2, A)
def get_R(trim,tris,testing=True):
'''trim is a triangle in material/reference space that is
deformed to tris, which is a triangle in real space.
returns the 3x3 rotation matrix aligning both their area normals and their first shape vector.
get_R assumes the deformation is continuous and did not invert the triangle being deformed.'''
dm1 = trim[1]-trim[0] #precompute in final algorithm
dm2 = trim[2]-trim[0] #precompute in final algorithm
Am = np.cross(dm1,dm2)/2 #precompute in final algorithm
ds1 = tris[1]-tris[0]
ds2 = tris[2]-tris[0]
As = np.cross(ds1,ds2)/2
    #Ra is a rotation_matrix that rotates Ashat onto Amhat
Ashat = As/np.linalg.norm(As)
Amhat = Am/np.linalg.norm(Am)
v2 = Ashat
v1 = Amhat
axisa = np.cross(v1,v2)
thetaa = np.arcsin(np.linalg.norm(axisa))
if thetaa == 0.:
Ra = np.eye(3)
else:
Ra = rotation_matrix(axisa, thetaa)
    if not np.isclose(np.abs(np.dot(np.dot(Ra, Amhat),Ashat)),1.).all(): #doesn't care if areas end up flipped
Ra = Ra.T
if testing:
assert(np.isclose(np.abs(np.dot(np.dot(Ra, Amhat),Ashat)),1.).all())
    #Rb is a rotation_matrix that rotates Ra*dm1 onto ds1 without unaligning the area vectors
v1 = np.dot(Ra,dm1/np.linalg.norm(dm1))
v2 = ds1/np.linalg.norm(ds1)
axisb = Ashat
v1v2 = np.dot(v1,v2)
if v1v2 >= 1.:
Rb = np.eye(3)
else:
thetab = np.arccos(v1v2)
Rb = rotation_matrix(axisb, thetab).T
if not np.isclose(np.dot(Rb, v1),v2).all():
Rb = Rb.T
if testing:
# test that Rb keeps the area vectors aligned
assert(np.isclose(np.dot(Rb, v1),v2).all())
assert(np.isclose(np.abs(np.dot(np.dot(Ra, Amhat),Ashat)),1.).all())
R = Rb.dot(Ra)
if testing:
        # test that R = Rb.dot(Ra) rotates Amhat onto Ashat
assert(np.isclose(np.abs(np.dot(R.dot(Amhat),Ashat)),1.).all())
        # test that R = Rb.dot(Ra) rotates dm1 onto ds1
assert(np.isclose(R.dot(dm1/np.linalg.norm(dm1)),ds1/np.linalg.norm(ds1)).all())
return R
##########################################################################################
# Stretch and Shear Deformations
##########################################################################################
def align_triangles(trim,tris, testing=True):
    '''Return parameters for aligning trim to tris.
    Coplanar positions are returned for trim before any shear deformation,
    with the first edges contracted to match.'''
dm1 = trim[1] - trim[0] #precompute in final algorithm
dm2 = trim[2] - trim[0] #precompute in final algorithm
Am = np.cross(dm1, dm2) / 2 #precompute in final algorithm
ds1 = tris[1] - tris[0]
ds2 = tris[2] - tris[0]
As = np.cross(ds1, ds2) / 2
    #Ra is a rotation_matrix that rotates Ashat onto Amhat
Ashat = As / np.linalg.norm(As)
Amhat = Am / np.linalg.norm(Am)
R = get_R(trim, tris, testing=testing)
if testing:
assert (np.isclose(np.linalg.norm(R.dot(dm1)) / np.linalg.norm(dm1), 1.))
#test that the local "x" axis is aligned by R
xhat = ds1 / np.linalg.norm(ds1)
#yhat is not needed for energy calculation, but is needed for deformation gradient calculation via outer product
yhat = np.cross(As, xhat)
yhat /= np.linalg.norm(yhat)
#scale so "the first" edge matches between the two triangles
xi1 = np.linalg.norm(ds1) / np.linalg.norm(dm1)
if testing:
# Test that the first vectors match.
assert (np.isclose(xhat, R.dot(dm1) / np.linalg.norm(dm1)).all())
assert (np.isclose(np.linalg.norm(xi1 * R.dot(dm1)), np.linalg.norm(ds1)))
# project all edges onto the first vector and the second vector
xs1 = xhat.dot(ds1) # full length
ys1 = yhat.dot(ds1) # zero
xs2 = xhat.dot(ds2)
ys2 = yhat.dot(ds2)
# for each second vector, compute the orthogonal component
xm1 = xhat.dot(xi1 * R.dot(dm1))
ym1 = yhat.dot(xi1 * R.dot(dm1))
xm2 = xhat.dot(xi1 * R.dot(dm2))
ym2 = yhat.dot(xi1 * R.dot(dm2))
if testing:
        # test that nothing's left out of plane using the Pythagorean theorem
assert (np.isclose(np.sqrt(xm1**2 + ym1**2), np.linalg.norm(xi1 * dm1)))
assert (np.isclose(np.sqrt(xm2**2 + ym2**2), np.linalg.norm(xi1 * dm2)))
assert (np.isclose(np.sqrt(xs1**2 + ys1**2), np.linalg.norm(ds1)))
assert (np.isclose(np.sqrt(xs2**2 + ys2**2), np.linalg.norm(ds2)))
# scale the triangle heights to match
xi2 = ys2 / ym2
    #use a shear deformation from the Heisenberg group to make the triangles match
s = (xs2 - xm2) / ys2
return xm2, xm1, ym2, ym1, xs2, xs1, ys2, ys1, xhat, yhat, xi1, xi2, s
# compute the 3D strain gradient
def make_S_3by3(xi1, xi2, s, xhat, yhat):
'''returns the strain gradient in the global basis of the dynamical space.
    s = ( xs2 - xm2 ) / ys2
xi2 = ys2 / ym2
xi1 = norm(ds1)/norm(dm1)
xm, ym have been rotated and scaled to be coplanar | # #normalize the mean radius to 1
# mesh.vertices /= np.cbrt(mesh.volume*3/(4*np.pi)) | random_line_split |
|
geom_func.py | and maybe precomputing a?), and then njiting it.
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
    the given axis by theta radians. Uses the Euler-Rodrigues formula.
"""
axis = np.asarray(axis)
axis = axis / np.sqrt(np.dot(axis, axis))
a = np.cos(theta / 2.0)
b, c, d = -axis * np.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
[2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
[2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
def get_shape(triangle):
d1 = triangle[1]-triangle[0] #TODO: precompute in final algorithm for material space triangles
d2 = triangle[2]-triangle[0] #TODO: precompute in final algorithm for material space triangles
A = np.cross(d1,d2)/2 #TODO: precompute in final algorithm for material space triangles
return (d1, d2, A)
def get_R(trim,tris,testing=True):
'''trim is a triangle in material/reference space that is
deformed to tris, which is a triangle in real space.
returns the 3x3 rotation matrix aligning both their area normals and their first shape vector.
get_R assumes the deformation is continuous and did not invert the triangle being deformed.'''
dm1 = trim[1]-trim[0] #precompute in final algorithm
dm2 = trim[2]-trim[0] #precompute in final algorithm
Am = np.cross(dm1,dm2)/2 #precompute in final algorithm
ds1 = tris[1]-tris[0]
ds2 = tris[2]-tris[0]
As = np.cross(ds1,ds2)/2
    #Ra is a rotation_matrix that rotates Ashat onto Amhat
Ashat = As/np.linalg.norm(As)
Amhat = Am/np.linalg.norm(Am)
v2 = Ashat
v1 = Amhat
axisa = np.cross(v1,v2)
thetaa = np.arcsin(np.linalg.norm(axisa))
if thetaa == 0.:
Ra = np.eye(3)
else:
Ra = rotation_matrix(axisa, thetaa)
    if not np.isclose(np.abs(np.dot(np.dot(Ra, Amhat),Ashat)),1.).all(): #doesn't care if areas end up flipped
Ra = Ra.T
if testing:
assert(np.isclose(np.abs(np.dot(np.dot(Ra, Amhat),Ashat)),1.).all())
    #Rb is a rotation_matrix that rotates Ra*dm1 onto ds1 without unaligning the area vectors
v1 = np.dot(Ra,dm1/np.linalg.norm(dm1))
v2 = ds1/np.linalg.norm(ds1)
axisb = Ashat
v1v2 = np.dot(v1,v2)
if v1v2 >= 1.:
Rb = np.eye(3)
else:
thetab = np.arccos(v1v2)
Rb = rotation_matrix(axisb, thetab).T
if not np.isclose(np.dot(Rb, v1),v2).all():
Rb = Rb.T
if testing:
# test that Rb keeps the area vectors aligned
|
R = Rb.dot(Ra)
if testing:
        # test that R = Rb.dot(Ra) rotates Amhat onto Ashat
assert(np.isclose(np.abs(np.dot(R.dot(Amhat),Ashat)),1.).all())
        # test that R = Rb.dot(Ra) rotates dm1 onto ds1
assert(np.isclose(R.dot(dm1/np.linalg.norm(dm1)),ds1/np.linalg.norm(ds1)).all())
return R
##########################################################################################
# Stretch and Shear Deformations
##########################################################################################
def align_triangles(trim,tris, testing=True):
    '''Return parameters for aligning trim to tris.
    Coplanar positions are returned for trim before any shear deformation,
    with the first edges contracted to match.'''
dm1 = trim[1] - trim[0] #precompute in final algorithm
dm2 = trim[2] - trim[0] #precompute in final algorithm
Am = np.cross(dm1, dm2) / 2 #precompute in final algorithm
ds1 = tris[1] - tris[0]
ds2 = tris[2] - tris[0]
As = np.cross(ds1, ds2) / 2
    #Ra is a rotation_matrix that rotates Ashat onto Amhat
Ashat = As / np.linalg.norm(As)
Amhat = Am / np.linalg.norm(Am)
R = get_R(trim, tris, testing=testing)
if testing:
assert (np.isclose(np.linalg.norm(R.dot(dm1)) / np.linalg.norm(dm1), 1.))
#test that the local "x" axis is aligned by R
xhat = ds1 / np.linalg.norm(ds1)
#yhat is not needed for energy calculation, but is needed for deformation gradient calculation via outer product
yhat = np.cross(As, xhat)
yhat /= np.linalg.norm(yhat)
#scale so "the first" edge matches between the two triangles
xi1 = np.linalg.norm(ds1) / np.linalg.norm(dm1)
if testing:
# Test that the first vectors match.
assert (np.isclose(xhat, R.dot(dm1) / np.linalg.norm(dm1)).all())
assert (np.isclose(np.linalg.norm(xi1 * R.dot(dm1)), np.linalg.norm(ds1)))
# project all edges onto the first vector and the second vector
xs1 = xhat.dot(ds1) # full length
ys1 = yhat.dot(ds1) # zero
xs2 = xhat.dot(ds2)
ys2 = yhat.dot(ds2)
# for each second vector, compute the orthogonal component
xm1 = xhat.dot(xi1 * R.dot(dm1))
ym1 = yhat.dot(xi1 * R.dot(dm1))
xm2 = xhat.dot(xi1 * R.dot(dm2))
ym2 = yhat.dot(xi1 * R.dot(dm2))
if testing:
        # test that nothing's left out of plane using the Pythagorean theorem
assert (np.isclose(np.sqrt(xm1**2 + ym1**2), np.linalg.norm(xi1 * dm1)))
assert (np.isclose(np.sqrt(xm2**2 + ym2**2), np.linalg.norm(xi1 * dm2)))
assert (np.isclose(np.sqrt(xs1**2 + ys1**2), np.linalg.norm(ds1)))
assert (np.isclose(np.sqrt(xs2**2 + ys2**2), np.linalg.norm(ds2)))
# scale the triangle heights to match
xi2 = ys2 / ym2
    #use a shear deformation from the Heisenberg group to make the triangles match
s = (xs2 - xm2) / ys2
return xm2, xm1, ym2, ym1, xs2, xs1, ys2, ys1, xhat, yhat, xi1, xi2, s
# compute the 3D strain gradient
def make_S_3by3(xi1, xi2, s, xhat, yhat):
'''returns the strain gradient in the global basis of the dynamical space.
    s = ( xs2 - xm2 ) / ys2
xi2 = ys2 / ym2
xi1 = norm(ds1)/norm(dm1)
xm, ym have been rotated and scaled to be coplanar with ds1 and ds2. '''
projx = np.outer(xhat,xhat)
projy = np.outer(yhat,yhat)
projy_to_x = np.outer(xhat,yhat)
S = np.eye(3) + (xi2-1) * projy + xi2*s * projy_to_x
S *= xi1
return S
# compute the 2D strain gradient
def make_S_2by2(xi1, xi2, s):
'''returns the strain gradient in the local basis of the dynamical space.
s = ( xs2 - xm2 ) / ys2
xi2 = ys2 / ym2
xi1 = norm(ds1)/norm(dm1)
xm, ym have been rotated and scaled to be coplanar with ds1 and ds2. '''
return | assert(np.isclose(np.dot(Rb, v1),v2).all())
assert(np.isclose(np.abs(np.dot(np.dot(Ra, Amhat),Ashat)),1.).all()) | conditional_block |
train_i2t_gan.py | loader = dataloader(transform, batch_size)
return loader
import numpy as np
normalization = torch.Tensor([np.log(2 * np.pi)])
def NLL(sample, params):
|
def make_target(word_idcs):
target = torch.zeros(word_idcs.size(0), 2100).cuda()
for idx in range(word_idcs.shape[0]):
target[idx][word_idcs[idx]] = 1
return target
from random import shuffle
def true_randperm(size, device=torch.device("cuda:0")):
def unmatched_randperm(size):
l1 = [i for i in range(size)]
l2 = []
for j in range(size):
deleted = False
if j in l1:
deleted = True
del l1[l1.index(j)]
shuffle(l1)
if len(l1) == 0:
return 0, False
l2.append(l1[0])
del l1[0]
if deleted:
l1.append(j)
return l2, True
flag = False
while not flag:
l, flag = unmatched_randperm(size)
return torch.LongTensor(l).to(device)
def train_image_gan_with_text(net_ig, net_id, opt_ig, opt_id, total_iter, loader, options):
text_g_val = 0
text_d_val = 0
text_dt_val = 0
text_gt_val = 0
text_dt_mis_val = 0
log_folder = options.trial_name
if not os.path.exists(log_folder):
os.mkdir(log_folder)
os.mkdir(log_folder+'/checkpoint')
os.mkdir(log_folder+'/sample')
log_file_name = os.path.join(log_folder, 'train_image_to_text_log.txt')
log_file = open(log_file_name, 'w')
    log_file.write('text_g, text_d, text_dt\n')
log_file.close()
copy('train_i2t_gan.py', log_folder+'/train_i2t_gan.py')
copy('models.py', log_folder+'/models.py')
data_loader = sample_data(loader, image_size=128, batch_size=options.batch_size)
dataset = iter(data_loader)
for i in tqdm(range(options.start_iter, options.total_iter)):
try:
real_image, bird_idx, bert_idx = next(dataset)
except (OSError, StopIteration):
dataset = iter(data_loader)
real_image, bird_idx, bert_idx = next(dataset)
### 1. load the data
b_size = real_image.shape[0]
real_image = real_image.cuda()
real_embs = net_t_ae.bert(bert_idx.cuda())[0]
real_text_latent = net_t_ae.encode(real_embs).detach()
bird_idx = bird_idx.long().cuda()
perm = true_randperm(b_size)
img_feat_16, img_feat_8, img_feat_4 = net_iae.encoder(real_image)
# 2. Train the Generators
if i==(options.total_iter//4) and options.checkpoint is None:
opt_tg.add_param_group({'params': chain(net_tg.word_attn_4.parameters(),
net_tg.word_attn_16.parameters(),
net_tg.sentence_attn_4.parameters(),
net_tg.sentence_attn_16.parameters(),
), 'lr': 0.1*args.lr})
net_tg.zero_grad()
noise = torch.randn(b_size, 128).cuda()
g_text_latent = net_tg(noise, img_feat_4, img_feat_16)
g_pred = net_td(g_text_latent)
g_pred_i = net_tdi(g_text_latent, img_feat_4, img_feat_16)
loss_g_latent = -g_pred.mean() - g_pred_i.mean()
loss_total = loss_g_latent
loss_total.backward()
opt_tg.step()
text_g_val += g_pred.mean().item()
text_gt_val += g_pred_i.mean().item()
### 3. Train the Discriminators
if i==(options.total_iter//4) and options.checkpoint is None:
opt_tdi.add_param_group({'params': chain(
net_tdi.sentence_attn_4.parameters(),
net_tdi.sentence_attn_16.parameters(),
), 'lr': 0.1*args.lr})
        ### 3.1 train the text-only discriminator
net_id.zero_grad()
real_predict = net_td(real_text_latent)
fake_predict = net_id(g_text_latent.detach())
loss_disc = F.relu(1-real_predict).mean() + F.relu(1+fake_predict).mean()
loss_disc.backward()
opt_td.step()
text_d_val += real_predict.mean().item()
### 3.2 train the image-text discriminator
net_tdi.zero_grad()
real_predict = net_tdi(real_text_latent, img_feat_4, img_feat_16)
fake_predict = net_tdi(g_text_latent.detach(), img_feat_4, img_feat_16)
mismatch_predict = net_tdi(real_text_latent, img_feat_4[perm], img_feat_16[perm])
loss_disc = F.relu(1-real_predict).mean() + \
F.relu(1+fake_predict).mean() + \
F.relu(1+mismatch_predict).mean()
loss_disc.backward()
opt_tdi.step()
text_dt_val += real_predict.mean().item()
text_dt_mis_val += mismatch_predict.mean().item()
### 4. Logging
if (i + 1) % 2000 == 0 or i==0:
with torch.no_grad():
vutils.save_image(real_image.detach().add(1).mul(0.5), f'{log_folder}/sample/r_img_{str(i + 1).zfill(6)}.jpg')
real_texts = net_t_ae.generate(real_text_latent)
g_texts = net_t_ae.generate(g_text_latent)
f = open(f'{log_folder}/sample/g_real_txt_{str(i + 1).zfill(6)}.txt', 'w')
for cap in real_texts+g_texts:
f.write(cap+'\n')
f.close()
if (i+1) % 5000 == 0 or i==0:
torch.save({'tg':net_tg.state_dict(), 'td':net_td.state_dict(), 'tdi':net_tdi.state_dict()}, f'{log_folder}/checkpoint/image_to_text_memory_{str(i + 1).zfill(6)}_model.pth')
torch.save({'tg':opt_tg.state_dict(), 'td':opt_td.state_dict(), 'tdi':opt_tdi.state_dict()}, f'{log_folder}/checkpoint/image_to_text_memory_{str(i + 1).zfill(6)}_opt.pth')
interval = 100
if (i+1)%interval == 0:
state_msg = (f'txt_g_val: {text_g_val/(interval):.3f}; txt_d_val: {text_d_val/interval:.3f}; \n'
f'txt_gt_val: {text_gt_val/(interval):.3f}; txt_dt_val: {text_dt_val/interval:.3f}; txt_dt_mis: {text_dt_mis_val/interval:.3f} \n')
log_file = open(log_file_name, 'a+')
new_line = "%.5f,%.5f,%.5f\n"%\
(text_g_val/(interval), text_d_val/interval, text_dt_val/interval)
log_file.write(new_line)
log_file.close()
text_g_val = 0
text_d_val = 0
text_gt_val = 0
text_dt_val = 0
text_dt_mis_val = 0
print(state_msg)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Image Text Generation Together')
parser.add_argument('--path', type=str, default='../../../research3/CUB_birds/CUB_200_2011/images', help='path of specified dataset')
parser.add_argument('--lr', default=0.0002, type=float, help='learning rate')
parser.add_argument('--trial_name', default='trial_i2t_gan_with_pre-trained_sw', type=str, help='name of the trial')
parser.add_argument('--total_iter', default=300000, type=int, help='iterations')
parser.add_argument('--start_iter', default=0, type=int, help='start iterations')
parser.add_argument('--im_size', default=128, type=int, help='initial | """Analytically computes
E_N(mu_2,sigma_2^2) [ - log N(mu_1, sigma_1^2) ]
If mu_2, and sigma_2^2 are not provided, defaults to entropy.
"""
mu = params[:,:,0]
logsigma = params[:,:,1]
c = normalization.to(mu.device)
inv_sigma = torch.exp(-logsigma)
tmp = (sample - mu) * inv_sigma
return torch.mean(0.5 * (tmp * tmp + 2 * logsigma + c)) | identifier_body |
train_i2t_gan.py | (img_root='/media/bingchen/research3/CUB_birds/CUB_200_2011/images'):
img_meta_root = img_root
img_meta_root = img_meta_root.replace('images','birds_meta')
def loader(transform, batch_size=4):
data = CaptionImageDataset(img_root, img_meta_root, transform=transform)
data_loader = DataLoader(data, shuffle=True, batch_size=batch_size, num_workers=4)
return data_loader
return loader
def sample_data(dataloader, image_size=4, batch_size=4):
transform = transforms.Compose([
transforms.Resize( int(1.1 * image_size) ),
#transforms.CenterCrop( int(1.2 * image_size) ),
transforms.RandomCrop(image_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
loader = dataloader(transform, batch_size)
return loader
import numpy as np
normalization = torch.Tensor([np.log(2 * np.pi)])
def NLL(sample, params):
"""Analytically computes
E_N(mu_2,sigma_2^2) [ - log N(mu_1, sigma_1^2) ]
If mu_2, and sigma_2^2 are not provided, defaults to entropy.
"""
mu = params[:,:,0]
logsigma = params[:,:,1]
c = normalization.to(mu.device)
inv_sigma = torch.exp(-logsigma)
tmp = (sample - mu) * inv_sigma
return torch.mean(0.5 * (tmp * tmp + 2 * logsigma + c))
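# Hedged example (added; shapes inferred from how params is indexed above):
# params stacks (mu, log sigma) on its last axis, and with sample == mu and
# sigma == 1 every element contributes 0.5 * log(2 * pi) ~= 0.9189,
#   sample = torch.zeros(2, 3)
#   params = torch.zeros(2, 3, 2)  # mu = 0, log sigma = 0
#   assert torch.isclose(NLL(sample, params), 0.5 * normalization).all()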
def make_target(word_idcs):
target = torch.zeros(word_idcs.size(0), 2100).cuda()
for idx in range(word_idcs.shape[0]):
target[idx][word_idcs[idx]] = 1
return target
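# Illustrative sketch (added; assumes word_idcs holds integer word ids and a
# CUDA device, matching the .cuda() call above): each row becomes a multi-hot
# bag-of-words vector over the 2100-entry vocabulary,
#   word_idcs = torch.tensor([[3, 7], [1, 2]])
#   t = make_target(word_idcs)  # t[0] has ones at {3, 7}, t[1] at {1, 2}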
from random import shuffle
def true_randperm(size, device=torch.device("cuda:0")):
def unmatched_randperm(size):
l1 = [i for i in range(size)]
l2 = []
for j in range(size):
deleted = False
if j in l1:
deleted = True
del l1[l1.index(j)]
shuffle(l1)
if len(l1) == 0:
return 0, False
l2.append(l1[0])
del l1[0]
if deleted:
l1.append(j)
return l2, True
flag = False
while not flag:
l, flag = unmatched_randperm(size)
return torch.LongTensor(l).to(device)
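# (Added note) true_randperm returns a derangement -- a random permutation with
# no fixed points -- so indexing image features with it guarantees every
# caption is paired with a genuinely mismatched image,
#   perm = true_randperm(8)
#   assert all(perm[i].item() != i for i in range(8))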
def train_image_gan_with_text(net_ig, net_id, opt_ig, opt_id, total_iter, loader, options):
text_g_val = 0
text_d_val = 0
text_dt_val = 0
text_gt_val = 0
text_dt_mis_val = 0
log_folder = options.trial_name
if not os.path.exists(log_folder):
os.mkdir(log_folder)
os.mkdir(log_folder+'/checkpoint')
os.mkdir(log_folder+'/sample')
log_file_name = os.path.join(log_folder, 'train_image_to_text_log.txt')
log_file = open(log_file_name, 'w')
    log_file.write('text_g, text_d, text_dt\n')
log_file.close()
copy('train_i2t_gan.py', log_folder+'/train_i2t_gan.py')
copy('models.py', log_folder+'/models.py')
data_loader = sample_data(loader, image_size=128, batch_size=options.batch_size)
dataset = iter(data_loader)
for i in tqdm(range(options.start_iter, options.total_iter)):
try:
real_image, bird_idx, bert_idx = next(dataset)
except (OSError, StopIteration):
dataset = iter(data_loader)
real_image, bird_idx, bert_idx = next(dataset)
### 1. load the data
b_size = real_image.shape[0]
real_image = real_image.cuda()
real_embs = net_t_ae.bert(bert_idx.cuda())[0]
real_text_latent = net_t_ae.encode(real_embs).detach()
bird_idx = bird_idx.long().cuda()
perm = true_randperm(b_size)
img_feat_16, img_feat_8, img_feat_4 = net_iae.encoder(real_image)
# 2. Train the Generators
if i==(options.total_iter//4) and options.checkpoint is None:
opt_tg.add_param_group({'params': chain(net_tg.word_attn_4.parameters(),
net_tg.word_attn_16.parameters(),
net_tg.sentence_attn_4.parameters(),
net_tg.sentence_attn_16.parameters(),
), 'lr': 0.1*args.lr})
net_tg.zero_grad()
noise = torch.randn(b_size, 128).cuda()
g_text_latent = net_tg(noise, img_feat_4, img_feat_16)
g_pred = net_td(g_text_latent)
g_pred_i = net_tdi(g_text_latent, img_feat_4, img_feat_16)
loss_g_latent = -g_pred.mean() - g_pred_i.mean()
loss_total = loss_g_latent
loss_total.backward()
opt_tg.step()
text_g_val += g_pred.mean().item()
text_gt_val += g_pred_i.mean().item()
### 3. Train the Discriminators
if i==(options.total_iter//4) and options.checkpoint is None:
opt_tdi.add_param_group({'params': chain(
net_tdi.sentence_attn_4.parameters(),
net_tdi.sentence_attn_16.parameters(),
), 'lr': 0.1*args.lr})
        ### 3.1 train the text-only discriminator
net_id.zero_grad()
real_predict = net_td(real_text_latent)
fake_predict = net_id(g_text_latent.detach())
loss_disc = F.relu(1-real_predict).mean() + F.relu(1+fake_predict).mean()
loss_disc.backward()
opt_td.step()
text_d_val += real_predict.mean().item()
### 3.2 train the image-text discriminator
net_tdi.zero_grad()
real_predict = net_tdi(real_text_latent, img_feat_4, img_feat_16)
fake_predict = net_tdi(g_text_latent.detach(), img_feat_4, img_feat_16)
mismatch_predict = net_tdi(real_text_latent, img_feat_4[perm], img_feat_16[perm])
loss_disc = F.relu(1-real_predict).mean() + \
F.relu(1+fake_predict).mean() + \
F.relu(1+mismatch_predict).mean()
loss_disc.backward()
opt_tdi.step()
text_dt_val += real_predict.mean().item()
text_dt_mis_val += mismatch_predict.mean().item()
### 4. Logging
if (i + 1) % 2000 == 0 or i==0:
with torch.no_grad():
vutils.save_image(real_image.detach().add(1).mul(0.5), f'{log_folder}/sample/r_img_{str(i + 1).zfill(6)}.jpg')
real_texts = net_t_ae.generate(real_text_latent)
g_texts = net_t_ae.generate(g_text_latent)
f = open(f'{log_folder}/sample/g_real_txt_{str(i + 1).zfill(6)}.txt', 'w')
for cap in real_texts+g_texts:
f.write(cap+'\n')
f.close()
if (i+1) % 5000 == 0 or i==0:
torch.save({'tg':net_tg.state_dict(), 'td':net_td.state_dict(), 'tdi':net_tdi.state_dict()}, f'{log_folder}/checkpoint/image_to_text_memory_{str(i + 1).zfill(6)}_model.pth')
torch.save({'tg':opt_tg.state_dict(), 'td':opt_td.state_dict(), 'tdi':opt_tdi.state_dict()}, f'{log_folder}/checkpoint/image_to_text_memory_{str(i + 1).zfill(6)}_opt.pth')
interval = 100
if (i+1)%interval == 0:
state_msg = (f'txt_g_val: {text_g_val/(interval):.3f}; txt_d_val: {text_d_val/interval:.3f}; \n'
f'txt_gt_val: {text_gt_val/(interval):.3f}; txt_dt_val: {text_dt_val/interval:.3f}; txt_dt_mis: {text_dt_mis_val/interval:.3f} \n')
log_file = open(log_file_name, 'a+')
new_line = "%.5f,%.5f,%.5f\n"%\
(text_g_val/(interval), text_d_val/interval, text_dt_val/interval)
log_file.write(new_line)
log_file.close()
text_g_val = 0
text_d_val | image_cap_loader | identifier_name |
|
train_i2t_gan.py | 16.parameters(),
net_tg.sentence_attn_4.parameters(),
net_tg.sentence_attn_16.parameters(),
), 'lr': 0.1*args.lr})
net_tg.zero_grad()
noise = torch.randn(b_size, 128).cuda()
g_text_latent = net_tg(noise, img_feat_4, img_feat_16)
g_pred = net_td(g_text_latent)
g_pred_i = net_tdi(g_text_latent, img_feat_4, img_feat_16)
loss_g_latent = -g_pred.mean() - g_pred_i.mean()
loss_total = loss_g_latent
loss_total.backward()
opt_tg.step()
text_g_val += g_pred.mean().item()
text_gt_val += g_pred_i.mean().item()
### 3. Train the Discriminators
if i==(options.total_iter//4) and options.checkpoint is None:
opt_tdi.add_param_group({'params': chain(
net_tdi.sentence_attn_4.parameters(),
net_tdi.sentence_attn_16.parameters(),
), 'lr': 0.1*args.lr})
        ### 3.1 train the text-only discriminator
net_id.zero_grad()
real_predict = net_td(real_text_latent)
fake_predict = net_id(g_text_latent.detach())
loss_disc = F.relu(1-real_predict).mean() + F.relu(1+fake_predict).mean()
loss_disc.backward()
opt_td.step()
text_d_val += real_predict.mean().item()
### 3.2 train the image-text discriminator
net_tdi.zero_grad()
real_predict = net_tdi(real_text_latent, img_feat_4, img_feat_16)
fake_predict = net_tdi(g_text_latent.detach(), img_feat_4, img_feat_16)
mismatch_predict = net_tdi(real_text_latent, img_feat_4[perm], img_feat_16[perm])
loss_disc = F.relu(1-real_predict).mean() + \
F.relu(1+fake_predict).mean() + \
F.relu(1+mismatch_predict).mean()
loss_disc.backward()
opt_tdi.step()
text_dt_val += real_predict.mean().item()
text_dt_mis_val += mismatch_predict.mean().item()
### 4. Logging
if (i + 1) % 2000 == 0 or i==0:
with torch.no_grad():
vutils.save_image(real_image.detach().add(1).mul(0.5), f'{log_folder}/sample/r_img_{str(i + 1).zfill(6)}.jpg')
real_texts = net_t_ae.generate(real_text_latent)
g_texts = net_t_ae.generate(g_text_latent)
f = open(f'{log_folder}/sample/g_real_txt_{str(i + 1).zfill(6)}.txt', 'w')
for cap in real_texts+g_texts:
f.write(cap+'\n')
f.close()
if (i+1) % 5000 == 0 or i==0:
torch.save({'tg':net_tg.state_dict(), 'td':net_td.state_dict(), 'tdi':net_tdi.state_dict()}, f'{log_folder}/checkpoint/image_to_text_memory_{str(i + 1).zfill(6)}_model.pth')
torch.save({'tg':opt_tg.state_dict(), 'td':opt_td.state_dict(), 'tdi':opt_tdi.state_dict()}, f'{log_folder}/checkpoint/image_to_text_memory_{str(i + 1).zfill(6)}_opt.pth')
interval = 100
if (i+1)%interval == 0:
state_msg = (f'txt_g_val: {text_g_val/(interval):.3f}; txt_d_val: {text_d_val/interval:.3f}; \n'
f'txt_gt_val: {text_gt_val/(interval):.3f}; txt_dt_val: {text_dt_val/interval:.3f}; txt_dt_mis: {text_dt_mis_val/interval:.3f} \n')
log_file = open(log_file_name, 'a+')
new_line = "%.5f,%.5f,%.5f\n"%\
(text_g_val/(interval), text_d_val/interval, text_dt_val/interval)
log_file.write(new_line)
log_file.close()
text_g_val = 0
text_d_val = 0
text_gt_val = 0
text_dt_val = 0
text_dt_mis_val = 0
print(state_msg)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Image Text Generation Together')
parser.add_argument('--path', type=str, default='../../../research3/CUB_birds/CUB_200_2011/images', help='path of specified dataset')
parser.add_argument('--lr', default=0.0002, type=float, help='learning rate')
parser.add_argument('--trial_name', default='trial_i2t_gan_with_pre-trained_sw', type=str, help='name of the trial')
parser.add_argument('--total_iter', default=300000, type=int, help='iterations')
parser.add_argument('--start_iter', default=0, type=int, help='start iterations')
parser.add_argument('--im_size', default=128, type=int, help='initial image size')
parser.add_argument('--batch_size', default=8, type=int, help='initial image size')
parser.add_argument('--checkpoint', type=str, default=None, help='path to load pre-trained model')
parser.add_argument('--channel', type=int, default=128, help='channel number in models')
parser.add_argument('--ae_path', type=str, default=None, help='path to load pre-trained text Autoencoder model')
args = parser.parse_args()
img_meta_root = str(args.path).replace('images','birds_meta')
# creating Text model
pre_trained_path = './trial_it_attn/checkpoint/it_ae_160000_model.pth'
checkpoint = torch.load(pre_trained_path)
net_t_ae = Text_VAE(vocab_size=2098, channels=256, latent=128, meta_data_root=img_meta_root).cuda()
net_t_ae.load_state_dict(checkpoint['t'])
net_t_ae.eval()
for p in net_t_ae.parameters():
p.requires_grad = False
net_iae = ImageAE(channel=256).cuda()
net_iae.load_state_dict(checkpoint['i'])
net_iae.eval()
for p in net_iae.parameters():
p.requires_grad = False
net_tg = TextFromImageG()
net_tg.cuda()
net_tg.sentence_attn_4.load_state_dict(checkpoint['sa4'])
net_tg.sentence_attn_16.load_state_dict(checkpoint['sa16'])
net_tg.word_attn_4.load_state_dict(checkpoint['wa4'])
net_tg.word_attn_16.load_state_dict(checkpoint['wa16'])
net_td = Text_Latent_D()
net_td.cuda()
net_tdi = TextFromImageD()
net_tdi.cuda()
net_tdi.sentence_attn_4.load_state_dict(checkpoint['sa4'])
net_tdi.sentence_attn_16.load_state_dict(checkpoint['sa16'])
if args.checkpoint is not None:
checkpoint = torch.load(args.checkpoint)
net_tg.load_state_dict(checkpoint['tg'])
net_td.load_state_dict(checkpoint['td'])
net_tdi.load_state_dict(checkpoint['tdi'])
opt_tg = optim.Adam( chain( net_tg.text_values.parameters(),
net_tg.final.parameters(),
                            net_tg.sentence_receiver.parameters(),
), lr=args.lr, betas=(0.5, 0.99))
opt_td = optim.Adam( net_td.parameters(), lr=args.lr, betas=(0.5, 0.99))
opt_tdi = optim.Adam( chain(
net_tdi.text_values.parameters(), net_tdi.final.parameters()), lr=args.lr, betas=(0.5, 0.99))
if args.checkpoint is not None:
| opt_tg.add_param_group({'params': chain(net_tg.word_attn_4.parameters(),
net_tg.word_attn_16.parameters(),
net_tg.sentence_attn_4.parameters(),
net_tg.sentence_attn_16.parameters(),
), 'lr': 0.1*args.lr})
opt_tdi.add_param_group({'params': chain(
net_tdi.sentence_attn_4.parameters(),
net_tdi.sentence_attn_16.parameters(),
), 'lr': 0.1*args.lr})
checkpoint = torch.load(args.checkpoint.replace('model.pth', 'opt.pth'))
opt_tg.load_state_dict(checkpoint['tg'])
opt_td.load_state_dict(checkpoint['td'])
opt_tdi.load_state_dict(checkpoint['tdi']) | conditional_block |
|
train_i2t_gan.py | loader = dataloader(transform, batch_size)
return loader
import numpy as np
normalization = torch.Tensor([np.log(2 * np.pi)])
def NLL(sample, params):
"""Analytically computes
E_N(mu_2,sigma_2^2) [ - log N(mu_1, sigma_1^2) ]
If mu_2, and sigma_2^2 are not provided, defaults to entropy.
"""
mu = params[:,:,0]
logsigma = params[:,:,1]
c = normalization.to(mu.device)
inv_sigma = torch.exp(-logsigma)
tmp = (sample - mu) * inv_sigma
return torch.mean(0.5 * (tmp * tmp + 2 * logsigma + c))
def make_target(word_idcs):
target = torch.zeros(word_idcs.size(0), 2100).cuda()
for idx in range(word_idcs.shape[0]):
target[idx][word_idcs[idx]] = 1
return target
from random import shuffle
def true_randperm(size, device=torch.device("cuda:0")):
def unmatched_randperm(size):
l1 = [i for i in range(size)]
l2 = []
for j in range(size):
deleted = False
if j in l1:
deleted = True
del l1[l1.index(j)]
shuffle(l1)
if len(l1) == 0:
return 0, False
l2.append(l1[0])
del l1[0]
if deleted:
l1.append(j)
return l2, True
flag = False
while not flag:
l, flag = unmatched_randperm(size)
return torch.LongTensor(l).to(device)
def train_image_gan_with_text(net_ig, net_id, opt_ig, opt_id, total_iter, loader, options):
text_g_val = 0
text_d_val = 0
text_dt_val = 0
text_gt_val = 0
text_dt_mis_val = 0
log_folder = options.trial_name
if not os.path.exists(log_folder):
os.mkdir(log_folder)
os.mkdir(log_folder+'/checkpoint')
os.mkdir(log_folder+'/sample')
log_file_name = os.path.join(log_folder, 'train_image_to_text_log.txt')
log_file = open(log_file_name, 'w')
    log_file.write('text_g, text_d, text_dt\n')
log_file.close()
copy('train_i2t_gan.py', log_folder+'/train_i2t_gan.py')
copy('models.py', log_folder+'/models.py')
data_loader = sample_data(loader, image_size=128, batch_size=options.batch_size)
dataset = iter(data_loader)
for i in tqdm(range(options.start_iter, options.total_iter)):
try:
real_image, bird_idx, bert_idx = next(dataset)
except (OSError, StopIteration):
dataset = iter(data_loader)
real_image, bird_idx, bert_idx = next(dataset)
### 1. load the data
b_size = real_image.shape[0]
real_image = real_image.cuda()
real_embs = net_t_ae.bert(bert_idx.cuda())[0]
real_text_latent = net_t_ae.encode(real_embs).detach()
bird_idx = bird_idx.long().cuda()
perm = true_randperm(b_size)
img_feat_16, img_feat_8, img_feat_4 = net_iae.encoder(real_image)
# 2. Train the Generators
if i==(options.total_iter//4) and options.checkpoint is None:
opt_tg.add_param_group({'params': chain(net_tg.word_attn_4.parameters(),
net_tg.word_attn_16.parameters(),
net_tg.sentence_attn_4.parameters(),
net_tg.sentence_attn_16.parameters(),
), 'lr': 0.1*args.lr})
net_tg.zero_grad()
noise = torch.randn(b_size, 128).cuda()
g_text_latent = net_tg(noise, img_feat_4, img_feat_16)
g_pred = net_td(g_text_latent)
g_pred_i = net_tdi(g_text_latent, img_feat_4, img_feat_16)
loss_g_latent = -g_pred.mean() - g_pred_i.mean()
loss_total = loss_g_latent
loss_total.backward()
opt_tg.step()
text_g_val += g_pred.mean().item()
text_gt_val += g_pred_i.mean().item()
### 3. Train the Discriminators
if i==(options.total_iter//4) and options.checkpoint is None:
opt_tdi.add_param_group({'params': chain(
net_tdi.sentence_attn_4.parameters(),
net_tdi.sentence_attn_16.parameters(),
), 'lr': 0.1*args.lr})
        ### 3.1 train the text-only discriminator
net_id.zero_grad()
| loss_disc.backward()
opt_td.step()
text_d_val += real_predict.mean().item()
### 3.2 train the image-text discriminator
net_tdi.zero_grad()
real_predict = net_tdi(real_text_latent, img_feat_4, img_feat_16)
fake_predict = net_tdi(g_text_latent.detach(), img_feat_4, img_feat_16)
mismatch_predict = net_tdi(real_text_latent, img_feat_4[perm], img_feat_16[perm])
loss_disc = F.relu(1-real_predict).mean() + \
F.relu(1+fake_predict).mean() + \
F.relu(1+mismatch_predict).mean()
loss_disc.backward()
opt_tdi.step()
text_dt_val += real_predict.mean().item()
text_dt_mis_val += mismatch_predict.mean().item()
### 4. Logging
if (i + 1) % 2000 == 0 or i==0:
with torch.no_grad():
vutils.save_image(real_image.detach().add(1).mul(0.5), f'{log_folder}/sample/r_img_{str(i + 1).zfill(6)}.jpg')
real_texts = net_t_ae.generate(real_text_latent)
g_texts = net_t_ae.generate(g_text_latent)
f = open(f'{log_folder}/sample/g_real_txt_{str(i + 1).zfill(6)}.txt', 'w')
for cap in real_texts+g_texts:
f.write(cap+'\n')
f.close()
if (i+1) % 5000 == 0 or i==0:
torch.save({'tg':net_tg.state_dict(), 'td':net_td.state_dict(), 'tdi':net_tdi.state_dict()}, f'{log_folder}/checkpoint/image_to_text_memory_{str(i + 1).zfill(6)}_model.pth')
torch.save({'tg':opt_tg.state_dict(), 'td':opt_td.state_dict(), 'tdi':opt_tdi.state_dict()}, f'{log_folder}/checkpoint/image_to_text_memory_{str(i + 1).zfill(6)}_opt.pth')
interval = 100
if (i+1)%interval == 0:
state_msg = (f'txt_g_val: {text_g_val/(interval):.3f}; txt_d_val: {text_d_val/interval:.3f}; \n'
f'txt_gt_val: {text_gt_val/(interval):.3f}; txt_dt_val: {text_dt_val/interval:.3f}; txt_dt_mis: {text_dt_mis_val/interval:.3f} \n')
log_file = open(log_file_name, 'a+')
new_line = "%.5f,%.5f,%.5f\n"%\
(text_g_val/(interval), text_d_val/interval, text_dt_val/interval)
log_file.write(new_line)
log_file.close()
text_g_val = 0
text_d_val = 0
text_gt_val = 0
text_dt_val = 0
text_dt_mis_val = 0
print(state_msg)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Image Text Generation Together')
parser.add_argument('--path', type=str, default='../../../research3/CUB_birds/CUB_200_2011/images', help='path of specified dataset')
parser.add_argument('--lr', default=0.0002, type=float, help='learning rate')
parser.add_argument('--trial_name', default='trial_i2t_gan_with_pre-trained_sw', type=str, help='name of the trial')
parser.add_argument('--total_iter', default=300000, type=int, help='iterations')
parser.add_argument('--start_iter', default=0, type=int, help='start iterations')
parser.add_argument('--im_size', default=128, type=int, help='initial image | real_predict = net_td(real_text_latent)
fake_predict = net_id(g_text_latent.detach())
loss_disc = F.relu(1-real_predict).mean() + F.relu(1+fake_predict).mean() | random_line_split |
jquery.pagination.js | _per_page: 10,
num_display_entries: 10,
current_page: 0,
num_edge_entries: 0,
link_to: "#",
prev_text: "Prev",
next_text: "Next",
ellipse_text: "...",
jump:true,
jump_input_style:"pagjump_txt",
jump_button_style:"pagjump_btn",
prev_show_always: true,
next_show_always: true,
callback: function() { return false; }
}, opts || {});
return this.each(function() {
/**
* Calculate the maximum number of pages
*/
function numPages() {
return Math.ceil(maxentries / opts.items_per_page);
}
/**
* Calculate start and end point of pagination links depending on
* current_page and num_display_entries.
* @return {Array}
*/
function getInterval() {
var ne_half = Math.ceil(opts.num_display_entries / 2);
var np = numPages();
var upper_limit = np - opts.num_display_entries;
var start = current_page > ne_half ? Math.max(Math.min(current_page - ne_half, upper_limit), 0) : 0;
var end = current_page > ne_half ? Math.min(current_page + ne_half, np) : Math.min(opts.num_display_entries, np);
return [start, end];
}
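        /* Worked example (added): with 120 items at 10 per page (np = 12),
           num_display_entries = 10 (so ne_half = 5) and current_page = 6,
           start = max(min(6 - 5, 12 - 10), 0) = 1 and end = min(6 + 5, 12) = 11,
           so page links 2..11 are drawn around the current page. */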
/**
* This is the event handling function for the pagination links.
* @param {int} page_id The new page number
*/
function pageSelected(page_id, evt) {
current_page = page_id;
drawLinks();
var continuePropagation = opts.callback(page_id, panel);
if (!continuePropagation) {
if (evt.stopPropagation) {
evt.stopPropagation();
}
else {
evt.cancelBubble = true;
}
}
return continuePropagation;
}
/**
* This function inserts the pagination links into the container element
*/
function drawLinks() {
panel.empty();
var interval = getInterval();
var np = numPages();
// This helper function returns a handler function that calls pageSelected with the right page_id
var getClickHandler = function(page_id) {
return function(evt) { return pageSelected(page_id, evt); }
}
        // Helper function for generating a single link (or a span tag if it's the current page)
var appendItem = function(page_id, appendopts) {
page_id = page_id < 0 ? 0 : (page_id < np ? page_id : np - 1); // Normalize page id to sane value
appendopts = jQuery.extend({ text: page_id + 1, classes: "" }, appendopts || {});
if (page_id == current_page) {
var lnk = $("<span class='this_p'>" + (appendopts.text) + "</span>");
}
else {
var lnk = $("<a>" + (appendopts.text) + "</a>")
.bind("click", getClickHandler(page_id))
.attr('href', opts.link_to.replace(/__id__/, page_id));
}
if (appendopts.classes) { lnk.removeAttr('class'); lnk.addClass(appendopts.classes); }
panel.append(lnk);
}
// Generate "Previous"-Link
if (opts.prev_text && (current_page > 0 || opts.prev_show_always)) {
appendItem(current_page - 1, { text: opts.prev_text, classes: "disabled" });
}
// Generate starting points
if (interval[0] > 0 && opts.num_edge_entries > 0) {
var end = Math.min(opts.num_edge_entries, interval[0]);
for (var i = 0; i < end; i++) {
appendItem(i);
}
if (opts.num_edge_entries < interval[0] && opts.ellipse_text) {
jQuery("<span class='ellipse'>" + opts.ellipse_text + "</span>").appendTo(panel);
}
}
// Generate interval links
for (var i = interval[0]; i < interval[1]; i++) {
appendItem(i);
}
// Generate ending points
if (interval[1] < np && opts.num_edge_entries > 0) {
if (np - opts.num_edge_entries > interval[1] && opts.ellipse_text) {
jQuery("<span class='ellipse'>" + opts.ellipse_text + "</span>").appendTo(panel);
}
var begin = Math.max(np - opts.num_edge_entries, interval[1]);
for (var i = begin; i < np; i++) {
appendItem(i);
}
}
// Generate "Next"-Link
if (opts.next_text && (current_page < np - 1 || opts.next_show_always)) {
appendItem(current_page + 1, { text: opts.next_text, classes: "disabled" });
}
// page-jump box, added 2013-06-13
if(opts.jump){
jQuery("<span class='pagjump_box'>跳到<input class='"+opts.jump_input_style+"' type='text' /><button type='button' class='"+opts.jump_button_style+"'>确定</button></span>").appendTo(panel).delegate("button","click",function(e){
var page_id = jQuery(this).prev(".pagjump_txt").val();
if(page_id == ""){
return false;
}else if(page_id > np || page_id < 0){
alert("超出页码范围!");
return false;
};
pageSelected(page_id-1,e);
});
};
}
// Extract current_page from options
var current_page = opts.current_page;
// Create a sane value for maxentries and items_per_page
maxentries = (!maxentries || maxentries < 0) ? 1 : maxentries;
opts.items_per_page = (!opts.items_per_page || opts.items_per_page < 0) ? 1 : opts.items_per_page;
// Store DOM element for easy access from all inner functions
var panel = jQuery(this);
// Attach control functions to the DOM element
this.selectPage = function(page_id) { pageSelected(page_id); }
this.prevPage = function() {
if (current_page > 0) {
pageSelected(current_page - 1);
return true;
}
else {
return false | ge = function() {
if (current_page < numPages() - 1) {
pageSelected(current_page + 1);
return true;
}
else {
return false;
}
}
// When all initialisation is done, draw the links
drawLinks();
});
}
/*====
<div class="page_num">
<!--
<a href="#">Previous</a>
<a href="#" class="this_p">1</a>
<a href="#">2</a>
<a href="#">3</a>
<a href="#">4</a>
<a href="#">5</a>
<span class="ellipse">..</span>
<a href="#">12</a>
<a href="#">Next</a>
-->
</div>
<script type="text/javascript">
$(function(){
$(".page_num").pagination(150,{ //total number of records
items_per_page: 10, //items shown per page
num_display_entries: 3, //number of consecutive page links shown in the middle
current_page: 0, //current page
num_edge_entries: 1, //edge page links kept on each side
link_to: "#", //href template for page links
prev_text: "", //"previous" link text
next_text: "Next", //"next" link text
ellipse_text: "...", //placeholder text for the skipped pages
prev_show_always: true, //always show the "previous" link
next_show_always: true, //always show the "next" link
callback: function(page_id,panel) {
$.ajax({
type: "POST",
url: "json/复件 "+name+"_"+page_id+".json",
dataType: "json",
beforeSend:function(){
$("div.loading").remove();
$('<div class="loading"></div>').appendTo(a.parents(".popup_app_list").find(".app_devloper_box_m")).css({"position":"absolute","height":"100%","width":"100%","z-index":"100","background-image":"url(images/loading.gif)","background-position":"center","background-color":"rgba(0,0,0,0.2)","background-repeat":"no-repeat","left":"0","top":"0"})
oldtime = +new Date();
a.parents(".popup_app_list").find("table.d_applist tbody").remove();
},
error: function(){
console.log("Request failed!");
$("div.loading").css({"background-image":"none"}).append("<span>Failed to load data!</span>");
$("div.loading span").css({"padding":"120px 0 0 300px","position":"absolute","color":"#fff"});
setTimeout(function(){$("div.loading").remove();},2000);
return false;
},
success: function(json){
newtime = +new Date();
loadtime = newtime-oldtime<1000?2000:newtime-oldtime;
setTimeout(function(){
a.parents(".popup_app_list").find("table.d_applist tbody"). | ;
}
}
this.nextPa | conditional_block |
jquery.pagination.js | _per_page: 10,
num_display_entries: 10,
current_page: 0,
num_edge_entries: 0,
link_to: "#",
prev_text: "Prev",
next_text: "Next",
ellipse_text: "...",
jump:true,
jump_input_style:"pagjump_txt",
jump_button_style:"pagjump_btn",
prev_show_always: true,
next_show_always: true,
callback: function() { return false; }
}, opts || {});
return this.each(function() {
/**
* Calculate the maximum number of pages
*/
function numPages() {
return Math.ceil(maxentries / opts.items_per_page);
}
/**
* Calculate start and end point of pagination links depending on
* current_page and num_display_entries.
* @return {Array}
*/
function getInterval() {
var ne_half = Math.ceil(opts.num_display_entries / 2);
var np = numPages();
var upper_limit = np - opts.num_display_entries;
var start = current_page > ne_half ? Math.max(Math.min(current_page - ne_half, upper_limit), 0) : 0;
var end = current_page > ne_half ? Math.min(current_page + ne_half, np) : Math.min(opts.num_display_entries, np);
return [start, end];
}
/**
* This is the event handling function for the pagination links.
* @param {int} page_id The new page number
*/
function pageSelected(page_id, evt) {
current_page = page_id;
drawLinks();
var continuePropagation = opts.callback(page_id, panel);
if (!continuePropagation && evt) { // evt is undefined when invoked programmatically (e.g. via selectPage)
if (evt.stopPropagation) {
evt.stopPropagation();
}
else {
evt.cancelBubble = true;
}
}
return continuePropagation;
}
/**
* This function inserts the pagination links into the container element
*/
function drawLinks() {
panel.empty();
var interval = getInterval();
var np = numPages();
// This helper function returns a handler function that calls pageSelected with the right page_id
var getClickHandler = function(page_id) {
return function(evt) { return pageSelected(page_id, evt); }
}
// Helper function for generating a single link (or a span tag if it's the current page)
var appendItem = function(page_id, appendopts) {
page_id = page_id < 0 ? 0 : (page_id < np ? page_id : np - 1); // Normalize page id to sane value
appendopts = jQuery.extend({ text: page_id + 1, classes: "" }, appendopts || {});
if (page_id == current_page) {
var lnk = $("<span class='this_p'>" + (appendopts.text) + "</span>");
}
else {
var lnk = $("<a>" + (appendopts.text) + "</a>")
.bind("click", getClickHandler(page_id))
.attr('href', opts.link_to.replace(/__id__/, page_id));
}
if (appendopts.classes) { lnk.removeAttr('class'); lnk.addClass(appendopts.classes); }
panel.append(lnk);
}
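// Note (added): appendItem clamps out-of-range ids, renders the current page as a
// non-clickable <span class='this_p'>, and substitutes __id__ in link_to to build
// real hrefs for the other pages.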
// Generate "Previous"-Link
if (opts.prev_text && (current_page > 0 || opts.prev_show_always)) {
appendItem(current_page - 1, { text: opts.prev_text, classes: "disabled" });
}
// Generate starting points
if (interval[0] > 0 && opts.num_edge_entries > 0) {
var end = Math.min(opts.num_edge_entries, interval[0]);
for (var i = 0; i < end; i++) {
appendItem(i);
}
if (opts.num_edge_entries < interval[0] && opts.ellipse_text) {
jQuery("<span class='ellipse'>" + opts.ellipse_text + "</span>").appendTo(panel);
}
}
// Generate interval links
for (var i = interval[0]; i < interval[1]; i++) {
appendItem(i);
}
// Generate ending points
if (interval[1] < np && opts.num_edge_entries > 0) {
if (np - opts.num_edge_entries > interval[1] && opts.ellipse_text) {
jQuery("<span class='ellipse'>" + opts.ellipse_text + "</span>").appendTo(panel);
}
var begin = Math.max(np - opts.num_edge_entries, interval[1]);
for (var i = begin; i < np; i++) {
appendItem(i);
}
}
// Generate "Next"-Link
if (opts.next_text && (current_page < np - 1 || opts.next_show_always)) {
appendItem(current_page + 1, { text: opts.next_text, classes: "disabled" });
}
// page-jump box, added 2013-06-13
if(opts.jump){
jQuery("<span class='pagjump_box'>跳到<input class='"+opts.jump_input_style+"' type='text' /><button type='button' class='"+opts.jump_button_style+"'>确定</button></span>").appendTo(panel).delegate("button","click",function(e){
var page_id = jQuery(this).prev(".pagjump_txt").val();
if(page_id == ""){
return false;
}else if(page_id > np || page_id < 0){
alert("超出页码范围!");
return false;
}; | });
};
}
// Extract current_page from options
var current_page = opts.current_page;
// Create a sane value for maxentries and items_per_page
maxentries = (!maxentries || maxentries < 0) ? 1 : maxentries;
opts.items_per_page = (!opts.items_per_page || opts.items_per_page < 0) ? 1 : opts.items_per_page;
// Store DOM element for easy access from all inner functions
var panel = jQuery(this);
// Attach control functions to the DOM element
this.selectPage = function(page_id) { pageSelected(page_id); }
this.prevPage = function() {
if (current_page > 0) {
pageSelected(current_page - 1);
return true;
}
else {
return false;
}
}
this.nextPage = function() {
if (current_page < numPages() - 1) {
pageSelected(current_page + 1);
return true;
}
else {
return false;
}
}
// When all initialisation is done, draw the links
drawLinks();
});
}
/*====
<div class="page_num">
<!--
<a href="#">Previous</a>
<a href="#" class="this_p">1</a>
<a href="#">2</a>
<a href="#">3</a>
<a href="#">4</a>
<a href="#">5</a>
<span class="ellipse">..</span>
<a href="#">12</a>
<a href="#">Next</a>
-->
</div>
<script type="text/javascript">
$(function(){
$(".page_num").pagination(150,{ //total number of records
items_per_page: 10, //items shown per page
num_display_entries: 3, //number of consecutive page links shown in the middle
current_page: 0, //current page
num_edge_entries: 1, //edge page links kept on each side
link_to: "#", //href template for page links
prev_text: "", //"previous" link text
next_text: "Next", //"next" link text
ellipse_text: "...", //placeholder text for the skipped pages
prev_show_always: true, //always show the "previous" link
next_show_always: true, //always show the "next" link
callback: function(page_id,panel) {
$.ajax({
type: "POST",
url: "json/复件 "+name+"_"+page_id+".json",
dataType: "json",
beforeSend:function(){
$("div.loading").remove();
$('<div class="loading"></div>').appendTo(a.parents(".popup_app_list").find(".app_devloper_box_m")).css({"position":"absolute","height":"100%","width":"100%","z-index":"100","background-image":"url(images/loading.gif)","background-position":"center","background-color":"rgba(0,0,0,0.2)","background-repeat":"no-repeat","left":"0","top":"0"})
oldtime = +new Date();
a.parents(".popup_app_list").find("table.d_applist tbody").remove();
},
error: function(){
console.log("Request failed!");
$("div.loading").css({"background-image":"none"}).append("<span>Failed to load data!</span>");
$("div.loading span").css({"padding":"120px 0 0 300px","position":"absolute","color":"#fff"});
setTimeout(function(){$("div.loading").remove();},2000);
return false;
},
success: function(json){
newtime = +new Date();
loadtime = newtime-oldtime<1000?2000:newtime-oldtime;
setTimeout(function(){
a.parents(".popup_app_list").find("table.d_applist tbody").remove | pageSelected(page_id-1,e); | random_line_split |
jquery.pagination.js | _per_page: 10,
num_display_entries: 10,
current_page: 0,
num_edge_entries: 0,
link_to: "#",
prev_text: "Prev",
next_text: "Next",
ellipse_text: "...",
jump:true,
jump_input_style:"pagjump_txt",
jump_button_style:"pagjump_btn",
prev_show_always: true,
next_show_always: true,
callback: function() { return false; }
}, opts || {});
return this.each(function() {
/**
* Calculate the maximum number of pages
*/
function numPages() {
return Math.ceil(maxentries / opts.items_per_page);
}
/**
* Calculate start and end point of pagination links depending on
* current_page and num_display_entries.
* @return {Array}
*/
function getInterval() |
/**
* This is the event handling function for the pagination links.
* @param {int} page_id The new page number
*/
function pageSelected(page_id, evt) {
current_page = page_id;
drawLinks();
var continuePropagation = opts.callback(page_id, panel);
if (!continuePropagation && evt) { // evt is undefined when invoked programmatically (e.g. via selectPage)
if (evt.stopPropagation) {
evt.stopPropagation();
}
else {
evt.cancelBubble = true;
}
}
return continuePropagation;
}
/**
* This function inserts the pagination links into the container element
*/
function drawLinks() {
panel.empty();
var interval = getInterval();
var np = numPages();
// This helper function returns a handler function that calls pageSelected with the right page_id
var getClickHandler = function(page_id) {
return function(evt) { return pageSelected(page_id, evt); }
}
// Helper function for generating a single link (or a span tag if it's the current page)
var appendItem = function(page_id, appendopts) {
page_id = page_id < 0 ? 0 : (page_id < np ? page_id : np - 1); // Normalize page id to sane value
appendopts = jQuery.extend({ text: page_id + 1, classes: "" }, appendopts || {});
if (page_id == current_page) {
var lnk = $("<span class='this_p'>" + (appendopts.text) + "</span>");
}
else {
var lnk = $("<a>" + (appendopts.text) + "</a>")
.bind("click", getClickHandler(page_id))
.attr('href', opts.link_to.replace(/__id__/, page_id));
}
if (appendopts.classes) { lnk.removeAttr('class'); lnk.addClass(appendopts.classes); }
panel.append(lnk);
}
// Generate "Previous"-Link
if (opts.prev_text && (current_page > 0 || opts.prev_show_always)) {
appendItem(current_page - 1, { text: opts.prev_text, classes: "disabled" });
}
// Generate starting points
if (interval[0] > 0 && opts.num_edge_entries > 0) {
var end = Math.min(opts.num_edge_entries, interval[0]);
for (var i = 0; i < end; i++) {
appendItem(i);
}
if (opts.num_edge_entries < interval[0] && opts.ellipse_text) {
jQuery("<span class='ellipse'>" + opts.ellipse_text + "</span>").appendTo(panel);
}
}
// Generate interval links
for (var i = interval[0]; i < interval[1]; i++) {
appendItem(i);
}
// Generate ending points
if (interval[1] < np && opts.num_edge_entries > 0) {
if (np - opts.num_edge_entries > interval[1] && opts.ellipse_text) {
jQuery("<span class='ellipse'>" + opts.ellipse_text + "</span>").appendTo(panel);
}
var begin = Math.max(np - opts.num_edge_entries, interval[1]);
for (var i = begin; i < np; i++) {
appendItem(i);
}
}
// Generate "Next"-Link
if (opts.next_text && (current_page < np - 1 || opts.next_show_always)) {
appendItem(current_page + 1, { text: opts.next_text, classes: "disabled" });
}
// page-jump box, added 2013-06-13
if(opts.jump){
jQuery("<span class='pagjump_box'>跳到<input class='"+opts.jump_input_style+"' type='text' /><button type='button' class='"+opts.jump_button_style+"'>确定</button></span>").appendTo(panel).delegate("button","click",function(e){
var page_id = jQuery(this).prev(".pagjump_txt").val();
if(page_id == ""){
return false;
}else if(page_id > np || page_id < 0){
alert("超出页码范围!");
return false;
};
pageSelected(page_id-1,e);
});
};
}
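// Note (added): drawLinks is re-run on every page change; rebuilding the whole
// panel keeps the state handling trivial at the cost of a little extra DOM churn.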
// Extract current_page from options
var current_page = opts.current_page;
// Create a sane value for maxentries and items_per_page
maxentries = (!maxentries || maxentries < 0) ? 1 : maxentries;
opts.items_per_page = (!opts.items_per_page || opts.items_per_page < 0) ? 1 : opts.items_per_page;
// Store DOM element for easy access from all inner functions
var panel = jQuery(this);
// Attach control functions to the DOM element
this.selectPage = function(page_id) { pageSelected(page_id); }
this.prevPage = function() {
if (current_page > 0) {
pageSelected(current_page - 1);
return true;
}
else {
return false;
}
}
this.nextPage = function() {
if (current_page < numPages() - 1) {
pageSelected(current_page + 1);
return true;
}
else {
return false;
}
}
// When all initialisation is done, draw the links
drawLinks();
});
}
/*====
<div class="page_num">
<!--
<a href="#">Previous</a>
<a href="#" class="this_p">1</a>
<a href="#">2</a>
<a href="#">3</a>
<a href="#">4</a>
<a href="#">5</a>
<span class="ellipse">..</span>
<a href="#">12</a>
<a href="#">Next</a>
-->
</div>
<script type="text/javascript">
$(function(){
$(".page_num").pagination(150,{ //total number of records
items_per_page: 10, //items shown per page
num_display_entries: 3, //number of consecutive page links shown in the middle
current_page: 0, //current page
num_edge_entries: 1, //edge page links kept on each side
link_to: "#", //href template for page links
prev_text: "", //"previous" link text
next_text: "Next", //"next" link text
ellipse_text: "...", //placeholder text for the skipped pages
prev_show_always: true, //always show the "previous" link
next_show_always: true, //always show the "next" link
callback: function(page_id,panel) {
$.ajax({
type: "POST",
url: "json/复件 "+name+"_"+page_id+".json",
dataType: "json",
beforeSend:function(){
$("div.loading").remove();
$('<div class="loading"></div>').appendTo(a.parents(".popup_app_list").find(".app_devloper_box_m")).css({"position":"absolute","height":"100%","width":"100%","z-index":"100","background-image":"url(images/loading.gif)","background-position":"center","background-color":"rgba(0,0,0,0.2)","background-repeat":"no-repeat","left":"0","top":"0"})
oldtime = +new Date();
a.parents(".popup_app_list").find("table.d_applist tbody").remove();
},
error: function(){
console.log("Request failed!");
$("div.loading").css({"background-image":"none"}).append("<span>Failed to load data!</span>");
$("div.loading span").css({"padding":"120px 0 0 300px","position":"absolute","color":"#fff"});
setTimeout(function(){$("div.loading").remove();},2000);
return false;
},
success: function(json){
newtime = +new Date();
loadtime = newtime-oldtime<1000?2000:newtime-oldtime;
setTimeout(function(){
a.parents(".popup_app_list").find("table.d_applist tbody | {
var ne_half = Math.ceil(opts.num_display_entries / 2);
var np = numPages();
var upper_limit = np - opts.num_display_entries;
var start = current_page > ne_half ? Math.max(Math.min(current_page - ne_half, upper_limit), 0) : 0;
var end = current_page > ne_half ? Math.min(current_page + ne_half, np) : Math.min(opts.num_display_entries, np);
return [start, end];
} | identifier_body |
jquery.pagination.js | _per_page: 10,
num_display_entries: 10,
current_page: 0,
num_edge_entries: 0,
link_to: "#",
prev_text: "Prev",
next_text: "Next",
ellipse_text: "...",
jump:true,
jump_input_style:"pagjump_txt",
jump_button_style:"pagjump_btn",
prev_show_always: true,
next_show_always: true,
callback: function() { return false; }
}, opts || {});
return this.each(function() {
/**
* Calculate the maximum number of pages
*/
function | () {
return Math.ceil(maxentries / opts.items_per_page);
}
/**
* Calculate start and end point of pagination links depending on
* current_page and num_display_entries.
* @return {Array}
*/
function getInterval() {
var ne_half = Math.ceil(opts.num_display_entries / 2);
var np = numPages();
var upper_limit = np - opts.num_display_entries;
var start = current_page > ne_half ? Math.max(Math.min(current_page - ne_half, upper_limit), 0) : 0;
var end = current_page > ne_half ? Math.min(current_page + ne_half, np) : Math.min(opts.num_display_entries, np);
return [start, end];
}
/**
* This is the event handling function for the pagination links.
* @param {int} page_id The new page number
*/
function pageSelected(page_id, evt) {
current_page = page_id;
drawLinks();
var continuePropagation = opts.callback(page_id, panel);
if (!continuePropagation && evt) { // evt is undefined when invoked programmatically (e.g. via selectPage)
if (evt.stopPropagation) {
evt.stopPropagation();
}
else {
evt.cancelBubble = true;
}
}
return continuePropagation;
}
/**
* This function inserts the pagination links into the container element
*/
function drawLinks() {
panel.empty();
var interval = getInterval();
var np = numPages();
// This helper function returns a handler function that calls pageSelected with the right page_id
var getClickHandler = function(page_id) {
return function(evt) { return pageSelected(page_id, evt); }
}
// Helper function for generating a single link (or a span tag if it's the current page)
var appendItem = function(page_id, appendopts) {
page_id = page_id < 0 ? 0 : (page_id < np ? page_id : np - 1); // Normalize page id to sane value
appendopts = jQuery.extend({ text: page_id + 1, classes: "" }, appendopts || {});
if (page_id == current_page) {
var lnk = $("<span class='this_p'>" + (appendopts.text) + "</span>");
}
else {
var lnk = $("<a>" + (appendopts.text) + "</a>")
.bind("click", getClickHandler(page_id))
.attr('href', opts.link_to.replace(/__id__/, page_id));
}
if (appendopts.classes) { lnk.removeAttr('class'); lnk.addClass(appendopts.classes); }
panel.append(lnk);
}
// Generate "Previous"-Link
if (opts.prev_text && (current_page > 0 || opts.prev_show_always)) {
appendItem(current_page - 1, { text: opts.prev_text, classes: "disabled" });
}
// Generate starting points
if (interval[0] > 0 && opts.num_edge_entries > 0) {
var end = Math.min(opts.num_edge_entries, interval[0]);
for (var i = 0; i < end; i++) {
appendItem(i);
}
if (opts.num_edge_entries < interval[0] && opts.ellipse_text) {
jQuery("<span class='ellipse'>" + opts.ellipse_text + "</span>").appendTo(panel);
}
}
// Generate interval links
for (var i = interval[0]; i < interval[1]; i++) {
appendItem(i);
}
// Generate ending points
if (interval[1] < np && opts.num_edge_entries > 0) {
if (np - opts.num_edge_entries > interval[1] && opts.ellipse_text) {
jQuery("<span class='ellipse'>" + opts.ellipse_text + "</span>").appendTo(panel);
}
var begin = Math.max(np - opts.num_edge_entries, interval[1]);
for (var i = begin; i < np; i++) {
appendItem(i);
}
}
// Generate "Next"-Link
if (opts.next_text && (current_page < np - 1 || opts.next_show_always)) {
appendItem(current_page + 1, { text: opts.next_text, classes: "disabled" });
}
// page-jump box, added 2013-06-13
if(opts.jump){
jQuery("<span class='pagjump_box'>跳到<input class='"+opts.jump_input_style+"' type='text' /><button type='button' class='"+opts.jump_button_style+"'>确定</button></span>").appendTo(panel).delegate("button","click",function(e){
var page_id = jQuery(this).prev(".pagjump_txt").val();
if(page_id == ""){
return false;
}else if(page_id > np || page_id < 0){
alert("超出页码范围!");
return false;
};
pageSelected(page_id-1,e);
});
};
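// Note (added): the jump box validates the typed value against np before delegating
// to pageSelected; the input arrives as a 1-based string, hence page_id-1.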
}
// Extract current_page from options
var current_page = opts.current_page;
// Create a sane value for maxentries and items_per_page
maxentries = (!maxentries || maxentries < 0) ? 1 : maxentries;
opts.items_per_page = (!opts.items_per_page || opts.items_per_page < 0) ? 1 : opts.items_per_page;
// Store DOM element for easy access from all inner functions
var panel = jQuery(this);
// Attach control functions to the DOM element
this.selectPage = function(page_id) { pageSelected(page_id); }
this.prevPage = function() {
if (current_page > 0) {
pageSelected(current_page - 1);
return true;
}
else {
return false;
}
}
this.nextPage = function() {
if (current_page < numPages() - 1) {
pageSelected(current_page + 1);
return true;
}
else {
return false;
}
}
// When all initialisation is done, draw the links
drawLinks();
});
}
/*====
<div class="page_num">
<!--
<a href="#">Previous</a>
<a href="#" class="this_p">1</a>
<a href="#">2</a>
<a href="#">3</a>
<a href="#">4</a>
<a href="#">5</a>
<span class="ellipse">..</span>
<a href="#">12</a>
<a href="#">Next</a>
-->
</div>
<script type="text/javascript">
$(function(){
$(".page_num").pagination(150,{ //total number of records
items_per_page: 10, //items shown per page
num_display_entries: 3, //number of consecutive page links shown in the middle
current_page: 0, //current page
num_edge_entries: 1, //edge page links kept on each side
link_to: "#", //href template for page links
prev_text: "", //"previous" link text
next_text: "Next", //"next" link text
ellipse_text: "...", //placeholder text for the skipped pages
prev_show_always: true, //always show the "previous" link
next_show_always: true, //always show the "next" link
callback: function(page_id,panel) {
$.ajax({
type: "POST",
url: "json/复件 "+name+"_"+page_id+".json",
dataType: "json",
beforeSend:function(){
$("div.loading").remove();
$('<div class="loading"></div>').appendTo(a.parents(".popup_app_list").find(".app_devloper_box_m")).css({"position":"absolute","height":"100%","width":"100%","z-index":"100","background-image":"url(images/loading.gif)","background-position":"center","background-color":"rgba(0,0,0,0.2)","background-repeat":"no-repeat","left":"0","top":"0"})
oldtime = +new Date();
a.parents(".popup_app_list").find("table.d_applist tbody").remove();
},
error: function(){
console.log("Request failed!");
$("div.loading").css({"background-image":"none"}).append("<span>Failed to load data!</span>");
$("div.loading span").css({"padding":"120px 0 0 300px","position":"absolute","color":"#fff"});
setTimeout(function(){$("div.loading").remove();},2000);
return false;
},
success: function(json){
newtime = +new Date();
loadtime = newtime-oldtime<1000?2000:newtime-oldtime;
setTimeout(function(){
a.parents(".popup_app_list").find("table.d_applist tbody"). | numPages | identifier_name |
tag_processor.go | 2 {
if number < (1 << 4) {
return 1
} else if number < (1 << 11) {
return 2
} else if number < (1 << 18) {
return 3
} else if number < (1 << 25) {
return 4
} else {
return 5
}
}
/*
* Return the number of bytes required to store a variable-length unsigned
* 32-bit integer in base-128 varint encoding.
*/
func UInt32Size(v uint32) uint32 {
if v < (1 << 7) {
return 1
} else if v < (1 << 14) {
return 2
} else if v < (1 << 21) {
return 3
} else if v < (1 << 28) {
return 4
} else {
return 5
}
}
/*
* Return the number of bytes required to store a variable-length signed 32-bit
* integer in base-128 varint encoding.
*
*/
func Int32Size(v int32) uint32 {
if v < 0 {
return 10
} else if v < (1 << 7) {
return 1
} else if v < (1 << 14) {
return 2
} else if v < (1 << 21) {
return 3
} else if v < (1 << 28) {
return 4
} else {
return 5
}
}
/*
* Return the ZigZag-encoded 32-bit unsigned integer form of a 32-bit signed
* integer.
*/
func zigzag32(v int32) uint32 {
if v < 0 {
return ((uint32)(-v))*2 - 1
} else {
return uint32(v * 2)
}
}
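/*
 * Sketch added for illustration (zigzag32 is unexported, so this assumes it
 * lives in the same package): the encoding interleaves signs so small
 * magnitudes stay small on the wire.
 */
func exampleZigzag32() []uint32 {
	in := []int32{0, -1, 1, -2, 2} // maps to 0, 1, 2, 3, 4
	out := make([]uint32, 0, len(in))
	for _, v := range in {
		out = append(out, zigzag32(v))
	}
	return out
}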
/*
* Return the number of bytes required to store a signed 32-bit integer,
* converted to an unsigned 32-bit integer with ZigZag encoding, using base-128
* varint encoding.
*/
func sInt32Size(v int32) uint32 {
return UInt32Size(zigzag32(v))
}
/*
* Return the number of bytes required to store a 64-bit unsigned integer in
* base-128 varint encoding.
*/
func uint64Size(v uint64) uint32 {
var upper_v uint32 = uint32(v >> 32)
if upper_v == 0 {
return UInt32Size(uint32(v))
} else if upper_v < (1 << 3) {
return 5
} else if upper_v < (1 << 10) {
return 6
} else if upper_v < (1 << 17) {
return 7
} else if upper_v < (1 << 24) {
return 8
} else if upper_v < (1 << 31) {
return 9
} else {
return 10
}
}
/*
* Return the ZigZag-encoded 64-bit unsigned integer form of a 64-bit signed
* integer.
*/
func zigzag64(v int64) uint64 {
if v < 0 {
return uint64(-v)*2 - 1
} else {
return uint64(v) * 2
}
}
/*
* Return the number of bytes required to store a signed 64-bit integer.
*/
func | (v int64) uint32 {
return uint64Size(zigzag64(v))
}
/*
* Pack an unsigned 32-bit integer in base-128 varint encoding and return the
* number of bytes written, which must be 5 or less.
*/
func Uint32Pack(value uint32, buf []byte) ([]byte, uint32) {
var rv uint32 = 0
if value >= 0x80 {
buf = append(buf, byte(value|0x80))
value = value >> 7
rv++
if value >= 0x80 {
buf = append(buf, byte(value|0x80))
value = value >> 7
rv++ // this increment was missing, undercounting 3- to 5-byte varints
if value >= 0x80 {
buf = append(buf, byte(value|0x80))
value = value >> 7
rv++
if value >= 0x80 {
buf = append(buf, byte(value|0x80))
value = value >> 7
rv++
}
}
}
}
buf = append(buf, byte(value))
rv++
return buf, rv
}
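/*
 * Round-trip sketch added for illustration: 300 packs to the two-byte varint
 * 0xAC 0x02, and ParseUint32 (defined later in this file) decodes it back.
 */
func exampleUint32RoundTrip() uint32 {
	buf, n := Uint32Pack(300, nil) // buf == []byte{0xAC, 0x02}, n == 2
	return ParseUint32(n, buf)     // == 300
}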
/*
* Pack a signed 32-bit integer and return the number of bytes written.
* Negative numbers are encoded as two's complement 64-bit integers.
*/
func Int32Pack(value int32, buf []byte) ([]byte, uint32) {
if value < 0 {
buf = append(buf, uint8(value)|0x80,
uint8(value>>7)|0x80,
uint8(value>>14)|0x80,
uint8(value>>21)|0x80,
uint8(value>>28)|0x80,
0xFF, 0xFF, 0xFF, 0xFF,
0x01)
return buf, 10
} else {
return Uint32Pack(uint32(value), buf)
}
}
/*
* Pack a signed 32-bit integer using ZigZag encoding and return the number of
* bytes written.
*/
func Sint32Pack(value int32, buf []byte) ([]byte, uint32) {
return Uint32Pack(zigzag32(value), buf)
}
/*
* Pack a 64-bit unsigned integer using base-128 varint encoding and return the
* number of bytes written.
*/
func Uint64Pack(value uint64, out []byte) ([]byte, uint32) {
hi := uint32(value >> 32)
lo := uint32(value)
var rv uint32
if hi == 0 {
return Uint32Pack(uint32(lo), out)
}
out = append(out, uint8(lo)|0x80,
uint8(lo>>7)|0x80,
uint8(lo>>14)|0x80,
uint8(lo>>21)|0x80)
if hi < 8 {
out = append(out, uint8(hi<<4)|uint8(lo>>28))
return out, 5
} else {
out = append(out, uint8(hi&7<<4)|uint8(lo>>28)|0x80)
hi = hi >> 3
}
rv = 5
for hi >= 128 {
out = append(out, uint8(hi|0x80))
hi >>= 7
rv++
}
out = append(out, uint8(hi))
rv++
return out, rv
}
/*
* Pack a 64-bit signed integer in ZigZag encoding and return the number of
* bytes written.
*/
func Sint64Pack(value int64, out []byte) ([]byte, uint32) {
return Uint64Pack(zigzag64(value), out)
}
/*
* Pack a 32-bit quantity in little-endian byte order. Used for protobuf wire
* types fixed32, sfixed32, float.
*/
func Fixed32Pack(value uint32, out []byte) ([]byte, uint32) {
out = append(out, uint8(value),
uint8(value>>8),
uint8(value>>16),
uint8(value>>24))
return out, 4
}
/*
* Pack a 64-bit quantity in little-endian byte order. Used for protobuf wire
* types fixed64, sfixed64, double.
*/
func Fixed64Pack(value uint64, out []byte) ([]byte, uint32) {
out, _ = Fixed32Pack(uint32(value), out)
out, _ = Fixed32Pack(uint32(value>>32), out)
return out, 8
}
/*
* Pack a boolean value as an integer and return the number of bytes written.
*/
func BooleanPack(value bool, out []byte) ([]byte, uint32) {
var b byte
if value {
b = 1
} else {
b = 0
}
out = append(out, b)
return out, 1
}
/*
* Pack a string and return the number of bytes written. The string is encoded
* as a varint length prefix followed by its raw bytes.
*/
func StringPack(str string, out []byte) ([]byte, uint32) {
if str == "" {
out = append(out, 0)
return out, 1
} else {
var length uint32 = uint32(len(str))
var rv uint32
out, rv = Uint32Pack(length, out)
out = append(out, str...) // copy raw bytes; ranging runes and truncating to byte would mangle multi-byte UTF-8
return out, rv + length
| sint64Size | identifier_name |
tag_processor.go | }
out = append(out, uint8(lo)|0x80,
uint8(lo>>7)|0x80,
uint8(lo>>14)|0x80,
uint8(lo>>21)|0x80)
if hi < 8 {
out = append(out, uint8(hi<<4)|uint8(lo>>28))
return out, 5
} else {
out = append(out, uint8(hi&7<<4)|uint8(lo>>28)|0x80)
hi = hi >> 3
}
rv = 5
for hi >= 128 {
out = append(out, uint8(hi|0x80))
hi >>= 7
rv++
}
out = append(out, uint8(hi))
rv++
return out, rv
}
/*
* Pack a 64-bit signed integer in ZigZag encoding and return the number of
* bytes written.
*/
func Sint64Pack(value int64, out []byte) ([]byte, uint32) {
return Uint64Pack(zigzag64(value), out)
}
/*
* Pack a 32-bit quantity in little-endian byte order. Used for protobuf wire
* types fixed32, sfixed32, float.
*/
func Fixed32Pack(value uint32, out []byte) ([]byte, uint32) {
out = append(out, uint8(value),
uint8(value>>8),
uint8(value>>16),
uint8(value>>24))
return out, 4
}
/*
* Pack a 64-bit quantity in little-endian byte order. Used for protobuf wire
* types fixed64, sfixed64, double.
*/
func Fixed64Pack(value uint64, out []byte) ([]byte, uint32) {
out, _ = Fixed32Pack(uint32(value), out)
out, _ = Fixed32Pack(uint32(value>>32), out)
return out, 8
}
/*
* Pack a boolean value as an integer and return the number of bytes written.
*/
func BooleanPack(value bool, out []byte) ([]byte, uint32) {
var b byte
if value {
b = 1
} else {
b = 0
}
out = append(out, b)
return out, 1
}
/*
* Pack a string and return the number of bytes written. The string is encoded
* as a varint length prefix followed by its raw bytes.
*/
func StringPack(str string, out []byte) ([]byte, uint32) {
if str == "" {
out = append(out, 0)
return out, 1
} else {
var length uint32 = uint32(len(str))
var rv uint32
out, rv = Uint32Pack(length, out)
out = append(out, str...) // copy raw bytes; ranging runes and truncating to byte would mangle multi-byte UTF-8
return out, rv + length
}
}
/*
* Pack a sequence of bytes
*/
func BytesDataPack(bytes []byte, out []byte) ([]byte, uint32) {
var length uint32 = uint32(len(bytes))
var rv uint32
out, rv = Uint32Pack(length, out)
for _, c := range bytes {
out = append(out, c)
}
return out, rv + length
}
/*
* Pack a field tag.
*/
func TagPack(id uint32, out []byte) ([]byte, uint32) {
if id < (1 << (32 - 3)) {
return Uint32Pack(id<<3, out)
} else {
return Uint64Pack(uint64(id)<<3, out)
}
}
/*
* Get the minimum number of bytes required to pack a field value of a
* particular type.
*/
func getTypeMinSize(t ProtobufType) uint32 {
if t == PROTOBUF_TYPE_SFIXED32 ||
t == PROTOBUF_TYPE_FIXED32 ||
t == PROTOBUF_TYPE_FLOAT {
return 4
}
if t == PROTOBUF_TYPE_SFIXED64 ||
t == PROTOBUF_TYPE_FIXED64 ||
t == PROTOBUF_TYPE_DOUBLE {
return 8
}
return 1
}
/*
* Parse the wire data and get the tag, type
*/
func ParseTagAndWiretype(length uint32, data []byte, tag_out *uint32,
wiretype_out *ProtobufWireType) uint32 {
var max_rv uint32
if length > 5 {
max_rv = 5
} else {
max_rv = length
}
var tag uint32 = uint32((uint8(data[0]) & 0x7f) >> 3)
var shift uint = 4
var rv uint32
*wiretype_out = ProtobufWireType(data[0] & 7)
if (data[0] & 0x80) == 0 {
*tag_out = tag
return 1
}
for rv = 1; rv < max_rv; rv++ {
if r := uint8(data[rv]) & 0x80; r != 0 {
tag = tag | (uint32(data[rv]&0x7f) << shift) // convert to uint32 before shifting so high bits survive
shift += 7
} else {
tag = tag | (uint32(data[rv]) << shift)
*tag_out = tag
return rv + 1
}
}
return 0 /* error: bad header */
}
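/*
 * Decode sketch added for illustration: 0x08 is the key for field 1 with wire
 * type 0 (varint), as at the start of a minimal protobuf message.
 */
func exampleParseTag() (uint32, ProtobufWireType) {
	var tag uint32
	var wt ProtobufWireType
	used := ParseTagAndWiretype(1, []byte{0x08}, &tag, &wt)
	_ = used // 1 byte consumed; tag == 1, wt == 0
	return tag, wt
}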
/*
* get prefix data length
*/
func ScanLengthPrefixData(length uint32, data []byte,
prefix_len_out *uint32) uint32 {
var hdr_max uint32
if length < 5 {
hdr_max = length
} else {
hdr_max = 5
}
var hdr_len uint32
var val uint32 = 0
var shift uint32 = 0
var i uint32
for i = 0; i < hdr_max; i++ {
val = val | (uint32(data[i]&0x7f) << shift) // convert to uint32 before shifting so high bits survive
shift += 7
if (uint8(data[i]) & 0x80) == 0 {
break
}
}
if i == hdr_max {
return 0
}
hdr_len = i + 1
*prefix_len_out = hdr_len
if hdr_len+val > length {
return 0
}
return hdr_len + val
}
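/*
 * Sketch added for illustration: "abc" as length-prefixed data is
 * 0x03 'a' 'b' 'c'; the scan returns the header+body size and reports the
 * one-byte header length through prefix_len_out.
 */
func exampleScanPrefixed() (total uint32, hdr uint32) {
	total = ScanLengthPrefixData(4, []byte{3, 'a', 'b', 'c'}, &hdr) // total == 4, hdr == 1
	return total, hdr
}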
/*
* parse uint32 integer
*/
func ParseUint32(length uint32, data []byte) uint32 {
var rv uint32 = uint32(data[0]) & 0x7f
if length > 1 {
rv = rv | (uint32(data[1])&0x7f)<<7
if length > 2 {
rv = rv | (uint32(data[2])&0x7f)<<14
if length > 3 {
rv = rv | (uint32(data[3])&0x7f)<<21
if length > 4 {
rv = rv | uint32(data[4])<<28
}
}
}
}
return rv
}
/*
* parse int32 integer
*/
func ParseInt32(length uint32, data []byte) uint32 {
return ParseUint32(length, data)
}
/*
* unzigzag the integer
*/
func Unzigzag32(v uint32) int32 {
if b := v & 1; b != 0 {
return -int32(v>>1) - 1
} else {
return int32(v >> 1)
}
}
/*
* parse fixed uint32 integer
*/
func ParseFixedUint32(data []byte) uint32 {
return uint32(data[0]) |
(uint32(data[1]) << 8) |
(uint32(data[2]) << 16) |
(uint32(data[3]) << 24)
}
/*
* parse uint64 integer
*/
func ParseUint64(length uint32, data []byte) uint64 {
var shift, i uint32
var rv uint64
if length < 5 {
return uint64(ParseUint32(length, data))
}
rv = (uint64(data[0] & 0x7f)) |
(uint64(data[1]&0x7f) << 7) |
(uint64(data[2]&0x7f) << 14) |
(uint64(data[3]&0x7f) << 21)
shift = 28
for i = 4; i < length; i++ {
rv = rv | ((uint64(data[i] & 0x7f)) << shift)
shift += 7
}
return rv
}
func Unzigzag64(v uint64) int64 | {
if b := v & 1; b != 0 {
return -int64(v>>1) - 1
} else {
return int64(v >> 1)
}
} | identifier_body |
|
tag_processor.go | _v uint32 = uint32(v >> 32)
if upper_v == 0 {
return UInt32Size(uint32(v))
} else if upper_v < (1 << 3) {
return 5
} else if upper_v < (1 << 10) {
return 6
} else if upper_v < (1 << 17) {
return 7
} else if upper_v < (1 << 24) {
return 8
} else if upper_v < (1 << 31) {
return 9
} else {
return 10
}
}
/*
* Return the ZigZag-encoded 64-bit unsigned integer form of a 64-bit signed
* integer.
*/
func zigzag64(v int64) uint64 {
if v < 0 {
return uint64(-v)*2 - 1
} else {
return uint64(v) * 2
}
}
/*
* Return the number of bytes required to store a signed 64-bit integer.
*/
func sint64Size(v int64) uint32 {
return uint64Size(zigzag64(v))
}
/*
* Pack an unsigned 32-bit integer in base-128 varint encoding and return the
* number of bytes written, which must be 5 or less.
*/
func Uint32Pack(value uint32, buf []byte) ([]byte, uint32) {
var rv uint32 = 0
if value >= 0x80 {
buf = append(buf, byte(value|0x80))
value = value >> 7
rv++
if value >= 0x80 {
buf = append(buf, byte(value|0x80))
value = value >> 7
rv++ // this increment was missing, undercounting 3- to 5-byte varints
if value >= 0x80 {
buf = append(buf, byte(value|0x80))
value = value >> 7
rv++
if value >= 0x80 {
buf = append(buf, byte(value|0x80))
value = value >> 7
rv++
}
}
}
}
buf = append(buf, byte(value))
rv++
return buf, rv
}
/*
* Pack a signed 32-bit integer and return the number of bytes written.
* Negative numbers are encoded as two's complement 64-bit integers.
*/
func Int32Pack(value int32, buf []byte) ([]byte, uint32) {
if value < 0 {
buf = append(buf, uint8(value)|0x80,
uint8(value>>7)|0x80,
uint8(value>>14)|0x80,
uint8(value>>21)|0x80,
uint8(value>>28)|0x80,
0xFF, 0xFF, 0xFF, 0xFF,
0x01)
return buf, 10
} else {
return Uint32Pack(uint32(value), buf)
}
}
/*
* Pack a signed 32-bit integer using ZigZag encoding and return the number of
* bytes written.
*/
func Sint32Pack(value int32, buf []byte) ([]byte, uint32) {
return Uint32Pack(zigzag32(value), buf)
}
/*
* Pack a 64-bit unsigned integer using base-128 varint encoding and return the
* number of bytes written.
*/
func Uint64Pack(value uint64, out []byte) ([]byte, uint32) {
hi := uint32(value >> 32)
lo := uint32(value)
var rv uint32
if hi == 0 {
return Uint32Pack(uint32(lo), out)
}
out = append(out, uint8(lo)|0x80,
uint8(lo>>7)|0x80,
uint8(lo>>14)|0x80,
uint8(lo>>21)|0x80)
if hi < 8 {
out = append(out, uint8(hi<<4)|uint8(lo>>28))
return out, 5
} else {
out = append(out, uint8(hi&7<<4)|uint8(lo>>28)|0x80)
hi = hi >> 3
}
rv = 5
for hi >= 128 {
out = append(out, uint8(hi|0x80))
hi >>= 7
rv++
}
out = append(out, uint8(hi))
rv++
return out, rv
}
/*
* Pack a 64-bit signed integer in ZigZag encoding and return the number of
* bytes written.
*/
func Sint64Pack(value int64, out []byte) ([]byte, uint32) {
return Uint64Pack(zigzag64(value), out)
}
/*
* Pack a 32-bit quantity in little-endian byte order. Used for protobuf wire
* types fixed32, sfixed32, float.
*/
func Fixed32Pack(value uint32, out []byte) ([]byte, uint32) {
out = append(out, uint8(value),
uint8(value>>8),
uint8(value>>16),
uint8(value>>24))
return out, 4
}
/*
* Pack a 64-bit quantity in little-endian byte order. Used for protobuf wire
* types fixed64, sfixed64, double.
*/
func Fixed64Pack(value uint64, out []byte) ([]byte, uint32) {
out, _ = Fixed32Pack(uint32(value), out)
out, _ = Fixed32Pack(uint32(value>>32), out)
return out, 8
}
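/*
 * Little-endian sketch added for illustration: fixed64 writes the low byte
 * first, so 0x0102030405060708 packs as 08 07 06 05 04 03 02 01.
 */
func exampleFixed64() []byte {
	out, _ := Fixed64Pack(0x0102030405060708, nil)
	return out // []byte{8, 7, 6, 5, 4, 3, 2, 1}
}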
/*
* Pack a boolean value as an integer and return the number of bytes written.
*/
func BooleanPack(value bool, out []byte) ([]byte, uint32) {
var b byte
if value {
b = 1
} else {
b = 0
}
out = append(out, b)
return out, 1
}
/*
* Pack a string and return the number of bytes written. The string is encoded
* as a varint length prefix followed by its raw bytes.
*/
func StringPack(str string, out []byte) ([]byte, uint32) {
if str == "" {
out = append(out, 0)
return out, 1
} else {
var length uint32 = uint32(len(str))
var rv uint32
out, rv = Uint32Pack(length, out)
out = append(out, str...) // copy raw bytes; ranging runes and truncating to byte would mangle multi-byte UTF-8
return out, rv + length
}
}
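/*
 * Sketch added for illustration: "hi" packs as 0x02 'h' 'i' — a varint length
 * prefix followed by the raw bytes.
 */
func exampleStringPack() ([]byte, uint32) {
	return StringPack("hi", nil) // []byte{2, 'h', 'i'}, 3
}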
/*
* Pack a sequence of bytes
*/
func BytesDataPack(bytes []byte, out []byte) ([]byte, uint32) {
var length uint32 = uint32(len(bytes))
var rv uint32
out, rv = Uint32Pack(length, out)
for _, c := range bytes {
out = append(out, c)
}
return out, rv + length
}
/*
* Pack a field tag.
*/
func TagPack(id uint32, out []byte) ([]byte, uint32) {
if id < (1 << (32 - 3)) {
return Uint32Pack(id<<3, out)
} else {
return Uint64Pack(uint64(id)<<3, out)
}
}
/*
* Get the minimum number of bytes required to pack a field value of a
* particular type.
*/
func getTypeMinSize(t ProtobufType) uint32 {
if t == PROTOBUF_TYPE_SFIXED32 ||
t == PROTOBUF_TYPE_FIXED32 ||
t == PROTOBUF_TYPE_FLOAT {
return 4
}
if t == PROTOBUF_TYPE_SFIXED64 ||
t == PROTOBUF_TYPE_FIXED64 ||
t == PROTOBUF_TYPE_DOUBLE {
return 8
}
return 1
}
/*
* Parse the wire data and get the tag, type
*/
func ParseTagAndWiretype(length uint32, data []byte, tag_out *uint32,
wiretype_out *ProtobufWireType) uint32 {
var max_rv uint32
if length > 5 {
max_rv = 5
} else {
max_rv = length
}
var tag uint32 = uint32((uint8(data[0]) & 0x7f) >> 3)
var shift uint = 4
var rv uint32
*wiretype_out = ProtobufWireType(data[0] & 7)
if (data[0] & 0x80) == 0 {
*tag_out = tag
return 1
}
for rv = 1; rv < max_rv; rv++ {
if r := uint8(data[rv]) & 0x80; r != 0 {
tag = tag | (uint32(data[rv]&0x7f) << shift) // convert to uint32 before shifting so high bits survive
shift += 7
} else {
tag = tag | (uint32(data[rv]) << shift)
*tag_out = tag
return rv + 1
} | random_line_split |
||
tag_processor.go | )
return buf, 10
} else {
return Uint32Pack(uint32(value), buf)
}
}
/*
* Pack a signed 32-bit integer using ZigZag encoding and return the number of
* bytes written.
*/
func Sint32Pack(value int32, buf []byte) ([]byte, uint32) {
return Uint32Pack(zigzag32(value), buf)
}
/*
* Pack a 64-bit unsigned integer using base-128 varint encoding and return the
* number of bytes written.
*/
func Uint64Pack(value uint64, out []byte) ([]byte, uint32) {
hi := uint32(value >> 32)
lo := uint32(value)
var rv uint32
if hi == 0 {
return Uint32Pack(uint32(lo), out)
}
out = append(out, uint8(lo)|0x80,
uint8(lo>>7)|0x80,
uint8(lo>>14)|0x80,
uint8(lo>>21)|0x80)
if hi < 8 {
out = append(out, uint8(hi<<4)|uint8(lo>>28))
return out, 5
} else {
out = append(out, uint8(hi&7<<4)|uint8(lo>>28)|0x80)
hi = hi >> 3
}
rv = 5
for hi >= 128 {
out = append(out, uint8(hi|0x80))
hi >>= 7
rv++
}
out = append(out, uint8(hi))
rv++
return out, rv
}
/*
* Pack a 64-bit signed integer in ZigZag encoding and return the number of
* bytes written.
*/
func Sint64Pack(value int64, out []byte) ([]byte, uint32) {
return Uint64Pack(zigzag64(value), out)
}
/*
* Pack a 32-bit quantity in little-endian byte order. Used for protobuf wire
* types fixed32, sfixed32, float.
*/
func Fixed32Pack(value uint32, out []byte) ([]byte, uint32) {
out = append(out, uint8(value),
uint8(value>>8),
uint8(value>>16),
uint8(value>>24))
return out, 4
}
/*
* Pack a 64-bit quantity in little-endian byte order. Used for protobuf wire
* types fixed64, sfixed64, double.
*/
func Fixed64Pack(value uint64, out []byte) ([]byte, uint32) {
out, _ = Fixed32Pack(uint32(value), out)
out, _ = Fixed32Pack(uint32(value>>32), out)
return out, 8
}
/*
* Pack a boolean value as an integer and return the number of bytes written.
*/
func BooleanPack(value bool, out []byte) ([]byte, uint32) {
var b byte
if value {
b = 1
} else {
b = 0
}
out = append(out, b)
return out, 1
}
/*
* Pack a string and return the number of bytes written. The string is encoded
* as a varint length prefix followed by its raw bytes.
*/
func StringPack(str string, out []byte) ([]byte, uint32) {
if str == "" {
out = append(out, 0)
return out, 1
} else {
var length uint32 = uint32(len(str))
var rv uint32
out, rv = Uint32Pack(length, out)
out = append(out, str...) // copy raw bytes; ranging runes and truncating to byte would mangle multi-byte UTF-8
return out, rv + length
}
}
/*
* Pack a sequence of bytes
*/
func BytesDataPack(bytes []byte, out []byte) ([]byte, uint32) {
var length uint32 = uint32(len(bytes))
var rv uint32
out, rv = Uint32Pack(length, out)
for _, c := range bytes {
out = append(out, c)
}
return out, rv + length
}
/*
* Pack a field tag.
*/
func TagPack(id uint32, out []byte) ([]byte, uint32) {
if id < (1 << (32 - 3)) {
return Uint32Pack(id<<3, out)
} else {
return Uint64Pack(uint64(id)<<3, out)
}
}
/*
* Get the minimum number of bytes required to pack a field value of a
* particular type.
*/
func getTypeMinSize(t ProtobufType) uint32 {
if t == PROTOBUF_TYPE_SFIXED32 ||
t == PROTOBUF_TYPE_FIXED32 ||
t == PROTOBUF_TYPE_FLOAT {
return 4
}
if t == PROTOBUF_TYPE_SFIXED64 ||
t == PROTOBUF_TYPE_FIXED64 ||
t == PROTOBUF_TYPE_DOUBLE {
return 8
}
return 1
}
/*
* Parse the wire data and get the tag, type
*/
func ParseTagAndWiretype(length uint32, data []byte, tag_out *uint32,
wiretype_out *ProtobufWireType) uint32 {
var max_rv uint32
if length > 5 {
max_rv = 5
} else {
max_rv = length
}
var tag uint32 = uint32((uint8(data[0]) & 0x7f) >> 3)
var shift uint = 4
var rv uint32
*wiretype_out = ProtobufWireType(data[0] & 7)
if (data[0] & 0x80) == 0 {
*tag_out = tag
return 1
}
for rv = 1; rv < max_rv; rv++ {
if r := uint8(data[rv]) & 0x80; r != 0 {
tag = tag | (uint32(data[rv]&0x7f) << shift) // convert to uint32 before shifting so high bits survive
shift += 7
} else {
tag = tag | (uint32(data[rv]) << shift)
*tag_out = tag
return rv + 1
}
}
return 0 /* error: bad header */
}
/*
* get prefix data length
*/
func ScanLengthPrefixData(length uint32, data []byte,
prefix_len_out *uint32) uint32 {
var hdr_max uint32
if length < 5 {
hdr_max = length
} else {
hdr_max = 5
}
var hdr_len uint32
var val uint32 = 0
var shift uint32 = 0
var i uint32
for i = 0; i < hdr_max; i++ {
val = val | (uint32(data[i]&0x7f) << shift) // convert to uint32 before shifting so high bits survive
shift += 7
if (uint8(data[i]) & 0x80) == 0 {
break
}
}
if i == hdr_max {
return 0
}
hdr_len = i + 1
*prefix_len_out = hdr_len
if hdr_len+val > length {
return 0
}
return hdr_len + val
}
/*
* parse uint32 integer
*/
func ParseUint32(length uint32, data []byte) uint32 {
var rv uint32 = uint32(data[0]) & 0x7f
if length > 1 {
rv = rv | (uint32(data[1])&0x7f)<<7
if length > 2 {
rv = rv | (uint32(data[2])&0x7f)<<14
if length > 3 {
rv = rv | (uint32(data[3])&0x7f)<<21
if length > 4 {
rv = rv | uint32(data[4])<<28
}
}
}
}
return rv
}
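/*
 * Decode sketch added for illustration: the two-byte varint AC 02 reads back
 * as 300, mirroring the packing side.
 */
func exampleParseUint32() uint32 {
	return ParseUint32(2, []byte{0xAC, 0x02}) // == 300
}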
/*
* parse int32 integer
*/
func ParseInt32(length uint32, data []byte) uint32 {
return ParseUint32(length, data)
}
/*
* unzigzag the integer
*/
func Unzigzag32(v uint32) int32 {
if b := v & 1; b != 0 {
return -int32(v>>1) - 1
} else {
return int32(v >> 1)
}
}
/*
* parse fixed uint32 integer
*/
func ParseFixedUint32(data []byte) uint32 {
return uint32(data[0]) |
(uint32(data[1]) << 8) |
(uint32(data[2]) << 16) |
(uint32(data[3]) << 24)
}
/*
* parse uint64 integer
*/
func ParseUint64(length uint32, data []byte) uint64 {
var shift, i uint32
var rv uint64
if length < 5 | {
return uint64(ParseUint32(length, data))
} | conditional_block |
|
forothree.go | else if unicode.IsLower(run[0]) {
str := string(run[0])
str = strings.ToUpper(str)
slic[i] = str
res := strings.Join(slic,"")
return res
} else {
continue
}
}
return ""
}
func strtoaciicode(s string, n int) (string) { //change a char number n in string to ascicode
slic := strings.Split(s,"")
run := []rune(slic[n])
code := fmt.Sprintf("%%%d", run) // e.g. "a" -> "%[97]"; the brackets are stripped below; also avoids shadowing the builtin int
slic[n] = code
res := strings.Join(slic,"")
res = strings.Replace(res,"]","",1)
res = strings.Replace(res,"[","",1)
return res
}
func parseHeaders (v string) (string,string) { //parse header in stdin
htemp := strings.SplitAfterN(v,":",2)
//temp := htemp[0]
htemp[0] = strings.Replace(htemp[0],":","",1)
return htemp[0],htemp[1]
//req.Header.Add(temp, htemp[1])
}
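/*
 * Sketch added for illustration: the split is on the first colon only, so
 * values that themselves contain ':' survive intact (any space after the
 * colon stays part of the value).
 */
func exampleParseHeaders() (string, string) {
	return parseHeaders("X-Original-URL:/admin") // "X-Original-URL", "/admin"
}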
func parseurldir (urlz string) (string,string) { //parse url with single directory
unparse,err := url.QueryUnescape(urlz)
u,err := url.Parse(unparse)
var dir,domain = "",""
if err != nil {
fmt.Printf("[-]error, something wrong when parsing the url : %s\n", err)
}
if u.Scheme == "" { //parsing when no http schema
u.Scheme = "https"
x := strings.SplitAfterN(urlz,"/",2)
u.Host = x[0]
dir = x[1]
domain = u.Scheme + "://" + u.Host
} else { //parsing when there's http schema
dir = strings.Replace(u.Path,"/","",1)
domain = u.Scheme + "://" + u.Host + "/"
}
return domain,dir
}
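/*
 * Sketch added for illustration: bare and schemed inputs normalize to the
 * same pair; SplitAfterN keeps the separator, so the bare-host branch also
 * ends up with a trailing slash on the domain.
 */
func exampleParseurldir() {
	d1, p1 := parseurldir("example.com/admin")         // "https://example.com/", "admin"
	d2, p2 := parseurldir("https://example.com/admin") // "https://example.com/", "admin"
	_, _, _, _ = d1, p1, d2, p2
}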
func parseurldirs (urlz string) (string,[]string) { //parse url with subdirectory
unparse,err := url.QueryUnescape(urlz)
u,err := url.Parse(unparse)
var temp,domain = "",""
if err != nil {
fmt.Printf("[-]error, something wrong when parsing the url in directory: %s\n", err)
}
if u.Scheme == "" { //parsing when no http schema
u.Scheme = "https"
x := strings.SplitAfterN(urlz,"/",2)
u.Host = x[0]
temp = x[1]
domain = u.Scheme + "://" + u.Host
} else { //parsing when there's http schema
domain = u.Scheme + "://" + u.Host + "/"
temp = strings.Replace(u.Path,"/","",1)
}
dir := strings.Split(temp,"/")
if dir[len(dir)-1] == "" {
dir = dir[:len(dir)-1]
}
return domain, dir
}
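/*
 * Sketch added for illustration: nested paths split into their segments and a
 * trailing slash does not leave an empty trailing segment.
 */
func exampleParseurldirs() (string, []string) {
	return parseurldirs("https://example.com/a/b/") // "https://example.com/", ["a" "b"]
}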
func reqiterateheader(r rawconf,dir string,wg sync.WaitGroup,lol []string,i int) {
headerstemp := r.Headers
r.Headers = append(r.Headers,lol[i])
myrequest(r,dir,"","",&wg)
//if len(r.Headers) != 0 { //magic if to debug goroutine panic: runtime error: slice bounds out of range [:-1]
// r.Headers = r.Headers[:len(r.Headers)-1]
//}
r.Headers = headerstemp
}
func myrequest(r rawconf, dir string, before string, after string, wg *sync.WaitGroup) { //request engine
//prepare url
url := ""
if (before == "DOMAINMOD") { //url exception for bypass that modify domain
r.Url = r.Url[:len(r.Url)-1]
url = r.Url + after + "/" + dir
} else if strings.HasPrefix(before, "DIRMOD") { //url exception for bypass that modify admin to %97dmin. coz special behavior in golang len() function
url = r.Url+""+dir+after
} else {
url = r.Url+before+dir+after
}
wg.Add(1)
//prepare request
req := fasthttp.AcquireRequest()
resp := fasthttp.AcquireResponse()
defer func() {
fasthttp.ReleaseResponse(resp)
fasthttp.ReleaseRequest(req)
}()
//set URL
req.SetRequestURI(url)
//add header
if len(r.Headers) > 0 {
for _,v := range r.Headers {
i,j := parseHeaders(v)
req.Header.Add(i, j)
}
}
req.Header.Set("User-Agent", r.Useragent)
// define web client request Method
req.Header.SetMethod(r.Method)
//set request timeout
var tout = time.Duration(r.Timeout) * time.Second
//do request, break if not timeout, still
timeout := false
for {
var err = fasthttp.DoTimeout(req, resp, tout)
//print error, code still redundant/inefficient
if err != nil {
if err.Error() == "timeout" {
r.Retnum--
if r.Retnum == 0 {
//fmt.Printf("domain : %s |error : %s%s",url,err,"\n") //NEED TO ADD PADDING
timeout = true //request is timeout
break
}
}
} else {
break
}
}
//print output
domaino := fmt.Sprintf("%s : %s ",r.Method,url)
codeo := fmt.Sprintf("code : " + strconv.Itoa(resp.StatusCode()) + " |") //no filter status code yet
re := regexp.MustCompile("[0-9]+")
codeocheck := strings.Join(re.FindAllString(codeo,-1),"") //to get raw number of status code, used to determine whether to print it
lengtho := ""
locationo := ""
xheaderso := ""
paddingo := 0
if r.Bodylen {
t := resp.String()
lengtho = fmt.Sprintf("length : %v |",len(t)) //no filter length yet
}
if r.Xheaders {
xheaderso = fmt.Sprintf("xtra-header : %v |",r.Headers[len(r.Headers)-1])
}
if r.Location {
a := resp.Header
b := string(a.Peek("Location"))
if b != "" {
locationo = fmt.Sprintf("location : %v |", b)
}
}
_, found := Find(r.Scode,codeocheck) //statuscode filter
//PADDING LOGIC
//============================================================================================
//add extra padding if domain is example.com.
if before == "DOMAINMOD" {
paddingo = ((len(r.Url)+len(dir)+30) - (len(domaino)) + 1 )
//add extra padding if firstchartoasciicode used
} else if strings.HasPrefix(before, "DIRMOD") {
if lastchartoasciicodeonly(before) < 100 {
paddingo = (len(r.Url)+len(dir)+30) - (len(domaino)) - 2
/*fmt.Println("LESS")
fmt.Println(lastchartoasciicodeonly(before))
fmt.Println(before)*/
} else {
paddingo = (len(r.Url)+len(dir)+30) - (len(domaino)) - 3
/*fmt.Println("MORE")
fmt.Println(lastchartoasciicodeonly(before))
fmt.Println(before)*/
}
} else {
paddingo = (len(r.Url)+len(dir)+30) - (len(domaino))
}
//add extra padding if domain is blank coz the dir is in x-rewrite
if len(r.Headers) > 0 && strings.HasPrefix(r.Headers[len(r.Headers)-1], "X-Rewrite-URL:/") { // guard: Headers may be empty
paddingo = paddingo+len(r.Headers[len(r.Headers)-1]) - len("X-Rewrite-URL:/")
}
//============================================================================================
if !(timeout) { //check if request timeout
if found{
fmt.Println(domaino + strings.Repeat(" ", paddingo)+ "|" + codeo + lengtho + locationo + xheaderso)
}
if r.Outname != ""{
if found{
storehere(domaino + strings.Repeat(" ", paddingo)+ "|" + codeo + lengtho + xheaderso + "\n",r.Outfile)
}
}
} else {
fmt.Println(domaino + strings.Repeat(" ", paddingo)+ "|" + "timeout")
}
wg.Done()
}
func payloads(r rawconf, dir string) {
var wg sync.WaitGroup
myrequest(r,dir,"","",&wg)
defer wg.Wait()
//25 goroutine total
go myrequest(r,dir,"DOMAINMOD",".",&wg)
go myrequest(r,dir,"","%2500",&wg)
go myrequest(r,dir,"","%20",&wg)
go myrequest(r,dir,"%2" + "e/","",&wg)
go myrequest(r,dir," | {
str := string(run[0])
str = strings.ToLower(str)
slic[i] = str
res := strings.Join(slic,"")
return res
} | conditional_block |
|
forothree.go | if err != nil {
fmt.Printf("[-]error, something wrong when parsing the url in directory: %s\n", err)
}
if u.Scheme == "" { //parsing when no http schema
u.Scheme = "https"
x := strings.SplitAfterN(urlz,"/",2)
u.Host = x[0]
temp = x[1]
domain = u.Scheme + "://" + u.Host
} else { //parsing when there's http schema
domain = u.Scheme + "://" + u.Host + "/"
temp = strings.Replace(u.Path,"/","",1)
}
dir := strings.Split(temp,"/")
if dir[len(dir)-1] == "" {
dir = dir[:len(dir)-1]
}
return domain, dir
}
func reqiterateheader(r rawconf,dir string,wg sync.WaitGroup,lol []string,i int) {
headerstemp := r.Headers
r.Headers = append(r.Headers,lol[i])
myrequest(r,dir,"","",&wg)
//if len(r.Headers) != 0 { //magic if to debug goroutine panic: runtime error: slice bounds out of range [:-1]
// r.Headers = r.Headers[:len(r.Headers)-1]
//}
r.Headers = headerstemp
}
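// Note (added): reqiterateheader restores r.Headers from the saved copy after
// each probe so one candidate header never leaks into the next request; the
// commented-out slice truncation above was the earlier, race-prone attempt.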
func myrequest(r rawconf, dir string, before string, after string, wg *sync.WaitGroup) { //request engine
//prepare url
url := ""
if (before == "DOMAINMOD") { //url exception for bypass that modify domain
r.Url = r.Url[:len(r.Url)-1]
url = r.Url + after + "/" + dir
} else if strings.HasPrefix(before, "DIRMOD") { //url exception for bypass that modify admin to %97dmin. coz special behavior in golang len() function
url = r.Url+""+dir+after
} else {
url = r.Url+before+dir+after
}
wg.Add(1)
//prepare request
req := fasthttp.AcquireRequest()
resp := fasthttp.AcquireResponse()
defer func() {
fasthttp.ReleaseResponse(resp)
fasthttp.ReleaseRequest(req)
}()
//set URL
req.SetRequestURI(url)
//add header
if len(r.Headers) > 0 {
for _,v := range r.Headers {
i,j := parseHeaders(v)
req.Header.Add(i, j)
}
}
req.Header.Set("User-Agent", r.Useragent)
// define web client request Method
req.Header.SetMethod(r.Method)
//set request timeout
var tout = time.Duration(r.Timeout) * time.Second
//do request, break if not timeout, still
timeout := false
for {
var err = fasthttp.DoTimeout(req, resp, tout)
//print error, code still redundant/inefficient
if err != nil {
if err.Error() == "timeout" {
r.Retnum--
if r.Retnum == 0 {
//fmt.Printf("domain : %s |error : %s%s",url,err,"\n") //NEED TO ADD PADDING
timeout = true //request is timeout
break
}
}
} else {
break
}
}
//print output
domaino := fmt.Sprintf("%s : %s ",r.Method,url)
codeo := fmt.Sprintf("code : " + strconv.Itoa(resp.StatusCode()) + " |") //no filter status code yet
re := regexp.MustCompile("[0-9]+")
codeocheck := strings.Join(re.FindAllString(codeo,-1),"") //to get raw number of status code, used to determine whether to print it
lengtho := ""
locationo := ""
xheaderso := ""
paddingo := 0
if r.Bodylen {
t := resp.String()
lengtho = fmt.Sprintf("length : %v |",len(t)) //no filter length yet
}
if r.Xheaders {
xheaderso = fmt.Sprintf("xtra-header : %v |",r.Headers[len(r.Headers)-1])
}
if r.Location {
a := resp.Header
b := string(a.Peek("Location"))
if b != "" {
locationo = fmt.Sprintf("location : %v |", b)
}
}
_, found := Find(r.Scode,codeocheck) //statuscode filter
//PADDING LOGIC
//============================================================================================
//add extra padding if domain is example.com.
if before == "DOMAINMOD" {
paddingo = ((len(r.Url)+len(dir)+30) - (len(domaino)) + 1 )
//add extra padding if firstchartoasciicode used
} else if strings.HasPrefix(before, "DIRMOD") {
if lastchartoasciicodeonly(before) < 100 {
paddingo = (len(r.Url)+len(dir)+30) - (len(domaino)) - 2
/*fmt.Println("LESS")
fmt.Println(lastchartoasciicodeonly(before))
fmt.Println(before)*/
} else {
paddingo = (len(r.Url)+len(dir)+30) - (len(domaino)) - 3
/*fmt.Println("MORE")
fmt.Println(lastchartoasciicodeonly(before))
fmt.Println(before)*/
}
} else {
paddingo = (len(r.Url)+len(dir)+30) - (len(domaino))
}
//add extra padding if domain is blank coz the dir is in x-rewrite
if len(r.Headers) > 0 && strings.HasPrefix(r.Headers[len(r.Headers)-1], "X-Rewrite-URL:/") { // guard: Headers may be empty
paddingo = paddingo+len(r.Headers[len(r.Headers)-1]) - len("X-Rewrite-URL:/")
}
//============================================================================================
if !(timeout) { //check if request timeout
if found{
fmt.Println(domaino + strings.Repeat(" ", paddingo)+ "|" + codeo + lengtho + locationo + xheaderso)
}
if r.Outname != ""{
if found{
storehere(domaino + strings.Repeat(" ", paddingo)+ "|" + codeo + lengtho + xheaderso + "\n",r.Outfile)
}
}
} else {
fmt.Println(domaino + strings.Repeat(" ", paddingo)+ "|" + "timeout")
}
wg.Done()
}
func payloads(r rawconf, dir string) {
var wg sync.WaitGroup
myrequest(r,dir,"","",&wg)
defer wg.Wait()
//25 goroutine total
go myrequest(r,dir,"DOMAINMOD",".",&wg)
go myrequest(r,dir,"","%2500",&wg)
go myrequest(r,dir,"","%20",&wg)
go myrequest(r,dir,"%2" + "e/","",&wg)
go myrequest(r,dir,"","%09",&wg)
go myrequest(r,dir,"","/..;/",&wg)
go myrequest(r,dir,"","..;/",&wg)
go myrequest(r,dir,".;/","",&wg)
go myrequest(r,dir,"..;/","",&wg)
go myrequest(r,dir,"","/.",&wg)
go myrequest(r,dir,"","//",&wg)
go myrequest(r,dir,"./","/./",&wg)
go myrequest(r,dir,"/","",&wg)
go myrequest(r,dir,"","//[email protected]",&wg)
go myrequest(r,dir,"","//google.com",&wg)
go myrequest(r,dir,"",".json",&wg)
go myrequest(r,dir,"","?",&wg)
go myrequest(r,dir,"\\..\\.\\","",&wg)
go myrequest(r,dir,"","??",&wg)
go myrequest(r,dir,"","#",&wg)
go myrequest(r,dir,".;","",&wg)
go myrequest(r,dir,"","/~",&wg)
go myrequest(r,dir,"./","",&wg)
if dir != "" {
go myrequest(r,firstchartoasciicode(dir),"DIRMOD"+dir[:1],"",&wg)
}
if strtoreversecase(dir) != "" {
go myrequest(r,strtoreversecase(dir),"","",&wg) //a nasty workaround to keep the goroutines running without hitting a race condition
}
methodtemp := r.Method
if r.Method == "GET" {
r.Method = "POST"
myrequest(r,dir,"","",&wg)
r.Method = "TRACE"
myrequest(r,dir,"","",&wg)
r.Method = methodtemp
} else if r.Method == "POST" {
r.Method = "GET"
myrequest(r,dir,"","",&wg)
r.Method = "TRACE"
myrequest(r,dir,"","",&wg)
r.Method = methodtemp
} else {
r.Method = "POST"
myrequest(r,dir,"","",&wg)
r.Method = "GET"
myrequest(r,dir,"","",&wg)
r.Method = "TRACE"
myrequest(r,dir,"","",&wg)
r.Method = methodtemp
}
}
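//Hedged sketch of the Find helper used by the status-code filter in
//myrequest; its real definition lives elsewhere in this file, so this
//exampleFind is only an assumed equivalent: report the index of val in
//slice and whether it was found.
func exampleFind(slice []string, val string) (int, bool) {
for i, item := range slice {
if item == val {
return i, true
}
}
return -1, false
}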
func | payloads2 | identifier_name |
|
forothree.go | string, before string, after string, wg *sync.WaitGroup) { //request engine
//prepare url
url := ""
if before == "DOMAINMOD" { //url exception for bypasses that modify the domain
r.Url = r.Url[:len(r.Url)-1]
url = r.Url + after + "/" + dir
} else if strings.HasPrefix(before, "DIRMOD") { //url exception for bypasses that rewrite admin to %97dmin, needed because of how golang's len() counts the raw bytes
url = r.Url+dir+after
} else {
url = r.Url+before+dir+after
}
wg.Add(1)
//prepare request
req := fasthttp.AcquireRequest()
resp := fasthttp.AcquireResponse()
defer func() {
fasthttp.ReleaseResponse(resp)
fasthttp.ReleaseRequest(req)
}()
//set URL
req.SetRequestURI(url)
//add header
if len(r.Headers) > 0 {
for _,v := range r.Headers {
i,j := parseHeaders(v)
req.Header.Add(i, j)
}
}
req.Header.Set("User-Agent", r.Useragent)
// define web client request Method
req.Header.SetMethod(r.Method)
//set request timeout
var tout = time.Duration(r.Timeout) * time.Second
//do the request; break on success, or give up after Retnum timeouts
timeout := false
for {
var err = fasthttp.DoTimeout(req, resp, tout)
//handle any error; this code is still redundant/inefficient
if err != nil {
if err.Error() == "timeout" {
r.Retnum--
if r.Retnum == 0 {
//fmt.Printf("domain : %s |error : %s%s",url,err,"\n") //NEED TO ADD PADDING
timeout = true //request is timeout
break
}
}
} else {
break
}
}
//print output
domaino := fmt.Sprintf("%s : %s ",r.Method,url)
codeo := fmt.Sprintf("code : %d |", resp.StatusCode()) //no filter status code yet
re := regexp.MustCompile("[0-9]+")
codeocheck := strings.Join(re.FindAllString(codeo,-1),"") //to get raw number of status code, used to determine whether to print it
lengtho := ""
locationo := ""
xheaderso := ""
paddingo := 0
if r.Bodylen {
t := resp.String()
lengtho = fmt.Sprintf("length : %v |",len(t)) //no filter length yet
}
if r.Xheaders {
xheaderso = fmt.Sprintf("xtra-header : %v |",r.Headers[len(r.Headers)-1])
}
if r.Location {
a := resp.Header
b := string(a.Peek("Location"))
if b != "" {
locationo = fmt.Sprintf("location : %v |", b)
}
}
_, found := Find(r.Scode,codeocheck) //statuscode filter
//PADDING LOGIC
//============================================================================================
//add extra padding if the domain was modified to example.com. (trailing-dot bypass)
if before == "DOMAINMOD" {
paddingo = ((len(r.Url)+len(dir)+30) - (len(domaino)) + 1 )
//add extra padding if firstchartoasciicode used
} else if strings.HasPrefix(before, "DIRMOD") {
if lastchartoasciicodeonly(before) < 100 {
paddingo = (len(r.Url)+len(dir)+30) - (len(domaino)) - 2
/*fmt.Println("LESS")
fmt.Println(lastchartoasciicodeonly(before))
fmt.Println(before)*/
} else {
paddingo = (len(r.Url)+len(dir)+30) - (len(domaino)) - 3
/*fmt.Println("MORE")
fmt.Println(lastchartoasciicodeonly(before))
fmt.Println(before)*/
}
} else {
paddingo = (len(r.Url)+len(dir)+30) - (len(domaino))
}
//add extra padding if the domain part is blank because the dir is carried in the X-Rewrite-URL header
if strings.HasPrefix(r.Headers[len(r.Headers)-1], "X-Rewrite-URL:/") {
paddingo = paddingo+len(r.Headers[len(r.Headers)-1]) - len("X-Rewrite-URL:/")
}
//============================================================================================
if !(timeout) { //check if request timeout
if found{
fmt.Println(domaino + strings.Repeat(" ", paddingo)+ "|" + codeo + lengtho + locationo + xheaderso)
}
if r.Outname != ""{
if found{
storehere(domaino + strings.Repeat(" ", paddingo)+ "|" + codeo + lengtho + xheaderso + "\n",r.Outfile)
}
}
} else {
fmt.Println(domaino + strings.Repeat(" ", paddingo)+ "|" + "timeout")
}
wg.Done()
}
func payloads(r rawconf, dir string) {
var wg sync.WaitGroup
myrequest(r,dir,"","",&wg)
defer func(){
wg.Wait()
}()
//25 goroutines total
go myrequest(r,dir,"DOMAINMOD",".",&wg)
go myrequest(r,dir,"","%2500",&wg)
go myrequest(r,dir,"","%20",&wg)
go myrequest(r,dir,"%2" + "e/","",&wg)
go myrequest(r,dir,"","%09",&wg)
go myrequest(r,dir,"","/..;/",&wg)
go myrequest(r,dir,"","..;/",&wg)
go myrequest(r,dir,".;/","",&wg)
go myrequest(r,dir,"..;/","",&wg)
go myrequest(r,dir,"","/.",&wg)
go myrequest(r,dir,"","//",&wg)
go myrequest(r,dir,"./","/./",&wg)
go myrequest(r,dir,"/","",&wg)
go myrequest(r,dir,"","//[email protected]",&wg)
go myrequest(r,dir,"","//google.com",&wg)
go myrequest(r,dir,"",".json",&wg)
go myrequest(r,dir,"","?",&wg)
go myrequest(r,dir,"\\..\\.\\","",&wg)
go myrequest(r,dir,"","??",&wg)
go myrequest(r,dir,"","#",&wg)
go myrequest(r,dir,".;","",&wg)
go myrequest(r,dir,"","/~",&wg)
go myrequest(r,dir,"./","",&wg)
if dir != "" {
go myrequest(r,firstchartoasciicode(dir),"DIRMOD"+dir[:1],"",&wg)
}
if strtoreversecase(dir) != "" {
go myrequest(r,strtoreversecase(dir),"","",&wg) //a nasty workaround to keep the goroutines running without hitting a race condition
}
methodtemp := r.Method
if r.Method == "GET" {
r.Method = "POST"
myrequest(r,dir,"","",&wg)
r.Method = "TRACE"
myrequest(r,dir,"","",&wg)
r.Method = methodtemp
} else if r.Method == "POST" {
r.Method = "GET"
myrequest(r,dir,"","",&wg)
r.Method = "TRACE"
myrequest(r,dir,"","",&wg)
r.Method = methodtemp
} else {
r.Method = "POST"
myrequest(r,dir,"","",&wg)
r.Method = "GET"
myrequest(r,dir,"","",&wg)
r.Method = "TRACE"
myrequest(r,dir,"","",&wg)
r.Method = methodtemp
}
}
func payloads2(r rawconf, dir string) {
var wg sync.WaitGroup
myrequest(r,dir,"","",&wg)
defer func(){
wg.Wait()
}()
go myrequest(r,dir,"DOMAINMOD",".",&wg)
go myrequest(r,dir,"%2" + "e/","",&wg)
go myrequest(r,dir,"","..;/",&wg) // LOOP?
go myrequest(r,dir,"..;/","",&wg) //and ../ LOOP?
go myrequest(r,dir,"/","",&wg)
go myrequest(r,dir,"","/~",&wg)
go myrequest(r,dir,"./","",&wg)
if dir != "" {
myrequest(r,firstchartoasciicode(dir),"DIRMOD"+dir[:1],"",&wg) //not run in a goroutine: a nasty workaround to keep the other goroutines running without hitting a race condition
}
}
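//Note on the sentinel values above: the before argument doubles as a control
//flag for myrequest. "DOMAINMOD" appends after to the host itself (the
//trailing-dot example.com. trick), while a "DIRMOD"+first-char prefix marks
//payloads whose first character of dir was replaced by an ASCII escape such
//as %97, so the padding logic can compensate for the longer raw string.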
func payloads3(r rawconf, dir string) | {
r.Xheaders = true
var wg sync.WaitGroup
defer func(){
//wg.Done()
wg.Wait()
}()
g, _ := os.Open("headerbypass.txt") //iterate the file line by line (open error is ignored here)
g2 := bufio.NewScanner(g)
var lol []string
for g2.Scan() {
var line = g2.Text() | identifier_body |
|
forothree.go | ,err := url.QueryUnescape(urlz)
u,err := url.Parse(unparse)
var dir,domain = "",""
if err != nil {
fmt.Println("[-]error, something wrong when parsing the url : %s",err)
}
if u.Scheme == "" { //parsing when no http schema
u.Scheme = "https"
x := strings.SplitAfterN(urlz,"/",2)
u.Host = x[0]
dir = x[1]
domain = u.Scheme + "://" + u.Host
} else { //parsing when there's http schema
dir = strings.Replace(u.Path,"/","",1)
domain = u.Scheme + "://" + u.Host + "/"
}
|
return domain,dir
}
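//worked example: parseurl("https://example.com/admin") and the schemeless
//form parseurl("example.com/admin") both return ("https://example.com/",
//"admin"); the schemeless branch keeps the trailing "/" because
//strings.SplitAfterN retains the separator in x[0].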
func parseurldirs (urlz string) (string,[]string) { //parse url with subdirectory
unparse,err := url.QueryUnescape(urlz)
u,err := url.Parse(unparse)
var temp,domain = "",""
if err != nil {
fmt.Println("[-]error, something wrong when parsing the url in directory: %s",err)
}
if u.Scheme == "" { //parsing when no http schema
u.Scheme = "https"
x := strings.SplitAfterN(urlz,"/",2)
u.Host = x[0]
temp = x[1]
domain = u.Scheme + "://" + u.Host
} else { //parsing when there's http schema
domain = u.Scheme + "://" + u.Host + "/"
temp = strings.Replace(u.Path,"/","",1)
}
dir := strings.Split(temp,"/")
if dir[len(dir)-1] == "" {
dir = dir[:len(dir)-1]
}
return domain, dir
}
func reqiterateheader(r rawconf,dir string,wg *sync.WaitGroup,lol []string,i int) {
headerstemp := r.Headers
r.Headers = append(r.Headers,lol[i])
myrequest(r,dir,"","",wg)
//if len(r.Headers) != 0 { //magic if to debug goroutine panic: runtime error: slice bounds out of range [:-1]
// r.Headers = r.Headers[:len(r.Headers)-1]
//}
r.Headers = headerstemp
}
func myrequest(r rawconf, dir string, before string, after string, wg *sync.WaitGroup) { //request engine
//prepare url
url := ""
if before == "DOMAINMOD" { //url exception for bypasses that modify the domain
r.Url = r.Url[:len(r.Url)-1]
url = r.Url + after + "/" + dir
} else if strings.HasPrefix(before, "DIRMOD") { //url exception for bypasses that rewrite admin to %97dmin, needed because of how golang's len() counts the raw bytes
url = r.Url+dir+after
} else {
url = r.Url+before+dir+after
}
wg.Add(1)
//prepare request
req := fasthttp.AcquireRequest()
resp := fasthttp.AcquireResponse()
defer func() {
fasthttp.ReleaseResponse(resp)
fasthttp.ReleaseRequest(req)
}()
//set URL
req.SetRequestURI(url)
//add header
if len(r.Headers) > 0 {
for _,v := range r.Headers {
i,j := parseHeaders(v)
req.Header.Add(i, j)
}
}
req.Header.Set("User-Agent", r.Useragent)
// define web client request Method
req.Header.SetMethod(r.Method)
//set request timeout
var tout = time.Duration(r.Timeout) * time.Second
//do the request; break on success, or give up after Retnum timeouts
timeout := false
for {
var err = fasthttp.DoTimeout(req, resp, tout)
//handle any error; this code is still redundant/inefficient
if err != nil {
if err.Error() == "timeout" {
r.Retnum--
if r.Retnum == 0 {
//fmt.Printf("domain : %s |error : %s%s",url,err,"\n") //NEED TO ADD PADDING
timeout = true //request is timeout
break
}
}
} else {
break
}
}
//print output
domaino := fmt.Sprintf("%s : %s ",r.Method,url)
codeo := fmt.Sprintf("code : %d |", resp.StatusCode()) //no filter status code yet
re := regexp.MustCompile("[0-9]+")
codeocheck := strings.Join(re.FindAllString(codeo,-1),"") //to get raw number of status code, used to determine whether to print it
lengtho := ""
locationo := ""
xheaderso := ""
paddingo := 0
if r.Bodylen {
t := resp.String()
lengtho = fmt.Sprintf("length : %v |",len(t)) //no filter length yet
}
if r.Xheaders {
xheaderso = fmt.Sprintf("xtra-header : %v |",r.Headers[len(r.Headers)-1])
}
if r.Location {
a := resp.Header
b := string(a.Peek("Location"))
if b != "" {
locationo = fmt.Sprintf("location : %v |", b)
}
}
_, found := Find(r.Scode,codeocheck) //statuscode filter
//PADDING LOGIC
//============================================================================================
//add extra padding if the domain was modified to example.com. (trailing-dot bypass)
if before == "DOMAINMOD" {
paddingo = ((len(r.Url)+len(dir)+30) - (len(domaino)) + 1 )
//add extra padding if firstchartoasciicode used
} else if strings.HasPrefix(before, "DIRMOD") {
if lastchartoasciicodeonly(before) < 100 {
paddingo = (len(r.Url)+len(dir)+30) - (len(domaino)) - 2
/*fmt.Println("LESS")
fmt.Println(lastchartoasciicodeonly(before))
fmt.Println(before)*/
} else {
paddingo = (len(r.Url)+len(dir)+30) - (len(domaino)) - 3
/*fmt.Println("MORE")
fmt.Println(lastchartoasciicodeonly(before))
fmt.Println(before)*/
}
} else {
paddingo = (len(r.Url)+len(dir)+30) - (len(domaino))
}
//add extra padding if the domain part is blank because the dir is carried in the X-Rewrite-URL header
if strings.HasPrefix(r.Headers[len(r.Headers)-1], "X-Rewrite-URL:/") {
paddingo = paddingo+len(r.Headers[len(r.Headers)-1]) - len("X-Rewrite-URL:/")
}
//============================================================================================
if !(timeout) { //check if request timeout
if found{
fmt.Println(domaino + strings.Repeat(" ", paddingo)+ "|" + codeo + lengtho + locationo + xheaderso)
}
if r.Outname != ""{
if found{
storehere(domaino + strings.Repeat(" ", paddingo)+ "|" + codeo + lengtho + xheaderso + "\n",r.Outfile)
}
}
} else {
fmt.Println(domaino + strings.Repeat(" ", paddingo)+ "|" + "timeout")
}
wg.Done()
}
func payloads(r rawconf, dir string) {
var wg sync.WaitGroup
myrequest(r,dir,"","",&wg)
defer func(){
wg.Wait()
}()
//25 goroutines total
go myrequest(r,dir,"DOMAINMOD",".",&wg)
go myrequest(r,dir,"","%2500",&wg)
go myrequest(r,dir,"","%20",&wg)
go myrequest(r,dir,"%2" + "e/","",&wg)
go myrequest(r,dir,"","%09",&wg)
go myrequest(r,dir,"","/..;/",&wg)
go myrequest(r,dir,"","..;/",&wg)
go myrequest(r,dir,".;/","",&wg)
go myrequest(r,dir,"..;/","",&wg)
go myrequest(r,dir,"","/.",&wg)
go myrequest(r,dir,"","//",&wg)
go myrequest(r,dir,"./","/./",&wg)
go myrequest(r,dir,"/","",&wg)
go myrequest(r,dir,"","//[email protected]",&wg)
go myrequest(r,dir,"","//google.com",&wg)
go myrequest(r,dir,"",".json",&wg)
go myrequest(r,dir,"","?",&wg)
go myrequest(r,dir,"\\..\\.\\","",&wg)
go myrequest(r,dir,"","??",&wg)
go myrequest(r,dir,"","#",&wg)
go myrequest(r,dir,".;","",&wg)
go myrequest(r,dir,"","/~",&wg)
go myrequest(r,dir,"./","",&wg)
if dir != "" {
go myrequest(r,firstchartoasciicode(dir),"DIRMOD"+dir[:1],"",&wg)
}
if strtoreversecase(dir) != "" {
go myrequest(r,strtoreversecase(dir),""," | random_line_split |
|
Sponsoring.js | () {
const { status } = this.state;
const { form } = this.state;
return (
<>
<DemoNavbar />
<main ref="main" style={{userSelect: 'none'}}>
<div className="position-relative">
<section className="section section-lg section-shaped pb-150 " style={{backgroundColor:"#04638f"}}>
<div className="shape shape-style-1 shape-default ">
</div>
<Row>
<Col className="mt-9 mt-sm-9" sm="3" xs="6">
<img
alt="..."
className="img-fluid floating"
src={require("assets/img/like.png")}
sm="3" xs="6"
/>
</Col>
<Col className="mt-9 mt-sm-6" sm="6" xs="12">
<center>
<h2 className="align-items-center display-1 text-white" style={{marginTop:"100px"}}>
<MovingComponent
type="fadeInFromBottom"
duration="1000ms"
delay="0s"
direction="alternate"
timing="ease-in-out"
iteration="1"
fillMode="both">
Service Sponsoring
</MovingComponent>
<MovingComponent
type="fadeInFromBottom"
duration="1000ms"
delay="1s"
direction="alternate"
timing="ease-in-out"
iteration="1"
fillMode="both">
Facebook & Instagram
</MovingComponent>
<MovingComponent
type="fadeInFromBottom"
duration="1000ms"
delay="2s"
direction="alternate"
timing="ease-in-out"
iteration="1"
fillMode="both">
de TekTree
</MovingComponent>
</h2>
</center>
</Col>
<Col className="mt-9 mt-sm-9" sm="3" xs="6">
<img
alt="..."
className="img-fluid floating"
src={require("assets/img/heart.png")}
style={{ width: "250px" }}
/>
</Col>
</Row>
</section>
</div>
<section className="section bg-secondary">
<Container>
<Row className="row-grid align-items-center">
<Col md="6">
<Card className="bg-default shadow border-0">
<CardImg alt="..." src={require("assets/img/spon.png")} top/>
<blockquote className="card-blockquote">
<svg xmlns="http://www.w3.org/2000/svg" className="svg-bg" preserveAspectRatio="none" viewBox="0 0 583 95">
<polygon className="fill-default" points="0,52 583,95 0,95"/>
<polygon className="fill-default" opacity=".2" points="0,42 583,95 683,0 0,95"/>
</svg>
<h4 className="display-3 font-weight-bold text-white">
Sponsoring Facebook
</h4>
</blockquote>
</Card>
</Col>
<Col md="6">
<div className="pl-md-5">
<div className="icon icon-lg icon-shape shadow rounded-circle mb-5">
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" viewBox="0 0 48 48"><title>like-2</title><g><path fill="#EEBC99" d="M38,45H24c-2.7168,0-5.53418-0.66113-7.93311-1.86035l-4.51416-2.25684 C11.21387,40.71289,11,40.36719,11,39.98828v-14c0-0.23438,0.08203-0.46094,0.23193-0.64062L21,13.62598v-7.6377 c0-1.07812,0.55908-2.04199,1.49561-2.58008c0.93066-0.53418,2.03809-0.53125,2.96191,0.00879 C28.88086,5.41504,29,10.45996,29,13.98828V19h11c2.75684,0,5,2.2373,5,4.98828c0,0.04102-0.00244,0.08301-0.00781,0.12402 l-1.99268,15.94141C42.96436,42.78711,40.73535,45,38,45z"></path> <path fill="#5A7A84" d="M12,45H4c-0.55225,0-1-0.44727-1-1V22c0-0.55273,0.44775-1,1-1h8c0.55225,0,1,0.44727,1,1v22 C13,44.55273,12.55225,45,12,45z"></path></g></svg>
</div>
<h3>Publicité sur Facebook</h3>
<p className=" lead">
Faire de la pub sur Facebook efficace et rentable quel que soit votre budget
</p>
<p>
Vous êtes une entreprise et vous souhaitez avoir plus de visibilité, augmenter le trafic sur votre site web ou bénéficier de plus de clients ? Vous pouvez opter pour la publicité Facebook.
C’est un excellent moyen pour toucher vos cibles au plus près là où elles sont.
</p>
<Link
className="font-weight-bold text-warning mt-5"
to="/offres"
>
Découvrez nos promos
</Link>
</div>
</Col>
</Row>
</Container>
</section>
<section className="section ">
<Container>
<Row className="row-grid align-items-center">
<Col md="6">
<div className="pl-md-5">
<div className="icon icon-lg icon-shape shadow rounded-circle mb-5">
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" viewBox="0 0 48 48"><title>heart-2</title><g><path fill="#E86C60" d="M43.192,6.808c-5.068-5.068-13.316-5.068-18.385,0C24.526,7.089,24.257,7.385,24,7.695 c-0.257-0.311-0.526-0.606-0.808-0.888c-5.068-5.068-13.316-5.068-18.385,0s-5.068,13.316,0,18.385l18.485,18.485 c0.195,0.195,0.451,0.293,0.707,0.293s0.512-0.098,0.707-0.293l18.485-18.485C48.261,20.124,48.261,11.876,43.192,6.808z"></path></g></svg>
</div>
<h3>Publicité sur Instagram</h3>
<p className="lead">
DÉVELOPPEZ VOTRE ENTREPRISE SUR INSTAGRAM
| render | identifier_name |
|
Sponsoring.js | />
</Col>
<Col className="mt-9 mt-sm-6" sm="6" xs="12">
<center>
<h2 className="align-items-center display-1 text-white" style={{marginTop:"100px"}}>
<MovingComponent
type="fadeInFromBottom"
duration="1000ms"
delay="0s"
direction="alternate"
timing="ease-in-out"
iteration="1"
fillMode="both">
Service Sponsoring
</MovingComponent>
<MovingComponent
type="fadeInFromBottom"
duration="1000ms"
delay="1s"
direction="alternate"
timing="ease-in-out"
iteration="1"
fillMode="both">
Facebook & Instagram
</MovingComponent>
<MovingComponent
type="fadeInFromBottom"
duration="1000ms"
delay="2s"
direction="alternate"
timing="ease-in-out"
iteration="1"
fillMode="both">
de TekTree
</MovingComponent>
</h2>
</center>
</Col>
<Col className="mt-9 mt-sm-9" sm="3" xs="6">
<img
alt="..."
className="img-fluid floating"
src={require("assets/img/heart.png")}
style={{ width: "250px" }}
/>
</Col>
</Row>
</section>
</div>
<section className="section bg-secondary">
<Container>
<Row className="row-grid align-items-center">
<Col md="6">
<Card className="bg-default shadow border-0">
<CardImg alt="..." src={require("assets/img/spon.png")} top/>
<blockquote className="card-blockquote">
<svg xmlns="http://www.w3.org/2000/svg" className="svg-bg" preserveAspectRatio="none" viewBox="0 0 583 95">
<polygon className="fill-default" points="0,52 583,95 0,95"/>
<polygon className="fill-default" opacity=".2" points="0,42 583,95 683,0 0,95"/>
</svg>
<h4 className="display-3 font-weight-bold text-white">
Sponsoring Facebook
</h4>
</blockquote>
</Card>
</Col>
<Col md="6">
<div className="pl-md-5">
<div className="icon icon-lg icon-shape shadow rounded-circle mb-5">
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" viewBox="0 0 48 48"><title>like-2</title><g><path fill="#EEBC99" d="M38,45H24c-2.7168,0-5.53418-0.66113-7.93311-1.86035l-4.51416-2.25684 C11.21387,40.71289,11,40.36719,11,39.98828v-14c0-0.23438,0.08203-0.46094,0.23193-0.64062L21,13.62598v-7.6377 c0-1.07812,0.55908-2.04199,1.49561-2.58008c0.93066-0.53418,2.03809-0.53125,2.96191,0.00879 C28.88086,5.41504,29,10.45996,29,13.98828V19h11c2.75684,0,5,2.2373,5,4.98828c0,0.04102-0.00244,0.08301-0.00781,0.12402 l-1.99268,15.94141C42.96436,42.78711,40.73535,45,38,45z"></path> <path fill="#5A7A84" d="M12,45H4c-0.55225,0-1-0.44727-1-1V22c0-0.55273,0.44775-1,1-1h8c0.55225,0,1,0.44727,1,1v22 C13,44.55273,12.55225,45,12,45z"></path></g></svg>
</div>
<h3>Publicité sur Facebook</h3>
<p className=" lead">
Faire de la pub sur Facebook efficace et rentable quel que soit votre budget
</p>
<p>
Vous êtes une entreprise et vous souhaitez avoir plus de visibilité, augmenter le trafic sur votre site web ou bénéficier de plus de clients ? Vous pouvez opter pour la publicité Facebook.
C’est un excellent moyen pour toucher vos cibles au plus près là où elles sont.
</p>
<Link
className="font-weight-bold text-warning mt-5"
to="/offres"
>
Découvrez nos promos
</Link>
</div>
</Col>
</Row>
</Container>
</section>
<section className="section ">
<Container>
<Row className="row-grid align-items-center">
<Col md="6">
<div className="pl-md-5">
<div className="icon icon-lg icon-shape shadow rounded-circle mb-5">
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" viewBox="0 0 48 48"><title>heart-2</title><g><path fill="#E86C60" d="M43.192,6.808c-5.068-5.068-13.316-5.068-18.385,0C24.526,7.089,24.257,7.385,24,7.695 c-0.257-0.311-0.526-0.606-0.808-0.888c-5.068-5.068-13.316-5.068-18.385,0s-5.068,13.316,0,18.385l18.485,18.485 c0.195,0.195,0.451,0.293,0.707,0.293s0.512-0.098,0.707-0.293l18.485-18.485C48.261,20.124,48.261,11.876,43.192,6.808z"></path></g></svg>
</div>
<h3>Publicité sur Instagram</h3>
<p className="lead">
DÉVELOPPEZ VOTRE ENTREPRISE SUR INSTAGRAM
</ | {
const { status } = this.state;
const { form } = this.state;
return (
<>
<DemoNavbar />
<main ref="main" style={{userSelect: 'none'}}>
<div className="position-relative">
<section className="section section-lg section-shaped pb-150 " style={{backgroundColor:"#04638f"}}>
<div className="shape shape-style-1 shape-default ">
</div>
<Row>
<Col className="mt-9 mt-sm-9" sm="3" xs="6">
<img
alt="..."
className="img-fluid floating"
src={require("assets/img/like.png")}
sm="3" xs="6" | identifier_body |
|
Sponsoring.js | 44,0.08301-0.00781,0.12402 l-1.99268,15.94141C42.96436,42.78711,40.73535,45,38,45z"></path> <path fill="#5A7A84" d="M12,45H4c-0.55225,0-1-0.44727-1-1V22c0-0.55273,0.44775-1,1-1h8c0.55225,0,1,0.44727,1,1v22 C13,44.55273,12.55225,45,12,45z"></path></g></svg>
</div>
<h3>Publicité sur Facebook</h3>
<p className=" lead">
Faire de la pub sur Facebook efficace et rentable quel que soit votre budget
</p>
<p>
Vous êtes une entreprise et vous souhaitez avoir plus de visibilité et augmenter le trafic sur votre site web, ou bénéficier de plus de clients... peut opter pour la publicité Facebook.
C’est un excellent moyen pour toucher vos cibles au plus près là où elles sont.
</p>
<Link
className="font-weight-bold text-warning mt-5"
to="/offres"
>
Découvrez nos promos
</Link>
</div>
</Col>
</Row>
</Container>
</section>
<section className="section ">
<Container>
<Row className="row-grid align-items-center">
<Col md="6">
<div className="pl-md-5">
<div className="icon icon-lg icon-shape shadow rounded-circle mb-5">
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" viewBox="0 0 48 48"><title>heart-2</title><g><path fill="#E86C60" d="M43.192,6.808c-5.068-5.068-13.316-5.068-18.385,0C24.526,7.089,24.257,7.385,24,7.695 c-0.257-0.311-0.526-0.606-0.808-0.888c-5.068-5.068-13.316-5.068-18.385,0s-5.068,13.316,0,18.385l18.485,18.485 c0.195,0.195,0.451,0.293,0.707,0.293s0.512-0.098,0.707-0.293l18.485-18.485C48.261,20.124,48.261,11.876,43.192,6.808z"></path></g></svg>
</div>
<h3>Publicité sur Instagram</h3>
<p className="lead">
DÉVELOPPEZ VOTRE ENTREPRISE SUR INSTAGRAM
</p>
<p>
On vous propose des publicités dans les Stories, des publicités photo, des publicités vidéo, des publicités au format carrousel et plein d'autres propositions vous attendent avec TekTree.
</p>
<p>
Avec le service sponsoring Instagram de TekTree atteignez les personnes qui comptent le plus pour vous.
</p>
<Link className="font-weight-bold text-warning mt-5" to="/offres">
Découvrez nos promos
</Link>
</div>
</Col>
<Col md="6">
<Card className="bg-default shadow border-0">
<CardImg alt="..." src={require("assets/img/instagram.jpg")} top/>
<blockquote className="card-blockquote">
<svg xmlns="http://www.w3.org/2000/svg" className="svg-bg" preserveAspectRatio="none" viewBox="0 0 583 95">
<polygon className="fill-default" points="0,52 583,95 0,95"/>
<polygon className="fill-default" opacity=".2" points="0,42 583,95 683,0 0,95"/>
</svg>
<h4 className="display-3 font-weight-bold text-white">Sponsoring Instagram
</h4>
</blockquote>
</Card>
</Col>
</Row>
</Container>
</section>
<section className="section section-lg " style={{backgroundColor:"#04638f"}}>
<Container className="pt-lg pb-300">
<Row className="text-center justify-content-center">
<Col lg="10">
<h2 className="display-3 text-white">Contactez-nous pour plus d'information</h2>
<p className="lead text-white">
Nous vous fournissons tous ces services et plus encore, à des prix abordables pour tous et avec plusieurs moyens de facilités de paiement.
</p>
</Col>
</Row>
</Container>
<div className="separator separator-bottom separator-skew zindex-100">
<svg
xmlns="http://www.w3.org/2000/svg"
preserveAspectRatio="none"
version="1.1"
viewBox="0 0 2560 100"
x="0"
y="0"
>
<polygon
className="fill-white"
points="2560 0 2560 100 0 100"
/>
</svg>
</div>
</section>
<section className="section section-lg pt-lg-0 section-contact-us">
<Container>
<Row className="justify-content-center mt--300">
<Col lg="8">
<Card className="bg-gradient-secondary shadow">
<CardBody className="p-lg-5">
<h4 className="mb-1">Avez-vous des questions?</h4>
<p className="mt-0">
Envoyez-nous un message et nous vous répondrons dans les plus brefs délais.
</p>
{form === "on" ?
<form name="sentMessage" onSubmit={this.submitForm} id="contactForm" action="https://formspree.io/mvovpoer" method="POST">
<div className="row">
<div className="col-md-6">
<div className="form-group">
<input
type="text"
id="name"
className="form-control"
placeholder="Nom et Prénom"
required="required"
name="name"
/>
<p className="help-block text-danger"></p>
</div>
</div>
<div className="col-md-6">
<div className="form-group">
<input
type="email"
id="email"
className="form-control"
placeholder="Email"
required="required"
name="_replyto"
/>
<p className="help-block text-danger"></p>
</div>
</div>
</div>
<div className="form-group">
<textarea
name="message"
id="message"
className="form-control"
rows="4"
placeholder="Message"
required
></textarea>
</div>
{status === "SUCCESS" ?
<img alt="..." src={require("assets/img/message.gif")}/>
: <button className="btn btn-custom btn-lg" type="submit" value="Send">
Envoyez votre Message
</button>}
{status === "ERROR" && <p>Ooops! Il ya un erreur.</p>}
</form> :<img alt="..." src={require("assets/img/message.gif")} style={{height:"80%", width:"95%"}}/>}
</CardBody>
</Card>
</Col>
</Row>
</Container>
</section>
</main>
<CardsFooter />
</>
);
}
submitForm(ev) {
ev.preventDefault();
const form = ev.target;
const data = new FormData(form);
const xhr = new XMLHttpRequest();
xhr.open(form.method, form.action); | random_line_split |
||
Sponsoring.js | c-0.55225,0-1-0.44727-1-1V22c0-0.55273,0.44775-1,1-1h8c0.55225,0,1,0.44727,1,1v22 C13,44.55273,12.55225,45,12,45z"></path></g></svg>
</div>
<h3>Publicité sur Facebook</h3>
<p className=" lead">
Faire de la pub sur Facebook efficace et rentable quel que soit votre budget
</p>
<p>
Vous êtes une entreprise et vous souhaitez avoir plus de visibilité, augmenter le trafic sur votre site web ou bénéficier de plus de clients ? Vous pouvez opter pour la publicité Facebook.
C’est un excellent moyen pour toucher vos cibles au plus près là où elles sont.
</p>
<Link
className="font-weight-bold text-warning mt-5"
to="/offres"
>
Découvrez nos promos
</Link>
</div>
</Col>
</Row>
</Container>
</section>
<section className="section ">
<Container>
<Row className="row-grid align-items-center">
<Col md="6">
<div className="pl-md-5">
<div className="icon icon-lg icon-shape shadow rounded-circle mb-5">
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" viewBox="0 0 48 48"><title>heart-2</title><g><path fill="#E86C60" d="M43.192,6.808c-5.068-5.068-13.316-5.068-18.385,0C24.526,7.089,24.257,7.385,24,7.695 c-0.257-0.311-0.526-0.606-0.808-0.888c-5.068-5.068-13.316-5.068-18.385,0s-5.068,13.316,0,18.385l18.485,18.485 c0.195,0.195,0.451,0.293,0.707,0.293s0.512-0.098,0.707-0.293l18.485-18.485C48.261,20.124,48.261,11.876,43.192,6.808z"></path></g></svg>
</div>
<h3>Publicité sur Instagram</h3>
<p className="lead">
DÉVELOPPEZ VOTRE ENTREPRISE SUR INSTAGRAM
</p>
<p>
On vous propose des publicités dans les Stories, des publicités photo, des publicités vidéo, des publicités au format carrousel et plein d'autres propositions vous attendent avec TekTree.
</p>
<p>
Avec le service sponsoring Instagram de TekTree atteignez les personnes qui comptent le plus pour vous.
</p>
<Link className="font-weight-bold text-warning mt-5" to="/offres">
Découvrez nos promos
</Link>
</div>
</Col>
<Col md="6">
<Card className="bg-default shadow border-0">
<CardImg alt="..." src={require("assets/img/instagram.jpg")} top/>
<blockquote className="card-blockquote">
<svg xmlns="http://www.w3.org/2000/svg" className="svg-bg" preserveAspectRatio="none" viewBox="0 0 583 95">
<polygon className="fill-default" points="0,52 583,95 0,95"/>
<polygon className="fill-default" opacity=".2" points="0,42 583,95 683,0 0,95"/>
</svg>
<h4 className="display-3 font-weight-bold text-white">Sponsoring Instagram
</h4>
</blockquote>
</Card>
</Col>
</Row>
</Container>
</section>
<section className="section section-lg " style={{backgroundColor:"#04638f"}}>
<Container className="pt-lg pb-300">
<Row className="text-center justify-content-center">
<Col lg="10">
<h2 className="display-3 text-white">Contactez-nous pour plus d'information</h2>
<p className="lead text-white">
Nous vous fournissons tous ces services et plus encore, à des prix abordables pour tous et avec plusieurs moyens de facilités de paiement.
</p>
</Col>
</Row>
</Container>
<div className="separator separator-bottom separator-skew zindex-100">
<svg
xmlns="http://www.w3.org/2000/svg"
preserveAspectRatio="none"
version="1.1"
viewBox="0 0 2560 100"
x="0"
y="0"
>
<polygon
className="fill-white"
points="2560 0 2560 100 0 100"
/>
</svg>
</div>
</section>
<section className="section section-lg pt-lg-0 section-contact-us">
<Container>
<Row className="justify-content-center mt--300">
<Col lg="8">
<Card className="bg-gradient-secondary shadow">
<CardBody className="p-lg-5">
<h4 className="mb-1">Avez-vous des questions?</h4>
<p className="mt-0">
Envoyez-nous un message et nous vous répondrons dans les plus brefs délais.
</p>
{form === "on" ?
<form name="sentMessage" onSubmit={this.submitForm} id="contactForm" action="https://formspree.io/mvovpoer" method="POST">
<div className="row">
<div className="col-md-6">
<div className="form-group">
<input
type="text"
id="name"
className="form-control"
placeholder="Nom et Prénom"
required="required"
name="name"
/>
<p className="help-block text-danger"></p>
</div>
</div>
<div className="col-md-6">
<div className="form-group">
<input
type="email"
id="email"
className="form-control"
placeholder="Email"
required="required"
name="_replyto"
/>
<p className="help-block text-danger"></p>
</div>
</div>
</div>
<div className="form-group">
<textarea
name="message"
id="message"
className="form-control"
rows="4"
placeholder="Message"
required
></textarea>
</div>
{status === "SUCCESS" ?
<img alt="..." src={require("assets/img/message.gif")}/>
: <button className="btn btn-custom btn-lg" type="submit" value="Send">
Envoyez votre Message
</button>}
{status === "ERROR" && <p>Ooops! Il ya un erreur.</p>}
</form> :<img alt="..." src={require("assets/img/message.gif")} style={{height:"80%", width:"95%"}}/>}
</CardBody>
</Card>
</Col>
</Row>
</Container>
</section>
</main>
<CardsFooter />
</>
);
}
submitForm(ev) {
ev.preventDefault();
const form = ev.target;
const data = new FormData(form);
const xhr = new XMLHttpRequest();
xhr.open(form.method, form.action);
xhr.setRequestHeader("Accept", "application/json");
xhr.onreadystatechange = () => {
if (xhr.readyState !== XMLHttpRequest.DONE) return;
if (xhr.status === 200) {
form.reset();
| this.setState({ status: "SUCCESS" });
this.setState({ form: "off" });
setTimeout(() => {
this.setState({ status: "" });
this.setState({ form: "on" });
}, (5000));
} else {
this.setS | conditional_block |
|
model.go | returned when retries are exhausted.
errTooManyRetries = errors.New("Too many retries")
// Error returned from a transaction callback to trigger a rollback and
// retry. Other errors cause a rollback and abort.
errRetryTransaction = errors.New("Retry transaction")
)
//////////////////////////////////////////
// Relational type definitions
type BundleData struct {
// Raw SHA256 of the bundle contents
Hash []byte `db:"hash"` // primary key
// The bundle contents
Json string `db:"json"`
}
type BundleLink struct {
// 64-byte printable ASCII string
Id string `db:"id"` // primary key
// Part of the BundleLink specified by bundle author
BundleDesc
// Marks default bundles, returned by GetDefaultBundleList()
IsDefault bool `db:"is_default"`
// Raw SHA256 of the bundle contents
Hash []byte `db:"hash"` // foreign key => BundleData.Hash
// Link record creation time
CreatedAt time.Time `db:"created_at"`
}
type BundleDesc struct {
// Human-readable, URL-friendly unique name, up to 128 Unicode characters;
// used for newest version of default bundles (see `is_default`)
Slug EmptyNullString `db:"slug"`
}
//////////////////////////////////////////
// Helper type definitions
// Bundle that has not yet been saved into the database. Used as input type.
type NewBundle struct {
// Part of the BundleLink specified by bundle author
BundleDesc
// The bundle contents
Json string `db:"json"`
}
///////////////////////////////////////
// DB read-only methods
// TODO(nlacasse): Use prepared statements, otherwise we have an extra
// round-trip to the db, which is slow on cloud sql.
func getBundleLinkById(q sqlx.Queryer, id string) (*BundleLink, error) {
var bLink BundleLink
if err := sqlx.Get(q, &bLink, "SELECT * FROM bundle_link WHERE id=?", id); err != nil {
if err == sql.ErrNoRows {
err = ErrNotFound
}
return nil, err
}
return &bLink, nil
}
// Only default bundles can be retrieved by slug for now.
func getDefaultBundleLinkBySlug(q sqlx.Queryer, slug string) (*BundleLink, error) {
var bLink BundleLink
if err := sqlx.Get(q, &bLink, "SELECT * FROM bundle_link WHERE slug=? AND is_default", slug); err != nil {
if err == sql.ErrNoRows {
err = ErrNotFound
}
return nil, err
}
return &bLink, nil
}
func getBundleDataByHash(q sqlx.Queryer, hash []byte) (*BundleData, error) {
var bData BundleData
if err := sqlx.Get(q, &bData, "SELECT * FROM bundle_data WHERE hash=?", hash); err != nil {
if err == sql.ErrNoRows {
err = ErrNotFound
}
return nil, err
}
return &bData, nil
}
// All default bundles have non-empty slugs. Check just in case.
func getDefaultBundleList(q sqlx.Queryer) ([]*BundleLink, error) {
var bLinks []*BundleLink
if err := sqlx.Select(q, &bLinks, "SELECT * FROM bundle_link WHERE is_default AND slug IS NOT NULL"); err != nil {
return nil, err
} | // BundleLink with a particular id or slug. Id is tried first, slug if id
// doesn't exist.
// Note: This can fail if the bundle is deleted between fetching BundleLink
// and BundleData. However, it is highly unlikely, costly to mitigate (using
// a serializable transaction), and unimportant (error 500 instead of 404).
func GetBundleByLinkIdOrSlug(idOrSlug string) (*BundleLink, *BundleData, error) {
bLink, err := getBundleLinkById(dbRead, idOrSlug)
if err == ErrNotFound {
bLink, err = getDefaultBundleLinkBySlug(dbRead, idOrSlug)
}
if err != nil {
return nil, nil, err
}
bData, err := getBundleDataByHash(dbRead, bLink.Hash)
if err != nil {
return nil, nil, err
}
return bLink, bData, nil
}
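// Illustrative sketch (not part of the original file): how a caller might
// consume GetBundleByLinkIdOrSlug. Only the ErrNotFound contract comes from
// the code above; the HTTP status mapping and the exampleLoadBundle name are
// assumptions.
func exampleLoadBundle(idOrSlug string) (string, error) {
_, bData, err := GetBundleByLinkIdOrSlug(idOrSlug)
if err == ErrNotFound {
return "", fmt.Errorf("no bundle for %q (a handler would answer 404)", idOrSlug)
}
if err != nil {
return "", err // unexpected failure (a handler would answer 500)
}
return bData.Json, nil
}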
// GetDefaultBundleList retrieves a list of BundleLink objects describing
// default bundles. All default bundles have slugs.
func GetDefaultBundleList() ([]*BundleLink, error) {
return getDefaultBundleList(dbRead)
}
////////////////////////////////////
// DB write methods
func storeBundleData(ext sqlx.Ext, bData *BundleData) error {
_, err := sqlx.NamedExec(ext, "INSERT INTO bundle_data (hash, json) VALUES (:hash, :json)", bData)
return err
}
func storeBundleLink(ext sqlx.Ext, bLink *BundleLink) error {
_, err := sqlx.NamedExec(ext, "INSERT INTO bundle_link (id, slug, is_default, hash) VALUES (:id, :slug, :is_default, :hash)", bLink)
return err
}
func storeBundle(tx *sqlx.Tx, bundle *NewBundle, asDefault bool) (*BundleLink, *BundleData, error) {
// All default bundles must have non-empty slugs.
if asDefault && bundle.Slug == "" {
return nil, nil, fmt.Errorf("default bundle must have non-empty slug")
}
bHashRaw := hash.Raw([]byte(bundle.Json))
bHash := bHashRaw[:]
// Generate a random id for the bundle link.
id, err := randomLink(bHash)
if err != nil {
return nil, nil, fmt.Errorf("error creating link id: %v", err)
}
// Check if bundle link with this id already exists in DB.
if _, err = getBundleLinkById(tx, id); err == nil {
// Bundle was found. Return ID collision error.
return nil, nil, errIDCollision
} else if err != ErrNotFound {
return nil, nil, fmt.Errorf("error checking for bundle link: %v", err)
}
// Check if bundle data with this hash already exists in DB.
bData, err := getBundleDataByHash(tx, bHash)
if err != nil {
if err != ErrNotFound {
return nil, nil, fmt.Errorf("error checking for bundle data: %v", err)
}
// Bundle does not exist in DB. Store it.
bData = &BundleData{
Hash: bHash,
Json: bundle.Json,
}
if err = storeBundleData(tx, bData); err != nil {
return nil, nil, fmt.Errorf("error storing bundle data: %v", err)
}
}
// Store the bundle link.
bLink := &BundleLink{
Id: id,
BundleDesc: bundle.BundleDesc,
IsDefault: asDefault,
Hash: bHash,
}
if err = storeBundleLink(tx, bLink); err != nil {
return nil, nil, fmt.Errorf("error storing bundle link: %v", err)
}
return bLink, bData, nil
}
func unmarkDefaultBundles(ext sqlx.Ext) error {
_, err := ext.Exec("UPDATE bundle_link SET slug=NULL, is_default=false WHERE is_default")
if err != nil {
return fmt.Errorf("failed unmarking default bundles: %v", err)
}
return nil
}
// StoreBundleLinkAndData creates a new bundle data for a given json byte slice
// if one does not already exist. It will create a new bundle link pointing to
// that data. All DB access is done in a transaction, which will retry up to 3
// times. Both the link and the data are returned, or an error if one occurred.
// Slugs are currently not allowed for user-stored bundles.
func StoreBundleLinkAndData(json string) (bLink *BundleLink, bData *BundleData, retErr error) {
retErr = runInTransaction(dbSeq, 3, func(tx *sqlx.Tx) (err error) {
bLink, bData, err = storeBundle(tx, &NewBundle{Json: string(json)}, false)
if err == errIDCollision {
return errRetryTransaction
}
return err
})
return
}
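// Minimal usage sketch under the API above; exampleSaveBundle is a
// hypothetical caller that has already validated its input JSON.
func exampleSaveBundle(json string) (string, error) {
bLink, _, err := StoreBundleLinkAndData(json)
if err != nil {
return "", err
}
return bLink.Id, nil // 64-char printable id to hand back to the client
}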
// ReplaceDefaultBundles removes slugs and default flags from all existing
// default bundles and inserts all bundles in newDefBundles as default bundles.
// Each bundle in newDefBundles must have a unique non-empty slug.
func ReplaceDefaultBundles(newDefBundles []*NewBundle) (retErr error) {
retErr = runInTransaction(dbSeq, 5, func(tx *sqlx.Tx) error {
if err := unmarkDefaultBundles(tx); err != nil {
return err
}
for _, bundle := range newDefBundles {
if _, _, err := storeBundle(tx, bundle, true); err != nil {
if err == errIDCollision {
return errRetryTransaction
}
return err
}
}
return nil
})
return
}
//////////////////////////////////////////
// Transaction support
// Runs function txf inside a SQL transaction. txf should only use the database
// handle passed to it, which shares the prepared transaction cache with the
// original handle | return bLinks, nil
}
// GetBundleByLinkIdOrSlug retrieves a BundleData object linked to by a | random_line_split |
model.go | returned when retries are exhausted.
errTooManyRetries = errors.New("Too many retries")
// Error returned from a transaction callback to trigger a rollback and
// retry. Other errors cause a rollback and abort.
errRetryTransaction = errors.New("Retry transaction")
)
//////////////////////////////////////////
// Relational type definitions
type BundleData struct {
// Raw SHA256 of the bundle contents
Hash []byte `db:"hash"` // primary key
// The bundle contents
Json string `db:"json"`
}
type BundleLink struct {
// 64-byte printable ASCII string
Id string `db:"id"` // primary key
// Part of the BundleLink specified by bundle author
BundleDesc
// Marks default bundles, returned by GetDefaultBundleList()
IsDefault bool `db:"is_default"`
// Raw SHA256 of the bundle contents
Hash []byte `db:"hash"` // foreign key => BundleData.Hash
// Link record creation time
CreatedAt time.Time `db:"created_at"`
}
type BundleDesc struct {
// Human-readable, URL-friendly unique name, up to 128 Unicode characters;
// used for newest version of default bundles (see `is_default`)
Slug EmptyNullString `db:"slug"`
}
//////////////////////////////////////////
// Helper type definitions
// Bundle that has not yet been saved into the database. Used as input type.
type NewBundle struct {
// Part of the BundleLink specified by bundle author
BundleDesc
// The bundle contents
Json string `db:"json"`
}
///////////////////////////////////////
// DB read-only methods
// TODO(nlacasse): Use prepared statements, otherwise we have an extra
// round-trip to the db, which is slow on cloud sql.
func getBundleLinkById(q sqlx.Queryer, id string) (*BundleLink, error) {
var bLink BundleLink
if err := sqlx.Get(q, &bLink, "SELECT * FROM bundle_link WHERE id=?", id); err != nil {
if err == sql.ErrNoRows {
err = ErrNotFound
}
return nil, err
}
return &bLink, nil
}
// Only default bundles can be retrieved by slug for now.
func getDefaultBundleLinkBySlug(q sqlx.Queryer, slug string) (*BundleLink, error) {
var bLink BundleLink
if err := sqlx.Get(q, &bLink, "SELECT * FROM bundle_link WHERE slug=? AND is_default", slug); err != nil {
if err == sql.ErrNoRows {
err = ErrNotFound
}
return nil, err
}
return &bLink, nil
}
func getBundleDataByHash(q sqlx.Queryer, hash []byte) (*BundleData, error) {
var bData BundleData
if err := sqlx.Get(q, &bData, "SELECT * FROM bundle_data WHERE hash=?", hash); err != nil {
if err == sql.ErrNoRows {
err = ErrNotFound
}
return nil, err
}
return &bData, nil
}
// All default bundles have non-empty slugs. Check just in case.
func getDefaultBundleList(q sqlx.Queryer) ([]*BundleLink, error) {
var bLinks []*BundleLink
if err := sqlx.Select(q, &bLinks, "SELECT * FROM bundle_link WHERE is_default AND slug IS NOT NULL"); err != nil {
return nil, err
}
return bLinks, nil
}
// GetBundleByLinkIdOrSlug retrieves a BundleData object linked to by a
// BundleLink with a particular id or slug. Id is tried first, slug if id
// doesn't exist.
// Note: This can fail if the bundle is deleted between fetching BundleLink
// and BundleData. However, it is highly unlikely, costly to mitigate (using
// a serializable transaction), and unimportant (error 500 instead of 404).
func GetBundleByLinkIdOrSlug(idOrSlug string) (*BundleLink, *BundleData, error) {
bLink, err := getBundleLinkById(dbRead, idOrSlug)
if err == ErrNotFound |
if err != nil {
return nil, nil, err
}
bData, err := getBundleDataByHash(dbRead, bLink.Hash)
if err != nil {
return nil, nil, err
}
return bLink, bData, nil
}
// GetDefaultBundleList retrieves a list of BundleLink objects describing
// default bundles. All default bundles have slugs.
func GetDefaultBundleList() ([]*BundleLink, error) {
return getDefaultBundleList(dbRead)
}
////////////////////////////////////
// DB write methods
func storeBundleData(ext sqlx.Ext, bData *BundleData) error {
_, err := sqlx.NamedExec(ext, "INSERT INTO bundle_data (hash, json) VALUES (:hash, :json)", bData)
return err
}
func storeBundleLink(ext sqlx.Ext, bLink *BundleLink) error {
_, err := sqlx.NamedExec(ext, "INSERT INTO bundle_link (id, slug, is_default, hash) VALUES (:id, :slug, :is_default, :hash)", bLink)
return err
}
func storeBundle(tx *sqlx.Tx, bundle *NewBundle, asDefault bool) (*BundleLink, *BundleData, error) {
// All default bundles must have non-empty slugs.
if asDefault && bundle.Slug == "" {
return nil, nil, fmt.Errorf("default bundle must have non-empty slug")
}
bHashRaw := hash.Raw([]byte(bundle.Json))
bHash := bHashRaw[:]
// Generate a random id for the bundle link.
id, err := randomLink(bHash)
if err != nil {
return nil, nil, fmt.Errorf("error creating link id: %v", err)
}
// Check if bundle link with this id already exists in DB.
if _, err = getBundleLinkById(tx, id); err == nil {
// Bundle was found. Return ID collision error.
return nil, nil, errIDCollision
} else if err != ErrNotFound {
return nil, nil, fmt.Errorf("error checking for bundle link: %v", err)
}
// Check if bundle data with this hash already exists in DB.
bData, err := getBundleDataByHash(tx, bHash)
if err != nil {
if err != ErrNotFound {
return nil, nil, fmt.Errorf("error checking for bundle data: %v", err)
}
// Bundle does not exist in DB. Store it.
bData = &BundleData{
Hash: bHash,
Json: bundle.Json,
}
if err = storeBundleData(tx, bData); err != nil {
return nil, nil, fmt.Errorf("error storing bundle data: %v", err)
}
}
// Store the bundle link.
bLink := &BundleLink{
Id: id,
BundleDesc: bundle.BundleDesc,
IsDefault: asDefault,
Hash: bHash,
}
if err = storeBundleLink(tx, bLink); err != nil {
return nil, nil, fmt.Errorf("error storing bundle link: %v", err)
}
return bLink, bData, nil
}
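// Note on the dedup scheme above: bundle_data is content-addressed by the
// SHA256 of the JSON, so saving the same bundle twice yields one bundle_data
// row and two bundle_link rows with distinct random ids. Storage for the body
// is shared while each save still gets its own shareable link.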
func unmarkDefaultBundles(ext sqlx.Ext) error {
_, err := ext.Exec("UPDATE bundle_link SET slug=NULL, is_default=false WHERE is_default")
if err != nil {
return fmt.Errorf("failed unmarking default bundles: %v", err)
}
return nil
}
// StoreBundleLinkAndData creates a new bundle data for a given json byte slice
// if one does not already exist. It will create a new bundle link pointing to
// that data. All DB access is done in a transaction, which will retry up to 3
// times. Both the link and the data are returned, or an error if one occurred.
// Slugs are currently not allowed for user-stored bundles.
func StoreBundleLinkAndData(json string) (bLink *BundleLink, bData *BundleData, retErr error) {
retErr = runInTransaction(dbSeq, 3, func(tx *sqlx.Tx) (err error) {
bLink, bData, err = storeBundle(tx, &NewBundle{Json: string(json)}, false)
if err == errIDCollision {
return errRetryTransaction
}
return err
})
return
}
// ReplaceDefaultBundles removes slugs and default flags from all existing
// default bundles and inserts all bundles in newDefBundles as default bundles.
// Each bundle in newDefBundles must have a unique non-empty slug.
func ReplaceDefaultBundles(newDefBundles []*NewBundle) (retErr error) {
retErr = runInTransaction(dbSeq, 5, func(tx *sqlx.Tx) error {
if err := unmarkDefaultBundles(tx); err != nil {
return err
}
for _, bundle := range newDefBundles {
if _, _, err := storeBundle(tx, bundle, true); err != nil {
if err == errIDCollision {
return errRetryTransaction
}
return err
}
}
return nil
})
return
}
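// The body of runInTransaction sits outside this excerpt. A minimal sketch of
// what the call sites imply (begin, run the callback, commit on success, roll
// back otherwise, retrying only on errRetryTransaction up to maxRetries and
// giving up with errTooManyRetries) could look like this; the real
// implementation may differ in details such as isolation level.
func exampleRunInTransaction(db *sqlx.DB, maxRetries int, txf func(tx *sqlx.Tx) error) error {
for i := 0; i < maxRetries; i++ {
tx, err := db.Beginx()
if err != nil {
return err
}
if err = txf(tx); err == nil {
return tx.Commit()
}
tx.Rollback() // roll back on any callback error
if err != errRetryTransaction {
return err // non-retryable: abort
}
}
return errTooManyRetries
}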
//////////////////////////////////////////
// Transaction support
// Runs function txf inside a SQL transaction. txf should only use the database
// handle passed to it, which shares the prepared transaction cache with the
| {
bLink, err = getDefaultBundleLinkBySlug(dbRead, idOrSlug)
} | conditional_block |
model.go | returned when retries are exhausted.
errTooManyRetries = errors.New("Too many retries")
// Error returned from a transaction callback to trigger a rollback and
// retry. Other errors cause a rollback and abort.
errRetryTransaction = errors.New("Retry transaction")
)
//////////////////////////////////////////
// Relational type definitions
type BundleData struct {
// Raw SHA256 of the bundle contents
Hash []byte `db:"hash"` // primary key
// The bundle contents
Json string `db:"json"`
}
type BundleLink struct {
// 64-byte printable ASCII string
Id string `db:"id"` // primary key
// Part of the BundleLink specified by bundle author
BundleDesc
// Marks default bundles, returned by GetDefaultBundleList()
IsDefault bool `db:"is_default"`
// Raw SHA256 of the bundle contents
Hash []byte `db:"hash"` // foreign key => BundleData.Hash
// Link record creation time
CreatedAt time.Time `db:"created_at"`
}
type BundleDesc struct {
// Human-readable, URL-friendly unique name, up to 128 Unicode characters;
// used for newest version of default bundles (see `is_default`)
Slug EmptyNullString `db:"slug"`
}
//////////////////////////////////////////
// Helper type definitions
// Bundle that has not yet been saved into the database. Used as input type.
type NewBundle struct {
// Part of the BundleLink specified by bundle author
BundleDesc
// The bundle contents
Json string `db:"json"`
}
///////////////////////////////////////
// DB read-only methods
// TODO(nlacasse): Use prepared statements, otherwise we have an extra
// round-trip to the db, which is slow on cloud sql.
func getBundleLinkById(q sqlx.Queryer, id string) (*BundleLink, error) {
var bLink BundleLink
if err := sqlx.Get(q, &bLink, "SELECT * FROM bundle_link WHERE id=?", id); err != nil {
if err == sql.ErrNoRows {
err = ErrNotFound
}
return nil, err
}
return &bLink, nil
}
// Only default bundles can be retrieved by slug for now.
func getDefaultBundleLinkBySlug(q sqlx.Queryer, slug string) (*BundleLink, error) {
var bLink BundleLink
if err := sqlx.Get(q, &bLink, "SELECT * FROM bundle_link WHERE slug=? AND is_default", slug); err != nil {
if err == sql.ErrNoRows {
err = ErrNotFound
}
return nil, err
}
return &bLink, nil
}
func getBundleDataByHash(q sqlx.Queryer, hash []byte) (*BundleData, error) {
var bData BundleData
if err := sqlx.Get(q, &bData, "SELECT * FROM bundle_data WHERE hash=?", hash); err != nil {
if err == sql.ErrNoRows {
err = ErrNotFound
}
return nil, err
}
return &bData, nil
}
// All default bundles have non-empty slugs. Check just in case.
func getDefaultBundleList(q sqlx.Queryer) ([]*BundleLink, error) {
var bLinks []*BundleLink
if err := sqlx.Select(q, &bLinks, "SELECT * FROM bundle_link WHERE is_default AND slug IS NOT NULL"); err != nil {
return nil, err
}
return bLinks, nil
}
// GetBundleByLinkIdOrSlug retrieves a BundleData object linked to by a
// BundleLink with a particular id or slug. Id is tried first, slug if id
// doesn't exist.
// Note: This can fail if the bundle is deleted between fetching BundleLink
// and BundleData. However, it is highly unlikely, costly to mitigate (using
// a serializable transaction), and unimportant (error 500 instead of 404).
func GetBundleByLinkIdOrSlug(idOrSlug string) (*BundleLink, *BundleData, error) {
bLink, err := getBundleLinkById(dbRead, idOrSlug)
if err == ErrNotFound {
bLink, err = getDefaultBundleLinkBySlug(dbRead, idOrSlug)
}
if err != nil {
return nil, nil, err
}
bData, err := getBundleDataByHash(dbRead, bLink.Hash)
if err != nil {
return nil, nil, err
}
return bLink, bData, nil
}
// GetDefaultBundleList retrieves a list of BundleLink objects describing
// default bundles. All default bundles have slugs.
func GetDefaultBundleList() ([]*BundleLink, error) {
return getDefaultBundleList(dbRead)
}
////////////////////////////////////
// DB write methods
func storeBundleData(ext sqlx.Ext, bData *BundleData) error {
_, err := sqlx.NamedExec(ext, "INSERT INTO bundle_data (hash, json) VALUES (:hash, :json)", bData)
return err
}
func storeBundleLink(ext sqlx.Ext, bLink *BundleLink) error {
_, err := sqlx.NamedExec(ext, "INSERT INTO bundle_link (id, slug, is_default, hash) VALUES (:id, :slug, :is_default, :hash)", bLink)
return err
}
func | (tx *sqlx.Tx, bundle *NewBundle, asDefault bool) (*BundleLink, *BundleData, error) {
// All default bundles must have non-empty slugs.
if asDefault && bundle.Slug == "" {
return nil, nil, fmt.Errorf("default bundle must have non-empty slug")
}
bHashRaw := hash.Raw([]byte(bundle.Json))
bHash := bHashRaw[:]
// Generate a random id for the bundle link.
id, err := randomLink(bHash)
if err != nil {
return nil, nil, fmt.Errorf("error creating link id: %v", err)
}
// Check if bundle link with this id already exists in DB.
if _, err = getBundleLinkById(tx, id); err == nil {
// Bundle was found. Return ID collision error.
return nil, nil, errIDCollision
} else if err != ErrNotFound {
return nil, nil, fmt.Errorf("error checking for bundle link: %v", err)
}
// Check if bundle data with this hash already exists in DB.
bData, err := getBundleDataByHash(tx, bHash)
if err != nil {
if err != ErrNotFound {
return nil, nil, fmt.Errorf("error checking for bundle data: %v", err)
}
// Bundle does not exist in DB. Store it.
bData = &BundleData{
Hash: bHash,
Json: bundle.Json,
}
if err = storeBundleData(tx, bData); err != nil {
return nil, nil, fmt.Errorf("error storing bundle data: %v", err)
}
}
// Store the bundle link.
bLink := &BundleLink{
Id: id,
BundleDesc: bundle.BundleDesc,
IsDefault: asDefault,
Hash: bHash,
}
if err = storeBundleLink(tx, bLink); err != nil {
return nil, nil, fmt.Errorf("error storing bundle link: %v", err)
}
return bLink, bData, nil
}
func unmarkDefaultBundles(ext sqlx.Ext) error {
_, err := ext.Exec("UPDATE bundle_link SET slug=NULL, is_default=false WHERE is_default")
if err != nil {
return fmt.Errorf("failed unmarking default bundles: %v", err)
}
return nil
}
// StoreBundleLinkAndData creates a new bundle data for a given json byte slice
// if one does not already exist. It will create a new bundle link pointing to
// that data. All DB access is done in a transaction, which will retry up to 3
// times. Both the link and the data are returned, or an error if one occurred.
// Slugs are currently not allowed for user-stored bundles.
func StoreBundleLinkAndData(json string) (bLink *BundleLink, bData *BundleData, retErr error) {
retErr = runInTransaction(dbSeq, 3, func(tx *sqlx.Tx) (err error) {
bLink, bData, err = storeBundle(tx, &NewBundle{Json: string(json)}, false)
if err == errIDCollision {
return errRetryTransaction
}
return err
})
return
}
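// randomLink is called by storeBundle above but defined outside this excerpt.
// A sketch matching its observable contract (a 64-byte printable ASCII id
// seeded from the content hash) could be the following, assuming crypto/rand,
// crypto/sha256 and encoding/hex are imported; the real derivation may differ.
func exampleRandomLink(bHash []byte) (string, error) {
salt := make([]byte, 32)
if _, err := rand.Read(salt); err != nil {
return "", err
}
sum := sha256.Sum256(append(salt, bHash...))
return hex.EncodeToString(sum[:]), nil // 64 hex chars, printable ASCII
}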
// ReplaceDefaultBundles removes slugs and default flags from all existing
// default bundles and inserts all bundles in newDefBundles as default bundles.
// Each bundle in newDefBundles must have a unique non-empty slug.
func ReplaceDefaultBundles(newDefBundles []*NewBundle) (retErr error) {
retErr = runInTransaction(dbSeq, 5, func(tx *sqlx.Tx) error {
if err := unmarkDefaultBundles(tx); err != nil {
return err
}
for _, bundle := range newDefBundles {
if _, _, err := storeBundle(tx, bundle, true); err != nil {
if err == errIDCollision {
return errRetryTransaction
}
return err
}
}
return nil
})
return
}
//////////////////////////////////////////
// Transaction support
// Runs function txf inside a SQL transaction. txf should only use the database
// handle passed to it, which shares the prepared transaction cache with the
// | storeBundle | identifier_name |
model.go | returned when retries are exhausted.
errTooManyRetries = errors.New("Too many retries")
// Error returned from a transaction callback to trigger a rollback and
// retry. Other errors cause a rollback and abort.
errRetryTransaction = errors.New("Retry transaction")
)
//////////////////////////////////////////
// Relational type definitions
type BundleData struct {
// Raw SHA256 of the bundle contents
Hash []byte `db:"hash"` // primary key
// The bundle contents
Json string `db:"json"`
}
type BundleLink struct {
// 64-byte printable ASCII string
Id string `db:"id"` // primary key
// Part of the BundleLink specified by bundle author
BundleDesc
// Marks default bundles, returned by GetDefaultBundleList()
IsDefault bool `db:"is_default"`
// Raw SHA256 of the bundle contents
Hash []byte `db:"hash"` // foreign key => BundleData.Hash
// Link record creation time
CreatedAt time.Time `db:"created_at"`
}
type BundleDesc struct {
// Human-readable, URL-friendly unique name, up to 128 Unicode characters;
// used for newest version of default bundles (see `is_default`)
Slug EmptyNullString `db:"slug"`
}
//////////////////////////////////////////
// Helper type definitions
// Bundle that has not yet been saved into the database. Used as input type.
type NewBundle struct {
// Part of the BundleLink specified by bundle author
BundleDesc
// The bundle contents
Json string `db:"json"`
}
///////////////////////////////////////
// DB read-only methods
// TODO(nlacasse): Use prepared statements, otherwise we have an extra
// round-trip to the db, which is slow on cloud sql.
func getBundleLinkById(q sqlx.Queryer, id string) (*BundleLink, error) {
var bLink BundleLink
if err := sqlx.Get(q, &bLink, "SELECT * FROM bundle_link WHERE id=?", id); err != nil {
if err == sql.ErrNoRows {
err = ErrNotFound
}
return nil, err
}
return &bLink, nil
}
// Only default bundles can be retrieved by slug for now.
func getDefaultBundleLinkBySlug(q sqlx.Queryer, slug string) (*BundleLink, error) {
var bLink BundleLink
if err := sqlx.Get(q, &bLink, "SELECT * FROM bundle_link WHERE slug=? AND is_default", slug); err != nil {
if err == sql.ErrNoRows {
err = ErrNotFound
}
return nil, err
}
return &bLink, nil
}
func getBundleDataByHash(q sqlx.Queryer, hash []byte) (*BundleData, error) |
// All default bundles have non-empty slugs. Check just in case.
func getDefaultBundleList(q sqlx.Queryer) ([]*BundleLink, error) {
var bLinks []*BundleLink
if err := sqlx.Select(q, &bLinks, "SELECT * FROM bundle_link WHERE is_default AND slug IS NOT NULL"); err != nil {
return nil, err
}
return bLinks, nil
}
// GetBundleByLinkIdOrSlug retrieves a BundleData object linked to by a
// BundleLink with a particular id or slug. Id is tried first, slug if id
// doesn't exist.
// Note: This can fail if the bundle is deleted between fetching BundleLink
// and BundleData. However, it is highly unlikely, costly to mitigate (using
// a serializable transaction), and unimportant (error 500 instead of 404).
func GetBundleByLinkIdOrSlug(idOrSlug string) (*BundleLink, *BundleData, error) {
bLink, err := getBundleLinkById(dbRead, idOrSlug)
if err == ErrNotFound {
bLink, err = getDefaultBundleLinkBySlug(dbRead, idOrSlug)
}
if err != nil {
return nil, nil, err
}
bData, err := getBundleDataByHash(dbRead, bLink.Hash)
if err != nil {
return nil, nil, err
}
return bLink, bData, nil
}
// GetDefaultBundleList retrieves a list of BundleLink objects describing
// default bundles. All default bundles have slugs.
func GetDefaultBundleList() ([]*BundleLink, error) {
return getDefaultBundleList(dbRead)
}
////////////////////////////////////
// DB write methods
func storeBundleData(ext sqlx.Ext, bData *BundleData) error {
_, err := sqlx.NamedExec(ext, "INSERT INTO bundle_data (hash, json) VALUES (:hash, :json)", bData)
return err
}
func storeBundleLink(ext sqlx.Ext, bLink *BundleLink) error {
_, err := sqlx.NamedExec(ext, "INSERT INTO bundle_link (id, slug, is_default, hash) VALUES (:id, :slug, :is_default, :hash)", bLink)
return err
}
func storeBundle(tx *sqlx.Tx, bundle *NewBundle, asDefault bool) (*BundleLink, *BundleData, error) {
// All default bundles must have non-empty slugs.
if asDefault && bundle.Slug == "" {
return nil, nil, fmt.Errorf("default bundle must have non-empty slug")
}
bHashRaw := hash.Raw([]byte(bundle.Json))
bHash := bHashRaw[:]
// Generate a random id for the bundle link.
id, err := randomLink(bHash)
if err != nil {
return nil, nil, fmt.Errorf("error creating link id: %v", err)
}
// Check if bundle link with this id already exists in DB.
if _, err = getBundleLinkById(tx, id); err == nil {
// Bundle was found. Return ID collision error.
return nil, nil, errIDCollision
} else if err != ErrNotFound {
return nil, nil, fmt.Errorf("error checking for bundle link: %v", err)
}
// Check if bundle data with this hash already exists in DB.
bData, err := getBundleDataByHash(tx, bHash)
if err != nil {
if err != ErrNotFound {
return nil, nil, fmt.Errorf("error checking for bundle data: %v", err)
}
// Bundle does not exist in DB. Store it.
bData = &BundleData{
Hash: bHash,
Json: bundle.Json,
}
if err = storeBundleData(tx, bData); err != nil {
return nil, nil, fmt.Errorf("error storing bundle data: %v", err)
}
}
// Store the bundle link.
bLink := &BundleLink{
Id: id,
BundleDesc: bundle.BundleDesc,
IsDefault: asDefault,
Hash: bHash,
}
if err = storeBundleLink(tx, bLink); err != nil {
return nil, nil, fmt.Errorf("error storing bundle link: %v", err)
}
return bLink, bData, nil
}
func unmarkDefaultBundles(ext sqlx.Ext) error {
_, err := ext.Exec("UPDATE bundle_link SET slug=NULL, is_default=false WHERE is_default")
if err != nil {
return fmt.Errorf("failed unmarking default bundles: %v", err)
}
return nil
}
// StoreBundleLinkAndData creates a new bundle data for a given json byte slice
// if one does not already exist. It will create a new bundle link pointing to
// that data. All DB access is done in a transaction, which will retry up to 3
// times. Both the link and the data are returned, or an error if one occurred.
// Slugs are currently not allowed for user-stored bundles.
func StoreBundleLinkAndData(json string) (bLink *BundleLink, bData *BundleData, retErr error) {
retErr = runInTransaction(dbSeq, 3, func(tx *sqlx.Tx) (err error) {
bLink, bData, err = storeBundle(tx, &NewBundle{Json: json}, false)
if err == errIDCollision {
return errRetryTransaction
}
return err
})
return
}
// ReplaceDefaultBundles removes slugs and default flags from all existing
// default bundles and inserts all bundles in newDefBundles as default bundles.
// Each bundle in newDefBundles must have a unique non-empty slug.
func ReplaceDefaultBundles(newDefBundles []*NewBundle) (retErr error) {
retErr = runInTransaction(dbSeq, 5, func(tx *sqlx.Tx) error {
if err := unmarkDefaultBundles(tx); err != nil {
return err
}
for _, bundle := range newDefBundles {
if _, _, err := storeBundle(tx, bundle, true); err != nil {
if err == errIDCollision {
return errRetryTransaction
}
return err
}
}
return nil
})
return
}
//////////////////////////////////////////
// Transaction support
// Runs function txf inside a SQL transaction. txf should only use the database
// handle passed to it, which shares the prepared transaction cache with the
| {
var bData BundleData
if err := sqlx.Get(q, &bData, "SELECT * FROM bundle_data WHERE hash=?", hash); err != nil {
if err == sql.ErrNoRows {
err = ErrNotFound
}
return nil, err
}
return &bData, nil
} | identifier_body |
selenium_test.go | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package tests
import (
"errors"
"fmt"
"net"
"os"
"testing"
"time"
"github.com/tebeka/selenium"
gclient "github.com/skydive-project/skydive/cmd/client"
"github.com/skydive-project/skydive/common"
shttp "github.com/skydive-project/skydive/http"
"github.com/skydive-project/skydive/tests/helper"
)
func TestSelenium(t *testing.T) {
gopath := os.Getenv("GOPATH")
topology := gopath + "/src/github.com/skydive-project/skydive/scripts/simple.sh"
setupCmds := []helper.Cmd{
{fmt.Sprintf("%s start 124.65.54.42/24 124.65.54.43/24", topology), true},
{"docker pull elgalu/selenium", true},
{"docker run -d --name=grid -p 4444:24444 -p 5900:25900 -e --shm-size=1g -p 6080:26080 -e SCREEN_WIDTH=1600 -e SCREEN_HEIGHT=1400 -e NOVNC=true elgalu/selenium", true},
{"docker exec grid wait_all_done 30s", true},
}
tearDownCmds := []helper.Cmd{
{fmt.Sprintf("%s stop", topology), true},
{"docker stop grid", true},
{"docker rm -f grid", true},
}
helper.ExecCmds(t, setupCmds...)
defer helper.ExecCmds(t, tearDownCmds...)
caps := selenium.Capabilities{"browserName": "chrome"}
webdriver, err := selenium.NewRemote(caps, "http://127.0.0.1:4444/wd/hub")
if err != nil {
t.Fatal(err)
}
defer webdriver.Quit()
ipaddr, err := getIPv4Addr()
if err != nil {
t.Fatalf("Not able to find Analayzer addr: %v", err)
}
if err := webdriver.Get("http://" + ipaddr + ":8082"); err != nil {
t.Fatal(err)
}
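// give the web UI time to finish loading before driving it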
time.Sleep(10 * time.Second)
authOptions := &shttp.AuthenticationOpts{}
gh := gclient.NewGremlinQueryHelper(authOptions)
findElement := func(selection, xpath string) (el selenium.WebElement, err error) {
common.Retry(func() error {
el, err = webdriver.FindElement(selection, xpath)
if err != nil || el == nil {
return fmt.Errorf("Failed to find element for %s (error: %+v)", xpath, err)
}
return nil
}, 10, time.Second)
return
}
zoomOut := func() error {
for i := 0; i != 5; i++ {
zo, err := findElement(selenium.ByID, "zoom-out")
if err != nil {
return err
}
if err = zo.Click(); err != nil {
return err
}
}
return nil
}
expandGroup := func(gremlin string) error {
node, err := gh.GetNode(gremlin)
if err != nil {
return err
}
if err = webdriver.KeyDown(selenium.AltKey); err != nil {
return err
}
err = common.Retry(func() error {
el, err := findElement(selenium.ByXPATH, ".//*[@id='node-"+string(node.ID)+"']")
if err != nil {
return err
}
if err = el.Click(); err != nil {
zoomOut()
return err
}
if collapsed, err := el.GetAttribute("collapsed"); err != nil || collapsed != "false" {
return errors.New("group still collapsed")
}
return nil
}, 10, time.Second)
if err != nil {
return err
}
if err = webdriver.KeyUp(selenium.AltKey); err != nil {
return err
}
return nil
}
selectNode := func(gremlin string) error {
node, err := gh.GetNode(gremlin)
if err != nil {
return err
}
el, err := findElement(selenium.ByXPATH, ".//*[@id='node-"+string(node.ID)+"']")
if err != nil {
return err
}
return common.Retry(func() error {
if err := el.Click(); err != nil {
zoomOut()
return fmt.Errorf("Failed to click on source node: %s", err.Error())
}
return nil
}, 10, time.Second)
}
startCapture := func() error {
captureTab, err := webdriver.FindElement(selenium.ByXPATH, ".//*[@id='Captures']")
if err != nil || captureTab == nil {
return fmt.Errorf("Not found capture tab: %v", err)
}
if err := captureTab.Click(); err != nil {
return fmt.Errorf("%v", err)
}
createBtn, err := webdriver.FindElement(selenium.ByXPATH, ".//*[@id='create-capture']")
if err != nil || createBtn == nil {
return fmt.Errorf("Not found create button : %v", err)
}
if err := createBtn.Click(); err != nil {
return fmt.Errorf("%v", err)
}
time.Sleep(2 * time.Second)
gremlinRdoBtn, err := webdriver.FindElement(selenium.ByXPATH, ".//*[@id='by-gremlin']")
if err != nil || gremlinRdoBtn == nil {
return fmt.Errorf("Not found gremlin expression radio button: %v", err)
}
if err := gremlinRdoBtn.Click(); err != nil {
return err
}
queryTxtBox, err := webdriver.FindElement(selenium.ByXPATH, ".//*[@id='capture-query']")
if err != nil || queryTxtBox == nil {
return fmt.Errorf("Not found Query text box: %v", err)
}
if err := queryTxtBox.Clear(); err != nil {
return err
}
if err := queryTxtBox.SendKeys("G.V().Has('Name', 'br-int', 'Type', 'ovsbridge')"); err != nil {
return err
}
startBtn, err := webdriver.FindElement(selenium.ByXPATH, ".//*[@id='start-capture']")
if err != nil || startBtn == nil {
return fmt.Errorf("Not found start button: %v", err)
}
if err := startBtn.Click(); err != nil {
return err
}
time.Sleep(3 * time.Second)
// check that the capture was created with the given query
captures, err := webdriver.FindElements(selenium.ByClassName, "query")
if err != nil {
return err
}
var foundCapture bool
for _, capture := range captures {
if txt, _ := capture.Text(); txt == "G.V().Has('Name', 'br-int', 'Type', 'ovsbridge')" {
foundCapture = true
break
}
}
if !foundCapture {
return fmt.Errorf("Capture not found in the list")
}
return nil
}
injectPacket := func() error {
generatorTab, err := findElement(selenium.ByXPATH, ".//*[@id='Generator']")
if err != nil {
return err
}
err = common.Retry(func() error {
return generatorTab.Click()
}, 10, time.Second)
if err != nil {
return fmt.Errorf("Could not click on generator tab: %s", err.Error())
}
injectSrc, err := findElement(selenium.ByXPATH, ".//*[@id='inject-src']/input")
if err != nil {
return err
}
if err := injectSrc.Click(); err != nil {
return fmt.Errorf("Failed to click on inject input: %s", err.Error())
}
if err = selectNode("G.V().Has('Name', 'eth0', 'IPV4', Contains('124.65.54.42/24'))"); err != nil {
return err
}
injectDst, err := findElement(selenium.ByXPATH, ".//*[@id='inject-dst']/input")
if err != nil {
return err
}
if err := injectDst.Click(); err != nil {
return fmt.Errorf("Failed to click on destination input: %s", err.Error())
}
if err = selectNode("G.V().Has('Name', 'eth0', 'IPV4', Contains('124.65.54.43/24'))"); err != nil {
return err
}
inject | *
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an | random_line_split |
|
selenium_test.go | || captureTab == nil {
return fmt.Errorf("Not found capture tab: %v", err)
}
if err := captureTab.Click(); err != nil {
return fmt.Errorf("%v", err)
}
createBtn, err := webdriver.FindElement(selenium.ByXPATH, ".//*[@id='create-capture']")
if err != nil || createBtn == nil {
return fmt.Errorf("Not found create button : %v", err)
}
if err := createBtn.Click(); err != nil {
return fmt.Errorf("%v", err)
}
time.Sleep(2 * time.Second)
gremlinRdoBtn, err := webdriver.FindElement(selenium.ByXPATH, ".//*[@id='by-gremlin']")
if err != nil || gremlinRdoBtn == nil {
return fmt.Errorf("Not found gremlin expression radio button: %v", err)
}
if err := gremlinRdoBtn.Click(); err != nil {
return err
}
queryTxtBox, err := webdriver.FindElement(selenium.ByXPATH, ".//*[@id='capture-query']")
if err != nil || queryTxtBox == nil {
return fmt.Errorf("Not found Query text box: %v", err)
}
if err := queryTxtBox.Clear(); err != nil {
return err
}
if err := queryTxtBox.SendKeys("G.V().Has('Name', 'br-int', 'Type', 'ovsbridge')"); err != nil {
return err
}
startBtn, err := webdriver.FindElement(selenium.ByXPATH, ".//*[@id='start-capture']")
if err != nil || startBtn == nil {
return fmt.Errorf("Not found start button: %v", err)
}
if err := startBtn.Click(); err != nil {
return err
}
time.Sleep(3 * time.Second)
//check capture created with the given query
captures, err := webdriver.FindElements(selenium.ByClassName, "query")
if err != nil {
return err
}
var foundCapture bool
for _, capture := range captures {
if txt, _ := capture.Text(); txt == "G.V().Has('Name', 'br-int', 'Type', 'ovsbridge')" {
foundCapture = true
break
}
}
if !foundCapture {
return fmt.Errorf("Capture not found in the list")
}
return nil
}
injectPacket := func() error {
generatorTab, err := findElement(selenium.ByXPATH, ".//*[@id='Generator']")
if err != nil {
return err
}
err = common.Retry(func() error {
return generatorTab.Click()
}, 10, time.Second)
if err != nil {
return fmt.Errorf("Could not click on generator tab: %s", err.Error())
}
injectSrc, err := findElement(selenium.ByXPATH, ".//*[@id='inject-src']/input")
if err != nil {
return err
}
if err := injectSrc.Click(); err != nil {
return fmt.Errorf("Failed to click on inject input: %s", err.Error())
}
if err = selectNode("G.V().Has('Name', 'eth0', 'IPV4', Contains('124.65.54.42/24'))"); err != nil {
return err
}
injectDst, err := findElement(selenium.ByXPATH, ".//*[@id='inject-dst']/input")
if err != nil {
return err
}
if err := injectDst.Click(); err != nil {
return fmt.Errorf("Failed to click on destination input: %s", err.Error())
}
if err = selectNode("G.V().Has('Name', 'eth0', 'IPV4', Contains('124.65.54.43/24'))"); err != nil {
return err
}
injectBtn, err := findElement(selenium.ByXPATH, ".//*[@id='inject']")
if err != nil {
return err
}
if err := injectBtn.Click(); err != nil {
return fmt.Errorf("Failed to click on inject button: %s", err.Error())
}
var alertMsg selenium.WebElement
err = common.Retry(func() error {
alertMsg, err = findElement(selenium.ByClassName, "alert-success")
return err
}, 10, time.Second)
if err != nil {
return err
}
closeBtn, _ := alertMsg.FindElement(selenium.ByClassName, "close")
if closeBtn != nil {
closeBtn.Click()
}
return nil
}
verifyFlows := func() error {
time.Sleep(3 * time.Second)
flowsTab, err := findElement(selenium.ByXPATH, ".//*[@id='Flows']")
if err != nil {
return fmt.Errorf("Flows tab not found: %v", err)
}
if err := flowsTab.Click(); err != nil {
return err
}
flowQuery, err := findElement(selenium.ByXPATH, ".//*[@id='flow-table-query']")
if err != nil {
return err
}
if err := flowQuery.Clear(); err != nil {
return err
}
query := "G.Flows().Has('Network.A', '124.65.54.42', 'Network.B', '124.65.54.43')"
if err := flowQuery.SendKeys(query); err != nil {
return err
}
time.Sleep(2 * time.Second)
flowRow, err := findElement(selenium.ByClassName, "flow-row")
if err != nil {
return err
}
rowData, err := flowRow.FindElements(selenium.ByTagName, "td")
if err != nil {
return err
}
if len(rowData) != 7 {
return fmt.Errorf("By default 7 rows should be return")
}
txt, err := rowData[1].Text()
if err != nil {
return err
}
if txt != "124.65.54.42" {
return fmt.Errorf("Network.A should be '124.65.54.42' but got: %s", txt)
}
return nil
}
takeScreenshot := func(path string) error {
t.Logf("Taking screenshot %s", path)
content, err := webdriver.Screenshot()
if err != nil {
return err
}
f, err := os.Create(path)
if err != nil {
return err
}
if _, err = f.Write(content); err != nil {
return err
}
return f.Close()
}
// expand the topology to be sure to find nodes
expand, err := findElement(selenium.ByID, "expand-collapse")
if err != nil {
if err := takeScreenshot("postmortem.png"); err != nil {
t.Log(err)
}
t.Fatal(err)
}
expand.Click()
fit, err := findElement(selenium.ByID, "zoom-fit")
if err != nil {
if err := takeScreenshot("postmortem.png"); err != nil {
t.Log(err)
}
t.Fatal(err)
}
fit.Click()
if err = expandGroup("G.V().Has('Name', 'vm1', 'Type', 'netns')"); err != nil {
if err := takeScreenshot("postmortem.png"); err != nil {
t.Log(err)
}
t.Fatal(err)
}
time.Sleep(2 * time.Second)
fit.Click()
if err = expandGroup("G.V().Has('Name', 'vm2', 'Type', 'netns')"); err != nil {
if err := takeScreenshot("postmortem.png"); err != nil {
t.Log(err)
}
t.Fatal(err)
}
time.Sleep(2 * time.Second)
fit.Click()
if err := startCapture(); err != nil {
if err := takeScreenshot("postmortem.png"); err != nil {
t.Log(err)
}
t.Fatal(err)
}
if err := injectPacket(); err != nil {
if err := takeScreenshot("postmortem.png"); err != nil {
t.Log(err)
}
t.Fatal(err)
}
if err := verifyFlows(); err != nil {
if err := takeScreenshot("postmortem.png"); err != nil {
t.Log(err)
}
t.Fatal(err)
}
}
func getIPv4Addr() (string, error) | {
ifaces, err := net.Interfaces()
if err != nil {
return "", err
}
for _, iface := range ifaces {
// skip interfaces that are down
if iface.Flags&net.FlagUp == 0 {
continue
}
// skip the loopback interface
if iface.Flags&net.FlagLoopback != 0 {
continue
}
addrs, err := iface.Addrs()
if err != nil {
return "", err
}
for _, addr := range addrs { | identifier_body |
|
selenium_test.go | 44:24444 -p 5900:25900 -e --shm-size=1g -p 6080:26080 -e SCREEN_WIDTH=1600 -e SCREEN_HEIGHT=1400 -e NOVNC=true elgalu/selenium", true},
{"docker exec grid wait_all_done 30s", true},
}
tearDownCmds := []helper.Cmd{
{fmt.Sprintf("%s stop", topology), true},
{"docker stop grid", true},
{"docker rm -f grid", true},
}
helper.ExecCmds(t, setupCmds...)
defer helper.ExecCmds(t, tearDownCmds...)
caps := selenium.Capabilities{"browserName": "chrome"}
webdriver, err := selenium.NewRemote(caps, "http://127.0.0.1:4444/wd/hub")
if err != nil {
t.Fatal(err)
}
defer webdriver.Quit()
ipaddr, err := getIPv4Addr()
if err != nil {
t.Fatalf("Not able to find Analayzer addr: %v", err)
}
if err := webdriver.Get("http://" + ipaddr + ":8082"); err != nil {
t.Fatal(err)
}
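// give the web UI time to finish loading before driving it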
time.Sleep(10 * time.Second)
authOptions := &shttp.AuthenticationOpts{}
gh := gclient.NewGremlinQueryHelper(authOptions)
findElement := func(selection, xpath string) (el selenium.WebElement, err error) {
common.Retry(func() error {
el, err = webdriver.FindElement(selection, xpath)
if err != nil || el == nil {
return fmt.Errorf("Failed to find element for %s (error: %+v)", xpath, err)
}
return nil
}, 10, time.Second)
return
}
zoomOut := func() error {
for i := 0; i != 5; i++ {
zo, err := findElement(selenium.ByID, "zoom-out")
if err != nil {
return err
}
if err = zo.Click(); err != nil {
return err
}
}
return nil
}
expandGroup := func(gremlin string) error {
node, err := gh.GetNode(gremlin)
if err != nil {
return err
}
if err = webdriver.KeyDown(selenium.AltKey); err != nil {
return err
}
err = common.Retry(func() error {
el, err := findElement(selenium.ByXPATH, ".//*[@id='node-"+string(node.ID)+"']")
if err != nil {
return err
}
if err = el.Click(); err != nil {
zoomOut()
return err
}
if collapsed, err := el.GetAttribute("collapsed"); err != nil || collapsed != "false" {
return errors.New("group still collapsed")
}
return nil
}, 10, time.Second)
if err != nil {
return err
}
if err = webdriver.KeyUp(selenium.AltKey); err != nil {
return err
}
return nil
}
selectNode := func(gremlin string) error {
node, err := gh.GetNode(gremlin)
if err != nil {
return err
}
el, err := findElement(selenium.ByXPATH, ".//*[@id='node-"+string(node.ID)+"']")
if err != nil {
return err
}
return common.Retry(func() error {
if err := el.Click(); err != nil {
zoomOut()
return fmt.Errorf("Failed to click on source node: %s", err.Error())
}
return nil
}, 10, time.Second)
}
startCapture := func() error {
captureTab, err := webdriver.FindElement(selenium.ByXPATH, ".//*[@id='Captures']")
if err != nil || captureTab == nil {
return fmt.Errorf("Not found capture tab: %v", err)
}
if err := captureTab.Click(); err != nil {
return fmt.Errorf("%v", err)
}
createBtn, err := webdriver.FindElement(selenium.ByXPATH, ".//*[@id='create-capture']")
if err != nil || createBtn == nil {
return fmt.Errorf("Not found create button : %v", err)
}
if err := createBtn.Click(); err != nil {
return fmt.Errorf("%v", err)
}
time.Sleep(2 * time.Second)
gremlinRdoBtn, err := webdriver.FindElement(selenium.ByXPATH, ".//*[@id='by-gremlin']")
if err != nil || gremlinRdoBtn == nil {
return fmt.Errorf("Not found gremlin expression radio button: %v", err)
}
if err := gremlinRdoBtn.Click(); err != nil {
return err
}
queryTxtBox, err := webdriver.FindElement(selenium.ByXPATH, ".//*[@id='capture-query']")
if err != nil || queryTxtBox == nil {
return fmt.Errorf("Not found Query text box: %v", err)
}
if err := queryTxtBox.Clear(); err != nil {
return err
}
if err := queryTxtBox.SendKeys("G.V().Has('Name', 'br-int', 'Type', 'ovsbridge')"); err != nil {
return err
}
startBtn, err := webdriver.FindElement(selenium.ByXPATH, ".//*[@id='start-capture']")
if err != nil || startBtn == nil {
return fmt.Errorf("Not found start button: %v", err)
}
if err := startBtn.Click(); err != nil {
return err
}
time.Sleep(3 * time.Second)
// check that the capture was created with the given query
captures, err := webdriver.FindElements(selenium.ByClassName, "query")
if err != nil {
return err
}
var foundCapture bool
for _, capture := range captures {
if txt, _ := capture.Text(); txt == "G.V().Has('Name', 'br-int', 'Type', 'ovsbridge')" {
foundCapture = true
break
}
}
if !foundCapture {
return fmt.Errorf("Capture not found in the list")
}
return nil
}
injectPacket := func() error {
generatorTab, err := findElement(selenium.ByXPATH, ".//*[@id='Generator']")
if err != nil {
return err
}
err = common.Retry(func() error {
return generatorTab.Click()
}, 10, time.Second)
if err != nil |
injectSrc, err := findElement(selenium.ByXPATH, ".//*[@id='inject-src']/input")
if err != nil {
return err
}
if err := injectSrc.Click(); err != nil {
return fmt.Errorf("Failed to click on inject input: %s", err.Error())
}
if err = selectNode("G.V().Has('Name', 'eth0', 'IPV4', Contains('124.65.54.42/24'))"); err != nil {
return err
}
injectDst, err := findElement(selenium.ByXPATH, ".//*[@id='inject-dst']/input")
if err != nil {
return err
}
if err := injectDst.Click(); err != nil {
return fmt.Errorf("Failed to click on destination input: %s", err.Error())
}
if err = selectNode("G.V().Has('Name', 'eth0', 'IPV4', Contains('124.65.54.43/24'))"); err != nil {
return err
}
injectBtn, err := findElement(selenium.ByXPATH, ".//*[@id='inject']")
if err != nil {
return err
}
if err := injectBtn.Click(); err != nil {
return fmt.Errorf("Failed to click on inject button: %s", err.Error())
}
var alertMsg selenium.WebElement
err = common.Retry(func() error {
alertMsg, err = findElement(selenium.ByClassName, "alert-success")
return err
}, 10, time.Second)
if err != nil {
return err
}
closeBtn, _ := alertMsg.FindElement(selenium.ByClassName, "close")
if closeBtn != nil {
closeBtn.Click()
}
return nil
}
verifyFlows := func() error {
time.Sleep(3 * time.Second)
flowsTab, err := findElement(selenium.ByXPATH, ".//*[@id='Flows']")
if err != nil {
return fmt.Errorf("Flows tab not found: %v", err)
}
if err := flowsTab.Click(); err != nil {
return err
}
flowQuery, err := findElement(selenium.ByXPATH, ".//*[@id='flow-table-query']")
if err != nil {
return err
}
if | {
return fmt.Errorf("Could not click on generator tab: %s", err.Error())
} | conditional_block |
selenium_test.go | (t *testing.T) {
gopath := os.Getenv("GOPATH")
topology := gopath + "/src/github.com/skydive-project/skydive/scripts/simple.sh"
setupCmds := []helper.Cmd{
{fmt.Sprintf("%s start 124.65.54.42/24 124.65.54.43/24", topology), true},
{"docker pull elgalu/selenium", true},
{"docker run -d --name=grid -p 4444:24444 -p 5900:25900 -e --shm-size=1g -p 6080:26080 -e SCREEN_WIDTH=1600 -e SCREEN_HEIGHT=1400 -e NOVNC=true elgalu/selenium", true},
{"docker exec grid wait_all_done 30s", true},
}
tearDownCmds := []helper.Cmd{
{fmt.Sprintf("%s stop", topology), true},
{"docker stop grid", true},
{"docker rm -f grid", true},
}
helper.ExecCmds(t, setupCmds...)
defer helper.ExecCmds(t, tearDownCmds...)
caps := selenium.Capabilities{"browserName": "chrome"}
webdriver, err := selenium.NewRemote(caps, "http://127.0.0.1:4444/wd/hub")
if err != nil {
t.Fatal(err)
}
defer webdriver.Quit()
ipaddr, err := getIPv4Addr()
if err != nil {
t.Fatalf("Not able to find Analayzer addr: %v", err)
}
if err := webdriver.Get("http://" + ipaddr + ":8082"); err != nil {
t.Fatal(err)
}
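// give the web UI time to finish loading before driving it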
time.Sleep(10 * time.Second)
authOptions := &shttp.AuthenticationOpts{}
gh := gclient.NewGremlinQueryHelper(authOptions)
findElement := func(selection, xpath string) (el selenium.WebElement, err error) {
common.Retry(func() error {
el, err = webdriver.FindElement(selection, xpath)
if err != nil || el == nil {
return fmt.Errorf("Failed to find element for %s (error: %+v)", xpath, err)
}
return nil
}, 10, time.Second)
return
}
zoomOut := func() error {
for i := 0; i != 5; i++ {
zo, err := findElement(selenium.ByID, "zoom-out")
if err != nil {
return err
}
if err = zo.Click(); err != nil {
return err
}
}
return nil
}
expandGroup := func(gremlin string) error {
node, err := gh.GetNode(gremlin)
if err != nil {
return err
}
if err = webdriver.KeyDown(selenium.AltKey); err != nil {
return err
}
err = common.Retry(func() error {
el, err := findElement(selenium.ByXPATH, ".//*[@id='node-"+string(node.ID)+"']")
if err != nil {
return err
}
if err = el.Click(); err != nil {
zoomOut()
return err
}
if collapsed, err := el.GetAttribute("collapsed"); err != nil || collapsed != "false" {
return errors.New("group still collapsed")
}
return nil
}, 10, time.Second)
if err != nil {
return err
}
if err = webdriver.KeyUp(selenium.AltKey); err != nil {
return err
}
return nil
}
selectNode := func(gremlin string) error {
node, err := gh.GetNode(gremlin)
if err != nil {
return err
}
el, err := findElement(selenium.ByXPATH, ".//*[@id='node-"+string(node.ID)+"']")
if err != nil {
return err
}
return common.Retry(func() error {
if err := el.Click(); err != nil {
zoomOut()
return fmt.Errorf("Failed to click on source node: %s", err.Error())
}
return nil
}, 10, time.Second)
}
startCapture := func() error {
captureTab, err := webdriver.FindElement(selenium.ByXPATH, ".//*[@id='Captures']")
if err != nil || captureTab == nil {
return fmt.Errorf("Not found capture tab: %v", err)
}
if err := captureTab.Click(); err != nil {
return fmt.Errorf("%v", err)
}
createBtn, err := webdriver.FindElement(selenium.ByXPATH, ".//*[@id='create-capture']")
if err != nil || createBtn == nil {
return fmt.Errorf("Not found create button : %v", err)
}
if err := createBtn.Click(); err != nil {
return fmt.Errorf("%v", err)
}
time.Sleep(2 * time.Second)
gremlinRdoBtn, err := webdriver.FindElement(selenium.ByXPATH, ".//*[@id='by-gremlin']")
if err != nil || gremlinRdoBtn == nil {
return fmt.Errorf("Not found gremlin expression radio button: %v", err)
}
if err := gremlinRdoBtn.Click(); err != nil {
return err
}
queryTxtBox, err := webdriver.FindElement(selenium.ByXPATH, ".//*[@id='capture-query']")
if err != nil || queryTxtBox == nil {
return fmt.Errorf("Not found Query text box: %v", err)
}
if err := queryTxtBox.Clear(); err != nil {
return err
}
if err := queryTxtBox.SendKeys("G.V().Has('Name', 'br-int', 'Type', 'ovsbridge')"); err != nil {
return err
}
startBtn, err := webdriver.FindElement(selenium.ByXPATH, ".//*[@id='start-capture']")
if err != nil || startBtn == nil {
return fmt.Errorf("Not found start button: %v", err)
}
if err := startBtn.Click(); err != nil {
return err
}
time.Sleep(3 * time.Second)
// check that the capture was created with the given query
captures, err := webdriver.FindElements(selenium.ByClassName, "query")
if err != nil {
return err
}
var foundCapture bool
for _, capture := range captures {
if txt, _ := capture.Text(); txt == "G.V().Has('Name', 'br-int', 'Type', 'ovsbridge')" {
foundCapture = true
break
}
}
if !foundCapture {
return fmt.Errorf("Capture not found in the list")
}
return nil
}
injectPacket := func() error {
generatorTab, err := findElement(selenium.ByXPATH, ".//*[@id='Generator']")
if err != nil {
return err
}
err = common.Retry(func() error {
return generatorTab.Click()
}, 10, time.Second)
if err != nil {
return fmt.Errorf("Could not click on generator tab: %s", err.Error())
}
injectSrc, err := findElement(selenium.ByXPATH, ".//*[@id='inject-src']/input")
if err != nil {
return err
}
if err := injectSrc.Click(); err != nil {
return fmt.Errorf("Failed to click on inject input: %s", err.Error())
}
if err = selectNode("G.V().Has('Name', 'eth0', 'IPV4', Contains('124.65.54.42/24'))"); err != nil {
return err
}
injectDst, err := findElement(selenium.ByXPATH, ".//*[@id='inject-dst']/input")
if err != nil {
return err
}
if err := injectDst.Click(); err != nil {
return fmt.Errorf("Failed to click on destination input: %s", err.Error())
}
if err = selectNode("G.V().Has('Name', 'eth0', 'IPV4', Contains('124.65.54.43/24'))"); err != nil {
return err
}
injectBtn, err := findElement(selenium.ByXPATH, ".//*[@id='inject']")
if err != nil {
return err
}
if err := injectBtn.Click(); err != nil {
return fmt.Errorf("Failed to click on inject button: %s", err.Error())
}
var alertMsg selenium.WebElement
err = common.Retry(func() error {
alertMsg, err = findElement(selenium.ByClassName, "alert-success")
return err
}, 10, time.Second)
if err != nil {
return err
}
closeBtn, _ := alertMsg.FindElement(selenium.ByClassName, "close")
if closeBtn != nil {
closeBtn.Click()
}
return nil
}
verify | TestSelenium | identifier_name |
|
lib.rs | // distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Pallet transporter used to move funds between chains.
#![cfg_attr(not(feature = "std"), no_std)]
#![forbid(unsafe_code)]
#![warn(rust_2018_idioms, missing_debug_implementations)]
use codec::{Decode, Encode};
use domain_runtime_primitives::{MultiAccountId, TryConvertBack};
use frame_support::traits::Currency;
pub use pallet::*;
use scale_info::TypeInfo;
use sp_messenger::messages::ChainId;
#[cfg(test)]
mod mock;
#[cfg(test)]
mod tests;
#[cfg(feature = "runtime-benchmarks")]
mod benchmarking;
pub mod weights;
/// Location that either sends or receives transfers between chains.
#[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo)]
pub struct Location {
/// Unique identity of chain.
pub chain_id: ChainId,
/// Unique account on chain.
pub account_id: MultiAccountId,
}
/// Transfer of funds from one chain to another.
#[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo)]
pub struct Transfer<Balance> {
/// Amount being transferred between entities.
pub amount: Balance,
/// Sender location of the transfer.
pub sender: Location,
/// Receiver location of the transfer.
pub receiver: Location,
}
/// Balance type used by the pallet.
pub(crate) type BalanceOf<T> =
<<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
type MessageIdOf<T> = <<T as Config>::Sender as sp_messenger::endpoint::Sender<
<T as frame_system::Config>::AccountId,
>>::MessageId;
#[frame_support::pallet]
mod pallet {
use crate::weights::WeightInfo;
use crate::{BalanceOf, Location, MessageIdOf, MultiAccountId, Transfer, TryConvertBack};
use codec::{Decode, Encode};
use frame_support::pallet_prelude::*;
use frame_support::traits::{Currency, ExistenceRequirement, WithdrawReasons};
use frame_support::weights::Weight;
use frame_system::pallet_prelude::*;
use sp_messenger::endpoint::{
Endpoint, EndpointHandler as EndpointHandlerT, EndpointId, EndpointRequest,
EndpointResponse, Sender,
};
use sp_messenger::messages::ChainId;
use sp_runtime::traits::Convert;
use sp_std::vec;
#[pallet::config]
pub trait Config: frame_system::Config {
/// Event type for this pallet.
type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
/// Gets the chain_id of the current execution environment.
type SelfChainId: Get<ChainId>;
/// Gets the endpoint_id of this pallet in a given execution environment.
type SelfEndpointId: Get<EndpointId>;
/// Currency used by this pallet.
type Currency: Currency<Self::AccountId>;
/// Sender used to transfer funds.
type Sender: Sender<Self::AccountId>;
/// MultiAccountId <> T::AccountId converter.
type AccountIdConverter: TryConvertBack<Self::AccountId, MultiAccountId>;
/// Weight information for extrinsics in this pallet.
type WeightInfo: WeightInfo;
}
/// Pallet transporter to move funds between chains.
#[pallet::pallet]
#[pallet::without_storage_info]
pub struct Pallet<T>(_);
/// All the outgoing transfers on this execution environment.
#[pallet::storage]
#[pallet::getter(fn outgoing_transfers)]
pub(super) type OutgoingTransfers<T: Config> = StorageDoubleMap<
_,
Identity,
ChainId,
Identity,
MessageIdOf<T>,
Transfer<BalanceOf<T>>,
OptionQuery,
>;
/// Events emitted by pallet-transporter.
#[pallet::event]
#[pallet::generate_deposit(pub (super) fn deposit_event)]
pub enum Event<T: Config> {
/// Emits when there is a new outgoing transfer.
OutgoingTransferInitiated {
/// Destination chain the transfer is bound to.
chain_id: ChainId,
/// Id of the transfer.
message_id: MessageIdOf<T>,
},
/// Emits when a given outgoing transfer was failed on dst_chain.
OutgoingTransferFailed {
/// Destination chain the transfer is bound to.
chain_id: ChainId,
/// Id of the transfer.
message_id: MessageIdOf<T>,
/// Error from dst_chain endpoint.
err: DispatchError,
},
/// Emits when a given outgoing transfer was successful.
OutgoingTransferSuccessful {
/// Destination chain the transfer is bound to.
chain_id: ChainId,
/// Id of the transfer.
message_id: MessageIdOf<T>,
},
/// Emits when a given incoming transfer was successfully processed.
IncomingTransferSuccessful {
/// Source chain the transfer is coming from.
chain_id: ChainId,
/// Id of the transfer.
message_id: MessageIdOf<T>,
},
}
/// Errors emitted by pallet-transporter.
#[pallet::error]
pub enum Error<T> {
/// Emits when the account balance is too low to make the transfer.
LowBalance,
/// Failed to decode transfer payload.
InvalidPayload,
/// Emits when a response is received for a request that is missing.
MissingTransferRequest,
/// Emits when the request doesn't match the expected one.
InvalidTransferRequest,
/// Emits when the incoming message is not bound to this chain.
UnexpectedMessage,
/// Emits when the account id type is invalid.
InvalidAccountId,
}
#[pallet::call]
impl<T: Config> Pallet<T> {
/// Initiates transfer of funds from account on src_chain to account on dst_chain.
/// Funds are burned on src_chain first and are minted on dst_chain using Messenger.
#[pallet::call_index(0)]
#[pallet::weight((T::WeightInfo::transfer(), Pays::No))]
pub fn transfer(
origin: OriginFor<T>,
dst_location: Location,
amount: BalanceOf<T>,
) -> DispatchResult {
let sender = ensure_signed(origin)?;
// burn transfer amount
T::Currency::withdraw(
&sender,
amount,
WithdrawReasons::TRANSFER,
ExistenceRequirement::AllowDeath,
)
.map_err(|_| Error::<T>::LowBalance)?;
// initiate transfer
let dst_chain_id = dst_location.chain_id;
let transfer = Transfer {
amount,
sender: Location {
chain_id: T::SelfChainId::get(),
account_id: T::AccountIdConverter::convert(sender.clone()),
},
receiver: dst_location,
};
// send message
let message_id = T::Sender::send_message(
&sender,
dst_chain_id,
EndpointRequest {
src_endpoint: Endpoint::Id(T::SelfEndpointId::get()),
// destination endpoint must be transporter with same id
dst_endpoint: Endpoint::Id(T::SelfEndpointId::get()),
payload: transfer.encode(),
},
)?;
OutgoingTransfers::<T>::insert(dst_chain_id, message_id, transfer);
Self::deposit_event(Event::<T>::OutgoingTransferInitiated {
chain_id: dst_chain_id,
message_id,
});
Ok(())
}
}
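// Minimal usage sketch (illustrative only; the runtime name, origin and the
// `Raw` account variant below are placeholders/assumptions, mock-runtime style):
//
//	let dst = Location {
//		chain_id: dst_chain_id,
//		account_id: MultiAccountId::Raw(receiver_bytes),
//	};
//	Transporter::transfer(RuntimeOrigin::signed(sender), dst, amount)?;
//
// On success an `OutgoingTransferInitiated` event is deposited; the burned
// amount is minted on the destination chain once the message is handled there.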
/// Endpoint handler implementation for pallet transporter.
#[derive(Debug)]
pub struct EndpointHandler<T>(pub PhantomData<T>);
impl<T: Config> EndpointHandlerT<MessageIdOf<T>> for EndpointHandler<T> {
fn message(
&self,
src_chain_id: ChainId,
message_id: MessageIdOf<T>,
req: EndpointRequest,
) -> EndpointResponse {
// ensure the message does not originate from this chain
ensure!(
T::SelfChainId::get() != src_chain_id,
Error::<T>::InvalidTransferRequest
);
// check the endpoint id
ensure!(
req.dst_endpoint == Endpoint::Id(T::SelfEndpointId::get()),
Error::<T>::UnexpectedMessage
);
// decode payload and process message
let req = match Transfer::decode(&mut req.payload.as_slice()) {
Ok(req) => req,
Err(_) => return Err(Error::<T>::InvalidPayload.into()),
};
// mint the funds to dst_account
let account_id = T::AccountIdConverter::try_convert_back(req.receiver.account_id)
.ok_or(Error::<T>::InvalidAccountId)?;
T::Currency::deposit_creating(&account_id, req.amount);
frame_system::Pallet::<T>::deposit_event(Into::<<T as Config>::RuntimeEvent>::into(
Event::<T>::IncomingTransferSuccessful {
chain_id: src_chain_id,
message_id,
},
));
Ok(vec![])
}
fn | (&self) -> Weight {
T::WeightInfo::message()
}
fn message_response(
&self,
dst_chain_id: ChainId,
message_id: MessageIdOf<T>,
req: EndpointRequest,
resp: EndpointResponse,
) -> DispatchResult {
// ensure request is valid
let transfer = OutgoingTransfers::<T>::take(dst_chain_id, message_id)
.ok_or(Error::<T>::MissingTransferRequest)?;
ensure!(
req.payload == transfer.encode(),
Error::<T>::InvalidTransfer | message_weight | identifier_name |
lib.rs | // distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Pallet transporter used to move funds between chains.
#![cfg_attr(not(feature = "std"), no_std)]
#![forbid(unsafe_code)]
#![warn(rust_2018_idioms, missing_debug_implementations)]
use codec::{Decode, Encode};
use domain_runtime_primitives::{MultiAccountId, TryConvertBack};
use frame_support::traits::Currency;
pub use pallet::*;
use scale_info::TypeInfo;
use sp_messenger::messages::ChainId;
#[cfg(test)]
mod mock;
#[cfg(test)]
mod tests;
#[cfg(feature = "runtime-benchmarks")]
mod benchmarking;
pub mod weights;
/// Location that either sends or receives transfers between chains.
#[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo)]
pub struct Location {
/// Unique identity of chain.
pub chain_id: ChainId,
/// Unique account on chain.
pub account_id: MultiAccountId,
}
/// Transfer of funds from one chain to another.
#[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo)]
pub struct Transfer<Balance> {
/// Amount being transferred between entities.
pub amount: Balance,
/// Sender location of the transfer.
pub sender: Location,
/// Receiver location of the transfer.
pub receiver: Location,
}
/// Balance type used by the pallet.
pub(crate) type BalanceOf<T> =
<<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
type MessageIdOf<T> = <<T as Config>::Sender as sp_messenger::endpoint::Sender<
<T as frame_system::Config>::AccountId,
>>::MessageId;
#[frame_support::pallet]
mod pallet {
use crate::weights::WeightInfo;
use crate::{BalanceOf, Location, MessageIdOf, MultiAccountId, Transfer, TryConvertBack};
use codec::{Decode, Encode};
use frame_support::pallet_prelude::*;
use frame_support::traits::{Currency, ExistenceRequirement, WithdrawReasons};
use frame_support::weights::Weight;
use frame_system::pallet_prelude::*;
use sp_messenger::endpoint::{
Endpoint, EndpointHandler as EndpointHandlerT, EndpointId, EndpointRequest,
EndpointResponse, Sender,
};
use sp_messenger::messages::ChainId;
use sp_runtime::traits::Convert;
use sp_std::vec;
#[pallet::config]
pub trait Config: frame_system::Config {
/// Event type for this pallet.
type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
/// Gets the chain_id of the current execution environment.
type SelfChainId: Get<ChainId>;
/// Gets the endpoint_id of this pallet in a given execution environment.
type SelfEndpointId: Get<EndpointId>;
/// Currency used by this pallet.
type Currency: Currency<Self::AccountId>;
/// Sender used to transfer funds.
type Sender: Sender<Self::AccountId>;
/// MultiAccountId <> T::AccountId converter.
type AccountIdConverter: TryConvertBack<Self::AccountId, MultiAccountId>;
/// Weight information for extrinsics in this pallet.
type WeightInfo: WeightInfo;
}
/// Pallet transporter to move funds between chains.
#[pallet::pallet]
#[pallet::without_storage_info]
pub struct Pallet<T>(_);
/// All the outgoing transfers on this execution environment.
#[pallet::storage]
#[pallet::getter(fn outgoing_transfers)]
pub(super) type OutgoingTransfers<T: Config> = StorageDoubleMap< | ChainId,
Identity,
MessageIdOf<T>,
Transfer<BalanceOf<T>>,
OptionQuery,
>;
/// Events emitted by pallet-transporter.
#[pallet::event]
#[pallet::generate_deposit(pub (super) fn deposit_event)]
pub enum Event<T: Config> {
/// Emits when there is a new outgoing transfer.
OutgoingTransferInitiated {
/// Destination chain the transfer is bound to.
chain_id: ChainId,
/// Id of the transfer.
message_id: MessageIdOf<T>,
},
/// Emits when a given outgoing transfer was failed on dst_chain.
OutgoingTransferFailed {
/// Destination chain the transfer is bound to.
chain_id: ChainId,
/// Id of the transfer.
message_id: MessageIdOf<T>,
/// Error from dst_chain endpoint.
err: DispatchError,
},
/// Emits when a given outgoing transfer was successful.
OutgoingTransferSuccessful {
/// Destination chain the transfer is bound to.
chain_id: ChainId,
/// Id of the transfer.
message_id: MessageIdOf<T>,
},
/// Emits when a given incoming transfer was successfully processed.
IncomingTransferSuccessful {
/// Source chain the transfer is coming from.
chain_id: ChainId,
/// Id of the transfer.
message_id: MessageIdOf<T>,
},
}
/// Errors emitted by pallet-transporter.
#[pallet::error]
pub enum Error<T> {
/// Emits when the account balance is too low to make the transfer.
LowBalance,
/// Failed to decode transfer payload.
InvalidPayload,
/// Emits when a response is received for a request that is missing.
MissingTransferRequest,
/// Emits when the request doesn't match the expected one.
InvalidTransferRequest,
/// Emits when the incoming message is not bound to this chain.
UnexpectedMessage,
/// Emits when the account id type is invalid.
InvalidAccountId,
}
#[pallet::call]
impl<T: Config> Pallet<T> {
/// Initiates transfer of funds from account on src_chain to account on dst_chain.
/// Funds are burned on src_chain first and are minted on dst_chain using Messenger.
#[pallet::call_index(0)]
#[pallet::weight((T::WeightInfo::transfer(), Pays::No))]
pub fn transfer(
origin: OriginFor<T>,
dst_location: Location,
amount: BalanceOf<T>,
) -> DispatchResult {
let sender = ensure_signed(origin)?;
// burn transfer amount
T::Currency::withdraw(
&sender,
amount,
WithdrawReasons::TRANSFER,
ExistenceRequirement::AllowDeath,
)
.map_err(|_| Error::<T>::LowBalance)?;
// initiate transfer
let dst_chain_id = dst_location.chain_id;
let transfer = Transfer {
amount,
sender: Location {
chain_id: T::SelfChainId::get(),
account_id: T::AccountIdConverter::convert(sender.clone()),
},
receiver: dst_location,
};
// send message
let message_id = T::Sender::send_message(
&sender,
dst_chain_id,
EndpointRequest {
src_endpoint: Endpoint::Id(T::SelfEndpointId::get()),
// destination endpoint must be transporter with same id
dst_endpoint: Endpoint::Id(T::SelfEndpointId::get()),
payload: transfer.encode(),
},
)?;
OutgoingTransfers::<T>::insert(dst_chain_id, message_id, transfer);
Self::deposit_event(Event::<T>::OutgoingTransferInitiated {
chain_id: dst_chain_id,
message_id,
});
Ok(())
}
}
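// Minimal usage sketch (illustrative only; the runtime name, origin and the
// `Raw` account variant below are placeholders/assumptions, mock-runtime style):
//
//	let dst = Location {
//		chain_id: dst_chain_id,
//		account_id: MultiAccountId::Raw(receiver_bytes),
//	};
//	Transporter::transfer(RuntimeOrigin::signed(sender), dst, amount)?;
//
// On success an `OutgoingTransferInitiated` event is deposited; the burned
// amount is minted on the destination chain once the message is handled there.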
/// Endpoint handler implementation for pallet transporter.
#[derive(Debug)]
pub struct EndpointHandler<T>(pub PhantomData<T>);
impl<T: Config> EndpointHandlerT<MessageIdOf<T>> for EndpointHandler<T> {
fn message(
&self,
src_chain_id: ChainId,
message_id: MessageIdOf<T>,
req: EndpointRequest,
) -> EndpointResponse {
// ensure the message does not originate from this chain
ensure!(
T::SelfChainId::get() != src_chain_id,
Error::<T>::InvalidTransferRequest
);
// check the endpoint id
ensure!(
req.dst_endpoint == Endpoint::Id(T::SelfEndpointId::get()),
Error::<T>::UnexpectedMessage
);
// decode payload and process message
let req = match Transfer::decode(&mut req.payload.as_slice()) {
Ok(req) => req,
Err(_) => return Err(Error::<T>::InvalidPayload.into()),
};
// mint the funds to dst_account
let account_id = T::AccountIdConverter::try_convert_back(req.receiver.account_id)
.ok_or(Error::<T>::InvalidAccountId)?;
T::Currency::deposit_creating(&account_id, req.amount);
frame_system::Pallet::<T>::deposit_event(Into::<<T as Config>::RuntimeEvent>::into(
Event::<T>::IncomingTransferSuccessful {
chain_id: src_chain_id,
message_id,
},
));
Ok(vec![])
}
fn message_weight(&self) -> Weight {
T::WeightInfo::message()
}
fn message_response(
&self,
dst_chain_id: ChainId,
message_id: MessageIdOf<T>,
req: EndpointRequest,
resp: EndpointResponse,
) -> DispatchResult {
// ensure request is valid
let transfer = OutgoingTransfers::<T>::take(dst_chain_id, message_id)
.ok_or(Error::<T>::MissingTransferRequest)?;
ensure!(
req.payload == transfer.encode(),
Error::<T>::InvalidTransferRequest | _,
Identity, | random_line_split |
api-blueprint-resource.js | undocumented: true
}];
}
function renderDocumentation(req, res) {
var index = req.path.indexOf(module.exports.apiBlueprintDocsUri);
if (index === -1) {
throw new Error('Invalid documentation request path ' + req.path);
}
var mountpoint = req.path.substring(index + module.exports.apiBlueprintDocsUri.length);
if (!module.exports.resourcesUri) {
module.exports.resourcesUri = req.path.substring(0, index);
}
req.logger.debug({mountpoint: mountpoint}, 'Render API Blueprint docs');
if (mountpoint.length <= 1) {
renderAllResources(req, res);
} else if (development && _.endsWith(req.path, markdownPath)) {
renderMarkdownSource(req, res);
} else {
renderSingleResource(mountpoint, req, res);
}
}
function renderAllResources(req, res) {
if (fullHtml) {
return sendHtml(res, null, fullHtml);
}
fullHtml = fs.readFileSync(fspath.resolve(__dirname, './loading.html'), {encoding: 'utf8'});
apiBlueprint.getAllJsonDocumentation(function(error, json) {
if (error) {
return res.status(500).rdkSend(error);
}
var indexPath = fspath.resolve(__dirname, './index.md');
async.waterfall([
apiBlueprint.jsonDocumentationFromFile.bind(null, indexPath, null),
function mergeJson(indexJson, callback) {
indexJson = apiBlueprint.mergeJsonDocumentation(indexJson, json);
callback(null, indexJson);
},
prependResourcesUri,
addFieldsParameter,
addSpyForVersioningParameter,
addMissingExampleWarnings,
displayWarnings,
renderHtml
], function(error, html) {
if (error) {
return res.status(500).rdkSend(error);
}
fullHtml = html;
sendHtml(res, error, html);
});
});
}
function renderSingleResource(mountpoint, req, res) {
if (renderedHtml[mountpoint]) {
return sendHtml(res, null, renderedHtml[mountpoint]);
}
async.waterfall([
apiBlueprint.jsonDocumentationForPath.bind(null, mountpoint),
prependResourcesUri,
addFieldsParameter,
addSpyForVersioningParameter,
addMissingExampleWarnings,
displayWarnings,
renderHtml
], function cacheHtml(error, html) {
renderedHtml[mountpoint] = html;
sendHtml(res, error, html);
});
}
function renderMarkdownSource(req, res) {
var markdownPath = req.query.source;
if (!_.startsWith(markdownPath, 'http')) {
var rootPath = __dirname.substring(0, __dirname.indexOf(rootDir) + rootDir.length);
markdownPath = rootPath + markdownPath;
}
var mountpoint = req.query.mountpoint;
apiBlueprint.loadFullMarkdown(markdownPath, mountpoint, null, function(error, markdown) {
if (markdown) {
apiBlueprint.jsonDocumentationForPath(mountpoint, function(docsError, json) {
var html;
var context = {
filename: fspath.basename(req.query.source),
lines: markdown.split(/\r?\n/g)
};
if (json) {
_.each(json.warnings, function(warning) {
var location = _.first(warning.location);
if (location && location.line && location.column) {
var line = context.lines[location.line - 1];
if (!_.contains(line, '<span class="warning"')) {
var start = location.column - 1;
var end = location.column - 1 + Math.max(1, location.length);
context.lines[location.line - 1] = _.escape(line.substring(0, start)) + '<span class="warning" title="' + _.escape(warning.message) + '">' + _.escape(line.substring(start, end)) + '</span>' + _.escape(line.substring(end));
}
}
});
}
html = sourceCodeTemplate(context);
sendHtml(res, error, html);
});
} else {
sendHtml(res, error);
}
});
}
function prependResourcesUri(json, done) {
if (json.__domain && json.__domain !== 'local') {
return done(null, json);
}
var prefix = _.trimRight(module.exports.resourcesUri, '/');
async.each(json.ast.resourceGroups, function(resourceGroup, groupDone) {
async.each(resourceGroup.resources, function(resource, resourceDone) {
if (resource.uriTemplate && !_.startsWith(resource.uriTemplate, prefix)) {
resource.uriTemplate = prefix + resource.uriTemplate;
}
async.each(resource.actions, function(action, actionDone) {
var uriTemplate = dd(action)('attributes')('uriTemplate').val;
if (uriTemplate && !_.startsWith(uriTemplate, prefix)) {
action.attributes.uriTemplate = prefix + uriTemplate;
}
setImmediate(actionDone);
}, resourceDone);
}, groupDone);
}, function(error) {
done(error, json);
});
}
function addFieldsParameter(json, done) {
addQueryParameter({
name: 'fields',
description: 'Define which fields to return using:\n\n`a,b,c` comma-separated list to select multiple fields.\n\n`a/b/c` path to select a field from its parent.\n\n`a(b,c)` sub-selection to select many fields from a parent.\n\nReference: [json-mask](https://github.com/nemtsov/json-mask)',
type: 'string',
required: false,
default: '',
example: '',
values: []
}, hasJsonResponse, json, done);
}
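// Illustrative only (not executed by this module): the json-mask semantics the
// `fields` description above refers to, shown with hypothetical field names.
//   var mask = require('json-mask');
//   mask({a: 1, b: {c: 2, d: 3}}, 'a,b/c'); // => {a: 1, b: {c: 2}}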
function addSpyForVersioningParameter(json, done) |
function addQueryParameter(parameter, checkFunction, json, done) {
if (json.__domain && json.__domain !== 'local') {
return done(null, json);
}
async.each(json.ast.resourceGroups, function(resourceGroup, groupDone) {
async.each(resourceGroup.resources, function(resource, resourceDone) {
var applies = false;
_.each(resource.actions, function(action) {
if (!checkFunction(action)) {
return;
}
applies = true;
var uriTemplate = dd(action)('attributes')('uriTemplate').val;
if (uriTemplate) {
action.parameters.push(parameter);
action.attributes.uriTemplate = appendQueryParameter(uriTemplate, parameter);
}
});
if (applies && resource.uriTemplate) {
resource.parameters.push(parameter);
resource.uriTemplate = appendQueryParameter(resource.uriTemplate, parameter);
}
setImmediate(resourceDone);
}, groupDone);
}, function(error) {
done(error, json);
});
function appendQueryParameter(uriTemplate, parameter) {
uriTemplate += _.contains(uriTemplate, '{?') ? '{&' : '{?';
uriTemplate += parameter.name;
return uriTemplate + '}';
}
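// For example (assumed inputs): appendQueryParameter('/docs{?start}', {name: 'fields'})
// yields '/docs{?start}{&fields}', while appendQueryParameter('/docs', {name: 'fields'})
// yields '/docs{?fields}'.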
}
function hasJsonResponse(action) {
return !!_.find(action.examples, function(example) {
return _.find(example.responses, function(response) {
var statusCode = parseInt(response.name, 10);
return (isNaN(statusCode) || statusCode < 300) &&
_.find(response.headers, {name: 'Content-Type', value: 'application/json'});
});
});
}
function addMissingExampleWarnings(json, done) {
if (!development) {
return done(null, json);
}
_.each(json.ast.resourceGroups, function(resourceGroup) {
_.each(resourceGroup.resources, function(resource) {
_.each(resource.actions, function(action) {
_.each(action.examples, function (example) {
var requests = _.map(example.requests, withType.bind(null, 'request'));
var responses = _.map(example.responses, withType.bind(null, 'response'));
_.each(requests.concat(responses), function (item) {
if (!item.example.body || !item.example.schema) {
var contentType = (_.find(item.example.headers, function (header) {
return header.name === 'Content-Type';
}) || {}).value;
if (contentType && _.contains(contentType, 'json')) {
addWarning(item.example, item.type, resource, action);
}
return;
}
});
});
});
});
});
return done(null, json);
function withType(type, item) {
return {
example: item,
type: item.name + ' ' + type
};
}
var nextResourceId;
function addWarning(example, type, resource, action) {
if (!resource.__id) {
if (!nextResourceId) {
nextResourceId = 1;
while (findResourceById(nextResourceId, json)) {
++nextResourceId;
}
}
resource.__id = String(++nextResourceId);
}
var message = 'Please write an example ' + type;
if (!example.body && !example.schema) {
message += ' and schema';
} else if (!example.schema) {
| {
addQueryParameter({
name: 'spy-for-versioning',
description: '**DEVELOPMENT ONLY:** when `true`, generate a schema from this resource\'s response, and capture responses from external systems like JDS and VistA.\n\nSchemas are generated under `src/core/api-blueprint/schemas`, and external responses are captured under `versioning-tests/recorded-responses`.',
type: 'boolean',
required: false,
default: '',
example: '',
values: []
}, function (action) {
return development && hasJsonResponse(action);
}, json, done);
} | identifier_body |
api-blueprint-resource.js | undocumented: true
}];
}
function renderDocumentation(req, res) {
var index = req.path.indexOf(module.exports.apiBlueprintDocsUri);
if (index === -1) {
throw new Error('Invalid documentation request path ' + req.path);
}
var mountpoint = req.path.substring(index + module.exports.apiBlueprintDocsUri.length);
if (!module.exports.resourcesUri) {
module.exports.resourcesUri = req.path.substring(0, index);
}
req.logger.debug({mountpoint: mountpoint}, 'Render API Blueprint docs');
if (mountpoint.length <= 1) {
renderAllResources(req, res);
} else if (development && _.endsWith(req.path, markdownPath)) {
renderMarkdownSource(req, res);
} else {
renderSingleResource(mountpoint, req, res);
}
}
function renderAllResources(req, res) {
if (fullHtml) {
return sendHtml(res, null, fullHtml);
}
fullHtml = fs.readFileSync(fspath.resolve(__dirname, './loading.html'), {encoding: 'utf8'});
apiBlueprint.getAllJsonDocumentation(function(error, json) {
if (error) {
return res.status(500).rdkSend(error);
}
var indexPath = fspath.resolve(__dirname, './index.md');
async.waterfall([
apiBlueprint.jsonDocumentationFromFile.bind(null, indexPath, null),
function mergeJson(indexJson, callback) {
indexJson = apiBlueprint.mergeJsonDocumentation(indexJson, json);
callback(null, indexJson);
},
prependResourcesUri,
addFieldsParameter,
addSpyForVersioningParameter,
addMissingExampleWarnings,
displayWarnings,
renderHtml
], function(error, html) {
if (error) |
fullHtml = html;
sendHtml(res, error, html);
});
});
}
function renderSingleResource(mountpoint, req, res) {
if (renderedHtml[mountpoint]) {
return sendHtml(res, null, renderedHtml[mountpoint]);
}
async.waterfall([
apiBlueprint.jsonDocumentationForPath.bind(null, mountpoint),
prependResourcesUri,
addFieldsParameter,
addSpyForVersioningParameter,
addMissingExampleWarnings,
displayWarnings,
renderHtml
], function cacheHtml(error, html) {
renderedHtml[mountpoint] = html;
sendHtml(res, error, html);
});
}
function renderMarkdownSource(req, res) {
var markdownPath = req.query.source;
if (!_.startsWith(markdownPath, 'http')) {
var rootPath = __dirname.substring(0, __dirname.indexOf(rootDir) + rootDir.length);
markdownPath = rootPath + markdownPath;
}
var mountpoint = req.query.mountpoint;
apiBlueprint.loadFullMarkdown(markdownPath, mountpoint, null, function(error, markdown) {
if (markdown) {
apiBlueprint.jsonDocumentationForPath(mountpoint, function(docsError, json) {
var html;
var context = {
filename: fspath.basename(req.query.source),
lines: markdown.split(/\r?\n/g)
};
if (json) {
_.each(json.warnings, function(warning) {
var location = _.first(warning.location);
if (location && location.line && location.column) {
var line = context.lines[location.line - 1];
if (!_.contains(line, '<span class="warning"')) {
var start = location.column - 1;
var end = location.column - 1 + Math.max(1, location.length);
context.lines[location.line - 1] = _.escape(line.substring(0, start)) + '<span class="warning" title="' + _.escape(warning.message) + '">' + _.escape(line.substring(start, end)) + '</span>' + _.escape(line.substring(end));
}
}
});
}
html = sourceCodeTemplate(context);
sendHtml(res, error, html);
});
} else {
sendHtml(res, error);
}
});
}
function prependResourcesUri(json, done) {
if (json.__domain && json.__domain !== 'local') {
return done(null, json);
}
var prefix = _.trimRight(module.exports.resourcesUri, '/');
async.each(json.ast.resourceGroups, function(resourceGroup, groupDone) {
async.each(resourceGroup.resources, function(resource, resourceDone) {
if (resource.uriTemplate && !_.startsWith(resource.uriTemplate, prefix)) {
resource.uriTemplate = prefix + resource.uriTemplate;
}
async.each(resource.actions, function(action, actionDone) {
var uriTemplate = dd(action)('attributes')('uriTemplate').val;
if (uriTemplate && !_.startsWith(uriTemplate, prefix)) {
action.attributes.uriTemplate = prefix + uriTemplate;
}
setImmediate(actionDone);
}, resourceDone);
}, groupDone);
}, function(error) {
done(error, json);
});
}
function addFieldsParameter(json, done) {
addQueryParameter({
name: 'fields',
description: 'Define which fields to return using:\n\n`a,b,c` comma-separated list to select multiple fields.\n\n`a/b/c` path to select a field from its parent.\n\n`a(b,c)` sub-selection to select many fields from a parent.\n\nReference: [json-mask](https://github.com/nemtsov/json-mask)',
type: 'string',
required: false,
default: '',
example: '',
values: []
}, hasJsonResponse, json, done);
}
function addSpyForVersioningParameter(json, done) {
addQueryParameter({
name: 'spy-for-versioning',
description: '**DEVELOPMENT ONLY:** when `true`, generate a schema from this resource\'s response, and capture responses from external systems like JDS and VistA.\n\nSchemas are generated under `src/core/api-blueprint/schemas`, and external responses are captured under `versioning-tests/recorded-responses`.',
type: 'boolean',
required: false,
default: '',
example: '',
values: []
}, function (action) {
return development && hasJsonResponse(action);
}, json, done);
}
function addQueryParameter(parameter, checkFunction, json, done) {
if (json.__domain && json.__domain !== 'local') {
return done(null, json);
}
async.each(json.ast.resourceGroups, function(resourceGroup, groupDone) {
async.each(resourceGroup.resources, function(resource, resourceDone) {
var applies = false;
_.each(resource.actions, function(action) {
if (!checkFunction(action)) {
return;
}
applies = true;
var uriTemplate = dd(action)('attributes')('uriTemplate').val;
if (uriTemplate) {
action.parameters.push(parameter);
action.attributes.uriTemplate = appendQueryParameter(uriTemplate, parameter);
}
});
if (applies && resource.uriTemplate) {
resource.parameters.push(parameter);
resource.uriTemplate = appendQueryParameter(resource.uriTemplate, parameter);
}
setImmediate(resourceDone);
}, groupDone);
}, function(error) {
done(error, json);
});
function appendQueryParameter(uriTemplate, parameter) {
uriTemplate += _.contains(uriTemplate, '{?') ? '{&' : '{?';
uriTemplate += parameter.name;
return uriTemplate + '}';
}
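// Illustrative (hypothetical template): appendQueryParameter('/med{?site}', {name: 'fields'})
// yields '/med{?site}{&fields}', continuing the existing RFC 6570 query expression;
// a template with no '{?' yet would get '{?fields}' instead.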
}
function hasJsonResponse(action) {
return !!_.find(action.examples, function(example) {
return _.find(example.responses, function(response) {
var statusCode = parseInt(response.name, 10);
return (isNaN(statusCode) || statusCode < 300) &&
_.find(response.headers, {name: 'Content-Type', value: 'application/json'});
});
});
}
function addMissingExampleWarnings(json, done) {
if (!development) {
return done(null, json);
}
_.each(json.ast.resourceGroups, function(resourceGroup) {
_.each(resourceGroup.resources, function(resource) {
_.each(resource.actions, function(action) {
_.each(action.examples, function (example) {
var requests = _.map(example.requests, withType.bind(null, 'request'));
var responses = _.map(example.responses, withType.bind(null, 'response'));
_.each(requests.concat(responses), function (item) {
if (!item.example.body || !item.example.schema) {
var contentType = (_.find(item.example.headers, function (header) {
return header.name === 'Content-Type';
}) || {}).value;
if (contentType && _.contains(contentType, 'json')) {
addWarning(item.example, item.type, resource, action);
}
return;
}
});
});
});
});
});
return done(null, json);
function withType(type, item) {
return {
example: item,
type: item.name + ' ' + type
};
}
var nextResourceId;
function addWarning(example, type, resource, action) {
if (!resource.__id) {
if (!nextResourceId) {
nextResourceId = 1;
while (findResourceById(nextResourceId, json)) {
++nextResourceId;
}
}
resource.__id = String(++nextResourceId);
}
var message = 'Please write an example ' + type;
if (!example.body && !example.schema) {
message += ' and schema';
} else if (!example.schema) {
| {
return res.status(500).rdkSend(error);
} | conditional_block |
api-blueprint-resource.js | undocumented: true
}];
}
function renderDocumentation(req, res) {
var index = req.path.indexOf(module.exports.apiBlueprintDocsUri);
if (index === -1) {
throw new Error('Invalid documentation request path ' + req.path);
}
var mountpoint = req.path.substring(index + module.exports.apiBlueprintDocsUri.length);
if (!module.exports.resourcesUri) {
module.exports.resourcesUri = req.path.substring(0, index);
}
req.logger.debug({mountpoint: mountpoint}, 'Render API Blueprint docs');
if (mountpoint.length <= 1) {
renderAllResources(req, res);
} else if (development && _.endsWith(req.path, markdownPath)) {
renderMarkdownSource(req, res);
} else {
renderSingleResource(mountpoint, req, res);
}
}
function renderAllResources(req, res) {
if (fullHtml) {
return sendHtml(res, null, fullHtml);
}
fullHtml = fs.readFileSync(fspath.resolve(__dirname, './loading.html'), {encoding: 'utf8'});
apiBlueprint.getAllJsonDocumentation(function(error, json) {
if (error) {
return res.status(500).rdkSend(error);
}
var indexPath = fspath.resolve(__dirname, './index.md');
async.waterfall([
apiBlueprint.jsonDocumentationFromFile.bind(null, indexPath, null),
function mergeJson(indexJson, callback) {
indexJson = apiBlueprint.mergeJsonDocumentation(indexJson, json);
callback(null, indexJson);
},
prependResourcesUri,
addFieldsParameter,
addSpyForVersioningParameter,
addMissingExampleWarnings,
displayWarnings,
renderHtml
], function(error, html) {
if (error) {
return res.status(500).rdkSend(error);
}
fullHtml = html;
sendHtml(res, error, html);
});
});
}
function renderSingleResource(mountpoint, req, res) {
if (renderedHtml[mountpoint]) {
return sendHtml(res, null, renderedHtml[mountpoint]);
}
async.waterfall([
apiBlueprint.jsonDocumentationForPath.bind(null, mountpoint),
prependResourcesUri,
addFieldsParameter,
addSpyForVersioningParameter,
addMissingExampleWarnings,
displayWarnings,
renderHtml
], function cacheHtml(error, html) {
renderedHtml[mountpoint] = html;
sendHtml(res, error, html);
});
}
function renderMarkdownSource(req, res) {
var markdownPath = req.query.source;
if (!_.startsWith(markdownPath, 'http')) {
var rootPath = __dirname.substring(0, __dirname.indexOf(rootDir) + rootDir.length);
markdownPath = rootPath + markdownPath;
}
var mountpoint = req.query.mountpoint;
apiBlueprint.loadFullMarkdown(markdownPath, mountpoint, null, function(error, markdown) {
if (markdown) {
apiBlueprint.jsonDocumentationForPath(mountpoint, function(docsError, json) {
var html;
var context = {
filename: fspath.basename(req.query.source),
lines: markdown.split(/\r?\n/g)
};
if (json) {
_.each(json.warnings, function(warning) {
var location = _.first(warning.location);
if (location && location.line && location.column) {
var line = context.lines[location.line - 1];
if (!_.contains(line, '<span class="warning"')) {
var start = location.column - 1;
var end = location.column - 1 + Math.max(1, location.length);
context.lines[location.line - 1] = _.escape(line.substring(0, start)) + '<span class="warning" title="' + _.escape(warning.message) + '">' + _.escape(line.substring(start, end)) + '</span>' + _.escape(line.substring(end));
}
}
});
}
html = sourceCodeTemplate(context);
sendHtml(res, error, html);
});
} else {
sendHtml(res, error);
}
});
}
function prependResourcesUri(json, done) {
if (json.__domain && json.__domain !== 'local') {
return done(null, json);
}
var prefix = _.trimRight(module.exports.resourcesUri, '/');
async.each(json.ast.resourceGroups, function(resourceGroup, groupDone) {
async.each(resourceGroup.resources, function(resource, resourceDone) {
if (resource.uriTemplate && !_.startsWith(resource.uriTemplate, prefix)) {
resource.uriTemplate = prefix + resource.uriTemplate;
}
async.each(resource.actions, function(action, actionDone) {
var uriTemplate = dd(action)('attributes')('uriTemplate').val;
if (uriTemplate && !_.startsWith(uriTemplate, prefix)) {
action.attributes.uriTemplate = prefix + uriTemplate;
}
setImmediate(actionDone);
}, resourceDone);
}, groupDone);
}, function(error) {
done(error, json);
});
}
function addFieldsParameter(json, done) {
addQueryParameter({
name: 'fields',
description: 'Define which fields to return using:\n\n`a,b,c` comma-separated list to select multiple fields.\n\n`a/b/c` path to select a field from its parent.\n\n`a(b,c)` sub-selection to select many fields from a parent.\n\nReference: [json-mask](https://github.com/nemtsov/json-mask)',
type: 'string',
required: false,
default: '',
example: '',
values: []
}, hasJsonResponse, json, done);
}
function addSpyForVersioningParameter(json, done) {
addQueryParameter({
name: 'spy-for-versioning',
description: '**DEVELOPMENT ONLY:** when `true`, generate a schema from this resource\'s response, and capture responses from external systems like JDS and VistA.\n\nSchemas are generated under `src/core/api-blueprint/schemas`, and external responses are captured under `versioning-tests/recorded-responses`.',
type: 'boolean',
required: false,
default: '',
example: '',
values: []
}, function (action) {
return development && hasJsonResponse(action);
}, json, done);
}
function | (parameter, checkFunction, json, done) {
if (json.__domain && json.__domain !== 'local') {
return done(null, json);
}
async.each(json.ast.resourceGroups, function(resourceGroup, groupDone) {
async.each(resourceGroup.resources, function(resource, resourceDone) {
var applies = false;
_.each(resource.actions, function(action) {
if (!checkFunction(action)) {
return;
}
applies = true;
var uriTemplate = dd(action)('attributes')('uriTemplate').val;
if (uriTemplate) {
action.parameters.push(parameter);
action.attributes.uriTemplate = appendQueryParameter(uriTemplate, parameter);
}
});
if (applies && resource.uriTemplate) {
resource.parameters.push(parameter);
resource.uriTemplate = appendQueryParameter(resource.uriTemplate, parameter);
}
setImmediate(resourceDone);
}, groupDone);
}, function(error) {
done(error, json);
});
function appendQueryParameter(uriTemplate, parameter) {
uriTemplate += _.contains(uriTemplate, '{?') ? '{&' : '{?';
uriTemplate += parameter.name;
return uriTemplate + '}';
}
}
function hasJsonResponse(action) {
return !!_.find(action.examples, function(example) {
return _.find(example.responses, function(response) {
var statusCode = parseInt(response.name, 10);
return (isNaN(statusCode) || statusCode < 300) &&
_.find(response.headers, {name: 'Content-Type', value: 'application/json'});
});
});
}
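// Note: a response counts as a JSON success here when its Content-Type header is
// application/json and its name either parses to a status code below 300 or is
// not numeric at all (non-numeric names are treated as successes).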
function addMissingExampleWarnings(json, done) {
if (!development) {
return done(null, json);
}
_.each(json.ast.resourceGroups, function(resourceGroup) {
_.each(resourceGroup.resources, function(resource) {
_.each(resource.actions, function(action) {
_.each(action.examples, function (example) {
var requests = _.map(example.requests, withType.bind(null, 'request'));
var responses = _.map(example.responses, withType.bind(null, 'response'));
_.each(requests.concat(responses), function (item) {
if (!item.example.body || !item.example.schema) {
var contentType = (_.find(item.example.headers, function (header) {
return header.name === 'Content-Type';
}) || {}).value;
if (contentType && _.contains(contentType, 'json')) {
addWarning(item.example, item.type, resource, action);
}
return;
}
});
});
});
});
});
return done(null, json);
function withType(type, item) {
return {
example: item,
type: item.name + ' ' + type
};
}
var nextResourceId;
function addWarning(example, type, resource, action) {
if (!resource.__id) {
if (!nextResourceId) {
nextResourceId = 1;
while (findResourceById(nextResourceId, json)) {
++nextResourceId;
}
}
resource.__id = String(++nextResourceId);
}
var message = 'Please write an example ' + type;
if (!example.body && !example.schema) {
message += ' and schema';
} else if (!example.schema) {
message | addQueryParameter | identifier_name |
api-blueprint-resource.js | = req.query.source;
if (!_.startsWith(markdownPath, 'http')) {
var rootPath = __dirname.substring(0, __dirname.indexOf(rootDir) + rootDir.length);
markdownPath = rootPath + markdownPath;
}
var mountpoint = req.query.mountpoint;
apiBlueprint.loadFullMarkdown(markdownPath, mountpoint, null, function(error, markdown) {
if (markdown) {
apiBlueprint.jsonDocumentationForPath(mountpoint, function(docsError, json) {
var html;
var context = {
filename: fspath.basename(req.query.source),
lines: markdown.split(/\r?\n/g)
};
if (json) {
_.each(json.warnings, function(warning) {
var location = _.first(warning.location);
if (location && location.line && location.column) {
var line = context.lines[location.line - 1];
if (!_.contains(line, '<span class="warning"')) {
var start = location.column - 1;
var end = location.column - 1 + Math.max(1, location.length);
context.lines[location.line - 1] = _.escape(line.substring(0, start)) + '<span class="warning" title="' + _.escape(warning.message) + '">' + _.escape(line.substring(start, end)) + '</span>' + _.escape(line.substring(end));
}
}
});
}
html = sourceCodeTemplate(context);
sendHtml(res, error, html);
});
} else {
sendHtml(res, error);
}
});
}
function prependResourcesUri(json, done) {
if (json.__domain && json.__domain !== 'local') {
return done(null, json);
}
var prefix = _.trimRight(module.exports.resourcesUri, '/');
async.each(json.ast.resourceGroups, function(resourceGroup, groupDone) {
async.each(resourceGroup.resources, function(resource, resourceDone) {
if (resource.uriTemplate && !_.startsWith(resource.uriTemplate, prefix)) {
resource.uriTemplate = prefix + resource.uriTemplate;
}
async.each(resource.actions, function(action, actionDone) {
var uriTemplate = dd(action)('attributes')('uriTemplate').val;
if (uriTemplate && !_.startsWith(uriTemplate, prefix)) {
action.attributes.uriTemplate = prefix + uriTemplate;
}
setImmediate(actionDone);
}, resourceDone);
}, groupDone);
}, function(error) {
done(error, json);
});
}
function addFieldsParameter(json, done) {
addQueryParameter({
name: 'fields',
description: 'Define which fields to return using:\n\n`a,b,c` comma-separated list to select multiple fields.\n\n`a/b/c` path to select a field from its parent.\n\n`a(b,c)` sub-selection to select many fields from a parent.\n\nReference: [json-mask](https://github.com/nemtsov/json-mask)',
type: 'string',
required: false,
default: '',
example: '',
values: []
}, hasJsonResponse, json, done);
}
function addSpyForVersioningParameter(json, done) {
addQueryParameter({
name: 'spy-for-versioning',
description: '**DEVELOPMENT ONLY:** when `true`, generate a schema from this resource\'s response, and capture responses from external systems like JDS and VistA.\n\nSchemas are generated under `src/core/api-blueprint/schemas`, and external responses are captured under `versioning-tests/recorded-responses`.',
type: 'boolean',
required: false,
default: '',
example: '',
values: []
}, function (action) {
return development && hasJsonResponse(action);
}, json, done);
}
function addQueryParameter(parameter, checkFunction, json, done) {
if (json.__domain && json.__domain !== 'local') {
return done(null, json);
}
async.each(json.ast.resourceGroups, function(resourceGroup, groupDone) {
async.each(resourceGroup.resources, function(resource, resourceDone) {
var applies = false;
_.each(resource.actions, function(action) {
if (!checkFunction(action)) {
return;
}
applies = true;
var uriTemplate = dd(action)('attributes')('uriTemplate').val;
if (uriTemplate) {
action.parameters.push(parameter);
action.attributes.uriTemplate = appendQueryParameter(uriTemplate, parameter);
}
});
if (applies && resource.uriTemplate) {
resource.parameters.push(parameter);
resource.uriTemplate = appendQueryParameter(resource.uriTemplate, parameter);
}
setImmediate(resourceDone);
}, groupDone);
}, function(error) {
done(error, json);
});
function appendQueryParameter(uriTemplate, parameter) {
uriTemplate += _.contains(uriTemplate, '{?') ? '{&' : '{?';
uriTemplate += parameter.name;
return uriTemplate + '}';
}
}
function hasJsonResponse(action) {
return !!_.find(action.examples, function(example) {
return _.find(example.responses, function(response) {
var statusCode = parseInt(response.name, 10);
return (isNaN(statusCode) || statusCode < 300) &&
_.find(response.headers, {name: 'Content-Type', value: 'application/json'});
});
});
}
function addMissingExampleWarnings(json, done) {
if (!development) {
return done(null, json);
}
_.each(json.ast.resourceGroups, function(resourceGroup) {
_.each(resourceGroup.resources, function(resource) {
_.each(resource.actions, function(action) {
_.each(action.examples, function (example) {
var requests = _.map(example.requests, withType.bind(null, 'request'));
var responses = _.map(example.responses, withType.bind(null, 'response'));
_.each(requests.concat(responses), function (item) {
if (!item.example.body || !item.example.schema) {
var contentType = (_.find(item.example.headers, function (header) {
return header.name === 'Content-Type';
}) || {}).value;
if (contentType && _.contains(contentType, 'json')) {
addWarning(item.example, item.type, resource, action);
}
return;
}
});
});
});
});
});
return done(null, json);
function withType(type, item) {
return {
example: item,
type: item.name + ' ' + type
};
}
var nextResourceId;
function addWarning(example, type, resource, action) {
if (!resource.__id) {
if (!nextResourceId) {
nextResourceId = 1;
while (findResourceById(nextResourceId, json)) {
++nextResourceId;
}
}
resource.__id = String(++nextResourceId);
}
var message = 'Please write an example ' + type;
if (!example.body && !example.schema) {
message += ' and schema';
} else if (!example.schema) {
message = 'Please write a schema for the ' + type;
}
var title = action.name || resource.name;
if (action.method.toUpperCase() !== title.toUpperCase()) {
title = action.method + ' ' + title;
}
json.warnings.push({
code: -1,
message: message + ' for endpoint *' + title + '*',
location: [{
resourceId: resource.__id
}]
});
}
}
function displayWarnings(json, done) {
if (!development) {
return done(null, json);
}
var prefix = _.trimRight(module.exports.resourcesUri, '/') + module.exports.apiBlueprintDocsUri;
_.each(json.warnings, function(warning) {
var location = _.first(warning.location);
if (location) {
var resource = findResourceById(location.resourceId, json);
if (resource) {
var text = '::: warning\n<i class="fa fa-warning" title="API Blueprint parse warning"></i> ';
text += warning.message + '\n';
if (location.file || location.line || location.index) {
text += ' (';
if (location.file) {
var index = location.file.indexOf(rootDir);
var file = _.trimLeft(index > 0 ? location.file.substring(index + rootDir.length) : location.file, '/');
var line = location.line ? '#' + (location.line - 1) : '';
// uriTemplate without query and fragment parameters
var mountpoint = resource.uriTemplate.replace(/\{[\?&#][^\}]+\}/g, '');
index = mountpoint.indexOf(module.exports.resourcesUri);
mountpoint = (index !== -1) ? mountpoint.substring(index + module.exports.resourcesUri.length) : mountpoint;
text += 'in [' + file + '](' + prefix + markdownPath + '?source=' + encodeURIComponent(file) + '&mountpoint=' + encodeURIComponent(mountpoint) + line + ') ';
}
if (location.line) {
text += 'line ' + location.line;
} else {
text += 'index ' + location.index;
}
text += ')';
}
text += '\n:::\n\n';
resource.description += text;
}
}
});
done(null, json);
}
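// displayWarnings (development only) appends an admonition block to the affected
// resource's description; when the warning carries a source file, it links back to
// the markdown source view served under apiBlueprintDocsUri.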
function findResourceById(resourceId, json) {
var resource;
_.each(json.ast.resourceGroups, function(resourceGroup) {
resource = _.find(resourceGroup.resources, {__id: resourceId});
if (resource) {
return false;
} | });
return resource;
}
function renderHtml(json, done) { | random_line_split |
|
LFEDocumentClassifier.py | *
from FileIO import *
# Handle command line arguments and set program parameters
if USE_CLI_ARGUMENTS:
args = collectCommandLineArguments()
USE_REUTERS = args.useReuters
USE_RAW_CSV = args.useCSV
CSV_FILE_PATH = args.csvPath
CSV_INPUT_COL = args.inputName
CSV_TARGET_COL = args.targetName
CLASSIFIER_NAME = args.classifier
WORD_EMBEDDING_METHOD = args.wordEmbedding
TEST_RUNS = args.testRuns
EPOCHS = args.epochs
CROSS_VALIDATE = args.crossValidate
USE_MULTI_LABEL_CLASSIFICATION = args.multiLabel
SAVE_STATS_TO_FILE = args.save
SAVE_FILE_NAME = args.fileName
REMOVE_STOPWORDS = args.removeStopWords
STEM_TEXT = args.stemText
KNN_NEIGHBOURS = args.knnNeighbours
KNN_WEIGHTS = args.knnWeight
KM_CLUSTERS = args.kmClusters
KM_N_INIT = args.kmInit
NN_BATCH_SIZE = args.nnBatchSize
NN_INTERNAL_EPOCHS = args.nnEpochs
SVM_KERNEL = args.svmKernel
SVM_DEGREE = args.svmDegree
SVM_CLASS_WEIGHT = None if args.svmClassWeight is False else 'balanced'
# GLOBAL VARIABLES
themePairs = [] # List of tuples, where the first item contains text and the second contains corresponding themes
wordEmbeddings = [] # List of words and their embedded scores per entry (words, keywords, TF-IDF etc)
bagOfWords = [] # List of all the words making up the bag of words (for feature creation)
bagOfWordsDict = dict() # Dict for quick indexing of BOW
featuresMasks = [] # Feature mask per entry to match with the bagOfWords structure/order
targetMasks = [] # Target value (class) per entry, aligns with features mask
classifier = None # Placeholder for the classifier object generated later in the pipeline
otherCategories = None # Placeholder for reuters categories (if reuters/raw CSV is being used, remains None otherwise)
categoryCount = 0 # Number of classes/themes/categories (len(set(y)))
# TODO: [PIPELINE SPLIT 1] - Take input data and split into input and target, pre-process and clean
if USE_REUTERS:
themePairs, otherCategories = getReutersFeatureClassPairs()
categoryCount = len(otherCategories)
elif USE_RAW_CSV:
dataFile = pd.read_csv(CSV_FILE_PATH)
# Apply all pre-processing to clean text and themes
ic = InputCleaner(dataFile, themePairs, CSV_INPUT_COL, CSV_TARGET_COL, GENERATE_1D_THEMES, USE_RAW_CSV)
ic.cleanText(REMOVE_NUMERIC, REMOVE_SINGLE_LETTERS, REMOVE_KEYWORDS, REMOVE_EXTRA_SPACES)
categoryCount = len(ic.primaryThemesCount.keys())
otherCategories = list(ic.primaryThemesCount.keys()) |
# Apply all pre-processing to clean text and themes
ic = InputCleaner(dataFile, themePairs, 'excellenceText', 'themeExcellence', GENERATE_1D_THEMES)
ic.cleanText(REMOVE_NUMERIC, REMOVE_SINGLE_LETTERS, REMOVE_KEYWORDS, REMOVE_EXTRA_SPACES)
categoryCount = len(ALL_THEMES_LIST)
# TODO: [PIPELINE SPLIT 2] - Use word embedding or other metrics to score input text
if WORD_EMBEDDING_METHOD == 'rake':
r = Rake()
for i in range(len(themePairs)):
r.extract_keywords_from_text(themePairs[i][0])
wordEmbeddings.append(r.get_ranked_phrases_with_scores())
elif WORD_EMBEDDING_METHOD == 'text_rank':
tr = TextRank(themePairs, REMOVE_STOPWORDS, STEM_TEXT)
wordEmbeddings = tr.getAllKeywords()
elif WORD_EMBEDDING_METHOD == 'word_count':
tf = TermFrequency(themePairs, REMOVE_STOPWORDS, STEM_TEXT)
wordEmbeddings = tf.getAllTermCountsPerDocument()
elif WORD_EMBEDDING_METHOD == 'tf_idf':
tf = TermFrequency(themePairs, REMOVE_STOPWORDS, STEM_TEXT)
wordEmbeddings = tf.generateAllTFIDFValues()
else:
print("ERROR - Invalid Keyword IDing method chosen")
breakpoint()
# DATA GATHERING!
print("average raw character length: " + str(getAverageTextLength(themePairs, True)))
print("average final character length: " + str(getAverageTextLength(wordEmbeddings, False)))
print("average final word count: " + str(getAverageWordCount(wordEmbeddings)))
minWords, maxWords = getMinAndMaxWordCount(wordEmbeddings)
print("min words: " + str(minWords) + "max words: " + str(maxWords))
print("total items count: " + str(len(themePairs)))
# TODO: [PIPELINE SPLIT 3] - Build features from keywords/text
bagOfWords = generateBagOfWords(wordEmbeddings, USE_THRESHOLD, KEYWORD_THRESHOLD)
bagOfWordsDict = generateBagOfWordsDict(bagOfWords)
print("Total Features: " + str(len(bagOfWords)))
# Generate the feature masks which will make up the training features for classification
for scoredPairs in wordEmbeddings:
featuresMasks.append(generateFeatureMask(bagOfWords, bagOfWordsDict, scoredPairs))
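# Each mask is assumed to align index-for-index with bagOfWords, so every document
# becomes a fixed-length feature vector the classifiers below can consume.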
# Encode the target themes into numeric values for classification
for pair in themePairs:
if USE_MULTI_LABEL_CLASSIFICATION:
targetMasks.append(encodeThemesToValues(pair[1]))
else:
targetMasks.append(encodePrimaryThemeToValue(pair[1], USE_REUTERS, USE_RAW_CSV, otherCategories))
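# Illustrative (assuming index-based encoding): with otherCategories == ['earn', 'acq'],
# a primary theme of 'acq' would map to 1; multi-label mode instead encodes the
# entry's whole theme set.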
# Clear unused items from memory if required
if FREE_RESOURCES:
del dataFile
del themePairs
del wordEmbeddings
if not USE_REUTERS:
del ic
if WORD_EMBEDDING_METHOD == 'text_rank':
del tr
elif WORD_EMBEDDING_METHOD == 'tf_idf' or WORD_EMBEDDING_METHOD == 'word_count':
del tf
gc.collect()
# TODO: [VALIDATION SPLIT] - If using a validation set, perform preprocessing and generate feature masks
validationThemePairs = []
validationFeatureMasks = []
if USE_VALIDATION:
validationDataFile = pd.read_csv(VALIDATION_FILE_PATH)
# Apply all pre-processing to clean validation set text
val_ic = InputCleaner(validationDataFile, validationThemePairs, VALIDATION_INPUT_COL, "", GENERATE_1D_THEMES, isValidation=USE_VALIDATION)
validationThemePairs = val_ic.cleanText(REMOVE_NUMERIC, REMOVE_SINGLE_LETTERS, REMOVE_KEYWORDS, REMOVE_EXTRA_SPACES)
# Create word embeddings
val_tf = TermFrequency(validationThemePairs, REMOVE_STOPWORDS, STEM_TEXT)
val_we = val_tf.generateAllTFIDFValues()
# Generate feature masks
for scoredPairs in val_we:
validationFeatureMasks.append(generateFeatureMask(bagOfWords, bagOfWordsDict, scoredPairs))
# TODO: [PIPELINE SPLIT 4] - Determine which classifier to use and how to initialise it
# Populate "classifier" with the chosen classifier and initialise any hyper-parameters
if CLASSIFIER_NAME == 'knn':
# TODO: Add multi-label classification to KNN
classifier = KNNClassifier(featuresMasks, targetMasks,
USE_MULTI_LABEL_CLASSIFICATION,
TEST_GROUP_SIZE,
RANDOM_STATE,
KNN_NEIGHBOURS,
KNN_WEIGHTS,
KNN_ALGORITHM,
PRINT_PROGRESS)
elif CLASSIFIER_NAME == 'cnb':
classifier = ComplementNaiveBayes(featuresMasks, targetMasks,
USE_MULTI_LABEL_CLASSIFICATION,
TEST_GROUP_SIZE,
RANDOM_STATE,
PRINT_PROGRESS)
elif CLASSIFIER_NAME == 'nn':
if NN_USE_KERAS:
classifier = MultiLayerPerceptronKeras(featuresMasks, targetMasks,
TEST_GROUP_SIZE,
RANDOM_STATE,
NN_BATCH_SIZE,
NN_INTERNAL_EPOCHS,
NN_BIAS)
else:
classifier = MultiLayerPerceptronSklearn(featuresMasks, targetMasks,
categoryCount,
TEST_GROUP_SIZE,
RANDOM_STATE,
NN_BATCH_SIZE,
PRINT_PROGRESS)
elif CLASSIFIER_NAME == 'svm':
classifier = SupportVectorMachine(featuresMasks, targetMasks,
USE_MULTI_LABEL_CLASSIFICATION,
TEST_GROUP_SIZE,
RANDOM_STATE,
SVM_KERNEL,
SVM_DEGREE,
SVM_CLASS_WEIGHT,
SVM_DECISION_SHAPE,
PRINT_PROGRESS)
elif CLASSIFIER_NAME == "km":
classifier = KMeans(featuresMasks, targetMasks,
USE_MULTI_LABEL_CLASSIFICATION,
TEST_GROUP_SIZE,
RANDOM_STATE,
KM_CLUSTERS,
KM_N_INIT)
else:
print("ERROR - Invalid classifier name chosen")
breakpoint()
# TODO: [PIPELINE SPLIT 5] - Run tests using the classifier, output results and statistics
if USE_VALIDATION:
results = decodeValueToPrimaryTheme(runValidation(classifier, validationFeatureMasks))
else:
for test in range(TEST_RUNS):
results = runTests(classifier,
EPOCHS,
USE_MULTI_LABEL_CLASSIFICATION,
CROSS_VALIDATE,
CV_FOLDS,
PRINT_PROGRESS)
if CROSS_VALIDATE:
testStats = results
else:
if USE_MULTI_LABEL_CLASSIFICATION:
testStats = getMultiLabel | else:
# Read raw .XLSX file and store as pandas data-frame
dataFile = pd.read_excel(LFE_DATA_FILE_PATH, engine='openpyxl') | random_line_split |
LFEDocumentClassifier.py | from FileIO import *
# Handle command line arguments and set program parameters
if USE_CLI_ARGUMENTS:
args = collectCommandLineArguments()
USE_REUTERS = args.useReuters
USE_RAW_CSV = args.useCSV
CSV_FILE_PATH = args.csvPath
CSV_INPUT_COL = args.inputName
CSV_TARGET_COL = args.targetName
CLASSIFIER_NAME = args.classifier
WORD_EMBEDDING_METHOD = args.wordEmbedding
TEST_RUNS = args.testRuns
EPOCHS = args.epochs
CROSS_VALIDATE = args.crossValidate
USE_MULTI_LABEL_CLASSIFICATION = args.multiLabel
SAVE_STATS_TO_FILE = args.save
SAVE_FILE_NAME = args.fileName
REMOVE_STOPWORDS = args.removeStopWords
STEM_TEXT = args.stemText
KNN_NEIGHBOURS = args.knnNeighbours
KNN_WEIGHTS = args.knnWeight
KM_CLUSTERS = args.kmClusters
KM_N_INIT = args.kmInit
NN_BATCH_SIZE = args.nnBatchSize
NN_INTERNAL_EPOCHS = args.nnEpochs
SVM_KERNEL = args.svmKernel
SVM_DEGREE = args.svmDegree
SVM_CLASS_WEIGHT = None if args.svmClassWeight is False else 'balanced'
# GLOBAL VARIABLES
themePairs = [] # List of tuples, where the first item contains text and the second contains corresponding themes
wordEmbeddings = [] # List of words and their embedded scores per entry (words, keywords, TF-IDF etc)
bagOfWords = [] # List of all the words making up the bag of words (for feature creation)
bagOfWordsDict = dict() # Dict for quick indexing of BOW
featuresMasks = [] # Feature mask per entry to match with the bagOfWords structure/order
targetMasks = [] # Target value (class) per entry, aligns with features mask
classifier = None # Placeholder for the classifier object generated later in the pipeline
otherCategories = None # Placeholder for reuters categories (if reuters/raw CSV is being used, remains None otherwise)
categoryCount = 0 # Number of classes/themes/categories (len(set(y)))
# TODO: [PIPELINE SPLIT 1] - Take input data and split into input and target, pre-process and clean
if USE_REUTERS:
themePairs, otherCategories = getReutersFeatureClassPairs()
categoryCount = len(otherCategories)
elif USE_RAW_CSV:
dataFile = pd.read_csv(CSV_FILE_PATH)
# Apply all pre-processing to clean text and themes
ic = InputCleaner(dataFile, themePairs, CSV_INPUT_COL, CSV_TARGET_COL, GENERATE_1D_THEMES, USE_RAW_CSV)
ic.cleanText(REMOVE_NUMERIC, REMOVE_SINGLE_LETTERS, REMOVE_KEYWORDS, REMOVE_EXTRA_SPACES)
categoryCount = len(ic.primaryThemesCount.keys())
otherCategories = list(ic.primaryThemesCount.keys())
else:
# Read raw .XLSX file and store as pandas data-frame
dataFile = pd.read_excel(LFE_DATA_FILE_PATH, engine='openpyxl')
# Apply all pre-processing to clean text and themes
ic = InputCleaner(dataFile, themePairs, 'excellenceText', 'themeExcellence', GENERATE_1D_THEMES)
ic.cleanText(REMOVE_NUMERIC, REMOVE_SINGLE_LETTERS, REMOVE_KEYWORDS, REMOVE_EXTRA_SPACES)
categoryCount = len(ALL_THEMES_LIST)
# TODO: [PIPELINE SPLIT 2] - Use word embedding or other metrics to score input text
if WORD_EMBEDDING_METHOD == 'rake':
r = Rake()
for i in range(len(themePairs)):
r.extract_keywords_from_text(themePairs[i][0])
wordEmbeddings.append(r.get_ranked_phrases_with_scores())
elif WORD_EMBEDDING_METHOD == 'text_rank':
tr = TextRank(themePairs, REMOVE_STOPWORDS, STEM_TEXT)
wordEmbeddings = tr.getAllKeywords()
elif WORD_EMBEDDING_METHOD == 'word_count':
tf = TermFrequency(themePairs, REMOVE_STOPWORDS, STEM_TEXT)
wordEmbeddings = tf.getAllTermCountsPerDocument()
elif WORD_EMBEDDING_METHOD == 'tf_idf':
tf = TermFrequency(themePairs, REMOVE_STOPWORDS, STEM_TEXT)
wordEmbeddings = tf.generateAllTFIDFValues()
else:
print("ERROR - Invalid Keyword IDing method chosen")
breakpoint()
# DATA GATHERING!
print("average raw character length: " + str(getAverageTextLength(themePairs, True)))
print("average final character length: " + str(getAverageTextLength(wordEmbeddings, False)))
print("average final word count: " + str(getAverageWordCount(wordEmbeddings)))
minWords, maxWords = getMinAndMaxWordCount(wordEmbeddings)
print("min words: " + str(minWords) + "max words: " + str(maxWords))
print("total items count: " + str(len(themePairs)))
# TODO: [PIPELINE SPLIT 3] - Build features from keywords/text
bagOfWords = generateBagOfWords(wordEmbeddings, USE_THRESHOLD, KEYWORD_THRESHOLD)
bagOfWordsDict = generateBagOfWordsDict(bagOfWords)
print("Total Features: " + str(len(bagOfWords)))
# Generate the feature masks which will make up the training features for classification
for scoredPairs in wordEmbeddings:
featuresMasks.append(generateFeatureMask(bagOfWords, bagOfWordsDict, scoredPairs))
# Encode the target themes into numeric values for classification
for pair in themePairs:
if USE_MULTI_LABEL_CLASSIFICATION:
targetMasks.append(encodeThemesToValues(pair[1]))
else:
targetMasks.append(encodePrimaryThemeToValue(pair[1], USE_REUTERS, USE_RAW_CSV, otherCategories))
# Clear unused items from memory if required
if FREE_RESOURCES:
del dataFile
del themePairs
del wordEmbeddings
if not USE_REUTERS:
del ic
if WORD_EMBEDDING_METHOD == 'text_rank':
del tr
elif WORD_EMBEDDING_METHOD == 'tf_idf' or WORD_EMBEDDING_METHOD == 'word_count':
del tf
gc.collect()
# TODO: [VALIDATION SPLIT] - If using a validation set, perform preprocessing and generate feature masks
validationThemePairs = []
validationFeatureMasks = []
if USE_VALIDATION:
validationDataFile = pd.read_csv(VALIDATION_FILE_PATH)
# Apply all pre-processing to clean validation set text
val_ic = InputCleaner(validationDataFile, validationThemePairs, VALIDATION_INPUT_COL, "", GENERATE_1D_THEMES, isValidation=USE_VALIDATION)
validationThemePairs = val_ic.cleanText(REMOVE_NUMERIC, REMOVE_SINGLE_LETTERS, REMOVE_KEYWORDS, REMOVE_EXTRA_SPACES)
# Create word embeddings
val_tf = TermFrequency(validationThemePairs, REMOVE_STOPWORDS, STEM_TEXT)
val_we = val_tf.generateAllTFIDFValues()
# Generate feature masks
for scoredPairs in val_we:
validationFeatureMasks.append(generateFeatureMask(bagOfWords, bagOfWordsDict, scoredPairs))
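# Note: validation documents get their own TF-IDF scores but are masked against the
# *training* bagOfWords, keeping validation vectors aligned with the trained features.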
# TODO: [PIPELINE SPLIT 4] - Determine which classifier to use and how to initialise it
# Populate "classifier" with the chosen classifier and initialise any hyper-parameters
if CLASSIFIER_NAME == 'knn':
# TODO: Add multi-label classification to KNN
classifier = KNNClassifier(featuresMasks, targetMasks,
USE_MULTI_LABEL_CLASSIFICATION,
TEST_GROUP_SIZE,
RANDOM_STATE,
KNN_NEIGHBOURS,
KNN_WEIGHTS,
KNN_ALGORITHM,
PRINT_PROGRESS)
elif CLASSIFIER_NAME == 'cnb':
|
elif CLASSIFIER_NAME == 'nn':
if NN_USE_KERAS:
classifier = MultiLayerPerceptronKeras(featuresMasks, targetMasks,
TEST_GROUP_SIZE,
RANDOM_STATE,
NN_BATCH_SIZE,
NN_INTERNAL_EPOCHS,
NN_BIAS)
else:
classifier = MultiLayerPerceptronSklearn(featuresMasks, targetMasks,
categoryCount,
TEST_GROUP_SIZE,
RANDOM_STATE,
NN_BATCH_SIZE,
PRINT_PROGRESS)
elif CLASSIFIER_NAME == 'svm':
classifier = SupportVectorMachine(featuresMasks, targetMasks,
USE_MULTI_LABEL_CLASSIFICATION,
TEST_GROUP_SIZE,
RANDOM_STATE,
SVM_KERNEL,
SVM_DEGREE,
SVM_CLASS_WEIGHT,
SVM_DECISION_SHAPE,
PRINT_PROGRESS)
elif CLASSIFIER_NAME == "km":
classifier = KMeans(featuresMasks, targetMasks,
USE_MULTI_LABEL_CLASSIFICATION,
TEST_GROUP_SIZE,
RANDOM_STATE,
KM_CLUSTERS,
KM_N_INIT)
else:
print("ERROR - Invalid classifier name chosen")
breakpoint()
# TODO: [PIPELINE SPLIT 5] - Run tests using the classifier, output results and statistics
if USE_VALIDATION:
results = decodeValueToPrimaryTheme(runValidation(classifier, validationFeatureMasks))
else:
for test in range(TEST_RUNS):
results = runTests(classifier,
EPOCHS,
USE_MULTI_LABEL_CLASSIFICATION,
CROSS_VALIDATE,
CV_FOLDS,
PRINT_PROGRESS)
if CROSS_VALIDATE:
testStats = results
else:
if USE_MULTI_LABEL_CLASSIFICATION:
testStats = getMultiLabelTest | classifier = ComplementNaiveBayes(featuresMasks, targetMasks,
USE_MULTI_LABEL_CLASSIFICATION,
TEST_GROUP_SIZE,
RANDOM_STATE,
PRINT_PROGRESS) | conditional_block |
state_chart.js | 0, .14)"
},
new go.Binding("location", "loc", go.Point.parse).makeTwoWay(go.Point.stringify),
// define the node's outer shape, which will surround the TextBlock
$(go.Shape, "RoundedRectangle", roundedRectangleParams,
{
name: "SHAPE", fill: "#ffffff", strokeWidth: 0,
stroke: null,
portId: "", // this Shape is the Node's port, not the whole Node
fromLinkable: true, fromLinkableSelfNode: true, fromLinkableDuplicates: true,
toLinkable: true, toLinkableSelfNode: true, toLinkableDuplicates: true,
cursor: "pointer"
}),
$(go.TextBlock,
{
font: "bold small-caps 11pt helvetica, bold arial, sans-serif",
margin: 7,
stroke: "rgba(0, 0, 0, .87)",
// editable: true // editing the text automatically updates the model data
},
new go.Binding("text").makeTwoWay())
);
// unlike the normal selection Adornment, this one includes a Button
myDiagram.nodeTemplate.selectionAdornmentTemplate =
$(go.Adornment, "Spot",
$(go.Panel, "Auto",
$(go.Shape, "RoundedRectangle", roundedRectangleParams,
{fill: null, stroke: "#7986cb", strokeWidth: 3}),
$(go.Placeholder) // a Placeholder sizes itself to the selected Node
),
// the button to create a "next" node, at the top-right corner
// $("Button",
// {
// alignment: go.Spot.TopRight,
// click: addNodeAndLink // this function is defined below
// },
// $(go.Shape, "PlusLine", {width: 6, height: 6})
// ), // end button
// the button to create a "next" node, at the top-right corner
// $("Button",
// {
// alignment: go.Spot.BottomRight,
// click: addPeropertyNode // this function is defined below
// },
// $(go.Shape, "PlusLine", {width: 6, height: 6})
// ) // end button
); // end Adornment
myDiagram.nodeTemplateMap.add("Start",
$(go.Node, "Spot", {desiredSize: new go.Size(75, 75)},
new go.Binding("location", "loc", go.Point.parse).makeTwoWay(go.Point.stringify),
$(go.Shape, "Circle",
{
fill: "#52ce60", /* green */
stroke: null,
portId: "",
fromLinkable: true, fromLinkableSelfNode: true, fromLinkableDuplicates: true,
toLinkable: true, toLinkableSelfNode: true, toLinkableDuplicates: true,
cursor: "pointer"
}),
$(go.TextBlock, "Start",
{
font: "bold 16pt helvetica, bold arial, sans-serif",
stroke: "whitesmoke"
})
)
);
myDiagram.nodeTemplateMap.add("End",
$(go.Node, "Spot", {desiredSize: new go.Size(75, 75)},
new go.Binding("location", "loc", go.Point.parse).makeTwoWay(go.Point.stringify),
$(go.Shape, "Circle",
{
fill: "maroon",
stroke: null,
portId: "",
fromLinkable: true, fromLinkableSelfNode: true, fromLinkableDuplicates: true,
toLinkable: true, toLinkableSelfNode: true, toLinkableDuplicates: true,
cursor: "pointer"
}),
$(go.Shape, "Circle", {
fill: null,
desiredSize: new go.Size(65, 65),
strokeWidth: 2,
stroke: "whitesmoke"
}),
$(go.TextBlock, "End",
{
font: "bold 16pt helvetica, bold arial, sans-serif",
stroke: "whitesmoke"
})
)
);
// clicking the button inserts a new node to the right of the selected node,
// and adds a link to that new node
function addNodeAndLink(e, obj) {
var adornment = obj.part;
var diagram = e.diagram;
diagram.startTransaction("Add State");
// get the node data for which the user clicked the button
var fromNode = adornment.adornedPart;
var fromData = fromNode.data;
// create a new "State" data object, positioned off to the right of the adorned Node
var toData = {text: "New State"};
var p = fromNode.location.copy();
p.x += 200;
toData.loc = go.Point.stringify(p); // the "loc" property is a string, not a Point object
// add the new node data to the model
var model = diagram.model;
model.addNodeData(toData);
// create a link data from the old node data to the new node data
var linkdata = {
from: model.getKeyForNodeData(fromData), // or just: fromData.id
to: model.getKeyForNodeData(toData),
text: "动作"
};
// and add the link data to the model
model.addLinkData(linkdata);
// select the new Node
var newnode = diagram.findNodeForData(toData);
diagram.select(newnode);
diagram.commitTransaction("Add State");
// if the new node is off-screen, scroll the diagram to show the new node
diagram.scrollToRect(newnode.actualBounds);
}
// On click, create the objects involved in this state
function addPeropertyNode(e, obj) {
var adornment = obj.part;
var diagram = e.diagram;
diagram.startTransaction("Add State");
// get the node data for which the user clicked the button
var fromNode = adornment.adornedPart;
var fromData = fromNode.data;
// create a new "State" data object, positioned off to the right of the adorned Node
var toData = {text: "Object"};
var p = fromNode.location.copy();
p.x += 200;
toData.loc = go.Point.stringify(p); // the "loc" property is a string, not a Point object
// add the new node data to the model
var model = diagram.model;
model.addNodeData(toData);
// create a link data from the old node data to the new node data
var linkdata = {
from: model.getKeyForNodeData(fromData), // or just: fromData.id
to: model.getKeyForNodeData(toData),
text: "涉及"
};
// and add the link data to the model
model.addLinkData(linkdata);
// select the new Node
var newnode = diagram.findNodeForData(toData);
diagram.select(newnode);
diagram.commitTransaction("Add State");
// if the new node is off-screen, scroll the diagram to show the new node
diagram.scrollToRect(newnode.actualBounds);
}
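// Design note: addNodeAndLink and addPeropertyNode differ only in the archetype
// node text and the link label; a single parameterised helper could serve both.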
// replace the default Link template in the linkTemplateMap
myDiagram.linkTemplate =
$(go.Link, // the whole link panel
{
curve: go.Link.Bezier,
adjusting: go.Link.Stretch,
reshapable: true, relinkableFrom: true, relinkableTo: true,
toShortLength: 3
},
new go.Binding("points").makeTwoWay(),
new go.Binding("curviness"),
$(go.Shape, // the link shape
{strokeWidth: 1.5},
new go.Binding('stroke', 'progress', function (progress) {
return progress ? "#52ce60" /* green */ : 'black';
}),
new go.Binding('strokeWidth', 'progress', function (progress) {
return progress ? 2.5 : 1.5;
})
),
$(go.Shape, // the arrowhead
{toArrow: "standard", stroke: null},
new go.Binding('fill', 'progress', function (progress) {
return progress ? "#52ce60" /* green */ : 'black';
}),
),
$(go.Panel, "Auto",
$(go.Shape, // the label background, which becomes transparent around the edges
{
fill: $(go.Brush, "Radial",
{0: "rgb(245, 245, 245)", 0.7: "rgb(245, 245, 245)", 1: "rgba(245, 245, 245, 0)"}),
stroke: null
}),
$(go.TextBlock, "动作", // the label text
{
textAlign: "center",
font: "9pt helvetica, arial, sans-serif",
margin: 4,
// editable: true // enable in-place editing
},
// editing the text automatically updates the model data
new go.Binding("text").makeTwoWay())
)
);
| random_line_split |
||
state_chart.js | () {
go.ForceDirectedLayout.call(this);
this._isObserving = false;
}
go.Diagram.inherit(ContinuousForceDirectedLayout, go.ForceDirectedLayout);
ContinuousForceDirectedLayout.prototype.isFixed = function (v) {
return v.node.isSelected;
};
// optimization: reuse the ForceDirectedNetwork rather than re-create it each time
ContinuousForceDirectedLayout.prototype.doLayout = function (coll) {
if (!this._isObserving) {
this._isObserving = true;
// cacheing the network means we need to recreate it if nodes or links have been added or removed or relinked,
// so we need to track structural model changes to discard the saved network.
var lay = this;
this.diagram.addModelChangedListener(function (e) {
// modelChanges include a few cases that we don't actually care about, such as
// "nodeCategory" or "linkToPortId", but we'll go ahead and recreate the network anyway.
// Also clear the network when replacing the model.
if (e.modelChange !== "" ||
(e.change === go.ChangedEvent.Transaction && e.propertyName === "StartingFirstTransaction")) {
lay.network = null;
}
});
}
var net = this.network;
if (net === null) { // the first time, just create the network as normal
this.network = net = this.makeNetwork(coll);
} else { // but on reuse we need to update the LayoutVertex.bounds for selected nodes
this.diagram.nodes.each(function (n) {
var v = net.findVertex(n);
if (v !== null) v.bounds = n.actualBounds;
});
}
// now perform the normal layout
go.ForceDirectedLayout.prototype.doLayout.call(this, coll);
// doLayout normally discards the LayoutNetwork by setting Layout.network to null;
// here we remember it for next time
this.network = net;
};
// end ContinuousForceDirectedLayout
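// Caching the network keeps continuous layout cheap while dragging: only selected
// nodes are pinned (isFixed), vertex bounds are refreshed on reuse, and the cache
// is dropped only when a structural model change invalidates it above.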
function chart_init() {
if (window.goSamples) goSamples(); // init for these samples -- you don't need to call this
var $ = go.GraphObject.make; // for conciseness in defining templates
// some constants that will be reused within templates
var roundedRectangleParams = {
parameter1: 2, // set the rounded corner
spot1: go.Spot.TopLeft, spot2: go.Spot.BottomRight // make content go all the way to inside edges of rounded corners
};
myDiagram =
$(go.Diagram, "myDiagramDiv", // must name or refer to the DIV HTML element
{
"animationManager.initialAnimationStyle": go.AnimationManager.None,
"InitialAnimationStarting": function (e) {
var animation = e.subject.defaultAnimation;
animation.easing = go.Animation.EaseOutExpo;
animation.duration = 900;
animation.add(e.diagram, 'scale', 0.1, 1);
animation.add(e.diagram, 'opacity', 0, 1);
},
// have mouse wheel events zoom in and out instead of scroll up and down
"toolManager.mouseWheelBehavior": go.ToolManager.WheelZoom,
// support double-click in background creating a new node
// "clickCreatingTool.archetypeNodeData": {text: "新状态"},
// enable undo & redo
"undoManager.isEnabled": true,
// layout: new DemoForceDirectedLayout(), // use custom layout
// other Layout properties are set by the layout function, defined below
layout:
$(ContinuousForceDirectedLayout, // automatically spread nodes apart while dragging
{defaultSpringLength: 60, defaultElectricalCharge: 120}),
// do an extra layout at the end of a move
// layout: $(go.LayeredDigraphLayout),
// other Layout properties are set by the layout function, defined below
positionComputation: function (diagram, pt) {
return new go.Point(Math.floor(pt.x), Math.floor(pt.y));
}
});
myDiagram.addDiagramListener("ObjectSingleClicked", function (e) {
var part = e.subject.part;
if (!(part instanceof go.Link)) {
if (part.data.m_type === 'object') {
findTarget(part.data.id);
}
if (part.data.m_type === 'state') {
console.log("重新加载状态"+part.data.id);
reloadScene(part.data.id);
}
}
});
// define the Node template
myDiagram.nodeTemplate =
$(go.Node, "Auto",
{
locationSpot: go.Spot.TopCenter,
isShadowed: true, shadowBlur: 1,
shadowOffset: new go.Point(0, 1),
shadowColor: "rgba(0, 0, 0, .14)"
},
new go.Binding("location", "loc", go.Point.parse).makeTwoWay(go.Point.stringify),
// define the node's outer shape, which will surround the TextBlock
$(go.Shape, "RoundedRectangle", roundedRectangleParams,
{
name: "SHAPE", fill: "#ffffff", strokeWidth: 0,
stroke: null,
portId: "", // this Shape is the Node's port, not the whole Node
fromLinkable: true, fromLinkableSelfNode: true, fromLinkableDuplicates: true,
toLinkable: true, toLinkableSelfNode: true, toLinkableDuplicates: true,
cursor: "pointer"
}),
$(go.TextBlock,
{
font: "bold small-caps 11pt helvetica, bold arial, sans-serif",
margin: 7,
stroke: "rgba(0, 0, 0, .87)",
// editable: true // editing the text automatically updates the model data
},
new go.Binding("text").makeTwoWay())
);
// unlike the normal selection Adornment, this one includes a Button
myDiagram.nodeTemplate.selectionAdornmentTemplate =
$(go.Adornment, "Spot",
$(go.Panel, "Auto",
$(go.Shape, "RoundedRectangle", roundedRectangleParams,
{fill: null, stroke: "#7986cb", strokeWidth: 3}),
$(go.Placeholder) // a Placeholder sizes itself to the selected Node
),
// the button to create a "next" node, at the top-right corner
// $("Button",
// {
// alignment: go.Spot.TopRight,
// click: addNodeAndLink // this function is defined below
// },
// $(go.Shape, "PlusLine", {width: 6, height: 6})
// ), // end button
// the button to create a "next" node, at the top-right corner
// $("Button",
// {
// alignment: go.Spot.BottomRight,
// click: addPeropertyNode // this function is defined below
// },
// $(go.Shape, "PlusLine", {width: 6, height: 6})
// ) // end button
); // end Adornment
myDiagram.nodeTemplateMap.add("Start",
$(go.Node, "Spot", {desiredSize: new go.Size(75, 75)},
new go.Binding("location", "loc", go.Point.parse).makeTwoWay(go.Point.stringify),
$(go.Shape, "Circle",
{
fill: "#52ce60", /* green */
stroke: null,
portId: "",
fromLinkable: true, fromLinkableSelfNode: true, fromLinkableDuplicates: true,
toLinkable: true, toLinkableSelfNode: true, toLinkableDuplicates: true,
cursor: "pointer"
}),
$(go.TextBlock, "Start",
{
font: "bold 16pt helvetica, bold arial, sans-serif",
stroke: "whitesmoke"
})
)
);
myDiagram.nodeTemplateMap.add("End",
$(go.Node, "Spot", {desiredSize: new go.Size(75, 75)},
new go.Binding("location", "loc", go.Point.parse).makeTwoWay(go.Point.stringify),
$(go.Shape, "Circle",
{
fill: "maroon",
stroke: null,
portId: "",
fromLinkable: true, fromLinkableSelfNode: true, fromLinkableDuplicates: true,
toLinkable: true, toLinkableSelfNode: true, toLinkableDuplicates: true,
cursor: "pointer"
}),
$(go.Shape, "Circle", {
fill: null,
desiredSize: new go.Size(65, 65),
strokeWidth: 2,
stroke: "whitesmoke"
}),
$(go.TextBlock, "End",
{
font: "bold 16pt helvetica, bold arial, sans-serif",
stroke: "whitesmoke"
})
)
);
// clicking the button inserts a new node to the right of the selected node,
// and adds a link to that new node
function addNodeAndLink(e, obj) {
var adornment = obj.part;
var diagram = e.diagram;
diagram.startTransaction("Add State");
// get the node data for which the user clicked the button
| ContinuousForceDirectedLayout | identifier_name |
|
state_chart.js | clear the network when replacing the model.
if (e.modelChange !== "" ||
(e.change === go.ChangedEvent.Transaction && e.propertyName === "StartingFirstTransaction")) {
lay.network = null;
}
});
}
var net = this.network;
if (net === null) { // the first time, just create the network as normal
this.network = net = this.makeNetwork(coll);
} else { // but on reuse we need to update the LayoutVertex.bounds for selected nodes
this.diagram.nodes.each(function (n) {
var v = net.findVertex(n);
if (v !== null) v.bounds = n.actualBounds;
});
}
// now perform the normal layout
go.ForceDirectedLayout.prototype.doLayout.call(this, coll);
// doLayout normally discards the LayoutNetwork by setting Layout.network to null;
// here we remember it for next time
this.network = net;
};
// end ContinuousForceDirectedLayout
function chart_init() {
if (window.goSamples) goSamples(); // init for these samples -- you don't need to call this
var $ = go.GraphObject.make; // for conciseness in defining templates
// some constants that will be reused within templates
var roundedRectangleParams = {
parameter1: 2, // set the rounded corner
spot1: go.Spot.TopLeft, spot2: go.Spot.BottomRight // make content go all the way to inside edges of rounded corners
};
myDiagram =
$(go.Diagram, "myDiagramDiv", // must name or refer to the DIV HTML element
{
"animationManager.initialAnimationStyle": go.AnimationManager.None,
"InitialAnimationStarting": function (e) {
var animation = e.subject.defaultAnimation;
animation.easing = go.Animation.EaseOutExpo;
animation.duration = 900;
animation.add(e.diagram, 'scale', 0.1, 1);
animation.add(e.diagram, 'opacity', 0, 1);
},
// have mouse wheel events zoom in and out instead of scroll up and down
"toolManager.mouseWheelBehavior": go.ToolManager.WheelZoom,
// support double-click in background creating a new node
// "clickCreatingTool.archetypeNodeData": {text: "新状态"},
// enable undo & redo
"undoManager.isEnabled": true,
// layout: new DemoForceDirectedLayout(), // use custom layout
// other Layout properties are set by the layout function, defined below
layout:
$(ContinuousForceDirectedLayout, // automatically spread nodes apart while dragging
{defaultSpringLength: 60, defaultElectricalCharge: 120}),
// do an extra layout at the end of a move
// layout: $(go.LayeredDigraphLayout),
// other Layout properties are set by the layout function, defined below
positionComputation: function (diagram, pt) {
return new go.Point(Math.floor(pt.x), Math.floor(pt.y));
}
});
myDiagram.addDiagramListener("ObjectSingleClicked", function (e) {
var part = e.subject.part;
if (!(part instanceof go.Link)) {
if (part.data.m_type === 'object') {
findTarget(part.data.id);
}
if (part.data.m_type === 'state') {
console.log("重新加载状态"+part.data.id);
reloadScene(part.data.id);
}
}
});
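// Single-click dispatch: 'object' nodes jump to the model via findTarget, while
// 'state' nodes reload the corresponding scene (both assumed defined elsewhere).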
// define the Node template
myDiagram.nodeTemplate =
$(go.Node, "Auto",
{
locationSpot: go.Spot.TopCenter,
isShadowed: true, shadowBlur: 1,
shadowOffset: new go.Point(0, 1),
shadowColor: "rgba(0, 0, 0, .14)"
},
new go.Binding("location", "loc", go.Point.parse).makeTwoWay(go.Point.stringify),
// define the node's outer shape, which will surround the TextBlock
$(go.Shape, "RoundedRectangle", roundedRectangleParams,
{
name: "SHAPE", fill: "#ffffff", strokeWidth: 0,
stroke: null,
portId: "", // this Shape is the Node's port, not the whole Node
fromLinkable: true, fromLinkableSelfNode: true, fromLinkableDuplicates: true,
toLinkable: true, toLinkableSelfNode: true, toLinkableDuplicates: true,
cursor: "pointer"
}),
$(go.TextBlock,
{
font: "bold small-caps 11pt helvetica, bold arial, sans-serif",
margin: 7,
stroke: "rgba(0, 0, 0, .87)",
// editable: true // editing the text automatically updates the model data
},
new go.Binding("text").makeTwoWay())
);
// unlike the normal selection Adornment, this one includes a Button
myDiagram.nodeTemplate.selectionAdornmentTemplate =
$(go.Adornment, "Spot",
$(go.Panel, "Auto",
$(go.Shape, "RoundedRectangle", roundedRectangleParams,
{fill: null, stroke: "#7986cb", strokeWidth: 3}),
$(go.Placeholder) // a Placeholder sizes itself to the selected Node
),
// the button to create a "next" node, at the top-right corner
// $("Button",
// {
// alignment: go.Spot.TopRight,
// click: addNodeAndLink // this function is defined below
// },
// $(go.Shape, "PlusLine", {width: 6, height: 6})
// ), // end button
// the button to create a "next" node, at the top-right corner
// $("Button",
// {
// alignment: go.Spot.BottomRight,
// click: addPeropertyNode // this function is defined below
// },
// $(go.Shape, "PlusLine", {width: 6, height: 6})
// ) // end button
); // end Adornment
myDiagram.nodeTemplateMap.add("Start",
$(go.Node, "Spot", {desiredSize: new go.Size(75, 75)},
new go.Binding("location", "loc", go.Point.parse).makeTwoWay(go.Point.stringify),
$(go.Shape, "Circle",
{
fill: "#52ce60", /* green */
stroke: null,
portId: "",
fromLinkable: true, fromLinkableSelfNode: true, fromLinkableDuplicates: true,
toLinkable: true, toLinkableSelfNode: true, toLinkableDuplicates: true,
cursor: "pointer"
}),
$(go.TextBlock, "Start",
{
font: "bold 16pt helvetica, bold arial, sans-serif",
stroke: "whitesmoke"
})
)
);
myDiagram.nodeTemplateMap.add("End",
$(go.Node, "Spot", {desiredSize: new go.Size(75, 75)},
new go.Binding("location", "loc", go.Point.parse).makeTwoWay(go.Point.stringify),
$(go.Shape, "Circle",
{
fill: "maroon",
stroke: null,
portId: "",
fromLinkable: true, fromLinkableSelfNode: true, fromLinkableDuplicates: true,
toLinkable: true, toLinkableSelfNode: true, toLinkableDuplicates: true,
cursor: "pointer"
}),
$(go.Shape, "Circle", {
fill: null,
desiredSize: new go.Size(65, 65),
strokeWidth: 2,
stroke: "whitesmoke"
}),
$(go.TextBlock, "End",
{
font: "bold 16pt helvetica, bold arial, sans-serif",
stroke: "whitesmoke"
})
)
);
// clicking the button inserts a new node to the right of the selected node,
// and adds a link to that new node
function addNodeAndLink(e, obj) {
var ador | text: "Action"
};
// and add the link data to the model
model.addLinkData(linkdata);
// select the new Node
var newnode | nment = obj.part;
var diagram = e.diagram;
diagram.startTransaction("Add State");
// get the node data for which the user clicked the button
var fromNode = adornment.adornedPart;
var fromData = fromNode.data;
// create a new "State" data object, positioned off to the right of the adorned Node
var toData = {text: "新状态"};
var p = fromNode.location.copy();
p.x += 200;
toData.loc = go.Point.stringify(p); // the "loc" property is a string, not a Point object
// add the new node data to the model
var model = diagram.model;
model.addNodeData(toData);
// create a link data from the old node data to the new node data
var linkdata = {
from: model.getKeyForNodeData(fromData), // or just: fromData.id
to: model.getKeyForNodeData(toData), | identifier_body |
multisig.go | menu.Option(menuItemStrings[sign], sign, false, func(opt wmenu.Opt) error {
wmenu.Clear()
id, proceed := getSignParams()
if !proceed {
fmt.Println("Transaction cancelled")
} else {
if SCID != "" {
sendTransaction(SCID, "Sign", "", 0, id)
} else {
fmt.Println("Please enter a SCID (Menu Option 1)\n")
}
}
mm := mainMenu()
return mm.Run()
})
menu.Option(menuItemStrings[displayUnsigned], displayUnsigned, false, func(opt wmenu.Opt) error {
wmenu.Clear()
displayTransactions(SCID, 1, "")
pressToContinue()
mm := mainMenu()
return mm.Run()
})
menu.Option(menuItemStrings[displaySigned], displaySigned, false, func(opt wmenu.Opt) error {
wmenu.Clear()
displayTransactions(SCID, 2, "")
pressToContinue()
mm := mainMenu()
return mm.Run()
})
menu.Option(menuItemStrings[displayAll], displayAll, false, func(opt wmenu.Opt) error {
wmenu.Clear()
displayTransactions(SCID, 0, "")
pressToContinue()
mm := mainMenu()
return mm.Run()
})
menu.Option(menuItemStrings[displayID], displayID, false, func(opt wmenu.Opt) error {
wmenu.Clear()
txIDno := getID()
displayByID(SCID, txIDno)
mm := mainMenu()
return mm.Run()
})
menu.Option(menuItemStrings[exit], exit, false, func(opt wmenu.Opt) error {
wmenu.Clear()
return nil //Exit
})
menu.Action(func(opts []wmenu.Opt) error {
if len(opts) != 1 {
return errors.New("wrong number of options chosen")
}
wmenu.Clear()
mm := mainMenu()
return mm.Run()
})
return menu
}
//Get SCID, save to memory
func getSCID() {
scanner := bufio.NewScanner(os.Stdin)
var text string
fmt.Print("Enter SCID: ")
scanner.Scan()
text = scanner.Text()
wmenu.Clear()
SCID = text
fmt.Println("SCID entered: ", text)
fmt.Print("Press 'Enter' to continue...")
bufio.NewReader(os.Stdin).ReadBytes('\n')
}
//Get tx ID to display
func getID() string {
scanner := bufio.NewScanner(os.Stdin)
var text string
fmt.Print("Enter Transaction ID No: ")
scanner.Scan()
text = scanner.Text()
wmenu.Clear()
fmt.Println("Transaction ID No entered: ", text)
fmt.Print("Press 'Enter' to continue...")
bufio.NewReader(os.Stdin).ReadBytes('\n')
return text
}
func pressToContinue() {
fmt.Print("Press 'Enter' to continue...")
bufio.NewReader(os.Stdin).ReadBytes('\n')
//wmenu.Clear()
| }
//Enter deposit amount, return value.
func getDepositAmount() (int64, bool) {
scanner := bufio.NewScanner(os.Stdin)
var amountString string
fmt.Print("Enter deposit amount in Dero: ")
scanner.Scan()
amountString = scanner.Text()
wmenu.Clear()
fmt.Printf("Do you want to deposit %s Dero? Enter Y/N (Yes/No)", amountString)
confirmed := askForConfirmation()
if confirmed {
amountFloat, err := strconv.ParseFloat(amountString, 64) //convert to float64
if err != nil {
fmt.Println(err)
return 0, false
}
amount := int64(amountFloat * 1000000000000)
return amount, true
} else {
return 0, false
}
}
//Enter recipient address and amount, return values.
func getSendParams() (string, int64, bool) {
scanner := bufio.NewScanner(os.Stdin)
var recipient string
var amountString string
fmt.Print("Enter recipient address: ")
scanner.Scan()
recipient = scanner.Text()
wmenu.Clear()
fmt.Print("Enter deposit amount in Dero: ")
scanner.Scan()
amountString = scanner.Text()
wmenu.Clear()
fmt.Printf("Do you want to send %s Dero to %s? Enter Y/N (Yes/No)", amountString, recipient)
confirmed := askForConfirmation()
if confirmed {
amountFloat, err := strconv.ParseFloat(amountString, 64) //convert to float64
if err != nil {
fmt.Println(err)
return "", 0, false
}
amount := int64(amountFloat * 1000000000000)
return recipient, amount, true
} else {
return "", 0, false
}
}
//Enter transaction ID, return value.
func getSignParams() (string, bool) {
scanner := bufio.NewScanner(os.Stdin)
var id string
fmt.Print("Enter transaction ID: ")
scanner.Scan()
id = scanner.Text()
wmenu.Clear()
fmt.Printf("Do you want to sign transaction %s? Enter Y/N (Yes/No)", id)
confirmed := askForConfirmation()
if confirmed {
return id, true
} else {
return "", false
}
}
// The following 3 functions were taken directly from https://gist.github.com/albrow/5882501
// askForConfirmation uses Scanln to parse user input. A user must type in "yes" or "no" and
// then press enter. It has fuzzy matching, so "y", "Y", "yes", "YES", and "Yes" all count as
// confirmations. If the input is not recognized, it will ask again. The function does not return
// until it gets a valid response from the user. Typically, you should use fmt to print out a question
// before calling askForConfirmation. E.g. fmt.Println("WARNING: Are you sure? (yes/no)")
func askForConfirmation() bool {
var response string
_, err := fmt.Scanln(&response)
if err != nil {
fmt.Println("Error")
}
okayResponses := []string{"y", "Y", "yes", "Yes", "YES"}
nokayResponses := []string{"n", "N", "no", "No", "NO"}
if containsString(okayResponses, response) {
return true
} else if containsString(nokayResponses, response) {
return false
} else {
fmt.Println("Please type yes or no and then press enter:")
return askForConfirmation()
}
}
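// Illustrative wrapper (not part of the original tool) showing the intended
// call pattern: print the question first, then delegate to askForConfirmation.
func confirmExample() bool {
fmt.Println("WARNING: Are you sure? (yes/no)")
return askForConfirmation()
}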
// posString returns the first index of element in slice.
// If slice does not contain element, returns -1.
func posString(slice []string, element string) int {
for index, elem := range slice {
if elem == element {
return index
}
}
return -1
}
// containsString returns true if slice contains element
func containsString(slice []string, element string) bool {
return !(posString(slice, element) == -1)
}
/*-----------------------------------------------------------RPC Functions-----------------------------------------------------------------*/
//sendTransaction: send a transaction to the wallet or sign a transaction. entry should be "Send" or "Sign".
func sendTransaction(scid string, entry string, to string, amount int64, id string) {
walletURL:= "http://127.0.0.1:30309/json_rpc"
var amountString string
if amount == 0 {
amountString = ""
} else {
amountString = strconv.FormatInt(amount, 10)
}
data := PayloadGeneral{
Jsonrpc: "2.0",
ID: "0",
Method: "transfer_split",
Params: Params2{
Mixin: 5,
GetTxKey: true,
ScTx: ScTx2{
Entrypoint: entry,
Scid: scid,
Value: 0,
Params: Params3{
To: to,
Amount: amountString,
ID: id,
},
},
},
}
payloadBytes, err := json.Marshal(data)
if err != nil {
fmt.Println(err)
return
}
body := bytes.NewReader(payloadBytes)
_, err = rpcPost(body, walletURL)
if err != nil {
fmt.Println(err)
return
}
//println(result)
fmt.Println("Transaction sent to wallet!")
}
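// Hypothetical call sites, shown only to clarify the parameter contract: a
// "Send" entry carries a recipient and an atomic-unit amount, while a "Sign"
// entry carries only the pending transaction ID. Address and ID are placeholders.
func exampleEntrypoints(scid string) {
sendTransaction(scid, "Send", "<recipient address>", 2*1000000000000, "")
sendTransaction(scid, "Sign", "", 0, "7")
}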
//deposit: Deposit Dero to SC
func deposit(scid string, amount int64) {
walletURL:= "http://127.0.0.1:30309/json_rpc"
data:= PayloadDeposit{
Jsonrpc: "2.0",
ID: "0",
Method: "transfer_split",
Params: Params{
Mixin: 5,
GetTxKey: true,
ScTx: ScTx{
Entrypoint: "Deposit",
Scid: scid,
Value: amount,
},
},
| random_line_split |
|
multisig.go | .Option(menuItemStrings[sign], sign, false, func(opt wmenu.Opt) error {
wmenu.Clear()
id, proceed := getSignParams()
if !proceed {
fmt.Println("Transaction cancelled")
} else {
if SCID != "" {
sendTransaction(SCID, "Sign", "", 0, id)
} else {
fmt.Println("Please enter a SCID (Menu Option 1)")
}
}
mm := mainMenu()
return mm.Run()
})
menu.Option(menuItemStrings[displayUnsigned], displayUnsigned, false, func(opt wmenu.Opt) error {
wmenu.Clear()
displayTransactions(SCID, 1, "")
pressToContinue()
mm := mainMenu()
return mm.Run()
})
menu.Option(menuItemStrings[displaySigned], displaySigned, false, func(opt wmenu.Opt) error {
wmenu.Clear()
displayTransactions(SCID, 2, "")
pressToContinue()
mm := mainMenu()
return mm.Run()
})
menu.Option(menuItemStrings[displayAll], displayAll, false, func(opt wmenu.Opt) error {
wmenu.Clear()
displayTransactions(SCID, 0, "")
pressToContinue()
mm := mainMenu()
return mm.Run()
})
menu.Option(menuItemStrings[displayID], displayID, false, func(opt wmenu.Opt) error {
wmenu.Clear()
txIDno := getID()
displayByID(SCID, txIDno)
mm := mainMenu()
return mm.Run()
})
menu.Option(menuItemStrings[exit], exit, false, func(opt wmenu.Opt) error {
wmenu.Clear()
return nil //Exit
})
menu.Action(func(opts []wmenu.Opt) error {
if len(opts) != 1 {
return errors.New("wrong number of options chosen")
}
wmenu.Clear()
mm := mainMenu()
return mm.Run()
})
return menu
}
//Get SCID, save to memory
func getSCID() {
scanner := bufio.NewScanner(os.Stdin)
var text string
fmt.Print("Enter SCID: ")
scanner.Scan()
text = scanner.Text()
wmenu.Clear()
SCID = text
fmt.Println("SCID entered: ", text)
fmt.Print("Press 'Enter' to continue...")
bufio.NewReader(os.Stdin).ReadBytes('\n')
}
//Get tx ID to display
func getID() string {
scanner := bufio.NewScanner(os.Stdin)
var text string
fmt.Print("Enter Transaction ID No: ")
scanner.Scan()
text = scanner.Text()
wmenu.Clear()
fmt.Println("Transaction ID No entered: ", text)
fmt.Print("Press 'Enter' to continue...")
bufio.NewReader(os.Stdin).ReadBytes('\n')
return text
}
func pressToContinue() {
fmt.Print("Press 'Enter' to continue...")
bufio.NewReader(os.Stdin).ReadBytes('\n')
//wmenu.Clear()
}
//Enter deposit amount, return value.
func getDepositAmount() (int64, bool) {
scanner := bufio.NewScanner(os.Stdin)
var amountString string
fmt.Print("Enter deposit amount in Dero: ")
scanner.Scan()
amountString = scanner.Text()
wmenu.Clear()
fmt.Printf("Do you want to deposit %s Dero? Enter Y/N (Yes/No)", amountString)
confirmed := askForConfirmation()
if confirmed {
amountFloat, err := strconv.ParseFloat(amountString, 64) //convert to float64
if err != nil {
fmt.Println(err)
return 0, false
}
amount := int64(amountFloat * 1000000000000)
return amount, true
} else {
return 0, false
}
}
//Enter recipient address and amount, return values.
func getSendParams() (string, int64, bool) {
scanner := bufio.NewScanner(os.Stdin)
var recipient string
var amountString string
fmt.Print("Enter recipient address: ")
scanner.Scan()
recipient = scanner.Text()
wmenu.Clear()
fmt.Print("Enter deposit amount in Dero: ")
scanner.Scan()
amountString = scanner.Text()
wmenu.Clear()
fmt.Printf("Do you want to send %s Dero to %s? Enter Y/N (Yes/No)", amountString, recipient)
confirmed := askForConfirmation()
if confirmed {
amountFloat, err := strconv.ParseFloat(amountString, 64) //convert to float64
if err != nil {
fmt.Println(err)
return "", 0, false
}
amount := int64(amountFloat * 1000000000000)
return recipient, amount, true
} else {
return "", 0, false
}
}
//Enter transaction ID, return value.
func getSignParams() (string, bool) {
scanner := bufio.NewScanner(os.Stdin)
var id string
fmt.Print("Enter transaction ID: ")
scanner.Scan()
id = scanner.Text()
wmenu.Clear()
fmt.Printf("Do you want to sign transaction %s? Enter Y/N (Yes/No)", id)
confirmed := askForConfirmation()
if confirmed {
return id, true
} else {
return "", false
}
}
// The following 3 functions were taken directly from https://gist.github.com/albrow/5882501
// askForConfirmation uses Scanln to parse user input. A user must type in "yes" or "no" and
// then press enter. It has fuzzy matching, so "y", "Y", "yes", "YES", and "Yes" all count as
// confirmations. If the input is not recognized, it will ask again. The function does not return
// until it gets a valid response from the user. Typically, you should use fmt to print out a question
// before calling askForConfirmation. E.g. fmt.Println("WARNING: Are you sure? (yes/no)")
func askForConfirmation() bool {
var response string
_, err := fmt.Scanln(&response)
if err != nil {
fmt.Println("Error")
}
okayResponses := []string{"y", "Y", "yes", "Yes", "YES"}
nokayResponses := []string{"n", "N", "no", "No", "NO"}
if containsString(okayResponses, response) {
return true
} else if containsString(nokayResponses, response) {
return false
} else {
fmt.Println("Please type yes or no and then press enter:")
return askForConfirmation()
}
}
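// Hypothetical usage sketch: the question is printed before the prompt loop,
// as the comment above recommends. The wrapper name is illustrative only.
func confirmOverwrite() bool {
fmt.Println("WARNING: This will overwrite the stored SCID. Continue? (yes/no)")
return askForConfirmation()
}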
// posString returns the first index of element in slice.
// If slice does not contain element, returns -1.
func posString(slice []string, element string) int {
for index, elem := range slice {
if elem == element |
}
return -1
}
// containsString returns true if slice contains element
func containsString(slice []string, element string) bool {
return !(posString(slice, element) == -1)
}
/*-----------------------------------------------------------RPC Functions-----------------------------------------------------------------*/
//sendTransaction: send a transaction to the wallet or sign a transaction. entry should be "Send" or "Sign".
func sendTransaction(scid string, entry string, to string, amount int64, id string) {
walletURL:= "http://127.0.0.1:30309/json_rpc"
var amountString string
if amount == 0 {
amountString = ""
} else {
amountString = strconv.FormatInt(amount, 10)
}
data := PayloadGeneral{
Jsonrpc: "2.0",
ID: "0",
Method: "transfer_split",
Params: Params2{
Mixin: 5,
GetTxKey: true,
ScTx: ScTx2{
Entrypoint: entry,
Scid: scid,
Value: 0,
Params: Params3{
To: to,
Amount: amountString,
ID: id,
},
},
},
}
payloadBytes, err := json.Marshal(data)
if err != nil {
fmt.Println(err)
return
}
body := bytes.NewReader(payloadBytes)
_, err = rpcPost(body, walletURL)
if err != nil {
fmt.Println(err)
return
}
//println(result)
fmt.Println("Transaction sent to wallet!")
}
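// Sketch (not in the original source) of the two supported entrypoints; the
// recipient string and transaction ID are stand-ins.
func exampleSendThenSign(scid string) {
sendTransaction(scid, "Send", "<recipient address>", 1000000000000, "") // 1 Dero
sendTransaction(scid, "Sign", "", 0, "3")
}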
//deposit: Deposit Dero to SC
func deposit(scid string, amount int64) {
walletURL:= "http://127.0.0.1:30309/json_rpc"
data:= PayloadDeposit{
Jsonrpc: "2.0",
ID: "0",
Method: "transfer_split",
Params: Params{
Mixin: 5,
GetTxKey: true,
ScTx: ScTx{
Entrypoint: "Deposit",
Scid: scid,
Value: amount,
},
},
| {
return index
} | conditional_block |
multisig.go | .Option(menuItemStrings[sign], sign, false, func(opt wmenu.Opt) error {
wmenu.Clear()
id, proceed := getSignParams()
if !proceed {
fmt.Println("Transaction cancelled")
} else {
if SCID != "" {
sendTransaction(SCID, "Sign", "", 0, id)
} else {
fmt.Println("Please enter a SCID (Menu Option 1)")
}
}
mm := mainMenu()
return mm.Run()
})
menu.Option(menuItemStrings[displayUnsigned], displayUnsigned, false, func(opt wmenu.Opt) error {
wmenu.Clear()
displayTransactions(SCID, 1, "")
pressToContinue()
mm := mainMenu()
return mm.Run()
})
menu.Option(menuItemStrings[displaySigned], displaySigned, false, func(opt wmenu.Opt) error {
wmenu.Clear()
displayTransactions(SCID, 2, "")
pressToContinue()
mm := mainMenu()
return mm.Run()
})
menu.Option(menuItemStrings[displayAll], displayAll, false, func(opt wmenu.Opt) error {
wmenu.Clear()
displayTransactions(SCID, 0, "")
pressToContinue()
mm := mainMenu()
return mm.Run()
})
menu.Option(menuItemStrings[displayID], displayID, false, func(opt wmenu.Opt) error {
wmenu.Clear()
txIDno := getID()
displayByID(SCID, txIDno)
mm := mainMenu()
return mm.Run()
})
menu.Option(menuItemStrings[exit], exit, false, func(opt wmenu.Opt) error {
wmenu.Clear()
return nil //Exit
})
menu.Action(func(opts []wmenu.Opt) error {
if len(opts) != 1 {
return errors.New("wrong number of options chosen")
}
wmenu.Clear()
mm := mainMenu()
return mm.Run()
})
return menu
}
//Get SCID, save to memory
func getSCID() {
scanner := bufio.NewScanner(os.Stdin)
var text string
fmt.Print("Enter SCID: ")
scanner.Scan()
text = scanner.Text()
wmenu.Clear()
SCID = text
fmt.Println("SCID entered: ", text)
fmt.Print("Press 'Enter' to continue...")
bufio.NewReader(os.Stdin).ReadBytes('\n')
}
//Get tx ID to display
func getID() string {
scanner := bufio.NewScanner(os.Stdin)
var text string
fmt.Print("Enter Transaction ID No: ")
scanner.Scan()
text = scanner.Text()
wmenu.Clear()
fmt.Println("Transaction ID No entered: ", text)
fmt.Print("Press 'Enter' to continue...")
bufio.NewReader(os.Stdin).ReadBytes('\n')
return text
}
func pressToContinue() {
fmt.Print("Press 'Enter' to continue...")
bufio.NewReader(os.Stdin).ReadBytes('\n')
//wmenu.Clear()
}
//Enter deposit amount, return value.
func getDepositAmount() (int64, bool) {
scanner := bufio.NewScanner(os.Stdin)
var amountString string
fmt.Print("Enter deposit amount in Dero: ")
scanner.Scan()
amountString = scanner.Text()
wmenu.Clear()
fmt.Printf("Do you want to deposit %s Dero? Enter Y/N (Yes/No)", amountString)
confirmed := askForConfirmation()
if confirmed {
amountFloat, err := strconv.ParseFloat(amountString, 64) //convert to float64
if err != nil {
fmt.Println(err)
return 0, false
}
amount := int64(amountFloat * 1000000000000)
return amount, true
} else {
return 0, false
}
}
//Enter recipient address and amount, return values.
func getSendParams() (string, int64, bool) {
scanner := bufio.NewScanner(os.Stdin)
var recipient string
var amountString string
fmt.Print("Enter recipient address: ")
scanner.Scan()
recipient = scanner.Text()
wmenu.Clear()
fmt.Print("Enter deposit amount in Dero: ")
scanner.Scan()
amountString = scanner.Text()
wmenu.Clear()
fmt.Printf("Do you want to send %s Dero to %s? Enter Y/N (Yes/No)", amountString, recipient)
confirmed := askForConfirmation()
if confirmed {
amountFloat, err := strconv.ParseFloat(amountString, 64) //convert to float64
if err != nil {
fmt.Println(err)
return "", 0, false
}
amount := int64(amountFloat * 1000000000000)
return recipient, amount, true
} else {
return "", 0, false
}
}
//Enter transaction ID, return value.
func getSignParams() (string, bool) {
scanner := bufio.NewScanner(os.Stdin)
var id string
fmt.Print("Enter transaction ID: ")
scanner.Scan()
id = scanner.Text()
wmenu.Clear()
fmt.Printf("Do you want to sign transaction %s? Enter Y/N (Yes/No)", id)
confirmed := askForConfirmation()
if confirmed {
return id, true
} else {
return "", false
}
}
// The following 3 functions were taken directly from https://gist.github.com/albrow/5882501
// askForConfirmation uses Scanln to parse user input. A user must type in "yes" or "no" and
// then press enter. It has fuzzy matching, so "y", "Y", "yes", "YES", and "Yes" all count as
// confirmations. If the input is not recognized, it will ask again. The function does not return
// until it gets a valid response from the user. Typically, you should use fmt to print out a question
// before calling askForConfirmation. E.g. fmt.Println("WARNING: Are you sure? (yes/no)")
func askForConfirmation() bool {
var response string
_, err := fmt.Scanln(&response)
if err != nil {
fmt.Println("Error")
}
okayResponses := []string{"y", "Y", "yes", "Yes", "YES"}
nokayResponses := []string{"n", "N", "no", "No", "NO"}
if containsString(okayResponses, response) {
return true
} else if containsString(nokayResponses, response) {
return false
} else {
fmt.Println("Please type yes or no and then press enter:")
return askForConfirmation()
}
}
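// A minimal usage sketch (assumed, not in the original source): ask before
// broadcasting anything to the wallet.
func confirmBroadcast() bool {
fmt.Println("Broadcast this transaction? (yes/no)")
return askForConfirmation()
}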
// posString returns the first index of element in slice.
// If slice does not contain element, returns -1.
func posString(slice []string, element string) int {
for index, elem := range slice {
if elem == element {
return index
}
}
return -1
}
// containsString returns true if slice contains element
func containsString(slice []string, element string) bool |
/*-----------------------------------------------------------RPC Functions-----------------------------------------------------------------*/
//sendTransaction: send a transaction to the wallet or sign a transaction. entry should be "Send" or "Sign".
func sendTransaction(scid string, entry string, to string, amount int64, id string) {
walletURL:= "http://127.0.0.1:30309/json_rpc"
var amountString string
if amount == 0 {
amountString = ""
} else {
amountString = strconv.FormatInt(amount, 10)
}
data := PayloadGeneral{
Jsonrpc: "2.0",
ID: "0",
Method: "transfer_split",
Params: Params2{
Mixin: 5,
GetTxKey: true,
ScTx: ScTx2{
Entrypoint: entry,
Scid: scid,
Value: 0,
Params: Params3{
To: to,
Amount: amountString,
ID: id,
},
},
},
}
payloadBytes, err := json.Marshal(data)
if err != nil {
fmt.Println(err)
return
}
body := bytes.NewReader(payloadBytes)
_, err = rpcPost(body, walletURL)
if err != nil {
fmt.Println(err)
return
}
//println(result)
fmt.Println("Transaction sent to wallet!")
}
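// Assumed usage, for illustration only: "Send" supplies to/amount, "Sign"
// supplies just the transaction ID, matching the menu handlers above.
func exampleUsage(scid string, txID string) {
sendTransaction(scid, "Send", "<recipient address>", 5*1000000000000, "")
sendTransaction(scid, "Sign", "", 0, txID)
}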
//deposit: Deposit Dero to SC
func deposit(scid string, amount int64) {
walletURL:= "http://127.0.0.1:30309/json_rpc"
data:= PayloadDeposit{
Jsonrpc: "2.0",
ID: "0",
Method: "transfer_split",
Params: Params{
Mixin: 5,
GetTxKey: true,
ScTx: ScTx{
Entrypoint: "Deposit",
Scid: scid,
Value: amount,
},
},
| {
return !(posString(slice, element) == -1)
} | identifier_body |
multisig.go | := mainMenu()
return mm.Run()
})
menu.Option(menuItemStrings[displayUnsigned], displayUnsigned, false, func(opt wmenu.Opt) error {
wmenu.Clear()
displayTransactions(SCID, 1, "")
pressToContinue()
mm := mainMenu()
return mm.Run()
})
menu.Option(menuItemStrings[displaySigned], displaySigned, false, func(opt wmenu.Opt) error {
wmenu.Clear()
displayTransactions(SCID, 2, "")
pressToContinue()
mm := mainMenu()
return mm.Run()
})
menu.Option(menuItemStrings[displayAll], displayAll, false, func(opt wmenu.Opt) error {
wmenu.Clear()
displayTransactions(SCID, 0, "")
pressToContinue()
mm := mainMenu()
return mm.Run()
})
menu.Option(menuItemStrings[displayID], displayID, false, func(opt wmenu.Opt) error {
wmenu.Clear()
txIDno := getID()
displayByID(SCID, txIDno)
mm := mainMenu()
return mm.Run()
})
menu.Option(menuItemStrings[exit], exit, false, func(opt wmenu.Opt) error {
wmenu.Clear()
return nil //Exit
})
menu.Action(func(opts []wmenu.Opt) error {
if len(opts) != 1 {
return errors.New("wrong number of options chosen")
}
wmenu.Clear()
mm := mainMenu()
return mm.Run()
})
return menu
}
//Get SCID, save to memory
func getSCID() {
scanner := bufio.NewScanner(os.Stdin)
var text string
fmt.Print("Enter SCID: ")
scanner.Scan()
text = scanner.Text()
wmenu.Clear()
SCID = text
fmt.Println("SCID entered: ", text)
fmt.Print("Press 'Enter' to continue...")
bufio.NewReader(os.Stdin).ReadBytes('\n')
}
//Get tx ID to display
func getID() string {
scanner := bufio.NewScanner(os.Stdin)
var text string
fmt.Print("Enter Transaction ID No: ")
scanner.Scan()
text = scanner.Text()
wmenu.Clear()
fmt.Println("Transaction ID No entered: ", text)
fmt.Print("Press 'Enter' to continue...")
bufio.NewReader(os.Stdin).ReadBytes('\n')
return text
}
func pressToContinue() {
fmt.Print("Press 'Enter' to continue...")
bufio.NewReader(os.Stdin).ReadBytes('\n')
//wmenu.Clear()
}
//Enter deposit amount, return value.
func getDepositAmount() (int64, bool) {
scanner := bufio.NewScanner(os.Stdin)
var amountString string
fmt.Print("Enter deposit amount in Dero: ")
scanner.Scan()
amountString = scanner.Text()
wmenu.Clear()
fmt.Printf("Do you want to deposit %s Dero? Enter Y/N (Yes/No)", amountString)
confirmed := askForConfirmation()
if confirmed {
amountFloat, err := strconv.ParseFloat(amountString, 64) //convert to float64
if err != nil {
fmt.Println(err)
return 0, false
}
amount := int64(amountFloat * 1000000000000)
return amount, true
} else {
return 0, false
}
}
//Enter recipient address and amount, return values.
func getSendParams() (string, int64, bool) {
scanner := bufio.NewScanner(os.Stdin)
var recipient string
var amountString string
fmt.Print("Enter recipient address: ")
scanner.Scan()
recipient = scanner.Text()
wmenu.Clear()
fmt.Print("Enter deposit amount in Dero: ")
scanner.Scan()
amountString = scanner.Text()
wmenu.Clear()
fmt.Printf("Do you want to send %s Dero to %s? Enter Y/N (Yes/No)", amountString, recipient)
confirmed := askForConfirmation()
if confirmed {
amountFloat, err := strconv.ParseFloat(amountString, 64) //convert to float64
if err != nil {
fmt.Println(err)
return "", 0, false
}
amount := int64(amountFloat * 1000000000000)
return recipient, amount, true
} else {
return "", 0, false
}
}
//Enter transaction ID, return value.
func getSignParams() (string, bool) {
scanner := bufio.NewScanner(os.Stdin)
var id string
fmt.Print("Enter transaction ID: ")
scanner.Scan()
id = scanner.Text()
wmenu.Clear()
fmt.Printf("Do you want to sign transaction %s? Enter Y/N (Yes/No)", id)
confirmed := askForConfirmation()
if confirmed {
return id, true
} else {
return "", false
}
}
// The following 3 functions were taken directly from https://gist.github.com/albrow/5882501
// askForConfirmation uses Scanln to parse user input. A user must type in "yes" or "no" and
// then press enter. It has fuzzy matching, so "y", "Y", "yes", "YES", and "Yes" all count as
// confirmations. If the input is not recognized, it will ask again. The function does not return
// until it gets a valid response from the user. Typically, you should use fmt to print out a question
// before calling askForConfirmation. E.g. fmt.Println("WARNING: Are you sure? (yes/no)")
func askForConfirmation() bool {
var response string
_, err := fmt.Scanln(&response)
if err != nil {
fmt.Println("Error")
}
okayResponses := []string{"y", "Y", "yes", "Yes", "YES"}
nokayResponses := []string{"n", "N", "no", "No", "NO"}
if containsString(okayResponses, response) {
return true
} else if containsString(nokayResponses, response) {
return false
} else {
fmt.Println("Please type yes or no and then press enter:")
return askForConfirmation()
}
}
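// Illustrative only: pairing a printed question with the confirmation loop,
// mirroring the recommended pattern from the comment above.
func confirmCancel() bool {
fmt.Println("Cancel the pending transaction? (yes/no)")
return askForConfirmation()
}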
// posString returns the first index of element in slice.
// If slice does not contain element, returns -1.
func posString(slice []string, element string) int {
for index, elem := range slice {
if elem == element {
return index
}
}
return -1
}
// containsString returns true if slice contains element
func containsString(slice []string, element string) bool {
return !(posString(slice, element) == -1)
}
/*-----------------------------------------------------------RPC Functions-----------------------------------------------------------------*/
//sendTransaction: send a transaction to the wallet or sign a transaction. entry should be "Send" or "Sign".
func sendTransaction(scid string, entry string, to string, amount int64, id string) {
walletURL:= "http://127.0.0.1:30309/json_rpc"
var amountString string
if amount == 0 {
amountString = ""
} else {
amountString = strconv.FormatInt(amount, 10)
}
data := PayloadGeneral{
Jsonrpc: "2.0",
ID: "0",
Method: "transfer_split",
Params: Params2{
Mixin: 5,
GetTxKey: true,
ScTx: ScTx2{
Entrypoint: entry,
Scid: scid,
Value: 0,
Params: Params3{
To: to,
Amount: amountString,
ID: id,
},
},
},
}
payloadBytes, err := json.Marshal(data)
if err != nil {
fmt.Println(err)
return
}
body := bytes.NewReader(payloadBytes)
_, err = rpcPost(body, walletURL)
if err != nil {
fmt.Println(err)
return
}
//println(result)
fmt.Println("Transaction sent to wallet!")
}
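// Illustrative wrapper (hypothetical name) showing how a caller would sign a
// previously proposed transaction by ID.
func signByID(scid string, txID string) {
sendTransaction(scid, "Sign", "", 0, txID)
}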
//deposit: Deposit Dero to SC
func deposit(scid string, amount int64) {
walletURL:= "http://127.0.0.1:30309/json_rpc"
data:= PayloadDeposit{
Jsonrpc: "2.0",
ID: "0",
Method: "transfer_split",
Params: Params{
Mixin: 5,
GetTxKey: true,
ScTx: ScTx{
Entrypoint: "Deposit",
Scid: scid,
Value: amount,
},
},
}
payloadBytes, err := json.Marshal(data)
if err != nil {
fmt.Println(err)
return
}
body := bytes.NewReader(payloadBytes)
_, err = rpcPost(body, walletURL)
if err != nil {
fmt.Println(err)
return
}
//println(result)
fmt.Println("Deposit sent to wallet!")
}
//getKeysFromDaemon: send an RPC call with a list of keys, check for errors, and return the raw response as a string for JSON extraction
func | getKeysFromDaemon | identifier_name |
|
local.rs | // by python, but they're not, so...
file_dbs: ShardedLmdb::new(files_root, 100 * GIGABYTES, executor.clone(), lease_time)
.map(Arc::new),
directory_dbs: ShardedLmdb::new(
directories_root,
5 * GIGABYTES,
executor.clone(),
lease_time,
)
.map(Arc::new),
executor,
}),
})
}
pub fn executor(&self) -> &task_executor::Executor {
&self.inner.executor
}
pub async fn entry_type(&self, fingerprint: Fingerprint) -> Result<Option<EntryType>, String> {
if fingerprint == EMPTY_DIGEST.0 {
// Technically this is valid as both; choose Directory in case a caller is checking whether
// it _can_ be a Directory.
return Ok(Some(EntryType::Directory));
}
// In parallel, check for the given fingerprint in both databases.
let d_dbs = self.inner.directory_dbs.clone()?;
let is_dir = d_dbs.exists(fingerprint);
let f_dbs = self.inner.file_dbs.clone()?;
let is_file = f_dbs.exists(fingerprint);
// TODO: Could technically use select to return slightly more quickly with the first
// affirmative answer, but this is simpler.
match future::try_join(is_dir, is_file).await? {
(true, _) => Ok(Some(EntryType::Directory)),
(_, true) => Ok(Some(EntryType::File)),
(false, false) => Ok(None),
}
}
pub async fn lease_all(
&self,
digests: impl Iterator<Item = (Digest, EntryType)>,
) -> Result<(), String> {
// NB: Lease extension happens periodically in the background, so this code needn't be parallel.
for (digest, entry_type) in digests {
let dbs = match entry_type {
EntryType::File => self.inner.file_dbs.clone(),
EntryType::Directory => self.inner.directory_dbs.clone(),
};
dbs?
.lease(digest.0)
.await
.map_err(|err| format!("Error leasing digest {:?}: {}", digest, err))?;
}
Ok(())
}
///
/// Attempts to shrink the stored files to be no bigger than target_bytes
/// (excluding lmdb overhead).
///
/// Returns the size it was shrunk to, which may be larger than target_bytes.
///
/// TODO: Use LMDB database statistics when lmdb-rs exposes them.
///
pub fn shrink(
&self,
target_bytes: usize,
shrink_behavior: ShrinkBehavior,
) -> Result<usize, String> {
let mut used_bytes: usize = 0;
let mut fingerprints_by_expired_ago = BinaryHeap::new();
self.aged_fingerprints(
EntryType::File,
&mut used_bytes,
&mut fingerprints_by_expired_ago,
)?;
self.aged_fingerprints(
EntryType::Directory,
&mut used_bytes,
&mut fingerprints_by_expired_ago,
)?;
while used_bytes > target_bytes {
let aged_fingerprint = fingerprints_by_expired_ago
.pop()
.expect("lmdb corruption detected, sum of size of blobs exceeded stored blobs");
if aged_fingerprint.expired_seconds_ago == 0 {
// Ran out of expired blobs - everything remaining is leased and cannot be collected.
return Ok(used_bytes);
}
let lmdbs = match aged_fingerprint.entry_type {
EntryType::File => self.inner.file_dbs.clone(),
EntryType::Directory => self.inner.directory_dbs.clone(),
};
let (env, database, lease_database) = lmdbs.clone()?.get(&aged_fingerprint.fingerprint);
{
env
.begin_rw_txn()
.and_then(|mut txn| {
let key = VersionedFingerprint::new(
aged_fingerprint.fingerprint,
ShardedLmdb::schema_version(),
);
txn.del(database, &key, None)?;
txn
.del(lease_database, &key, None)
.or_else(|err| match err {
NotFound => Ok(()),
err => Err(err),
})?;
used_bytes -= aged_fingerprint.size_bytes;
txn.commit()
})
.map_err(|err| format!("Error garbage collecting: {}", err))?;
}
}
if shrink_behavior == ShrinkBehavior::Compact {
self.inner.file_dbs.clone()?.compact()?;
}
Ok(used_bytes)
}
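// Sketch of a caller, assumed rather than taken from this crate: shrink the
// store to roughly 2 GiB of blobs and compact the file database afterwards.
fn example_gc(store: &ByteStore) -> Result<usize, String> {
store.shrink(2 * GIGABYTES, ShrinkBehavior::Compact)
}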
fn aged_fingerprints(
&self,
entry_type: EntryType,
used_bytes: &mut usize,
fingerprints_by_expired_ago: &mut BinaryHeap<AgedFingerprint>,
) -> Result<(), String> {
let database = match entry_type {
EntryType::File => self.inner.file_dbs.clone(),
EntryType::Directory => self.inner.directory_dbs.clone(),
};
for &(ref env, ref database, ref lease_database) in &database?.all_lmdbs() {
let txn = env
.begin_ro_txn()
.map_err(|err| format!("Error beginning transaction to garbage collect: {}", err))?;
let mut cursor = txn
.open_ro_cursor(*database)
.map_err(|err| format!("Failed to open lmdb read cursor: {}", err))?;
for (key, bytes) in cursor.iter() {
*used_bytes += bytes.len();
// Random access into the lease_database is slower than iterating, but hopefully garbage
// collection is rare enough that we can get away with this, rather than do two passes
// here (either to populate leases into pre-populated AgedFingerprints, or to read sizes
// when we delete from lmdb to track how much we've freed).
let lease_until_unix_timestamp = txn
.get(*lease_database, &key)
.map(|b| {
let mut array = [0_u8; 8];
array.copy_from_slice(b);
u64::from_le_bytes(array)
})
.unwrap_or_else(|e| match e {
NotFound => 0,
e => panic!("Error reading lease, probable lmdb corruption: {:?}", e),
});
let leased_until = time::UNIX_EPOCH + Duration::from_secs(lease_until_unix_timestamp);
let expired_seconds_ago = time::SystemTime::now()
.duration_since(leased_until)
.map(|t| t.as_secs())
// 0 indicates unleased.
.unwrap_or(0);
let v = VersionedFingerprint::from_bytes_unsafe(key);
let fingerprint = v.get_fingerprint();
fingerprints_by_expired_ago.push(AgedFingerprint {
expired_seconds_ago,
fingerprint,
size_bytes: bytes.len(),
entry_type,
});
}
}
Ok(())
}
pub async fn store_bytes(
&self,
entry_type: EntryType,
bytes: Bytes,
initial_lease: bool,
) -> Result<Digest, String> {
let dbs = match entry_type {
EntryType::Directory => self.inner.directory_dbs.clone(),
EntryType::File => self.inner.file_dbs.clone(),
};
let bytes2 = bytes.clone();
let digest = self
.inner
.executor
.spawn_blocking(move || {
let fingerprint = {
let mut hasher = Sha256::default();
hasher.input(&bytes);
Fingerprint::from_bytes_unsafe(hasher.fixed_result().as_slice())
};
Digest(fingerprint, bytes.len())
})
.await;
dbs?.store_bytes(digest.0, bytes2, initial_lease).await?;
Ok(digest)
}
///
/// Loads bytes from the underlying LMDB store using the given function. Because the database is
/// blocking, this accepts a function that views a slice rather than returning a clone of the
/// data. The upshot is that the database is able to provide slices directly into shared memory.
///
/// The provided function is guaranteed to be called in a context where it is safe to block.
///
pub async fn load_bytes_with<T: Send + 'static, F: Fn(&[u8]) -> T + Send + Sync + 'static>(
&self,
entry_type: EntryType,
digest: Digest,
f: F,
) -> Result<Option<T>, String> {
if digest == EMPTY_DIGEST {
// Avoid I/O for this case. This allows some client-provided operations (like merging
// snapshots) to work without needing to first store the empty snapshot.
//
// To maintain the guarantee that the given function is called in a blocking context, we
// spawn it as a task.
return Ok(Some(self.executor().spawn_blocking(move || f(&[])).await));
}
let dbs = match entry_type {
EntryType::Directory => self.inner.directory_dbs.clone(),
EntryType::File => self.inner.file_dbs.clone(),
};
dbs?.load_bytes_with(digest.0, move |bytes| {
if bytes.len() == digest.1 {
Ok(f(bytes))
} else | {
Err(format!("Got hash collision reading from store - digest {:?} was requested, but retrieved bytes with that fingerprint had length {}. Congratulations, you may have broken sha256! Underlying bytes: {:?}", digest, bytes.len(), bytes))
} | conditional_block |
|
local.rs | : AsRef<Path>>(
executor: task_executor::Executor,
path: P,
) -> Result<ByteStore, String> {
Self::new_with_lease_time(executor, path, DEFAULT_LEASE_TIME)
}
pub fn new_with_lease_time<P: AsRef<Path>>(
executor: task_executor::Executor,
path: P,
lease_time: Duration,
) -> Result<ByteStore, String> {
let root = path.as_ref();
let files_root = root.join("files");
let directories_root = root.join("directories");
Ok(ByteStore {
inner: Arc::new(InnerStore {
// We want these stores to be allowed to grow very large, in case we are on a system with
// large disks which doesn't want to GC a lot.
// It doesn't reflect space allocated on disk, or RAM allocated (it may be reflected in
// VIRT but not RSS). There is no practical upper bound on this number, so we set them
// ridiculously high.
// However! We set them lower than we'd otherwise choose because sometimes we see tests on
// travis fail because they can't allocate virtual memory, if there are multiple Stores
// in memory at the same time. We don't know why they're not efficiently garbage collected
// by python, but they're not, so...
file_dbs: ShardedLmdb::new(files_root, 100 * GIGABYTES, executor.clone(), lease_time)
.map(Arc::new),
directory_dbs: ShardedLmdb::new(
directories_root,
5 * GIGABYTES,
executor.clone(),
lease_time,
)
.map(Arc::new),
executor,
}),
})
}
pub fn executor(&self) -> &task_executor::Executor {
&self.inner.executor
}
pub async fn entry_type(&self, fingerprint: Fingerprint) -> Result<Option<EntryType>, String> {
if fingerprint == EMPTY_DIGEST.0 {
// Technically this is valid as both; choose Directory in case a caller is checking whether
// it _can_ be a Directory.
return Ok(Some(EntryType::Directory));
}
// In parallel, check for the given fingerprint in both databases.
let d_dbs = self.inner.directory_dbs.clone()?;
let is_dir = d_dbs.exists(fingerprint);
let f_dbs = self.inner.file_dbs.clone()?;
let is_file = f_dbs.exists(fingerprint);
// TODO: Could technically use select to return slightly more quickly with the first
// affirmative answer, but this is simpler.
match future::try_join(is_dir, is_file).await? {
(true, _) => Ok(Some(EntryType::Directory)),
(_, true) => Ok(Some(EntryType::File)),
(false, false) => Ok(None),
}
}
pub async fn lease_all(
&self,
digests: impl Iterator<Item = (Digest, EntryType)>,
) -> Result<(), String> {
// NB: Lease extension happens periodically in the background, so this code needn't be parallel.
for (digest, entry_type) in digests {
let dbs = match entry_type {
EntryType::File => self.inner.file_dbs.clone(),
EntryType::Directory => self.inner.directory_dbs.clone(),
};
dbs?
.lease(digest.0)
.await
.map_err(|err| format!("Error leasing digest {:?}: {}", digest, err))?;
}
Ok(())
}
///
/// Attempts to shrink the stored files to be no bigger than target_bytes
/// (excluding lmdb overhead).
///
/// Returns the size it was shrunk to, which may be larger than target_bytes.
///
/// TODO: Use LMDB database statistics when lmdb-rs exposes them.
///
pub fn shrink(
&self,
target_bytes: usize,
shrink_behavior: ShrinkBehavior,
) -> Result<usize, String> {
let mut used_bytes: usize = 0;
let mut fingerprints_by_expired_ago = BinaryHeap::new();
self.aged_fingerprints(
EntryType::File,
&mut used_bytes,
&mut fingerprints_by_expired_ago,
)?;
self.aged_fingerprints(
EntryType::Directory,
&mut used_bytes,
&mut fingerprints_by_expired_ago,
)?;
while used_bytes > target_bytes {
let aged_fingerprint = fingerprints_by_expired_ago
.pop()
.expect("lmdb corruption detected, sum of size of blobs exceeded stored blobs");
if aged_fingerprint.expired_seconds_ago == 0 {
// Ran out of expired blobs - everything remaining is leased and cannot be collected.
return Ok(used_bytes);
}
let lmdbs = match aged_fingerprint.entry_type {
EntryType::File => self.inner.file_dbs.clone(),
EntryType::Directory => self.inner.directory_dbs.clone(),
};
let (env, database, lease_database) = lmdbs.clone()?.get(&aged_fingerprint.fingerprint);
{ | .begin_rw_txn()
.and_then(|mut txn| {
let key = VersionedFingerprint::new(
aged_fingerprint.fingerprint,
ShardedLmdb::schema_version(),
);
txn.del(database, &key, None)?;
txn
.del(lease_database, &key, None)
.or_else(|err| match err {
NotFound => Ok(()),
err => Err(err),
})?;
used_bytes -= aged_fingerprint.size_bytes;
txn.commit()
})
.map_err(|err| format!("Error garbage collecting: {}", err))?;
}
}
if shrink_behavior == ShrinkBehavior::Compact {
self.inner.file_dbs.clone()?.compact()?;
}
Ok(used_bytes)
}
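// Hypothetical call site: a tighter 1 GiB target, compacting afterwards. The
// returned value is the size actually reached, which may exceed the target.
fn example_shrink_to_one_gig(store: &ByteStore) -> Result<usize, String> {
store.shrink(GIGABYTES, ShrinkBehavior::Compact)
}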
fn aged_fingerprints(
&self,
entry_type: EntryType,
used_bytes: &mut usize,
fingerprints_by_expired_ago: &mut BinaryHeap<AgedFingerprint>,
) -> Result<(), String> {
let database = match entry_type {
EntryType::File => self.inner.file_dbs.clone(),
EntryType::Directory => self.inner.directory_dbs.clone(),
};
for &(ref env, ref database, ref lease_database) in &database?.all_lmdbs() {
let txn = env
.begin_ro_txn()
.map_err(|err| format!("Error beginning transaction to garbage collect: {}", err))?;
let mut cursor = txn
.open_ro_cursor(*database)
.map_err(|err| format!("Failed to open lmdb read cursor: {}", err))?;
for (key, bytes) in cursor.iter() {
*used_bytes += bytes.len();
// Random access into the lease_database is slower than iterating, but hopefully garbage
// collection is rare enough that we can get away with this, rather than do two passes
// here (either to populate leases into pre-populated AgedFingerprints, or to read sizes
// when we delete from lmdb to track how much we've freed).
let lease_until_unix_timestamp = txn
.get(*lease_database, &key)
.map(|b| {
let mut array = [0_u8; 8];
array.copy_from_slice(b);
u64::from_le_bytes(array)
})
.unwrap_or_else(|e| match e {
NotFound => 0,
e => panic!("Error reading lease, probable lmdb corruption: {:?}", e),
});
let leased_until = time::UNIX_EPOCH + Duration::from_secs(lease_until_unix_timestamp);
let expired_seconds_ago = time::SystemTime::now()
.duration_since(leased_until)
.map(|t| t.as_secs())
// 0 indicates unleased.
.unwrap_or(0);
let v = VersionedFingerprint::from_bytes_unsafe(key);
let fingerprint = v.get_fingerprint();
fingerprints_by_expired_ago.push(AgedFingerprint {
expired_seconds_ago,
fingerprint,
size_bytes: bytes.len(),
entry_type,
});
}
}
Ok(())
}
pub async fn store_bytes(
&self,
entry_type: EntryType,
bytes: Bytes,
initial_lease: bool,
) -> Result<Digest, String> {
let dbs = match entry_type {
EntryType::Directory => self.inner.directory_dbs.clone(),
EntryType::File => self.inner.file_dbs.clone(),
};
let bytes2 = bytes.clone();
let digest = self
.inner
.executor
.spawn_blocking(move || {
let fingerprint = {
let mut hasher = Sha256::default();
hasher.input(&bytes);
Fingerprint::from_bytes_unsafe(hasher.fixed_result().as_slice())
};
Digest(fingerprint, bytes.len())
})
.await;
dbs?.store_bytes(digest.0, bytes2, initial_lease).await?;
Ok(digest)
}
///
/// Loads bytes from the underlying LMDB store using the given function. Because the database is
/// blocking, this accepts a function that views a slice rather than returning a clone of the
/// data. The upshot is that the database is able to provide slices directly into shared memory.
///
/// The provided function is guaranteed to be called in a context where it is safe to block | env | random_line_split |
local.rs | ,
path: P,
) -> Result<ByteStore, String> {
Self::new_with_lease_time(executor, path, DEFAULT_LEASE_TIME)
}
pub fn new_with_lease_time<P: AsRef<Path>>(
executor: task_executor::Executor,
path: P,
lease_time: Duration,
) -> Result<ByteStore, String> {
let root = path.as_ref();
let files_root = root.join("files");
let directories_root = root.join("directories");
Ok(ByteStore {
inner: Arc::new(InnerStore {
// We want these stores to be allowed to grow very large, in case we are on a system with
// large disks which doesn't want to GC a lot.
// It doesn't reflect space allocated on disk, or RAM allocated (it may be reflected in
// VIRT but not RSS). There is no practical upper bound on this number, so we set them
// ridiculously high.
// However! We set them lower than we'd otherwise choose because sometimes we see tests on
// travis fail because they can't allocate virtual memory, if there are multiple Stores
// in memory at the same time. We don't know why they're not efficiently garbage collected
// by python, but they're not, so...
file_dbs: ShardedLmdb::new(files_root, 100 * GIGABYTES, executor.clone(), lease_time)
.map(Arc::new),
directory_dbs: ShardedLmdb::new(
directories_root,
5 * GIGABYTES,
executor.clone(),
lease_time,
)
.map(Arc::new),
executor,
}),
})
}
pub fn executor(&self) -> &task_executor::Executor {
&self.inner.executor
}
pub async fn entry_type(&self, fingerprint: Fingerprint) -> Result<Option<EntryType>, String> {
if fingerprint == EMPTY_DIGEST.0 {
// Technically this is valid as both; choose Directory in case a caller is checking whether
// it _can_ be a Directory.
return Ok(Some(EntryType::Directory));
}
// In parallel, check for the given fingerprint in both databases.
let d_dbs = self.inner.directory_dbs.clone()?;
let is_dir = d_dbs.exists(fingerprint);
let f_dbs = self.inner.file_dbs.clone()?;
let is_file = f_dbs.exists(fingerprint);
// TODO: Could technically use select to return slightly more quickly with the first
// affirmative answer, but this is simpler.
match future::try_join(is_dir, is_file).await? {
(true, _) => Ok(Some(EntryType::Directory)),
(_, true) => Ok(Some(EntryType::File)),
(false, false) => Ok(None),
}
}
pub async fn lease_all(
&self,
digests: impl Iterator<Item = (Digest, EntryType)>,
) -> Result<(), String> {
// NB: Lease extension happens periodically in the background, so this code needn't be parallel.
for (digest, entry_type) in digests {
let dbs = match entry_type {
EntryType::File => self.inner.file_dbs.clone(),
EntryType::Directory => self.inner.directory_dbs.clone(),
};
dbs?
.lease(digest.0)
.await
.map_err(|err| format!("Error leasing digest {:?}: {}", digest, err))?;
}
Ok(())
}
///
/// Attempts to shrink the stored files to be no bigger than target_bytes
/// (excluding lmdb overhead).
///
/// Returns the size it was shrunk to, which may be larger than target_bytes.
///
/// TODO: Use LMDB database statistics when lmdb-rs exposes them.
///
pub fn shrink(
&self,
target_bytes: usize,
shrink_behavior: ShrinkBehavior,
) -> Result<usize, String> {
let mut used_bytes: usize = 0;
let mut fingerprints_by_expired_ago = BinaryHeap::new();
self.aged_fingerprints(
EntryType::File,
&mut used_bytes,
&mut fingerprints_by_expired_ago,
)?;
self.aged_fingerprints(
EntryType::Directory,
&mut used_bytes,
&mut fingerprints_by_expired_ago,
)?;
while used_bytes > target_bytes {
let aged_fingerprint = fingerprints_by_expired_ago
.pop()
.expect("lmdb corruption detected, sum of size of blobs exceeded stored blobs");
if aged_fingerprint.expired_seconds_ago == 0 {
// Ran out of expired blobs - everything remaining is leased and cannot be collected.
return Ok(used_bytes);
}
let lmdbs = match aged_fingerprint.entry_type {
EntryType::File => self.inner.file_dbs.clone(),
EntryType::Directory => self.inner.directory_dbs.clone(),
};
let (env, database, lease_database) = lmdbs.clone()?.get(&aged_fingerprint.fingerprint);
{
env
.begin_rw_txn()
.and_then(|mut txn| {
let key = VersionedFingerprint::new(
aged_fingerprint.fingerprint,
ShardedLmdb::schema_version(),
);
txn.del(database, &key, None)?;
txn
.del(lease_database, &key, None)
.or_else(|err| match err {
NotFound => Ok(()),
err => Err(err),
})?;
used_bytes -= aged_fingerprint.size_bytes;
txn.commit()
})
.map_err(|err| format!("Error garbage collecting: {}", err))?;
}
}
if shrink_behavior == ShrinkBehavior::Compact {
self.inner.file_dbs.clone()?.compact()?;
}
Ok(used_bytes)
}
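// Illustrative only: callers treat the Ok value as the post-GC footprint and
// may log when it still exceeds the requested target.
fn example_report_gc(store: &ByteStore) -> Result<(), String> {
let reached = store.shrink(4 * GIGABYTES, ShrinkBehavior::Compact)?;
if reached > 4 * GIGABYTES {
println!("all remaining blobs are leased; store is {} bytes", reached);
}
Ok(())
}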
fn aged_fingerprints(
&self,
entry_type: EntryType,
used_bytes: &mut usize,
fingerprints_by_expired_ago: &mut BinaryHeap<AgedFingerprint>,
) -> Result<(), String> {
let database = match entry_type {
EntryType::File => self.inner.file_dbs.clone(),
EntryType::Directory => self.inner.directory_dbs.clone(),
};
for &(ref env, ref database, ref lease_database) in &database?.all_lmdbs() {
let txn = env
.begin_ro_txn()
.map_err(|err| format!("Error beginning transaction to garbage collect: {}", err))?;
let mut cursor = txn
.open_ro_cursor(*database)
.map_err(|err| format!("Failed to open lmdb read cursor: {}", err))?;
for (key, bytes) in cursor.iter() {
*used_bytes += bytes.len();
// Random access into the lease_database is slower than iterating, but hopefully garbage
// collection is rare enough that we can get away with this, rather than do two passes
// here (either to populate leases into pre-populated AgedFingerprints, or to read sizes
// when we delete from lmdb to track how much we've freed).
let lease_until_unix_timestamp = txn
.get(*lease_database, &key)
.map(|b| {
let mut array = [0_u8; 8];
array.copy_from_slice(b);
u64::from_le_bytes(array)
})
.unwrap_or_else(|e| match e {
NotFound => 0,
e => panic!("Error reading lease, probable lmdb corruption: {:?}", e),
});
let leased_until = time::UNIX_EPOCH + Duration::from_secs(lease_until_unix_timestamp);
let expired_seconds_ago = time::SystemTime::now()
.duration_since(leased_until)
.map(|t| t.as_secs())
// 0 indicates unleased.
.unwrap_or(0);
let v = VersionedFingerprint::from_bytes_unsafe(key);
let fingerprint = v.get_fingerprint();
fingerprints_by_expired_ago.push(AgedFingerprint {
expired_seconds_ago,
fingerprint,
size_bytes: bytes.len(),
entry_type,
});
}
}
Ok(())
}
pub async fn store_bytes(
&self,
entry_type: EntryType,
bytes: Bytes,
initial_lease: bool,
) -> Result<Digest, String> {
let dbs = match entry_type {
EntryType::Directory => self.inner.directory_dbs.clone(),
EntryType::File => self.inner.file_dbs.clone(),
};
let bytes2 = bytes.clone();
let digest = self
.inner
.executor
.spawn_blocking(move || {
let fingerprint = {
let mut hasher = Sha256::default();
hasher.input(&bytes);
Fingerprint::from_bytes_unsafe(hasher.fixed_result().as_slice())
};
Digest(fingerprint, bytes.len())
})
.await;
dbs?.store_bytes(digest.0, bytes2, initial_lease).await?;
Ok(digest)
}
///
/// Loads bytes from the underlying LMDB store using the given function. Because the database is
/// blocking, this accepts a function that views a slice rather than returning a clone of the
/// data. The upshot is that the database is able to provide slices directly into shared memory.
///
/// The provided function is guaranteed to be called in a context where it is safe to block.
///
pub async fn | load_bytes_with | identifier_name |
|
runtime.rs | palette::pixel::{Srgb};
use rusttype::{FontCollection};
use tiled;
use calcium_game::{LoopTimer};
use calcium_rendering::{Error};
use calcium_rendering::texture::{Texture};
use calcium_rendering_2d::render_data::{RenderBatch, ShaderMode, Rectangle, Projection, RenderData, RenderSet, UvMode};
use calcium_rendering_2d::{Renderer2DTarget};
use calcium_rendering_context::{Runtime, Context};
use calcium_rendering::raw::{RendererRaw};
use model::{Map};
use view::{MapRenderer};
struct FriendlyUnit<R: RendererRaw> {
name: String,
tex: Arc<Texture<R>>,
selecttex: Arc<Texture<R>>,
position: Point2<f32>,
size: Vector2<f32>,
speed: f32,
selected: bool,
tabrel: f32,
}
impl<R: RendererRaw> FriendlyUnit<R> {
pub fn new(name: String, tex: Arc<Texture<R>>, selecttex: Arc<Texture<R>>, position: Point2<f32>, size: Vector2<f32>, speed: f32) -> FriendlyUnit<R> {
FriendlyUnit { name, tex, selecttex, position, size, speed, selected: false, tabrel: 0.0 }
}
pub fn update(&mut self, delta: f32, selected: bool, pinput: &PlayerInput) {
/* do update-y things */
self.tabrel -= delta;
if self.tabrel <= 0.0 && pinput.tab {
//println!("I am {}, Selection Status: {}.", self.name, selected);
self.tabrel = 0.1;
self.selected = selected;
}
if self.selected |
}
pub fn render(&mut self, batches: &mut Vec<RenderBatch<R>>) {
//let mut batches = Vec::new();
let mut normaltexture = RenderBatch::new(
ShaderMode::Texture(self.tex.clone()), UvMode::YDown
);
normaltexture.push_rectangle_full_texture(
// position is centered in the texture
Rectangle::new(self.position + -self.size/2.0, self.position + self.size/2.0)
);
batches.push(normaltexture);
if self.selected {
let mut selectiontexture = RenderBatch::new(
ShaderMode::Texture(self.selecttex.clone()), UvMode::YDown
);
selectiontexture.push_rectangle_full_texture(
Rectangle::new(self.position + -self.size, self.position + self.size)
);
batches.push(selectiontexture);
}
}
pub fn get_position(&mut self) -> Point2<f32> {
self.position
}
pub fn get_name(&mut self) -> &String {
&self.name
}
}
struct PlayerInput {
pub w: bool,
pub a: bool,
pub s: bool,
pub d: bool,
pub tab: bool,
}
pub struct StaticRuntime {
pub log: Logger,
}
impl Runtime for StaticRuntime {
fn run<C: Context>(self, context: C) -> Result<(), Error> {
info!(self.log, "Loading program");
// Set up everything we need to render
let window_settings = WindowSettings::new("RPG Game", [1280, 720]);
let (mut renderer, mut window) =
context.renderer(Some(self.log.clone()), &window_settings)?;
let mut simple2d_renderer = context.simple2d_renderer(&mut renderer)?;
let mut simple2d_render_target = Renderer2DTarget::new(
true, &renderer, &simple2d_renderer
);
let mut ui_renderer = FlowyRenderer::new(&mut renderer)?;
let mut ui = Ui::new();
let root_id = ui.elements.root_id();
let font = FontCollection::from_bytes(
::ttf_noto_sans::REGULAR
).into_font().unwrap();
ui.fonts.push(font);
let fps = Element::new(Style {
position: Position::Relative(Point2::new(0.0, 0.0), SideH::Right, SideV::Top),
size: Size::units(120.0, 14.0),
text_color: Srgb::new(1.0, 1.0, 1.0).into(),
text_size: 14.0,
.. Style::new()
});
let fps_id = ui.elements.add_child(fps, root_id);
{
let fpso = &mut ui.elements[fps_id];
fpso.set_text("test text".to_string());
}
// Units data
let friendly_texture = Texture::new()
.from_file("./assets/friendly.png")
.with_nearest_sampling()
.build(&mut renderer)?;
let selection_texture = Texture::new()
.from_file("./assets/selection.png")
.with_nearest_sampling()
.build(&mut renderer)?;
// Set up the game map's tiles
let map_path = PathBuf::from("./assets/test_map.tmx");
let tmap = tiled::parse_file(&map_path).unwrap();
let map = Map::new(&tmap, &self.log);
let map_renderer = MapRenderer::new(&tmap, &map_path, &mut renderer)?;
let mut players_units = Vec::new();
let alfred = FriendlyUnit::new(String::from("Alfred"), friendly_texture.clone(), selection_texture.clone(), Point2::new(200.0, 200.0), Vector2::new(32.0, 32.0), 256.0);
let bertil = FriendlyUnit::new(String::from("Bertil"), friendly_texture.clone(), selection_texture.clone(), Point2::new(300.0, 300.0), Vector2::new(32.0, 32.0), 256.0);
let carl = FriendlyUnit::new(String::from("Carl"), friendly_texture.clone(), selection_texture.clone(), Point2::new(400.0, 400.0), Vector2::new(32.0, 32.0), 256.0);
let dagobert = FriendlyUnit::new(String::from("Dagobert"), friendly_texture.clone(), selection_texture.clone(), Point2::new(300.0, 500.0), Vector2::new(32.0, 32.0), 256.0);
players_units.push(alfred);
players_units.push(bertil);
players_units.push(carl);
players_units.push(dagobert);
let (mut selected_unit, mut tabrelease) = (3, 0.1);
let (mut left_pressed, mut right_pressed, mut up_pressed, mut down_pressed, mut tab_pressed) =
(false, false, false, false, false);
// Run the actual game loop
let mut timer = LoopTimer::start();
info!(self.log, "Finished loading, starting main loop");
while !window.should_close() {
let delta = timer.tick();
// Handle input
while let Some(event) = window.poll_event() {
// Let the context handle anything needed
context.handle_event(&event, &mut renderer, &mut window);
match event {
Input::Button(ButtonArgs {state, button, scancode: _scancode}) => {
let press = state == ButtonState::Press;
match button {
Button::Keyboard(Key::A) =>
left_pressed = press,
Button::Keyboard(Key::D) =>
right_pressed = press,
Button::Keyboard(Key::W) =>
up_pressed = press,
Button::Keyboard(Key::S) =>
down_pressed = press,
Button::Keyboard(Key::Tab) =>
tab_pressed = press,
_ => {},
}
},
_ => {},
}
}
let pinput = PlayerInput {w: up_pressed, a: left_pressed, s: down_pressed, d: right_pressed, tab: tab_pressed};
{
let fpso = &mut ui.elements[fps_id];
fpso.style_mut().position = Position::Relative(players_units[selected_unit].get_position(), SideH::Left, SideV::Top);
fpso.set_text(players_units[selected_unit].get_name().clone());
}
// TODO: kill this
tabrelease -= delta;
if tabrelease <= 0.0 && tab_pressed {
if selected_unit == 3 {
selected_unit = 0;
}
else {
selected_unit += 1;
}
tabrelease = 0.1;
println!("selected unit is now {}", selected_unit);
}
// Update the player units
for (i, unit) in players_units.iter_mut().enumerate() {
unit.update(delta, i == selected_unit, &pinput);
}
// Set up the rendering data we'll need
let mut render_data = RenderData::new();
let mut world_batches = Vec::new();
let camera_size = renderer.size().cast();
// Render the tiles
map | {
if pinput.w {self.position.y -= self.speed * delta;}
if pinput.a {self.position.x -= self.speed * delta;}
if pinput.s {self.position.y += self.speed * delta;}
if pinput.d {self.position.x += self.speed * delta;}
} | conditional_block |
runtime.rs | palette::pixel::{Srgb};
use rusttype::{FontCollection};
use tiled;
use calcium_game::{LoopTimer};
use calcium_rendering::{Error};
use calcium_rendering::texture::{Texture};
use calcium_rendering_2d::render_data::{RenderBatch, ShaderMode, Rectangle, Projection, RenderData, RenderSet, UvMode};
use calcium_rendering_2d::{Renderer2DTarget};
use calcium_rendering_context::{Runtime, Context};
use calcium_rendering::raw::{RendererRaw};
use model::{Map};
use view::{MapRenderer};
struct FriendlyUnit<R: RendererRaw> {
name: String,
tex: Arc<Texture<R>>,
selecttex: Arc<Texture<R>>,
position: Point2<f32>,
size: Vector2<f32>,
speed: f32,
selected: bool,
tabrel: f32,
}
impl<R: RendererRaw> FriendlyUnit<R> {
pub fn new(name: String, tex: Arc<Texture<R>>, selecttex: Arc<Texture<R>>, position: Point2<f32>, size: Vector2<f32>, speed: f32) -> FriendlyUnit<R> {
FriendlyUnit { name, tex, selecttex, position, size, speed, selected: false, tabrel: 0.0 }
}
pub fn update(&mut self, delta: f32, selected: bool, pinput: &PlayerInput) {
// advance the selection debounce timer, then apply WASD movement while selected
self.tabrel -= delta;
if self.tabrel <= 0.0 && pinput.tab {
//println!("I am {}, Selection Status: {}.", self.name, selected);
self.tabrel = 0.1;
self.selected = selected;
}
if self.selected {
if pinput.w {self.position.y -= self.speed * delta;}
if pinput.a {self.position.x -= self.speed * delta;}
if pinput.s {self.position.y += self.speed * delta;}
if pinput.d {self.position.x += self.speed * delta;}
}
}
pub fn render(&mut self, batches: &mut Vec<RenderBatch<R>>) {
//let mut batches = Vec::new();
let mut normaltexture = RenderBatch::new(
ShaderMode::Texture(self.tex.clone()), UvMode::YDown
);
normaltexture.push_rectangle_full_texture(
// position is centered in the texture
Rectangle::new(self.position + -self.size/2.0, self.position + self.size/2.0)
);
batches.push(normaltexture);
if self.selected {
let mut selectiontexture = RenderBatch::new(
ShaderMode::Texture(self.selecttex.clone()), UvMode::YDown
);
selectiontexture.push_rectangle_full_texture(
Rectangle::new(self.position + -self.size, self.position + self.size)
);
batches.push(selectiontexture);
}
}
pub fn | (&mut self) -> Point2<f32> {
self.position
}
pub fn get_name(&mut self) -> &String {
&self.name
}
}
struct PlayerInput {
pub w: bool,
pub a: bool,
pub s: bool,
pub d: bool,
pub tab: bool,
}
pub struct StaticRuntime {
pub log: Logger,
}
impl Runtime for StaticRuntime {
fn run<C: Context>(self, context: C) -> Result<(), Error> {
info!(self.log, "Loading program");
// Set up everything we need to render
let window_settings = WindowSettings::new("RPG Game", [1280, 720]);
let (mut renderer, mut window) =
context.renderer(Some(self.log.clone()), &window_settings)?;
let mut simple2d_renderer = context.simple2d_renderer(&mut renderer)?;
let mut simple2d_render_target = Renderer2DTarget::new(
true, &renderer, &simple2d_renderer
);
let mut ui_renderer = FlowyRenderer::new(&mut renderer)?;
let mut ui = Ui::new();
let root_id = ui.elements.root_id();
let font = FontCollection::from_bytes(
::ttf_noto_sans::REGULAR
).into_font().unwrap();
ui.fonts.push(font);
let fps = Element::new(Style {
position: Position::Relative(Point2::new(0.0, 0.0), SideH::Right, SideV::Top),
size: Size::units(120.0, 14.0),
text_color: Srgb::new(1.0, 1.0, 1.0).into(),
text_size: 14.0,
.. Style::new()
});
let fps_id = ui.elements.add_child(fps, root_id);
{
let fpso = &mut ui.elements[fps_id];
fpso.set_text(format!("test text"));
}
// Units data
let friendly_texture = Texture::new()
.from_file("./assets/friendly.png")
.with_nearest_sampling()
.build(&mut renderer)?;
let selection_texture = Texture::new()
.from_file("./assets/selection.png")
.with_nearest_sampling()
.build(&mut renderer)?;
// Set up the game map's tiles
let map_path = PathBuf::from("./assets/test_map.tmx");
let tmap = tiled::parse_file(&map_path).unwrap();
let map = Map::new(&tmap, &self.log);
let map_renderer = MapRenderer::new(&tmap, &map_path, &mut renderer)?;
let mut players_units = Vec::new();
let alfred = FriendlyUnit::new(String::from("Alfred"), friendly_texture.clone(), selection_texture.clone(), Point2::new(200.0,200.0), Vector2::new(32.0,32.0), 256.0 );
let bertil = FriendlyUnit::new(String::from("Bertil"), friendly_texture.clone(), selection_texture.clone(), Point2::new(300.0,300.0), Vector2::new(32.0,32.0), 256.0 );
let carl = FriendlyUnit::new(String::from("Carl"), friendly_texture.clone(), selection_texture.clone(), Point2::new(400.0,400.0), Vector2::new(32.0,32.0), 256.0 );
let dagobert = FriendlyUnit::new(String::from("Dagobert"), friendly_texture.clone(), selection_texture.clone(), Point2::new(300.0,500.0), Vector2::new(32.0,32.0), 256.0 );
players_units.push(alfred);
players_units.push(bertil);
players_units.push(carl);
players_units.push(dagobert);
let (mut selected_unit, mut tabrelease) = (3,0.1);
let (mut left_pressed, mut right_pressed, mut up_pressed, mut down_pressed, mut tab_pressed) =
(false, false, false, false, false);
// Run the actual game loop
let mut timer = LoopTimer::start();
info!(self.log, "Finished loading, starting main loop");
while !window.should_close() {
let delta = timer.tick();
// Handle input
while let Some(event) = window.poll_event() {
// Let the context handle anything needed
context.handle_event(&event, &mut renderer, &mut window);
match event {
Input::Button(ButtonArgs {state, button, scancode: _scancode}) => {
let press = state == ButtonState::Press;
match button {
Button::Keyboard(Key::A) =>
left_pressed = press,
Button::Keyboard(Key::D) =>
right_pressed = press,
Button::Keyboard(Key::W) =>
up_pressed = press,
Button::Keyboard(Key::S) =>
down_pressed = press,
Button::Keyboard(Key::Tab) =>
tab_pressed = press,
_ => {},
}
},
_ => {},
}
}
let pinput = PlayerInput {w: up_pressed, a: left_pressed, s: down_pressed, d: right_pressed, tab: tab_pressed};
{
let fpso = &mut ui.elements[fps_id];
fpso.style_mut().position = Position::Relative(players_units[selected_unit].get_position(), SideH::Left, SideV::Top);
fpso.set_text(players_units[selected_unit].get_name().clone());
}
// TODO: kill this
tabrelease -= delta;
if tabrelease <= 0.0 && tab_pressed {
if selected_unit == 3 {
selected_unit = 0;
}
else {
selected_unit+=1;
}
tabrelease = 0.1;
println!("selected unit is now {}", selected_unit);
}
// Update the player units
for (i, unit) in players_units.iter_mut().enumerate() {
unit.update(delta, i == selected_unit, &pinput);
}
// Set up the rendering data we'll need
let mut render_data = RenderData::new();
let mut world_batches = Vec::new();
let camera_size = renderer.size().cast();
// Render the tiles
map_renderer | get_position | identifier_name |
runtime.rs | use palette::pixel::{Srgb};
use rusttype::{FontCollection};
use tiled;
use calcium_game::{LoopTimer};
use calcium_rendering::{Error};
use calcium_rendering::texture::{Texture};
use calcium_rendering_2d::render_data::{RenderBatch, ShaderMode, Rectangle, Projection, RenderData, RenderSet, UvMode};
use calcium_rendering_2d::{Renderer2DTarget};
use calcium_rendering_context::{Runtime, Context};
use calcium_rendering::raw::{RendererRaw};
use model::{Map};
use view::{MapRenderer};
struct FriendlyUnit<R: RendererRaw> {
name: String,
tex: Arc<Texture<R>>,
selecttex: Arc<Texture<R>>,
position: Point2<f32>,
size: Vector2<f32>,
speed: f32,
selected: bool,
tabrel: f32,
}
impl <R: RendererRaw> FriendlyUnit<R> {
pub fn new(name: String, tex: Arc<Texture<R>>, selecttex: Arc<Texture<R>>, position: Point2<f32>, size: Vector2<f32>, speed: f32) -> FriendlyUnit<R> {
FriendlyUnit {name: name, tex: tex, selecttex: selecttex, position: position, size: size, speed: speed, selected: false, tabrel: 0.0}
}
pub fn update(&mut self, delta: f32, selected: bool, pinput: &PlayerInput) {
/* do update-y things */
self.tabrel -= delta;
if self.tabrel <= 0.0 && pinput.tab {
//println!("I am {}, Selection Status: {}.", self.name, selected);
self.tabrel = 0.1;
self.selected = selected;
}
if self.selected {
if pinput.w {self.position.y -= self.speed * delta;}
if pinput.a {self.position.x -= self.speed * delta;}
if pinput.s {self.position.y += self.speed * delta;}
if pinput.d {self.position.x += self.speed * delta;}
} | );
normaltexture.push_rectangle_full_texture(
// position is centered in the texture
Rectangle::new(self.position + -self.size/2.0, self.position + self.size/2.0)
);
batches.push(normaltexture);
if self.selected {
let mut selectiontexture = RenderBatch::new(
ShaderMode::Texture(self.selecttex.clone()), UvMode::YDown
);
selectiontexture.push_rectangle_full_texture(
Rectangle::new(self.position + -self.size, self.position + self.size)
);
batches.push(selectiontexture);
}
}
pub fn get_position(&mut self) -> Point2<f32> {
self.position
}
pub fn get_name(&mut self) -> &String {
&self.name
}
}
struct PlayerInput {
pub w: bool,
pub a: bool,
pub s: bool,
pub d: bool,
pub tab: bool,
}
pub struct StaticRuntime {
pub log: Logger,
}
impl Runtime for StaticRuntime {
fn run<C: Context>(self, context: C) -> Result<(), Error> {
info!(self.log, "Loading program");
// Set up everything we need to render
let window_settings = WindowSettings::new("RPG Game", [1280, 720]);
let (mut renderer, mut window) =
context.renderer(Some(self.log.clone()), &window_settings)?;
let mut simple2d_renderer = context.simple2d_renderer(&mut renderer)?;
let mut simple2d_render_target = Renderer2DTarget::new(
true, &renderer, &simple2d_renderer
);
let mut ui_renderer = FlowyRenderer::new(&mut renderer)?;
let mut ui = Ui::new();
let root_id = ui.elements.root_id();
let font = FontCollection::from_bytes(
::ttf_noto_sans::REGULAR
).into_font().unwrap();
ui.fonts.push(font);
let fps = Element::new(Style {
position: Position::Relative(Point2::new(0.0, 0.0), SideH::Right, SideV::Top),
size: Size::units(120.0, 14.0),
text_color: Srgb::new(1.0, 1.0, 1.0).into(),
text_size: 14.0,
.. Style::new()
});
let fps_id = ui.elements.add_child(fps, root_id);
{
let fpso = &mut ui.elements[fps_id];
fpso.set_text(format!("test text"));
}
// Units data
let friendly_texture = Texture::new()
.from_file("./assets/friendly.png")
.with_nearest_sampling()
.build(&mut renderer)?;
let selection_texture = Texture::new()
.from_file("./assets/selection.png")
.with_nearest_sampling()
.build(&mut renderer)?;
// Set up the game map's tiles
let map_path = PathBuf::from("./assets/test_map.tmx");
let tmap = tiled::parse_file(&map_path).unwrap();
let map = Map::new(&tmap, &self.log);
let map_renderer = MapRenderer::new(&tmap, &map_path, &mut renderer)?;
let mut players_units = Vec::new();
let alfred = FriendlyUnit::new(String::from("Alfred"), friendly_texture.clone(), selection_texture.clone(), Point2::new(200.0,200.0), Vector2::new(32.0,32.0), 256.0 );
let bertil = FriendlyUnit::new(String::from("Bertil"), friendly_texture.clone(), selection_texture.clone(), Point2::new(300.0,300.0), Vector2::new(32.0,32.0), 256.0 );
let carl = FriendlyUnit::new(String::from("Carl"), friendly_texture.clone(), selection_texture.clone(), Point2::new(400.0,400.0), Vector2::new(32.0,32.0), 256.0 );
let dagobert = FriendlyUnit::new(String::from("Dagobert"), friendly_texture.clone(), selection_texture.clone(), Point2::new(300.0,500.0), Vector2::new(32.0,32.0), 256.0 );
players_units.push(alfred);
players_units.push(bertil);
players_units.push(carl);
players_units.push(dagobert);
let (mut selected_unit, mut tabrelease) = (3,0.1);
let (mut left_pressed, mut right_pressed, mut up_pressed, mut down_pressed, mut tab_pressed) =
(false, false, false, false, false);
// Run the actual game loop
let mut timer = LoopTimer::start();
info!(self.log, "Finished loading, starting main loop");
while !window.should_close() {
let delta = timer.tick();
// Handle input
while let Some(event) = window.poll_event() {
// Let the context handle anything needed
context.handle_event(&event, &mut renderer, &mut window);
match event {
Input::Button(ButtonArgs {state, button, scancode: _scancode}) => {
let press = state == ButtonState::Press;
match button {
Button::Keyboard(Key::A) =>
left_pressed = press,
Button::Keyboard(Key::D) =>
right_pressed = press,
Button::Keyboard(Key::W) =>
up_pressed = press,
Button::Keyboard(Key::S) =>
down_pressed = press,
Button::Keyboard(Key::Tab) =>
tab_pressed = press,
_ => {},
}
},
_ => {},
}
}
let pinput = PlayerInput {w: up_pressed, a: left_pressed, s: down_pressed, d: right_pressed, tab: tab_pressed};
{
let fpso = &mut ui.elements[fps_id];
fpso.style_mut().position = Position::Relative(players_units[selected_unit].get_position(), SideH::Left, SideV::Top);
fpso.set_text(players_units[selected_unit].get_name().clone());
}
// TODO: kill this
tabrelease -= delta;
if tabrelease <= 0.0 && tab_pressed {
if selected_unit == 3 {
selected_unit = 0;
}
else {
selected_unit+=1;
}
tabrelease = 0.1;
println!("selected unit is now {}", selected_unit);
}
// Update the player units
for (i, unit) in players_units.iter_mut().enumerate() {
unit.update(delta, i == selected_unit, &pinput);
}
// Set up the rendering data we'll need
let mut render_data = RenderData::new();
let mut world_batches = Vec::new();
let camera_size = renderer.size().cast();
// Render the tiles
map_renderer.render | }
pub fn render(&mut self, batches: &mut Vec<RenderBatch<R>>) {
//let mut batches = Vec::new();
let mut normaltexture = RenderBatch::new(
ShaderMode::Texture(self.tex.clone()), UvMode::YDown | random_line_split |
runtime.rs | palette::pixel::{Srgb};
use rusttype::{FontCollection};
use tiled;
use calcium_game::{LoopTimer};
use calcium_rendering::{Error};
use calcium_rendering::texture::{Texture};
use calcium_rendering_2d::render_data::{RenderBatch, ShaderMode, Rectangle, Projection, RenderData, RenderSet, UvMode};
use calcium_rendering_2d::{Renderer2DTarget};
use calcium_rendering_context::{Runtime, Context};
use calcium_rendering::raw::{RendererRaw};
use model::{Map};
use view::{MapRenderer};
struct FriendlyUnit<R: RendererRaw> {
name: String,
tex: Arc<Texture<R>>,
selecttex: Arc<Texture<R>>,
position: Point2<f32>,
size: Vector2<f32>,
speed: f32,
selected: bool,
tabrel: f32,
}
impl <R: RendererRaw> FriendlyUnit<R> {
pub fn new(name: String, tex: Arc<Texture<R>>, selecttex: Arc<Texture<R>>, position: Point2<f32>, size: Vector2<f32>, speed: f32) -> FriendlyUnit<R> {
FriendlyUnit {name: name, tex: tex, selecttex: selecttex, position: position, size: size, speed: speed, selected: false, tabrel: 0.0}
}
pub fn update(&mut self, delta: f32, selected: bool, pinput: &PlayerInput) {
/* do update-y things */
self.tabrel -= delta;
if self.tabrel <= 0.0 && pinput.tab {
//println!("I am {}, Selection Status: {}.", self.name, selected);
self.tabrel = 0.1;
self.selected = selected;
}
if self.selected {
if pinput.w {self.position.y -= self.speed * delta;}
if pinput.a {self.position.x -= self.speed * delta;}
if pinput.s {self.position.y += self.speed * delta;}
if pinput.d {self.position.x += self.speed * delta;}
}
}
pub fn render(&mut self, batches: &mut Vec<RenderBatch<R>>) | }
pub fn get_position(&mut self) -> Point2<f32> {
self.position
}
pub fn get_name(&mut self) -> &String {
&self.name
}
}
struct PlayerInput {
pub w: bool,
pub a: bool,
pub s: bool,
pub d: bool,
pub tab: bool,
}
pub struct StaticRuntime {
pub log: Logger,
}
impl Runtime for StaticRuntime {
fn run<C: Context>(self, context: C) -> Result<(), Error> {
info!(self.log, "Loading program");
// Set up everything we need to render
let window_settings = WindowSettings::new("RPG Game", [1280, 720]);
let (mut renderer, mut window) =
context.renderer(Some(self.log.clone()), &window_settings)?;
let mut simple2d_renderer = context.simple2d_renderer(&mut renderer)?;
let mut simple2d_render_target = Renderer2DTarget::new(
true, &renderer, &simple2d_renderer
);
let mut ui_renderer = FlowyRenderer::new(&mut renderer)?;
let mut ui = Ui::new();
let root_id = ui.elements.root_id();
let font = FontCollection::from_bytes(
::ttf_noto_sans::REGULAR
).into_font().unwrap();
ui.fonts.push(font);
let fps = Element::new(Style {
position: Position::Relative(Point2::new(0.0, 0.0), SideH::Right, SideV::Top),
size: Size::units(120.0, 14.0),
text_color: Srgb::new(1.0, 1.0, 1.0).into(),
text_size: 14.0,
.. Style::new()
});
let fps_id = ui.elements.add_child(fps, root_id);
{
let fpso = &mut ui.elements[fps_id];
fpso.set_text(format!("test text"));
}
// Units data
let friendly_texture = Texture::new()
.from_file("./assets/friendly.png")
.with_nearest_sampling()
.build(&mut renderer)?;
let selection_texture = Texture::new()
.from_file("./assets/selection.png")
.with_nearest_sampling()
.build(&mut renderer)?;
// Set up the game map's tiles
let map_path = PathBuf::from("./assets/test_map.tmx");
let tmap = tiled::parse_file(&map_path).unwrap();
let map = Map::new(&tmap, &self.log);
let map_renderer = MapRenderer::new(&tmap, &map_path, &mut renderer)?;
let mut players_units = Vec::new();
let alfred = FriendlyUnit::new(String::from("Alfred"), friendly_texture.clone(), selection_texture.clone(), Point2::new(200.0,200.0), Vector2::new(32.0,32.0), 256.0 );
let bertil = FriendlyUnit::new(String::from("Bertil"), friendly_texture.clone(), selection_texture.clone(), Point2::new(300.0,300.0), Vector2::new(32.0,32.0), 256.0 );
let carl = FriendlyUnit::new(String::from("Carl"), friendly_texture.clone(), selection_texture.clone(), Point2::new(400.0,400.0), Vector2::new(32.0,32.0), 256.0 );
let dagobert = FriendlyUnit::new(String::from("Dagobert"), friendly_texture.clone(), selection_texture.clone(), Point2::new(300.0,500.0), Vector2::new(32.0,32.0), 256.0 );
players_units.push(alfred);
players_units.push(bertil);
players_units.push(carl);
players_units.push(dagobert);
let (mut selected_unit, mut tabrelease) = (3,0.1);
let (mut left_pressed, mut right_pressed, mut up_pressed, mut down_pressed, mut tab_pressed) =
(false, false, false, false, false);
// Run the actual game loop
let mut timer = LoopTimer::start();
info!(self.log, "Finished loading, starting main loop");
while !window.should_close() {
let delta = timer.tick();
// Handle input
while let Some(event) = window.poll_event() {
// Let the context handle anything needed
context.handle_event(&event, &mut renderer, &mut window);
match event {
Input::Button(ButtonArgs {state, button, scancode: _scancode}) => {
let press = state == ButtonState::Press;
match button {
Button::Keyboard(Key::A) =>
left_pressed = press,
Button::Keyboard(Key::D) =>
right_pressed = press,
Button::Keyboard(Key::W) =>
up_pressed = press,
Button::Keyboard(Key::S) =>
down_pressed = press,
Button::Keyboard(Key::Tab) =>
tab_pressed = press,
_ => {},
}
},
_ => {},
}
}
let pinput = PlayerInput {w: up_pressed, a: left_pressed, s: down_pressed, d: right_pressed, tab: tab_pressed};
{
let fpso = &mut ui.elements[fps_id];
fpso.style_mut().position = Position::Relative(players_units[selected_unit].get_position(), SideH::Left, SideV::Top);
fpso.set_text(players_units[selected_unit].get_name().clone());
}
// TODO: kill this
tabrelease -= delta;
if tabrelease <= 0.0 && tab_pressed {
if selected_unit == 3 {
selected_unit = 0;
}
else {
selected_unit+=1;
}
tabrelease = 0.1;
println!("selected unit is now {}", selected_unit);
}
// Update the player units
for (i, unit) in players_units.iter_mut().enumerate() {
unit.update(delta, i == selected_unit, &pinput);
}
// Set up the rendering data we'll need
let mut render_data = RenderData::new();
let mut world_batches = Vec::new();
let camera_size = renderer.size().cast();
// Render the tiles
map_renderer | {
//let mut batches = Vec::new();
let mut normaltexture = RenderBatch::new(
ShaderMode::Texture(self.tex.clone()), UvMode::YDown
);
normaltexture.push_rectangle_full_texture(
// position is centered in the texture
Rectangle::new(self.position + -self.size/2.0, self.position + self.size/2.0)
);
batches.push(normaltexture);
if self.selected {
let mut selectiontexture = RenderBatch::new(
ShaderMode::Texture(self.selecttex.clone()), UvMode::YDown
);
selectiontexture.push_rectangle_full_texture(
Rectangle::new(self.position + -self.size, self.position + self.size)
);
batches.push(selectiontexture);
} | identifier_body |
manager.py | elif self._filesystem.exists(self._artifacts_directory):
self._port.limit_archived_results_count()
# Rename the existing results folder for archiving.
self._port.rename_results_folder()
# Create the output directory if it doesn't already exist.
self._port.host.filesystem.maybe_make_directory(
self._artifacts_directory)
exit_code = self._port.setup_test_run()
if exit_code:
_log.error('Build setup failed')
return exit_code
# Check that the system dependencies (themes, fonts, ...) are correct.
if not self._options.nocheck_sys_deps:
self._printer.write_update('Checking system dependencies ...')
exit_code = self._port.check_sys_deps()
if exit_code:
return exit_code
return exit_codes.OK_EXIT_STATUS
def _run_tests(self,
tests_to_run,
tests_to_skip,
repeat_each,
iterations,
num_workers,
retry_attempt=0):
test_inputs = []
for _ in range(iterations):
for test in tests_to_run:
for _ in range(repeat_each):
test_inputs.append(
self._test_input_for_file(test, retry_attempt))
return self._runner.run_tests(self._expectations, test_inputs,
tests_to_skip, num_workers,
retry_attempt)
def _start_servers(self, tests_to_run):
if any(self._port.is_wpt_test(test) for test in tests_to_run):
self._printer.write_update('Starting WPTServe ...')
self._port.start_wptserve()
self._wptserve_started = True
if (self._port.requires_http_server()
or any(self._is_http_test(test) for test in tests_to_run)):
self._printer.write_update('Starting HTTP server ...')
self._port.start_http_server(
additional_dirs={},
number_of_drivers=self._options.max_locked_shards)
self._http_server_started = True
if any(self._is_websocket_test(test) for test in tests_to_run):
self._printer.write_update('Starting WebSocket server ...')
self._port.start_websocket_server()
self._websockets_server_started = True
def _stop_servers(self):
if self._wptserve_started:
self._printer.write_update('Stopping WPTServe ...')
self._wptserve_started = False
self._port.stop_wptserve()
if self._http_server_started:
self._printer.write_update('Stopping HTTP server ...')
self._http_server_started = False
self._port.stop_http_server()
if self._websockets_server_started:
self._printer.write_update('Stopping WebSocket server ...')
self._websockets_server_started = False
self._port.stop_websocket_server()
def _clean_up_run(self):
_log.debug('Flushing stdout')
sys.stdout.flush()
_log.debug('Flushing stderr')
sys.stderr.flush()
_log.debug('Cleaning up port')
self._port.clean_up_test_run()
if self._sink:
_log.debug('Closing sink')
self._sink.close()
def _look_for_new_crash_logs(self, run_results, start_time):
"""Looks for and writes new crash logs, at the end of the test run.
Since crash logs can take a long time to be written out if the system is
under stress, do a second pass at the end of the test run.
Args:
run_results: The results of the test run.
start_time: Time the tests started at. We're looking for crash
logs after that time.
"""
crashed_processes = []
test_to_crash_failure = {}
# reset static variables for Failure type classes
test_failures.AbstractTestResultType.port = self._port
test_failures.AbstractTestResultType.result_directory = self._results_directory
test_failures.AbstractTestResultType.filesystem = self._filesystem
for test, result in run_results.unexpected_results_by_name.items():
if result.type != ResultType.Crash:
continue
for failure in result.failures:
if (not isinstance(failure, test_failures.FailureCrash)
or failure.has_log):
continue
crashed_processes.append(
[test, failure.process_name, failure.pid])
test_to_crash_failure[test] = failure
sample_files = self._port.look_for_new_samples(crashed_processes,
start_time) or {}
for test, sample_file in sample_files.items():
test_failures.AbstractTestResultType.test_name = test
test_result = run_results.unexpected_results_by_name[test]
artifact_relative_path = self._port.output_filename(
test, test_failures.FILENAME_SUFFIX_SAMPLE, '.txt')
artifacts_sub_dir = test_result.artifacts.ArtifactsSubDirectory()
artifact_abspath = self._filesystem.join(self._results_directory,
artifacts_sub_dir,
artifact_relative_path)
self._filesystem.maybe_make_directory(
self._filesystem.dirname(artifact_abspath))
self._filesystem.copyfile(sample_file, artifact_abspath)
test_result.artifacts.AddArtifact(
'sample_file',
self._filesystem.join(artifacts_sub_dir,
artifact_relative_path))
new_crash_logs = self._port.look_for_new_crash_logs(
crashed_processes, start_time) or {}
for test, (crash_log, crash_site) in new_crash_logs.items():
test_failures.AbstractTestResultType.test_name = test
failure = test_to_crash_failure[test] # re-bind to this test's failure; the loop variable above would be stale here
failure.crash_log = crash_log
failure.has_log = self._port.output_contains_sanitizer_messages(
failure.crash_log)
test_result = run_results.unexpected_results_by_name[test]
test_result.crash_site = crash_site
test_to_crash_failure[test].create_artifacts(
test_result.artifacts, force_overwrite=True)
def _tests_to_retry(self, run_results):
# TODO(ojan): This should also check that result.type != test_expectations.MISSING
# since retrying missing expectations is silly. But that's a bit tricky since we
# only consider the last retry attempt for the count of unexpected regressions.
return [
result.test_name
for result in run_results.unexpected_results_by_name.values()
if result.type != ResultType.Pass
]
def _write_json_files(self, summarized_full_results,
summarized_failing_results, initial_results,
running_all_tests, run_histories):
_log.debug("Writing JSON files in %s.", self._artifacts_directory)
# FIXME: Upload stats.json to the server and delete times_ms.
times_trie = json_results_generator.test_timings_trie(
initial_results.results_by_name.values())
times_json_path = self._filesystem.join(self._artifacts_directory,
'times_ms.json')
json_results_generator.write_json(self._filesystem, times_trie,
times_json_path)
# Save out the times data so we can use it for --fastest in the future.
if running_all_tests:
bot_test_times_path = self._port.bot_test_times_path()
self._filesystem.maybe_make_directory(
self._filesystem.dirname(bot_test_times_path))
json_results_generator.write_json(self._filesystem, times_trie,
bot_test_times_path)
stats_trie = self._stats_trie(initial_results)
stats_path = self._filesystem.join(self._artifacts_directory,
'stats.json')
self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))
full_results_path = self._filesystem.join(self._artifacts_directory,
'full_results.json')
json_results_generator.write_json(
self._filesystem, summarized_full_results, full_results_path)
full_results_jsonp_path = self._filesystem.join(
self._artifacts_directory, 'full_results_jsonp.js')
json_results_generator.write_json(
self._filesystem,
summarized_full_results,
full_results_jsonp_path,
callback='ADD_FULL_RESULTS')
failing_results_path = self._filesystem.join(self._artifacts_directory,
'failing_results.json')
# We write failing_results.json out as jsonp because we need to load it
# from a file url for results.html and Chromium doesn't allow that.
json_results_generator.write_json(
self._filesystem,
summarized_failing_results,
failing_results_path,
callback='ADD_RESULTS')
if self._options.json_test_results:
json_results_generator.write_json(self._filesystem,
summarized_full_results,
self._options.json_test_results)
if self._options.write_run_histories_to:
json_results_generator.write_json(
self._filesystem, run_histories,
self._options.write_run_histories_to)
_log.debug('Finished writing JSON files.')
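# For reference (illustrative, not from the original file): a jsonp payload is
# just the JSON wrapped in the named callback -- e.g. ADD_RESULTS({"tests": ...});
# -- which is what lets results.html pull the data in through a <script> tag
# when the page itself is loaded from a file:// URL.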
def _copy_results_html_file(self, destination_dir, filename):
"""Copies a file from the template directory to the results directory."""
files_to_copy = [filename, filename + ".version"]
template_dir = self._path_finder.path_from_blink_tools(
'blinkpy', 'web_tests')
for filename in files_to_copy:
source_path = self._filesystem.join(template_dir, filename)
destination_path = self._filesystem.join(destination_dir, filename)
# Note that the results.html template file won't exist when
# we're using a MockFileSystem during unit tests, so make sure
# it exists before we try to copy it.
if self._filesystem.exists(source_path):
self._filesystem.copyfile(source_path, destination_path)
def _stats_trie(self, initial_results):
def _worker_number(worker_name):
| return int(worker_name.split('/')[1]) if worker_name else -1 | identifier_body |
|
manager.py | ':
# Restore the test order to user specified order.
# base.tests() may change the order as it returns tests in the
# real, external/wpt, virtual order.
if paths:
test_names = self._restore_order(paths, test_names)
if not self._options.no_expectations:
self._printer.write_update('Parsing expectations ...')
self._expectations = test_expectations.TestExpectations(self._port)
tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
self._printer.print_found(
len(all_test_names), len(test_names), len(tests_to_run),
self._options.repeat_each, self._options.iterations)
# Check to make sure we're not skipping every test.
if not tests_to_run:
msg = 'No tests to run.'
if self._options.zero_tests_executed_ok:
_log.info(msg)
# Keep executing to produce valid (but empty) results.
else:
_log.critical(msg)
code = exit_codes.NO_TESTS_EXIT_STATUS
return test_run_results.RunDetails(exit_code=code)
exit_code = self._set_up_run(tests_to_run)
if exit_code:
return test_run_results.RunDetails(exit_code=exit_code)
if self._options.num_retries is None:
# If --test-list is passed, or if no test narrowing is specified,
# default to 3 retries. Otherwise [e.g. if tests are being passed by
# name], default to 0 retries.
if self._options.test_list or len(paths) < len(test_names):
self._options.num_retries = 3
else:
self._options.num_retries = 0
should_retry_failures = self._options.num_retries > 0
try:
self._register_termination_handler()
self._start_servers(tests_to_run)
if self._options.watch:
run_results = self._run_test_loop(tests_to_run, tests_to_skip)
else:
run_results = self._run_test_once(tests_to_run, tests_to_skip,
should_retry_failures)
initial_results, all_retry_results = run_results
finally:
_log.info("Finally stop servers and clean up")
self._stop_servers()
self._clean_up_run()
if self._options.no_expectations:
return test_run_results.RunDetails(0, [], [], initial_results,
all_retry_results)
# Some crash logs can take a long time to be written out so look
# for new logs after the test run finishes.
self._printer.write_update('Looking for new crash logs ...')
self._look_for_new_crash_logs(initial_results, start_time)
for retry_attempt_results in all_retry_results:
self._look_for_new_crash_logs(retry_attempt_results, start_time)
self._printer.write_update('Summarizing results ...')
summarized_full_results = test_run_results.summarize_results(
self._port, self._options, self._expectations, initial_results,
all_retry_results)
summarized_failing_results = test_run_results.summarize_results(
self._port,
self._options,
self._expectations,
initial_results,
all_retry_results,
only_include_failing=True)
run_histories = test_run_results.test_run_histories(
self._options, self._expectations, initial_results,
all_retry_results)
exit_code = summarized_failing_results['num_regressions']
if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:
_log.warning('num regressions (%d) exceeds max exit status (%d)',
exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)
exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS
if not self._options.dry_run:
self._write_json_files(summarized_full_results,
summarized_failing_results, initial_results,
running_all_tests, run_histories)
self._copy_results_html_file(self._artifacts_directory,
'results.html')
if (initial_results.interrupt_reason is
test_run_results.InterruptReason.EXTERNAL_SIGNAL):
exit_code = exit_codes.INTERRUPTED_EXIT_STATUS
else:
if initial_results.interrupted:
exit_code = exit_codes.EARLY_EXIT_STATUS
if (self._options.show_results
and (exit_code or initial_results.total_failures)):
self._port.show_results_html_file(
self._filesystem.join(self._artifacts_directory,
'results.html'))
self._printer.print_results(time.time() - start_time,
initial_results)
return test_run_results.RunDetails(exit_code, summarized_full_results,
summarized_failing_results,
initial_results, all_retry_results)
def _register_termination_handler(self):
if self._port.host.platform.is_win():
signum = signal.SIGBREAK
else:
signum = signal.SIGTERM
signal.signal(signum, self._on_termination)
def _on_termination(self, signum, _frame):
self._printer.write_update(
'Received signal "%s" (%d) in %d' %
(signal.strsignal(signum), signum, os.getpid()))
raise KeyboardInterrupt
def | (self, tests_to_run, tests_to_skip):
# Don't show results in a new browser window because we're already
# printing the link to diffs in the loop
self._options.show_results = False
while True:
initial_results, all_retry_results = self._run_test_once(
tests_to_run, tests_to_skip, should_retry_failures=False)
for name in initial_results.failures_by_name:
failure = initial_results.failures_by_name[name][0]
if isinstance(failure, test_failures.FailureTextMismatch):
full_test_path = self._filesystem.join(
self._artifacts_directory, name)
filename, _ = self._filesystem.splitext(full_test_path)
pretty_diff_path = 'file://' + filename + '-pretty-diff.html'
self._printer.writeln('Link to pretty diff:')
self._printer.writeln(pretty_diff_path + '\n')
self._printer.writeln('Finished running tests')
user_input = self._port.host.user.prompt(
'Interactive watch mode: (q)uit (r)etry\n').lower()
if user_input == 'q' or user_input == 'quit':
return (initial_results, all_retry_results)
def _run_test_once(self, tests_to_run, tests_to_skip,
should_retry_failures):
num_workers = int(
self._port.num_workers(int(self._options.child_processes)))
initial_results = self._run_tests(
tests_to_run, tests_to_skip, self._options.repeat_each,
self._options.iterations, num_workers)
# Don't retry failures when interrupted by user or failures limit exception.
should_retry_failures = (should_retry_failures
and not initial_results.interrupted)
tests_to_retry = self._tests_to_retry(initial_results)
all_retry_results = []
if should_retry_failures and tests_to_retry:
for retry_attempt in range(1, self._options.num_retries + 1):
if not tests_to_retry:
break
_log.info('')
_log.info(
'Retrying %s, attempt %d of %d...',
grammar.pluralize('unexpected failure',
len(tests_to_retry)), retry_attempt,
self._options.num_retries)
retry_results = self._run_tests(
tests_to_retry,
tests_to_skip=set(),
repeat_each=1,
iterations=1,
num_workers=num_workers,
retry_attempt=retry_attempt)
all_retry_results.append(retry_results)
tests_to_retry = self._tests_to_retry(retry_results)
return (initial_results, all_retry_results)
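# Shape of the retry flow above (illustrative pseudo-code, not the real API):
#   results = run(all_tests)                  # attempt 0
#   for attempt in 1..num_retries:
#       failing = tests_to_retry(results)
#       if not failing: break
#       results = run(failing)                # fresh RunResults per attempt
#       all_retry_results.append(results)
# Keeping one result set per attempt is what later lets the summary separate
# flaky tests from consistently failing ones.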
def _restore_order(self, paths, test_names):
original_test_names = list(test_names)
test_names = []
for path in paths:
for test in original_test_names:
if test.startswith(path) or fnmatch.fnmatch(test, path):
test_names.append(test)
test_names += list(set(original_test_names) - set(test_names))
return test_names
def _collect_tests(self, args):
return self._finder.find_tests(
args,
test_lists=self._options.test_list,
filter_files=self._options.isolated_script_test_filter_file,
fastest_percentile=self._options.fastest,
filters=self._options.isolated_script_test_filter)
def _is_http_test(self, test):
return (
test.startswith(self.HTTP_SUBDIR + self._port.TEST_PATH_SEPARATOR)
or self._is_websocket_test(test) or self._port.TEST_PATH_SEPARATOR
+ self.HTTP_SUBDIR + self._port.TEST_PATH_SEPARATOR in test)
def _is_websocket_test(self, test):
if self._port.should_use_wptserve(test):
return False
return self.WEBSOCKET_SUBDIR + self._port.TEST_PATH_SEPARATOR in test
def _http_tests(self, test_names):
return set(test for test in test_names if self._is_http_test(test))
def _is_perf_test(self, test):
return (self.PERF_SUBDIR == test
or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test)
def _prepare_lists(self, paths, test_names):
tests_to_skip = self._finder.skip_tests(paths, test_names,
self._expectations)
tests_to_run = [
test for test in test_names if test not in tests_to_skip
]
return tests_to | _run_test_loop | identifier_name |
manager.py | ':
# Restore the test order to user specified order.
# base.tests() may change the order as it returns tests in the
# real, external/wpt, virtual order.
if paths:
test_names = self._restore_order(paths, test_names)
if not self._options.no_expectations:
self._printer.write_update('Parsing expectations ...')
self._expectations = test_expectations.TestExpectations(self._port)
tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
self._printer.print_found(
len(all_test_names), len(test_names), len(tests_to_run),
self._options.repeat_each, self._options.iterations)
# Check to make sure we're not skipping every test.
if not tests_to_run:
msg = 'No tests to run.'
if self._options.zero_tests_executed_ok:
_log.info(msg)
# Keep executing to produce valid (but empty) results.
else:
_log.critical(msg)
code = exit_codes.NO_TESTS_EXIT_STATUS
return test_run_results.RunDetails(exit_code=code)
exit_code = self._set_up_run(tests_to_run)
if exit_code:
return test_run_results.RunDetails(exit_code=exit_code)
if self._options.num_retries is None:
# If --test-list is passed, or if no test narrowing is specified,
# default to 3 retries. Otherwise [e.g. if tests are being passed by
# name], default to 0 retries.
if self._options.test_list or len(paths) < len(test_names):
self._options.num_retries = 3
else:
self._options.num_retries = 0
should_retry_failures = self._options.num_retries > 0
try:
self._register_termination_handler()
self._start_servers(tests_to_run)
if self._options.watch:
run_results = self._run_test_loop(tests_to_run, tests_to_skip)
else:
run_results = self._run_test_once(tests_to_run, tests_to_skip,
should_retry_failures)
initial_results, all_retry_results = run_results
finally:
_log.info("Finally stop servers and clean up")
self._stop_servers()
self._clean_up_run()
if self._options.no_expectations:
return test_run_results.RunDetails(0, [], [], initial_results,
all_retry_results)
# Some crash logs can take a long time to be written out so look
# for new logs after the test run finishes.
self._printer.write_update('Looking for new crash logs ...')
self._look_for_new_crash_logs(initial_results, start_time)
for retry_attempt_results in all_retry_results:
self._look_for_new_crash_logs(retry_attempt_results, start_time)
self._printer.write_update('Summarizing results ...')
summarized_full_results = test_run_results.summarize_results(
self._port, self._options, self._expectations, initial_results,
all_retry_results)
summarized_failing_results = test_run_results.summarize_results(
self._port,
self._options,
self._expectations,
initial_results,
all_retry_results,
only_include_failing=True)
run_histories = test_run_results.test_run_histories(
self._options, self._expectations, initial_results,
all_retry_results)
exit_code = summarized_failing_results['num_regressions']
if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:
_log.warning('num regressions (%d) exceeds max exit status (%d)',
exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)
exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS
if not self._options.dry_run:
self._write_json_files(summarized_full_results,
summarized_failing_results, initial_results,
running_all_tests, run_histories)
self._copy_results_html_file(self._artifacts_directory,
'results.html')
if (initial_results.interrupt_reason is
test_run_results.InterruptReason.EXTERNAL_SIGNAL):
exit_code = exit_codes.INTERRUPTED_EXIT_STATUS
else:
if initial_results.interrupted:
exit_code = exit_codes.EARLY_EXIT_STATUS
if (self._options.show_results
and (exit_code or initial_results.total_failures)):
self._port.show_results_html_file(
self._filesystem.join(self._artifacts_directory,
'results.html'))
self._printer.print_results(time.time() - start_time,
initial_results)
return test_run_results.RunDetails(exit_code, summarized_full_results,
summarized_failing_results,
initial_results, all_retry_results)
def _register_termination_handler(self):
if self._port.host.platform.is_win():
signum = signal.SIGBREAK
else:
signum = signal.SIGTERM
signal.signal(signum, self._on_termination)
def _on_termination(self, signum, _frame):
self._printer.write_update(
'Received signal "%s" (%d) in %d' %
(signal.strsignal(signum), signum, os.getpid()))
raise KeyboardInterrupt
def _run_test_loop(self, tests_to_run, tests_to_skip):
# Don't show results in a new browser window because we're already
# printing the link to diffs in the loop
self._options.show_results = False
while True:
initial_results, all_retry_results = self._run_test_once(
tests_to_run, tests_to_skip, should_retry_failures=False)
for name in initial_results.failures_by_name:
failure = initial_results.failures_by_name[name][0]
if isinstance(failure, test_failures.FailureTextMismatch):
full_test_path = self._filesystem.join(
self._artifacts_directory, name)
filename, _ = self._filesystem.splitext(full_test_path)
pretty_diff_path = 'file://' + filename + '-pretty-diff.html'
self._printer.writeln('Link to pretty diff:')
self._printer.writeln(pretty_diff_path + '\n')
self._printer.writeln('Finished running tests')
user_input = self._port.host.user.prompt(
'Interactive watch mode: (q)uit (r)etry\n').lower()
if user_input == 'q' or user_input == 'quit':
return (initial_results, all_retry_results)
def _run_test_once(self, tests_to_run, tests_to_skip,
should_retry_failures):
num_workers = int(
self._port.num_workers(int(self._options.child_processes)))
initial_results = self._run_tests(
tests_to_run, tests_to_skip, self._options.repeat_each,
self._options.iterations, num_workers)
# Don't retry failures when interrupted by user or failures limit exception.
should_retry_failures = (should_retry_failures
and not initial_results.interrupted)
tests_to_retry = self._tests_to_retry(initial_results)
all_retry_results = []
if should_retry_failures and tests_to_retry:
for retry_attempt in range(1, self._options.num_retries + 1):
if not tests_to_retry:
break
_log.info('')
_log.info(
'Retrying %s, attempt %d of %d...',
grammar.pluralize('unexpected failure',
len(tests_to_retry)), retry_attempt,
self._options.num_retries)
retry_results = self._run_tests(
tests_to_retry,
tests_to_skip=set(),
repeat_each=1,
iterations=1,
num_workers=num_workers,
retry_attempt=retry_attempt)
all_retry_results.append(retry_results)
tests_to_retry = self._tests_to_retry(retry_results)
return (initial_results, all_retry_results)
def _restore_order(self, paths, test_names):
original_test_names = list(test_names)
test_names = []
for path in paths:
for test in original_test_names:
|
test_names += list(set(original_test_names) - set(test_names))
return test_names
def _collect_tests(self, args):
return self._finder.find_tests(
args,
test_lists=self._options.test_list,
filter_files=self._options.isolated_script_test_filter_file,
fastest_percentile=self._options.fastest,
filters=self._options.isolated_script_test_filter)
def _is_http_test(self, test):
return (
test.startswith(self.HTTP_SUBDIR + self._port.TEST_PATH_SEPARATOR)
or self._is_websocket_test(test) or self._port.TEST_PATH_SEPARATOR
+ self.HTTP_SUBDIR + self._port.TEST_PATH_SEPARATOR in test)
def _is_websocket_test(self, test):
if self._port.should_use_wptserve(test):
return False
return self.WEBSOCKET_SUBDIR + self._port.TEST_PATH_SEPARATOR in test
def _http_tests(self, test_names):
return set(test for test in test_names if self._is_http_test(test))
def _is_perf_test(self, test):
return (self.PERF_SUBDIR == test
or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test)
def _prepare_lists(self, paths, test_names):
tests_to_skip = self._finder.skip_tests(paths, test_names,
self._expectations)
tests_to_run = [
test for test in test_names if test not in tests_to_skip
]
return tests | if test.startswith(path) or fnmatch.fnmatch(test, path):
test_names.append(test) | conditional_block |
manager.py | ':
# Restore the test order to user specified order.
# base.tests() may change the order as it returns tests in the
# real, external/wpt, virtual order.
if paths:
test_names = self._restore_order(paths, test_names)
if not self._options.no_expectations:
self._printer.write_update('Parsing expectations ...')
self._expectations = test_expectations.TestExpectations(self._port)
tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
self._printer.print_found(
len(all_test_names), len(test_names), len(tests_to_run),
self._options.repeat_each, self._options.iterations)
# Check to make sure we're not skipping every test.
if not tests_to_run:
msg = 'No tests to run.'
if self._options.zero_tests_executed_ok:
_log.info(msg)
# Keep executing to produce valid (but empty) results.
else:
_log.critical(msg)
code = exit_codes.NO_TESTS_EXIT_STATUS
return test_run_results.RunDetails(exit_code=code)
exit_code = self._set_up_run(tests_to_run)
if exit_code:
return test_run_results.RunDetails(exit_code=exit_code)
if self._options.num_retries is None:
# If --test-list is passed, or if no test narrowing is specified,
# default to 3 retries. Otherwise [e.g. if tests are being passed by
# name], default to 0 retries.
if self._options.test_list or len(paths) < len(test_names):
self._options.num_retries = 3
else:
self._options.num_retries = 0
should_retry_failures = self._options.num_retries > 0
try:
self._register_termination_handler()
self._start_servers(tests_to_run)
if self._options.watch:
run_results = self._run_test_loop(tests_to_run, tests_to_skip)
else:
run_results = self._run_test_once(tests_to_run, tests_to_skip,
should_retry_failures)
initial_results, all_retry_results = run_results
finally:
_log.info("Finally stop servers and clean up")
self._stop_servers()
self._clean_up_run()
if self._options.no_expectations:
return test_run_results.RunDetails(0, [], [], initial_results,
all_retry_results)
# Some crash logs can take a long time to be written out so look
# for new logs after the test run finishes.
self._printer.write_update('Looking for new crash logs ...')
self._look_for_new_crash_logs(initial_results, start_time)
for retry_attempt_results in all_retry_results:
self._look_for_new_crash_logs(retry_attempt_results, start_time)
self._printer.write_update('Summarizing results ...')
summarized_full_results = test_run_results.summarize_results(
self._port, self._options, self._expectations, initial_results,
all_retry_results)
summarized_failing_results = test_run_results.summarize_results(
self._port,
self._options,
self._expectations,
initial_results,
all_retry_results,
only_include_failing=True)
run_histories = test_run_results.test_run_histories(
self._options, self._expectations, initial_results,
all_retry_results)
exit_code = summarized_failing_results['num_regressions']
if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:
_log.warning('num regressions (%d) exceeds max exit status (%d)',
exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)
exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS
if not self._options.dry_run:
self._write_json_files(summarized_full_results,
summarized_failing_results, initial_results,
running_all_tests, run_histories)
| exit_code = exit_codes.INTERRUPTED_EXIT_STATUS
else:
if initial_results.interrupted:
exit_code = exit_codes.EARLY_EXIT_STATUS
if (self._options.show_results
and (exit_code or initial_results.total_failures)):
self._port.show_results_html_file(
self._filesystem.join(self._artifacts_directory,
'results.html'))
self._printer.print_results(time.time() - start_time,
initial_results)
return test_run_results.RunDetails(exit_code, summarized_full_results,
summarized_failing_results,
initial_results, all_retry_results)
def _register_termination_handler(self):
if self._port.host.platform.is_win():
signum = signal.SIGBREAK
else:
signum = signal.SIGTERM
signal.signal(signum, self._on_termination)
def _on_termination(self, signum, _frame):
self._printer.write_update(
'Received signal "%s" (%d) in %d' %
(signal.strsignal(signum), signum, os.getpid()))
raise KeyboardInterrupt
def _run_test_loop(self, tests_to_run, tests_to_skip):
# Don't show results in a new browser window because we're already
# printing the link to diffs in the loop
self._options.show_results = False
while True:
initial_results, all_retry_results = self._run_test_once(
tests_to_run, tests_to_skip, should_retry_failures=False)
for name in initial_results.failures_by_name:
failure = initial_results.failures_by_name[name][0]
if isinstance(failure, test_failures.FailureTextMismatch):
full_test_path = self._filesystem.join(
self._artifacts_directory, name)
filename, _ = self._filesystem.splitext(full_test_path)
pretty_diff_path = 'file://' + filename + '-pretty-diff.html'
self._printer.writeln('Link to pretty diff:')
self._printer.writeln(pretty_diff_path + '\n')
self._printer.writeln('Finished running tests')
user_input = self._port.host.user.prompt(
'Interactive watch mode: (q)uit (r)etry\n').lower()
if user_input == 'q' or user_input == 'quit':
return (initial_results, all_retry_results)
def _run_test_once(self, tests_to_run, tests_to_skip,
should_retry_failures):
num_workers = int(
self._port.num_workers(int(self._options.child_processes)))
initial_results = self._run_tests(
tests_to_run, tests_to_skip, self._options.repeat_each,
self._options.iterations, num_workers)
# Don't retry failures when interrupted by user or failures limit exception.
should_retry_failures = (should_retry_failures
and not initial_results.interrupted)
tests_to_retry = self._tests_to_retry(initial_results)
all_retry_results = []
if should_retry_failures and tests_to_retry:
for retry_attempt in range(1, self._options.num_retries + 1):
if not tests_to_retry:
break
_log.info('')
_log.info(
'Retrying %s, attempt %d of %d...',
grammar.pluralize('unexpected failure',
len(tests_to_retry)), retry_attempt,
self._options.num_retries)
retry_results = self._run_tests(
tests_to_retry,
tests_to_skip=set(),
repeat_each=1,
iterations=1,
num_workers=num_workers,
retry_attempt=retry_attempt)
all_retry_results.append(retry_results)
tests_to_retry = self._tests_to_retry(retry_results)
return (initial_results, all_retry_results)
def _restore_order(self, paths, test_names):
original_test_names = list(test_names)
test_names = []
for path in paths:
for test in original_test_names:
if test.startswith(path) or fnmatch.fnmatch(test, path):
test_names.append(test)
test_names += list(set(original_test_names) - set(test_names))
return test_names
def _collect_tests(self, args):
return self._finder.find_tests(
args,
test_lists=self._options.test_list,
filter_files=self._options.isolated_script_test_filter_file,
fastest_percentile=self._options.fastest,
filters=self._options.isolated_script_test_filter)
def _is_http_test(self, test):
return (
test.startswith(self.HTTP_SUBDIR + self._port.TEST_PATH_SEPARATOR)
or self._is_websocket_test(test) or self._port.TEST_PATH_SEPARATOR
+ self.HTTP_SUBDIR + self._port.TEST_PATH_SEPARATOR in test)
def _is_websocket_test(self, test):
if self._port.should_use_wptserve(test):
return False
return self.WEBSOCKET_SUBDIR + self._port.TEST_PATH_SEPARATOR in test
def _http_tests(self, test_names):
return set(test for test in test_names if self._is_http_test(test))
def _is_perf_test(self, test):
return (self.PERF_SUBDIR == test
or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test)
def _prepare_lists(self, paths, test_names):
tests_to_skip = self._finder.skip_tests(paths, test_names,
self._expectations)
tests_to_run = [
test for test in test_names if test not in tests_to_skip
]
return tests_to | self._copy_results_html_file(self._artifacts_directory,
'results.html')
if (initial_results.interrupt_reason is
test_run_results.InterruptReason.EXTERNAL_SIGNAL): | random_line_split |
decision_tree.py |
oldEntropy = calEntropy(dataSet)
bestIndex = -1
maxInfoGainRatio = 0.0
for index in range(labelNum):
newEntropy = 0.0
splitInfo = 0.0
attrValueList = [entry[index] for entry in dataSet]
attrValueSet = set(attrValueList)
for uniqueValue in attrValueSet:
subDataSet = splitDataSet(dataSet, index, uniqueValue)
p = float(len(subDataSet)) / len(dataSet)
newEntropy += p * calEntropy(subDataSet)
splitInfo -= p * log(p, 2) # intrinsic (split) information of the attribute at this index
infoGain = oldEntropy - newEntropy
if splitInfo == 0.0:
continue
infoGainRatio = infoGain / splitInfo # information gain ratio
if infoGainRatio > maxInfoGainRatio:
maxInfoGainRatio = infoGainRatio
bestIndex = index
return bestIndex
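# Worked example (illustrative): for labels [yes, yes, no, no] the parent
# entropy is 1.0 bit; an attribute that splits the set into two pure halves
# gains the full 1.0 bit, and C4.5 then divides that gain by the attribute's
# own split information (here also 1.0), giving a gain ratio of 1.0.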
def selectBestAttrIndex_CART(dataSet):
labelNum = len(dataSet[0])-1
bestIndex = -1
minGini = float("inf") # smallest Gini index over all attributes
for index in range(labelNum):
attrValueList = [entry[index] for entry in dataSet]
attrValueSet = set(attrValueList)
newGini = 0.0
for uniqueValue in attrValueSet:
subDataSet = splitDataSet(dataSet, index, uniqueValue)
p = float(len(subDataSet)) / len(dataSet)
newGini += p * calGini(subDataSet)
if newGini < minGini:
minGini = newGini
bestIndex = index
return bestIndex
def createTree(dataSet, oriAttr, oriAttrUniValSet, algorithm = 'ID3'):
attr = oriAttr[:] # work on copies so the caller's lists are left untouched
attrUniValSet = oriAttrUniValSet[:]
labelList = [entry[-1] for entry in dataSet]
if len(labelList) == labelList.count(labelList[0]): # 1. every sample has the same label: return a leaf with that label
return labelList[0]
if len(attr) == 0: # 2. no attributes left to split on
return Counter(labelList).most_common(1)[0][0] # return the most frequent label
# What if dataSet is empty, or every attribute yields the same gain?
bestAttrIndex = selectBestAttrIndex(dataSet, algorithm) # index of the highest-gain attribute (also covers equal gains across attributes)
bestAttr = attr[bestAttrIndex] # the highest-gain attribute itself
resTree = {bestAttr : {}} # the tree is a nested dict
del(attr[bestAttrIndex]) # drop the chosen attribute so attr matches the width of the split dataSet
valueSet = attrUniValSet[bestAttrIndex] #B1
del(attrUniValSet[bestAttrIndex]) #B1
for value in valueSet: # grow one branch per attribute value
subDataSet = splitDataSet(dataSet, bestAttrIndex, value)
if len(subDataSet) == 0: # 3. empty subset: predict the parent's most frequent label
resTree[bestAttr][value] = Counter(labelList).most_common(1)[0][0]
else:
cpyAttr = attr[:] # copy attr so the recursive call cannot mutate the reference we still need #B1
resTree[bestAttr][value] = createTree(subDataSet, cpyAttr, attrUniValSet, algorithm) # branch dict, e.g. {attribute0 : {low : {}, med : {}, high : {}, vhigh : {}}} #B1 B2
return resTree
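# Illustrative usage sketch (not part of the original file; toy data invented):
#     toyData = [['sunny', 'high', 'no'], ['sunny', 'normal', 'yes'],
#                ['rainy', 'high', 'no'], ['overcast', 'normal', 'yes']]
#     tree = createTree(toyData, ['outlook', 'humidity'], createAttrUniValSet(toyData))
#     # -> {'humidity': {'high': 'no', 'normal': 'yes'}}, since humidity alone
#     #    separates the labels and therefore has the largest information gain.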
def createAttrUniValSet(dataSet):
attrUniValSet = []
for attrIndex in range(len(dataSet[0])-1): # iterate over every attribute column
attrList = [entry[attrIndex] for entry in dataSet]
attrUniValSet.append(set(attrList))
return attrUniValSet
def classifierVec(testVec, attr, tree):
tempTree = copy.deepcopy(tree) # deep copy so classification never mutates the tree
while(isinstance(tempTree, dict)):
nodeName = list(tempTree.keys())[0] # current node's attribute name, e.g. 'outlook' in {'outlook': {}}
nodeAttrIndex = attr.index(nodeName) # index of that attribute in attr, e.g. 0
branch = testVec[nodeAttrIndex] # branch value, e.g. 2, used to walk {2: {'windy': {}}}
tempTree = tempTree[nodeName][branch]
return tempTree
def classifierSet(testDataSet, attr, tree):
resLabel = []
for testVec in testDataSet:
resLabel.append(classifierVec(testVec, attr, tree))
return resLabel
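# Walkthrough (illustrative): with attr = ['outlook', 'humidity'] and
# tree = {'outlook': {'sunny': {'humidity': {'high': 'no', 'normal': 'yes'}},
#                     'rainy': 'no', 'overcast': 'yes'}},
# classifierVec(['sunny', 'high'], attr, tree) follows outlook -> 'sunny',
# then humidity -> 'high', and returns 'no'.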
def saveTree(path, tree):
with open(path, 'w') as wf:
wf.write(repr(tree)) # write the decision-tree dict to the file as its repr string
# print("Write done!\nThe file looks like:")
# with open(path, 'r') as rf:
# sample = rf.read()
# print(sample)
def loadTree(path):
with open(path, 'r') as rf:
tree = eval(rf.read())
return tree
def loadCarDataSet(path):
with open(path, 'r') as csvfile:
entries = csv.reader(csvfile)
dataSet = list(entries) # the dataset as a 2-D list
attr = ['attr' + str(i) for i in range(len(dataSet[0])-1)] # generic attribute names
return dataSet, attr
def saveCarDataRes(path, carDataSetRes):
with open(path, 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerows(carDataSetRes)
def calAccuracy(dataSet, resVec):
if len(dataSet) != len(resVec):
print("Length of dataSet no equal length of resVec!")
return
dataLabelVec = [entry[-1] for entry in dataSet]
correctCount = 0
for i in range(len(resVec)):
if dataSet[i][-1] == resVec[i]:
correctCount += 1
accuracy = float(correctCount)/len(resVec)
return accuracy
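# Example (illustrative): if 3 of 4 predicted labels match column -1 of the
# dataset, calAccuracy returns 0.75.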
# entry-point helpers called from main
def mainTrainTree():
print("Note: train.csv is the training set and validate.csv the validation set, randomly split from Car_train.csv at a 3:1 ratio")
print("Building decision trees from train.csv")
carDataSet, carAttr = loadCarDataSet('./data/train.csv')
carUniValSet = createAttrUniValSet(carDataSet)
print("Training the ID3 tree... ", end='')
car_ID3_Tree = createTree(carDataSet, carAttr, carUniValSet)
saveTree('./output/car_ID3_Tree/car_ID3_Tree.txt', car_ID3_Tree)
print("done, saved as './output/car_ID3_Tree/car_ID3_Tree.txt'")
print("Plotting the ID3 tree... ", end='')
carTreePlotter.createPlot(car_ID3_Tree, "./output/car_ID3_Tree/car_ID3_Tree.png")
print("done, saved as './output/car_ID3_Tree/car_ID3_Tree.png'")
print("Training the C4.5 tree... ", end='')
car_C45_Tree = createTree(carDataSet, carAttr, carUniValSet, 'C4.5')
saveTree('./output/car_C45_Tree/car_C45_Tree.txt', car_C45_Tree)
print("done, saved as './output/car_C45_Tree/car_C45_Tree.txt'")
print("Plotting the C4.5 tree... ", end='')
carTreePlotter.createPlot(car_C45_Tree, "./output/car_C45_Tree/car_C45_Tree.png")
print("done, saved as './output/car_C45_Tree/car_C45_Tree.png'")
print("Training the CART tree... ", end='')
car_CART_Tree = createTree(carDataSet, carAttr, carUniValSet, 'CART')
saveTree('./output/car_CART_Tree/car_CART_Tree.txt', car_CART_Tree)
print("done, saved as './output/car_CART_Tree/car_CART_Tree.txt'")
print("Plotting the CART tree... ", end='')
carTreePlotter.createPlot(car_CART_Tree, "./output/car_CART_Tree/car_CART_Tree.png")
print("done, saved as './output/car_CART_Tree/car_CART_Tree.png'")
def mainCalAccu():
carTestSet, carTestAttr = loadCarDataSet('./data/validate.csv')
print("Classifying the validation set with the ID3 tree... ", end='')
car_ID3_Tree = loadTree('./output/car_ID3_Tree/car_ID3_Tree.txt')
car_ID3_SetRes = classifierSet(carTestSet, carTestAttr, car_ID3_Tree)
car_ID3_accuracy = calAccuracy(carTestSet, car_ID3_SetRes)
print("done, accuracy: %f" % car_ID3_accuracy)
| random_line_split |
||
decision_tree.py | float(labelsCount[key])/entryNum # proportion: this label's share of all entries
entropy -= propotion * log(propotion, 2)
return entropy
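# Worked example (illustrative): labels ['yes', 'yes', 'no', 'no'] give
# p = 0.5 per class, so entropy = -0.5*log2(0.5) - 0.5*log2(0.5) = 1.0 bit;
# a pure set (a single label) has entropy 0.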
def calGini(dataSet):
"""
Input: 2-D dataset
Output: the Gini index of the dataset
Description: computes the Gini index; the larger it is, the more impure the dataset.
"""
entryNum = len(dataSet)
labelsCount = {}
for entry in dataSet:
label = entry[-1]
if label not in labelsCount.keys():
labelsCount[label] = 0
labelsCount[label] += 1
gini = 1.0
for key in labelsCount:
p = float(labelsCount[key])/entryNum
gini -= p * p # 1-p1^2-p2^2
return gini
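# Worked example (illustrative): labels ['yes', 'yes', 'no', 'no'] give
# gini = 1 - 0.5**2 - 0.5**2 = 0.5, while a pure set has gini 0.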
def splitDataSet(dataSet, col, value):
"""
Input: 2-D dataset, attribute column index col, value
Output: the subDataSet carved out of dataSet
Description:
Collects the rows of dataSet whose column col equals value into a new
subDataSet (dropping that column); CART splits no differently here.
"""
subDataSet = []
for entry in dataSet:
if entry[col] == value: # pick the rows whose col attribute equals value
subEntry = entry[:col]
subEntry.extend(entry[col+1:])
subDataSet.append(subEntry)
return subDataSet
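# Example (illustrative): splitDataSet([['a', 1, 'yes'], ['b', 2, 'no']], 0, 'a')
# returns [[1, 'yes']] -- the matching row with column 0 removed.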
def selectBestAttrIndex(dataSet, algorithm):
"""
Input: 2-D dataset
Output: the index in dataSet of the attribute with the largest entropy reduction
Description:
First computes the entropy of dataSet, then walks every attribute and
computes the entropy after splitting on it; the attribute with the largest
entropy drop wins and its index in dataSet is returned.
"""
if algorithm == 'ID3':
return selectBestAttrIndex_ID3(dataSet)
elif algorithm == 'C4.5':
return selectBestAttrIndex_C45(dataSet)
elif algorithm == 'CART':
return selectBestAttrIndex_CART(dataSet)
def selectBestAttrIndex_ID3(dataSet):
labelNum = len(dataSet[0])-1 # number of attributes
oldEntropy = calEntropy(dataSet)
bestIndex = -1
maxInfoGain = 0.0
for index in range(labelNum):
newEntropy = 0.0
attrValueList = [entry[index] for entry in dataSet] # every value this attribute takes in dataSet
attrValueSet = set(attrValueList) # unique values; ID3/C4.5 accumulate entropy per value, CART accumulates the Gini index
for uniqueValue in attrValueSet:
subDataSet = splitDataSet(dataSet, index, uniqueValue) # subset where column `index` equals uniqueValue
p = float(len(subDataSet)) / len(dataSet) # the subset's share of the data
newEntropy += p * calEntropy(subDataSet)
infoGain = oldEntropy - newEntropy
if infoGain > maxInfoGain:
maxInfoGain = infoGain
bestIndex = index
return bestIndex
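# Illustrative sketch (not part of the original file): column 0 separates the
# labels perfectly while column 1 never varies, so ID3 picks index 0.
def _demo_select_id3():
    rows = [['a', 'same', 'yes'], ['b', 'same', 'no']]
    print(selectBestAttrIndex_ID3(rows))  # -> 0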
def selectBestAttrIndex_C45(dataSet):
    labelNum = len(dataSet[0])-1
    oldEntropy = calEntropy(dataSet)
    bestIndex = -1
    maxInfoGainRatio = 0.0
    for index in range(labelNum):
        newEntropy = 0.0
        splitInfo = 0.0
        attrValueList = [entry[index] for entry in dataSet]
        attrValueSet = set(attrValueList)
        for uniqueValue in attrValueSet:
            subDataSet = splitDataSet(dataSet, index, uniqueValue)
            p = float(len(subDataSet)) / len(dataSet)
            newEntropy += p * calEntropy(subDataSet)
            splitInfo -= p * log(p, 2)  # intrinsic entropy of this attribute's value distribution
        infoGain = oldEntropy - newEntropy
        if splitInfo == 0.0:
            continue
        infoGainRatio = infoGain / splitInfo  # compute the information gain ratio
        if infoGainRatio > maxInfoGainRatio:
            maxInfoGainRatio = infoGainRatio
            bestIndex = index
    return bestIndex
def selectBestAttrIndex_CART(dataSet):
    labelNum = len(dataSet[0])-1
    bestIndex = -1
    minGini = float("inf")  # smallest weighted Gini index over all attributes
    for index in range(labelNum):
        attrValueList = [entry[index] for entry in dataSet]
        attrValueSet = set(attrValueList)
        newGini = 0.0
        for uniqueValue in attrValueSet:
            subDataSet = splitDataSet(dataSet, index, uniqueValue)
            p = float(len(subDataSet)) / len(dataSet)
            newGini += p * calGini(subDataSet)
        if newGini < minGini:
            minGini = newGini
            bestIndex = index
    return bestIndex
def createTree(dataSet, oriAttr, oriAttrUniValSet, algorithm = 'ID3'):
    attr = oriAttr[:]  # work on a copy so the caller's attribute list is not modified
    attrUniValSet = oriAttrUniValSet[:]
    labelList = [entry[-1] for entry in dataSet]
    if len(labelList) == labelList.count(labelList[0]):  # 1. all samples share one label: make a leaf with that label
        return labelList[0]
    if len(attr) == 0:  # 2. no attributes left to split on
        return Counter(labelList).most_common(1)[0][0]  # return the most common label
    # what if dataSet is empty, or every attribute yields the same gain?
    bestAttrIndex = selectBestAttrIndex(dataSet, algorithm)  # index of the highest-gain attribute (also covers case 2: all samples gain equally on every attribute)
    bestAttr = attr[bestAttrIndex]  # the highest-gain attribute
    resTree = {bestAttr : {}}  # build the tree as a nested dict
    del(attr[bestAttrIndex])  # drop the chosen attribute so attr matches the width of the split dataSet
    valueSet = attrUniValSet[bestAttrIndex]  #B1
    del(attrUniValSet[bestAttrIndex])  #B1
    for value in valueSet:  # grow one branch per value
        subDataSet = splitDataSet(dataSet, bestAttrIndex, value)
        if len(subDataSet) == 0:  # 3. empty subset: predict the parent's most common label
            resTree[bestAttr][value] = Counter(labelList).most_common(1)[0][0]
        else:
            cpyAttr = attr[:]  # copy attr so the recursive call cannot mutate a list we still need  #B1
            resTree[bestAttr][value] = createTree(subDataSet, cpyAttr, attrUniValSet, algorithm)  # branch dict, e.g. {attribute0 : {low : {}, med : {}, high : {}, vhigh : {}}}  #B1 B2
    return resTree
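# Illustrative sketch (not part of the original file): the returned tree is a
# plain nested dict keyed by attribute name, then by attribute value
# (branch order may vary because valueSet is a set).
def _demo_create_tree():
    rows = [['sunny', 'yes'], ['rainy', 'no']]
    tree = createTree(rows, ['outlook'], createAttrUniValSet(rows))
    print(tree)  # -> {'outlook': {'sunny': 'yes', 'rainy': 'no'}}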
def createAttrUniValSet(dataSet):
    attrUniValSet = []
    for attrIndex in range(len(dataSet[0])-1):  # iterate over every attribute
        attrList = [entry[attrIndex] for entry in dataSet]
        attrUniValSet.append(set(attrList))
    return attrUniValSet
def classifierVec(testVec, attr, tree):
    tempTree = copy.deepcopy(tree)  # deep copy
    while(isinstance(tempTree, dict)):
        nodeName = list(tempTree.keys())[0]  # node attribute, e.g. 'outlook' in {'outlook': {}}
        nodeAttrIndex = attr.index(nodeName)  # index of 'outlook' in attr, e.g. 0
        branch = testVec[nodeAttrIndex]  # branch value, e.g. 2 for {2: {'windy': {}}}
        tempTree = tempTree[nodeName][branch]
    return tempTree
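# Illustrative sketch (not part of the original file): walking a hand-built
# tree with a single test row.
def _demo_classify():
    tree = {'outlook': {'sunny': 'yes', 'rainy': 'no'}}
    print(classifierVec(['sunny'], ['outlook'], tree))  # -> 'yes'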
def classifierSet(testDataSet, attr, tree):
resLabel = []
for testVec in testDataSet:
resLabel.append(classifierVec(testVec, attr, tree))
return resLabel
def saveTree(path, tree):
    with open(path, 'w') as wf:
        wf.write(repr(tree))  # write the decision-tree dict to the file as a string
    # print("Write done!\nThe file looks like:")
    # with open(path, 'r') as rf:
    #     sample = rf.read()
    #     print(sample)
def loadTree(path):
with open(path, 'r') as rf:
tree = eval(rf.read())
return tree
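# Safer variant (not in the original file): eval() executes arbitrary code
# found in the file, and saveTree only ever writes dicts and strings, so
# ast.literal_eval is a drop-in replacement worth considering.
import ast
def loadTreeSafe(path):
    with open(path, 'r') as rf:
        return ast.literal_eval(rf.read())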
def loadCarDataSet(path):
with open(path, 'r') as csvfile:
entries = csv.reader(csvfile)
        dataSet = list(entries)  # the dataset as a 2-D list
attr = ['attr' + str(i) for i in range(len(dataSet[0])-1)] | :
proportion = | conditional_block
|
decision_tree.py |
subEntry = entry[:col]
subEntry.extend(entry[col+1:])
subDataSet.append(subEntry)
return subDataSet
def selectBestAttrIndex(dataSet, algorithm):
    """
    Input: a 2-D dataset
    Output: the index in dataSet of the attribute with the largest entropy reduction
    Description:
        Compute the entropy of dataSet, then iterate over the attributes and
        compute the entropy obtained by splitting on each one;
        return the index of the attribute whose split reduces entropy the most.
    """
    if algorithm == 'ID3':
        return selectBestAttrIndex_ID3(dataSet)
    elif algorithm == 'C4.5':
        return selectBestAttrIndex_C45(dataSet)
    elif algorithm == 'CART':
        return selectBestAttrIndex_CART(dataSet)
def selectBestAttrIndex_ID3(dataSet):
    labelNum = len(dataSet[0])-1  # number of attributes
    oldEntropy = calEntropy(dataSet)
    bestIndex = -1
    maxInfoGain = 0.0
    for index in range(labelNum):
        newEntropy = 0.0
        attrValueList = [entry[index] for entry in dataSet]  # all values of this attribute in dataSet
        attrValueSet = set(attrValueList)  # de-duplicated value set; ID3/C4.5 compute the entropy of each value, CART uses the values for binary Gini splits
        for uniqueValue in attrValueSet:
            subDataSet = splitDataSet(dataSet, index, uniqueValue)  # split out the rows with col=index, value=uniqueValue
            p = float(len(subDataSet)) / len(dataSet)  # fraction of the data in this subset
            newEntropy += p * calEntropy(subDataSet)
        infoGain = oldEntropy - newEntropy
        if infoGain > maxInfoGain:
            maxInfoGain = infoGain
            bestIndex = index
    return bestIndex
def selectBestAttrIndex_C45(dataSet):
    labelNum = len(dataSet[0])-1
    oldEntropy = calEntropy(dataSet)
    bestIndex = -1
    maxInfoGainRatio = 0.0
    for index in range(labelNum):
        newEntropy = 0.0
        splitInfo = 0.0
        attrValueList = [entry[index] for entry in dataSet]
        attrValueSet = set(attrValueList)
        for uniqueValue in attrValueSet:
            subDataSet = splitDataSet(dataSet, index, uniqueValue)
            p = float(len(subDataSet)) / len(dataSet)
            newEntropy += p * calEntropy(subDataSet)
            splitInfo -= p * log(p, 2)  # intrinsic entropy of this attribute's value distribution
        infoGain = oldEntropy - newEntropy
        if splitInfo == 0.0:
            continue
        infoGainRatio = infoGain / splitInfo  # compute the information gain ratio
        if infoGainRatio > maxInfoGainRatio:
            maxInfoGainRatio = infoGainRatio
            bestIndex = index
    return bestIndex
def selectBestAttrIndex_CART(dataSet):
    labelNum = len(dataSet[0])-1
    bestIndex = -1
    minGini = float("inf")  # smallest weighted Gini index over all attributes
    for index in range(labelNum):
        attrValueList = [entry[index] for entry in dataSet]
        attrValueSet = set(attrValueList)
        newGini = 0.0
        for uniqueValue in attrValueSet:
            subDataSet = splitDataSet(dataSet, index, uniqueValue)
            p = float(len(subDataSet)) / len(dataSet)
            newGini += p * calGini(subDataSet)
        if newGini < minGini:
            minGini = newGini
            bestIndex = index
    return bestIndex
def createTree(dataSet, oriAttr, oriAttrUniValSet, algorithm = 'ID3'):
    attr = oriAttr[:]  # work on a copy so the caller's attribute list is not modified
    attrUniValSet = oriAttrUniValSet[:]
    labelList = [entry[-1] for entry in dataSet]
    if len(labelList) == labelList.count(labelList[0]):  # 1. all samples share one label: make a leaf with that label
        return labelList[0]
    if len(attr) == 0:  # 2. no attributes left to split on
        return Counter(labelList).most_common(1)[0][0]  # return the most common label
    # what if dataSet is empty, or every attribute yields the same gain?
    bestAttrIndex = selectBestAttrIndex(dataSet, algorithm)  # index of the highest-gain attribute (also covers case 2: all samples gain equally on every attribute)
    bestAttr = attr[bestAttrIndex]  # the highest-gain attribute
    resTree = {bestAttr : {}}  # build the tree as a nested dict
    del(attr[bestAttrIndex])  # drop the chosen attribute so attr matches the width of the split dataSet
    valueSet = attrUniValSet[bestAttrIndex]  #B1
    del(attrUniValSet[bestAttrIndex])  #B1
    for value in valueSet:  # grow one branch per value
        subDataSet = splitDataSet(dataSet, bestAttrIndex, value)
        if len(subDataSet) == 0:  # 3. empty subset: predict the parent's most common label
            resTree[bestAttr][value] = Counter(labelList).most_common(1)[0][0]
        else:
            cpyAttr = attr[:]  # copy attr so the recursive call cannot mutate a list we still need  #B1
            resTree[bestAttr][value] = createTree(subDataSet, cpyAttr, attrUniValSet, algorithm)  # branch dict, e.g. {attribute0 : {low : {}, med : {}, high : {}, vhigh : {}}}  #B1 B2
    return resTree
def createAttrUniValSet(dataSet):
    attrUniValSet = []
    for attrIndex in range(len(dataSet[0])-1):  # iterate over every attribute
        attrList = [entry[attrIndex] for entry in dataSet]
        attrUniValSet.append(set(attrList))
    return attrUniValSet
def classifierVec(testVec, attr, tree):
    tempTree = copy.deepcopy(tree)  # deep copy
    while(isinstance(tempTree, dict)):
        nodeName = list(tempTree.keys())[0]  # node attribute, e.g. 'outlook' in {'outlook': {}}
        nodeAttrIndex = attr.index(nodeName)  # index of 'outlook' in attr, e.g. 0
        branch = testVec[nodeAttrIndex]  # branch value, e.g. 2 for {2: {'windy': {}}}
        tempTree = tempTree[nodeName][branch]
    return tempTree
def classifierSet(testDataSet, attr, tree):
resLabel = []
for testVec in testDataSet:
resLabel.append(classifierVec(testVec, attr, tree))
return resLabel
def saveTree(path, tree):
    with open(path, 'w') as wf:
        wf.write(repr(tree))  # write the decision-tree dict to the file as a string
    # print("Write done!\nThe file looks like:")
    # with open(path, 'r') as rf:
    #     sample = rf.read()
    #     print(sample)
def loadTree(path) | tries)  # the dataset as a 2-D list
        attr = ['attr' + str(i) for i in range(len(dataSet[0])-1)]  # the attribute name vector
return dataSet, attr
def saveCarDataRes(path, carDataSetRes):
with open(path, 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerows(carDataSetRes)
def calAccuracy(dataSet, resVec):
if len(dataSet) != len(resVec):
print("Length of dataSet no equal length of resVec!")
return
dataLabelVec = [entry[-1] for entry in dataSet]
correctCount = 0
for i in range(len(resVec)):
if dataSet[i][-1] == resVec[i]:
correctCount += 1
accuracy = float(correctCount)/len(resVec)
return accuracy
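# Illustrative sketch (not part of the original file): accuracy compares the
# last column of each row against the predictions, position by position.
def _demo_accuracy():
    rows = [['a', 'yes'], ['b', 'no'], ['c', 'no']]
    print(calAccuracy(rows, ['yes', 'no', 'yes']))  # -> 0.666...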
# selectable actions dispatched from main()
def mainTrainTree():
    print("Note: train.csv is the training set and validate.csv the validation set, split at random 3:1 from Car_train.csv")
    print("Building decision trees from train.csv")
    carDataSet, carAttr = loadCarDataSet('./data/train.csv')
    carUniValSet = createAttrUniValSet(carDataSet)
    print("Training ID3 decision tree...", end='')
    car_ID3_Tree = createTree(carDataSet, carAttr, carUniValSet)
    saveTree('./output/car_ID3_Tree/car_ID3_Tree.txt', car_ID3_Tree)
print("完成,保存为'./output | :
with open(path, 'r') as rf:
tree = eval(rf.read())
return tree
def loadCarDataSet(path):
with open(path, 'r') as csvfile:
entries = csv.reader(csvfile)
dataSet = list(en | identifier_body |
decision_tree.py | all values of this attribute in dataSet
        attrValueSet = set(attrValueList)  # de-duplicated value set; ID3/C4.5 compute the entropy of each value, CART uses the values for binary Gini splits
        for uniqueValue in attrValueSet:
            subDataSet = splitDataSet(dataSet, index, uniqueValue)  # split out the rows with col=index, value=uniqueValue
            p = float(len(subDataSet)) / len(dataSet)  # fraction of the data in this subset
newEntropy += p * calEntropy(subDataSet)
infoGain = oldEntropy - newEntropy
if infoGain > maxInfoGain:
maxInfoGain = infoGain
bestIndex = index
return bestIndex
def selectBestAttrIndex_C45(dataSet):
    labelNum = len(dataSet[0])-1
    oldEntropy = calEntropy(dataSet)
    bestIndex = -1
    maxInfoGainRatio = 0.0
    for index in range(labelNum):
        newEntropy = 0.0
        splitInfo = 0.0
        attrValueList = [entry[index] for entry in dataSet]
        attrValueSet = set(attrValueList)
        for uniqueValue in attrValueSet:
            subDataSet = splitDataSet(dataSet, index, uniqueValue)
            p = float(len(subDataSet)) / len(dataSet)
            newEntropy += p * calEntropy(subDataSet)
            splitInfo -= p * log(p, 2)  # intrinsic entropy of this attribute's value distribution
        infoGain = oldEntropy - newEntropy
        if splitInfo == 0.0:
            continue
        infoGainRatio = infoGain / splitInfo  # compute the information gain ratio
        if infoGainRatio > maxInfoGainRatio:
            maxInfoGainRatio = infoGainRatio
            bestIndex = index
    return bestIndex
def selectBestAttrIndex_CART(dataSet):
    labelNum = len(dataSet[0])-1
    bestIndex = -1
    minGini = float("inf")  # smallest weighted Gini index over all attributes
    for index in range(labelNum):
        attrValueList = [entry[index] for entry in dataSet]
        attrValueSet = set(attrValueList)
        newGini = 0.0
        for uniqueValue in attrValueSet:
            subDataSet = splitDataSet(dataSet, index, uniqueValue)
            p = float(len(subDataSet)) / len(dataSet)
            newGini += p * calGini(subDataSet)
        if newGini < minGini:
            minGini = newGini
            bestIndex = index
    return bestIndex
def createTree(dataSet, oriAttr, oriAttrUniValSet, algorithm = 'ID3'):
    attr = oriAttr[:]  # work on a copy so the caller's attribute list is not modified
    attrUniValSet = oriAttrUniValSet[:]
    labelList = [entry[-1] for entry in dataSet]
    if len(labelList) == labelList.count(labelList[0]):  # 1. all samples share one label: make a leaf with that label
        return labelList[0]
    if len(attr) == 0:  # 2. no attributes left to split on
        return Counter(labelList).most_common(1)[0][0]  # return the most common label
    # what if dataSet is empty, or every attribute yields the same gain?
    bestAttrIndex = selectBestAttrIndex(dataSet, algorithm)  # index of the highest-gain attribute (also covers case 2: all samples gain equally on every attribute)
    bestAttr = attr[bestAttrIndex]  # the highest-gain attribute
    resTree = {bestAttr : {}}  # build the tree as a nested dict
    del(attr[bestAttrIndex])  # drop the chosen attribute so attr matches the width of the split dataSet
    valueSet = attrUniValSet[bestAttrIndex]  #B1
    del(attrUniValSet[bestAttrIndex])  #B1
    for value in valueSet:  # grow one branch per value
        subDataSet = splitDataSet(dataSet, bestAttrIndex, value)
        if len(subDataSet) == 0:  # 3. empty subset: predict the parent's most common label
            resTree[bestAttr][value] = Counter(labelList).most_common(1)[0][0]
        else:
            cpyAttr = attr[:]  # copy attr so the recursive call cannot mutate a list we still need  #B1
            resTree[bestAttr][value] = createTree(subDataSet, cpyAttr, attrUniValSet, algorithm)  # branch dict, e.g. {attribute0 : {low : {}, med : {}, high : {}, vhigh : {}}}  #B1 B2
    return resTree
def createAttrUniValSet(dataSet):
    attrUniValSet = []
    for attrIndex in range(len(dataSet[0])-1):  # iterate over every attribute
        attrList = [entry[attrIndex] for entry in dataSet]
        attrUniValSet.append(set(attrList))
    return attrUniValSet
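# Illustrative sketch (not part of the original file): the unique value sets
# are collected once from the full dataset, which lets createTree grow a
# branch even for a value that is absent from some subset (set order varies).
def _demo_univalset():
    rows = [['sunny', 'hot', 'yes'], ['rainy', 'mild', 'no']]
    print(createAttrUniValSet(rows))  # -> [{'sunny', 'rainy'}, {'hot', 'mild'}]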
def classifierVec(testVec, attr, tree):
    tempTree = copy.deepcopy(tree)  # deep copy
    while(isinstance(tempTree, dict)):
        nodeName = list(tempTree.keys())[0]  # node attribute, e.g. 'outlook' in {'outlook': {}}
        nodeAttrIndex = attr.index(nodeName)  # index of 'outlook' in attr, e.g. 0
        branch = testVec[nodeAttrIndex]  # branch value, e.g. 2 for {2: {'windy': {}}}
        tempTree = tempTree[nodeName][branch]
    return tempTree
def classifierSet(testDataSet, attr, tree):
resLabel = []
for testVec in testDataSet:
resLabel.append(classifierVec(testVec, attr, tree))
return resLabel
def saveTree(path, tree):
    with open(path, 'w') as wf:
        wf.write(repr(tree))  # write the decision-tree dict to the file as a string
    # print("Write done!\nThe file looks like:")
    # with open(path, 'r') as rf:
    #     sample = rf.read()
    #     print(sample)
def loadTree(path):
with open(path, 'r') as rf:
tree = eval(rf.read())
return tree
def loadCarDataSet(path):
with open(path, 'r') as csvfile:
entries = csv.reader(csvfile)
        dataSet = list(entries)  # the dataset as a 2-D list
        attr = ['attr' + str(i) for i in range(len(dataSet[0])-1)]  # the attribute name vector
return dataSet, attr
def saveCarDataRes(path, carDataSetRes):
with open(path, 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerows(carDataSetRes)
def calAccuracy(dataSet, resVec):
if len(dataSet) != len(resVec):
print("Length of dataSet no equal length of resVec!")
return
dataLabelVec = [entry[-1] for entry in dataSet]
correctCount = 0
for i in range(len(resVec)):
if dataSet[i][-1] == resVec[i]:
correctCount += 1
accuracy = float(correctCount)/len(resVec)
return accuracy
# selectable actions dispatched from main()
def mainTrainTree():
    print("Note: train.csv is the training set and validate.csv the validation set, split at random 3:1 from Car_train.csv")
    print("Building decision trees from train.csv")
    carDataSet, carAttr = loadCarDataSet('./data/train.csv')
    carUniValSet = createAttrUniValSet(carDataSet)
    print("Training ID3 decision tree...", end='')
    car_ID3_Tree = createTree(carDataSet, carAttr, carUniValSet)
    saveTree('./output/car_ID3_Tree/car_ID3_Tree.txt', car_ID3_Tree)
    print("done, saved to './output/car_ID3_Tree/car_ID3_Tree.txt'")
    print("Plotting ID3 decision tree...", end='')
    carTreePlotter.createPlot(car_ID3_Tree, "./output/car_ID3_Tree/car_ID3_Tree.png")
    print("done, saved to './output/car_ID3_Tree/car_ID3_Tree.png'")
    print("Training C4.5 decision tree...", end='')
    car_C45_Tree = createTree(carDataSet, carAttr, carUniValSet, 'C4.5')
    saveTree('./output/car_C45_Tree/car_C45_Tree.txt', car_C45_Tree)
    print("done, saved to './output/car_C45_Tree/car_C45_Tree.txt'")
    print("Plotting C4.5 decision tree...", end='')
    carTreePlotter.createPlot(car_C45_Tree, "./output/car_C45_Tree/car_C45_Tree.png")
    print("done, saved to './output/car_C45_Tree/car_C45_Tree.png'")
    print("Training CART decision tree...", end='')
    car_CART_Tree = createTree(carDataSet, carAttr, carUniValSet, 'CART')
saveTree('./output/car_CA | RT_Tree/car_C | identifier_name |
|
bitfinex.py |
def _get_v2_symbols(self, assets):
"""
Workaround to support Bitfinex v2
TODO: Might require a separate asset dictionary
:param assets:
:return:
"""
v2_symbols = []
for asset in assets:
v2_symbols.append(self._get_v2_symbol(asset))
return v2_symbols
def _create_order(self, order_status):
"""
Create a Catalyst order object from a Bitfinex order dictionary
:param order_status:
:return: Order
"""
if order_status['is_cancelled']:
status = ORDER_STATUS.CANCELLED
elif not order_status['is_live']:
log.info('found executed order {}'.format(order_status))
status = ORDER_STATUS.FILLED
else:
status = ORDER_STATUS.OPEN
amount = float(order_status['original_amount'])
filled = float(order_status['executed_amount'])
if order_status['side'] == 'sell':
amount = -amount
filled = -filled
price = float(order_status['price'])
order_type = order_status['type']
stop_price = None
limit_price = None
# TODO: is this comprehensive enough?
if order_type.endswith('limit'):
limit_price = price
elif order_type.endswith('stop'):
stop_price = price
executed_price = float(order_status['avg_execution_price'])
        # TODO: Bitfinex does not specify commission; we could compute it ourselves, but it may not be worth it.
commission = None
date = pd.Timestamp.utcfromtimestamp(float(order_status['timestamp']))
date = pytz.utc.localize(date)
order = Order(
dt=date,
asset=self.assets[order_status['symbol']],
amount=amount,
stop=stop_price,
limit=limit_price,
filled=filled,
id=str(order_status['id']),
commission=commission
)
order.status = status
return order, executed_price
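    # Illustrative sketch (not part of the original file): the shape of the v1
    # order dict this method consumes. The keys mirror what _create_order reads
    # above; the values are invented for illustration only.
    #
    #   {'id': 448364249, 'symbol': 'btcusd', 'side': 'buy',
    #    'type': 'exchange limit', 'price': '0.01',
    #    'avg_execution_price': '0.0', 'timestamp': '1444272165.25237',
    #    'is_live': True, 'is_cancelled': False,
    #    'original_amount': '0.01', 'executed_amount': '0.0'}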
def get_balances(self):
log.debug('retrieving wallets balances')
try:
self.ask_request()
response = self._request('balances', None)
balances = response.json()
except Exception as e:
raise ExchangeRequestError(error=e)
if 'message' in balances:
raise ExchangeRequestError(
error='unable to fetch balance {}'.format(balances['message'])
)
std_balances = dict()
for balance in balances:
currency = balance['currency'].lower()
std_balances[currency] = float(balance['available'])
return std_balances
@property
def account(self):
account = Account()
account.settled_cash = None
account.accrued_interest = None
account.buying_power = None
account.equity_with_loan = None
account.total_positions_value = None
account.total_positions_exposure = None
account.regt_equity = None
account.regt_margin = None
account.initial_margin_requirement = None
account.maintenance_margin_requirement = None
account.available_funds = None
account.excess_liquidity = None
account.cushion = None
account.day_trades_remaining = None
account.leverage = None
account.net_leverage = None
account.net_liquidation = None
return account
@property
def time_skew(self):
# TODO: research the time skew conditions
return pd.Timedelta('0s')
def get_account(self):
# TODO: fetch account data and keep in cache
return None
def get_candles(self, data_frequency, assets, bar_count=None,
start_dt=None, end_dt=None):
"""
Retrieve OHLVC candles from Bitfinex
:param data_frequency:
:param assets:
:param bar_count:
:return:
Available Frequencies
---------------------
'1m', '5m', '15m', '30m', '1h', '3h', '6h', '12h', '1D', '7D', '14D',
'1M'
"""
freq_match = re.match(r'([0-9].*)(m|h|d)', data_frequency, re.M | re.I)
if freq_match:
number = int(freq_match.group(1))
unit = freq_match.group(2)
if unit == 'd':
converted_unit = 'D'
else:
converted_unit = unit
frequency = '{}{}'.format(number, converted_unit)
allowed_frequencies = ['1m', '5m', '15m', '30m', '1h', '3h', '6h',
'12h', '1D', '7D', '14D', '1M']
if frequency not in allowed_frequencies:
raise InvalidHistoryFrequencyError(
frequency=data_frequency
)
elif data_frequency == 'minute':
frequency = '1m'
elif data_frequency == 'daily':
frequency = '1D'
else:
raise InvalidHistoryFrequencyError(
frequency=data_frequency
)
# Making sure that assets are iterable
asset_list = [assets] if isinstance(assets, TradingPair) else assets
ohlc_map = dict()
for asset in asset_list:
symbol = self._get_v2_symbol(asset)
url = '{url}/v2/candles/trade:{frequency}:{symbol}'.format(
url=self.url,
frequency=frequency,
symbol=symbol
)
if bar_count:
is_list = True
url += '/hist?limit={}'.format(int(bar_count))
def get_ms(date):
epoch = datetime.datetime.utcfromtimestamp(0)
epoch = epoch.replace(tzinfo=pytz.UTC)
return (date - epoch).total_seconds() * 1000.0
if start_dt is not None:
start_ms = get_ms(start_dt)
url += '&start={0:f}'.format(start_ms)
if end_dt is not None:
end_ms = get_ms(end_dt)
url += '&end={0:f}'.format(end_ms)
else:
is_list = False
url += '/last'
try:
self.ask_request()
response = requests.get(url)
except Exception as e:
raise ExchangeRequestError(error=e)
if 'error' in response.content:
raise ExchangeRequestError(
error='Unable to retrieve candles: {}'.format(
response.content)
)
candles = response.json()
def ohlc_from_candle(candle):
last_traded = pd.Timestamp.utcfromtimestamp(
candle[0] / 1000.0)
last_traded = last_traded.replace(tzinfo=pytz.UTC)
ohlc = dict(
open=np.float64(candle[1]),
high=np.float64(candle[3]),
low=np.float64(candle[4]),
close=np.float64(candle[2]),
volume=np.float64(candle[5]),
price=np.float64(candle[2]),
last_traded=last_traded
)
return ohlc
if is_list:
ohlc_bars = []
                # We want to list candles from old to new
for candle in reversed(candles):
ohlc = ohlc_from_candle(candle)
ohlc_bars.append(ohlc)
ohlc_map[asset] = ohlc_bars
else:
ohlc = ohlc_from_candle(candles)
ohlc_map[asset] = ohlc
return ohlc_map[assets] \
if isinstance(assets, TradingPair) else ohlc_map
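    # Illustrative usage sketch (not part of the original file): `exchange` is
    # assumed to be a configured instance of this class and `asset` a
    # TradingPair it knows about.
    #
    #   candles = exchange.get_candles('5m', asset, bar_count=10)
    #   for c in candles:
    #       print(c['last_traded'], c['close'], c['volume'])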
def create_order(self, asset, amount, is_buy, style):
"""
Creating order on the exchange.
:param asset:
:param amount:
:param is_buy:
:param style:
:return:
"""
exchange_symbol = self.get_symbol(asset)
if isinstance(style, ExchangeLimitOrder) \
or isinstance(style, ExchangeStopLimitOrder):
price = style.get_limit_price(is_buy)
order_type = 'limit'
elif isinstance(style, ExchangeStopOrder):
price = style.get_stop_price(is_buy)
order_type = 'stop'
else:
raise InvalidOrderStyle(exchange=self.name,
style=style.__class__.__name__)
req = dict(
symbol=exchange_symbol,
amount=str(float(abs(amount))),
price="{:.20f}".format(float(price)),
side='buy' if is_buy else 'sell',
type='exchange ' + order_type, # TODO: support margin trades
exchange=self.name,
is_hidden=False,
is_postonly=False,
use_all_available=0,
ocoorder=False,
buy_price_oco=0,
sell_price_oco=0
)
date = pd.Timestamp.utcnow()
try:
self.ask_request()
response = self._request('order/new', req)
order_status = response.json()
except Exception as e:
raise ExchangeRequestError(error=e)
if 'message' in order_status:
raise ExchangeRequestError(
error='unable to create Bitfinex order {}'.format(
order_status['message'])
)
order_id = str(order_status['id'])
order = Order(
dt=date,
| pair = asset.symbol.split('_')
symbol = 't' + pair[0].upper() + pair[1].upper()
return symbol | identifier_body |
|
bitfinex.py | ize(date)
order = Order(
dt=date,
asset=self.assets[order_status['symbol']],
amount=amount,
stop=stop_price,
limit=limit_price,
filled=filled,
id=str(order_status['id']),
commission=commission
)
order.status = status
return order, executed_price
def get_balances(self):
log.debug('retrieving wallets balances')
try:
self.ask_request()
response = self._request('balances', None)
balances = response.json()
except Exception as e:
raise ExchangeRequestError(error=e)
if 'message' in balances:
raise ExchangeRequestError(
error='unable to fetch balance {}'.format(balances['message'])
)
std_balances = dict()
for balance in balances:
currency = balance['currency'].lower()
std_balances[currency] = float(balance['available'])
return std_balances
@property
def account(self):
account = Account()
account.settled_cash = None
account.accrued_interest = None
account.buying_power = None
account.equity_with_loan = None
account.total_positions_value = None
account.total_positions_exposure = None
account.regt_equity = None
account.regt_margin = None
account.initial_margin_requirement = None
account.maintenance_margin_requirement = None
account.available_funds = None
account.excess_liquidity = None
account.cushion = None
account.day_trades_remaining = None
account.leverage = None
account.net_leverage = None
account.net_liquidation = None
return account
@property
def time_skew(self):
# TODO: research the time skew conditions
return pd.Timedelta('0s')
def get_account(self):
# TODO: fetch account data and keep in cache
return None
def get_candles(self, data_frequency, assets, bar_count=None,
start_dt=None, end_dt=None):
"""
Retrieve OHLVC candles from Bitfinex
:param data_frequency:
:param assets:
:param bar_count:
:return:
Available Frequencies
---------------------
'1m', '5m', '15m', '30m', '1h', '3h', '6h', '12h', '1D', '7D', '14D',
'1M'
"""
freq_match = re.match(r'([0-9].*)(m|h|d)', data_frequency, re.M | re.I)
if freq_match:
number = int(freq_match.group(1))
unit = freq_match.group(2)
if unit == 'd':
converted_unit = 'D'
else:
converted_unit = unit
frequency = '{}{}'.format(number, converted_unit)
allowed_frequencies = ['1m', '5m', '15m', '30m', '1h', '3h', '6h',
'12h', '1D', '7D', '14D', '1M']
if frequency not in allowed_frequencies:
raise InvalidHistoryFrequencyError(
frequency=data_frequency
)
elif data_frequency == 'minute':
frequency = '1m'
elif data_frequency == 'daily':
frequency = '1D'
else:
raise InvalidHistoryFrequencyError(
frequency=data_frequency
)
# Making sure that assets are iterable
asset_list = [assets] if isinstance(assets, TradingPair) else assets
ohlc_map = dict()
for asset in asset_list:
symbol = self._get_v2_symbol(asset)
url = '{url}/v2/candles/trade:{frequency}:{symbol}'.format(
url=self.url,
frequency=frequency,
symbol=symbol
)
if bar_count:
is_list = True
url += '/hist?limit={}'.format(int(bar_count))
def get_ms(date):
epoch = datetime.datetime.utcfromtimestamp(0)
epoch = epoch.replace(tzinfo=pytz.UTC)
return (date - epoch).total_seconds() * 1000.0
if start_dt is not None:
start_ms = get_ms(start_dt)
url += '&start={0:f}'.format(start_ms)
if end_dt is not None:
end_ms = get_ms(end_dt)
url += '&end={0:f}'.format(end_ms)
else:
is_list = False
url += '/last'
try:
self.ask_request()
response = requests.get(url)
except Exception as e:
raise ExchangeRequestError(error=e)
if 'error' in response.content:
raise ExchangeRequestError(
error='Unable to retrieve candles: {}'.format(
response.content)
)
candles = response.json()
def ohlc_from_candle(candle):
last_traded = pd.Timestamp.utcfromtimestamp(
candle[0] / 1000.0)
last_traded = last_traded.replace(tzinfo=pytz.UTC)
ohlc = dict(
open=np.float64(candle[1]),
high=np.float64(candle[3]),
low=np.float64(candle[4]),
close=np.float64(candle[2]),
volume=np.float64(candle[5]),
price=np.float64(candle[2]),
last_traded=last_traded
)
return ohlc
if is_list:
ohlc_bars = []
                # We want to list candles from old to new
for candle in reversed(candles):
ohlc = ohlc_from_candle(candle)
ohlc_bars.append(ohlc)
ohlc_map[asset] = ohlc_bars
else:
ohlc = ohlc_from_candle(candles)
ohlc_map[asset] = ohlc
return ohlc_map[assets] \
if isinstance(assets, TradingPair) else ohlc_map
def create_order(self, asset, amount, is_buy, style):
"""
Creating order on the exchange.
:param asset:
:param amount:
:param is_buy:
:param style:
:return:
"""
exchange_symbol = self.get_symbol(asset)
if isinstance(style, ExchangeLimitOrder) \
or isinstance(style, ExchangeStopLimitOrder):
price = style.get_limit_price(is_buy)
order_type = 'limit'
elif isinstance(style, ExchangeStopOrder):
price = style.get_stop_price(is_buy)
order_type = 'stop'
else:
raise InvalidOrderStyle(exchange=self.name,
style=style.__class__.__name__)
req = dict(
symbol=exchange_symbol,
amount=str(float(abs(amount))),
price="{:.20f}".format(float(price)),
side='buy' if is_buy else 'sell',
type='exchange ' + order_type, # TODO: support margin trades
exchange=self.name,
is_hidden=False,
is_postonly=False,
use_all_available=0,
ocoorder=False,
buy_price_oco=0,
sell_price_oco=0
)
date = pd.Timestamp.utcnow()
try:
self.ask_request()
response = self._request('order/new', req)
order_status = response.json()
except Exception as e:
raise ExchangeRequestError(error=e)
if 'message' in order_status:
raise ExchangeRequestError(
error='unable to create Bitfinex order {}'.format(
order_status['message'])
)
order_id = str(order_status['id'])
order = Order(
dt=date,
asset=asset,
amount=amount,
stop=style.get_stop_price(is_buy),
limit=style.get_limit_price(is_buy),
id=order_id
)
return order
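    # Illustrative usage sketch (not part of the original file): placing a
    # limit order. The ExchangeLimitOrder constructor signature is assumed, and
    # `exchange` and `asset` come from the surrounding codebase.
    #
    #   style = ExchangeLimitOrder(limit_price=0.01)
    #   order = exchange.create_order(asset, amount=0.5, is_buy=True, style=style)
    #   print(order.id, order.amount, order.limit)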
def get_open_orders(self, asset=None):
"""Retrieve all of the current open orders.
Parameters
----------
asset : Asset
If passed and not None, return only the open orders for the given
asset instead of all open orders.
Returns
-------
open_orders : dict[list[Order]] or list[Order]
If no asset is passed this will return a dict mapping Assets
to a list containing all the open orders for the asset.
If an asset is passed then this will return a list of the open
orders for this asset.
"""
try:
self.ask_request()
response = self._request('orders', None)
order_statuses = response.json()
except Exception as e:
raise ExchangeRequestError(error=e)
if 'message' in order_statuses:
raise ExchangeRequestError(
error='Unable to retrieve open orders: {}'.format(
order_statuses['message'])
)
orders = []
for order_status in order_statuses:
order, executed_price = self._create_order(order_status)
if asset is None or asset == order.sid:
orders.append(order)
return orders
def get_order(self, order_id):
"""Lookup an order based on the order id returned from one of the
order functions.
Parameters
----------
order_id : str
The unique identifier for the order.
Returns
-------
order : Order
The order object.
"""
try:
self.ask_request() | response = self._request(
'order/status', {'order_id': int(order_id)}) | random_line_split |
|
bitfinex.py | t' + pair[0].upper() + pair[1].upper()
return symbol
def _get_v2_symbols(self, assets):
"""
Workaround to support Bitfinex v2
TODO: Might require a separate asset dictionary
:param assets:
:return:
"""
v2_symbols = []
for asset in assets:
v2_symbols.append(self._get_v2_symbol(asset))
return v2_symbols
def _create_order(self, order_status):
"""
Create a Catalyst order object from a Bitfinex order dictionary
:param order_status:
:return: Order
"""
if order_status['is_cancelled']:
status = ORDER_STATUS.CANCELLED
elif not order_status['is_live']:
log.info('found executed order {}'.format(order_status))
status = ORDER_STATUS.FILLED
else:
status = ORDER_STATUS.OPEN
amount = float(order_status['original_amount'])
filled = float(order_status['executed_amount'])
if order_status['side'] == 'sell':
amount = -amount
filled = -filled
price = float(order_status['price'])
order_type = order_status['type']
stop_price = None
limit_price = None
# TODO: is this comprehensive enough?
if order_type.endswith('limit'):
limit_price = price
elif order_type.endswith('stop'):
stop_price = price
executed_price = float(order_status['avg_execution_price'])
        # TODO: Bitfinex does not specify commission; we could compute it ourselves, but it may not be worth it.
commission = None
date = pd.Timestamp.utcfromtimestamp(float(order_status['timestamp']))
date = pytz.utc.localize(date)
order = Order(
dt=date,
asset=self.assets[order_status['symbol']],
amount=amount,
stop=stop_price,
limit=limit_price,
filled=filled,
id=str(order_status['id']),
commission=commission
)
order.status = status
return order, executed_price
def get_balances(self):
log.debug('retrieving wallets balances')
try:
self.ask_request()
response = self._request('balances', None)
balances = response.json()
except Exception as e:
raise ExchangeRequestError(error=e)
if 'message' in balances:
raise ExchangeRequestError(
error='unable to fetch balance {}'.format(balances['message'])
)
std_balances = dict()
for balance in balances:
currency = balance['currency'].lower()
std_balances[currency] = float(balance['available'])
return std_balances
@property
def account(self):
account = Account()
account.settled_cash = None
account.accrued_interest = None
account.buying_power = None
account.equity_with_loan = None
account.total_positions_value = None
account.total_positions_exposure = None
account.regt_equity = None
account.regt_margin = None
account.initial_margin_requirement = None
account.maintenance_margin_requirement = None
account.available_funds = None
account.excess_liquidity = None
account.cushion = None
account.day_trades_remaining = None
account.leverage = None
account.net_leverage = None
account.net_liquidation = None
return account
@property
def time_skew(self):
# TODO: research the time skew conditions
return pd.Timedelta('0s')
def get_account(self):
# TODO: fetch account data and keep in cache
return None
def get_candles(self, data_frequency, assets, bar_count=None,
start_dt=None, end_dt=None):
"""
Retrieve OHLVC candles from Bitfinex
:param data_frequency:
:param assets:
:param bar_count:
:return:
Available Frequencies
---------------------
'1m', '5m', '15m', '30m', '1h', '3h', '6h', '12h', '1D', '7D', '14D',
'1M'
"""
freq_match = re.match(r'([0-9].*)(m|h|d)', data_frequency, re.M | re.I)
if freq_match:
number = int(freq_match.group(1))
unit = freq_match.group(2)
if unit == 'd':
converted_unit = 'D'
else:
converted_unit = unit
frequency = '{}{}'.format(number, converted_unit)
allowed_frequencies = ['1m', '5m', '15m', '30m', '1h', '3h', '6h',
'12h', '1D', '7D', '14D', '1M']
if frequency not in allowed_frequencies:
raise InvalidHistoryFrequencyError(
frequency=data_frequency
)
elif data_frequency == 'minute':
frequency = '1m'
elif data_frequency == 'daily':
frequency = '1D'
else:
raise InvalidHistoryFrequencyError(
frequency=data_frequency
)
# Making sure that assets are iterable
asset_list = [assets] if isinstance(assets, TradingPair) else assets
ohlc_map = dict()
for asset in asset_list:
symbol = self._get_v2_symbol(asset)
url = '{url}/v2/candles/trade:{frequency}:{symbol}'.format(
url=self.url,
frequency=frequency,
symbol=symbol
)
if bar_count:
is_list = True
url += '/hist?limit={}'.format(int(bar_count))
def get_ms(date):
epoch = datetime.datetime.utcfromtimestamp(0)
epoch = epoch.replace(tzinfo=pytz.UTC)
return (date - epoch).total_seconds() * 1000.0
if start_dt is not None:
|
if end_dt is not None:
end_ms = get_ms(end_dt)
url += '&end={0:f}'.format(end_ms)
else:
is_list = False
url += '/last'
try:
self.ask_request()
response = requests.get(url)
except Exception as e:
raise ExchangeRequestError(error=e)
if 'error' in response.content:
raise ExchangeRequestError(
error='Unable to retrieve candles: {}'.format(
response.content)
)
candles = response.json()
def ohlc_from_candle(candle):
last_traded = pd.Timestamp.utcfromtimestamp(
candle[0] / 1000.0)
last_traded = last_traded.replace(tzinfo=pytz.UTC)
ohlc = dict(
open=np.float64(candle[1]),
high=np.float64(candle[3]),
low=np.float64(candle[4]),
close=np.float64(candle[2]),
volume=np.float64(candle[5]),
price=np.float64(candle[2]),
last_traded=last_traded
)
return ohlc
if is_list:
ohlc_bars = []
                # We want to list candles from old to new
for candle in reversed(candles):
ohlc = ohlc_from_candle(candle)
ohlc_bars.append(ohlc)
ohlc_map[asset] = ohlc_bars
else:
ohlc = ohlc_from_candle(candles)
ohlc_map[asset] = ohlc
return ohlc_map[assets] \
if isinstance(assets, TradingPair) else ohlc_map
def create_order(self, asset, amount, is_buy, style):
"""
Creating order on the exchange.
:param asset:
:param amount:
:param is_buy:
:param style:
:return:
"""
exchange_symbol = self.get_symbol(asset)
if isinstance(style, ExchangeLimitOrder) \
or isinstance(style, ExchangeStopLimitOrder):
price = style.get_limit_price(is_buy)
order_type = 'limit'
elif isinstance(style, ExchangeStopOrder):
price = style.get_stop_price(is_buy)
order_type = 'stop'
else:
raise InvalidOrderStyle(exchange=self.name,
style=style.__class__.__name__)
req = dict(
symbol=exchange_symbol,
amount=str(float(abs(amount))),
price="{:.20f}".format(float(price)),
side='buy' if is_buy else 'sell',
type='exchange ' + order_type, # TODO: support margin trades
exchange=self.name,
is_hidden=False,
is_postonly=False,
use_all_available=0,
ocoorder=False,
buy_price_oco=0,
sell_price_oco=0
)
date = pd.Timestamp.utcnow()
try:
self.ask_request()
response = self._request('order/new', req)
order_status = response.json()
except Exception as e:
raise ExchangeRequestError(error=e)
if 'message' in order_status:
raise ExchangeRequestError(
error='unable to create Bitfinex order {}'.format(
order_status['message'])
)
order_id = str(order_status['id'])
order = Order(
dt=date,
asset=asset,
amount=amount,
| start_ms = get_ms(start_dt)
url += '&start={0:f}'.format(start_ms) | conditional_block |
bitfinex.py | t' + pair[0].upper() + pair[1].upper()
return symbol
def _get_v2_symbols(self, assets):
"""
Workaround to support Bitfinex v2
TODO: Might require a separate asset dictionary
:param assets:
:return:
"""
v2_symbols = []
for asset in assets:
v2_symbols.append(self._get_v2_symbol(asset))
return v2_symbols
def _create_order(self, order_status):
"""
Create a Catalyst order object from a Bitfinex order dictionary
:param order_status:
:return: Order
"""
if order_status['is_cancelled']:
status = ORDER_STATUS.CANCELLED
elif not order_status['is_live']:
log.info('found executed order {}'.format(order_status))
status = ORDER_STATUS.FILLED
else:
status = ORDER_STATUS.OPEN
amount = float(order_status['original_amount'])
filled = float(order_status['executed_amount'])
if order_status['side'] == 'sell':
amount = -amount
filled = -filled
price = float(order_status['price'])
order_type = order_status['type']
stop_price = None
limit_price = None
# TODO: is this comprehensive enough?
if order_type.endswith('limit'):
limit_price = price
elif order_type.endswith('stop'):
stop_price = price
executed_price = float(order_status['avg_execution_price'])
        # TODO: Bitfinex does not specify commission; we could compute it ourselves, but it may not be worth it.
commission = None
date = pd.Timestamp.utcfromtimestamp(float(order_status['timestamp']))
date = pytz.utc.localize(date)
order = Order(
dt=date,
asset=self.assets[order_status['symbol']],
amount=amount,
stop=stop_price,
limit=limit_price,
filled=filled,
id=str(order_status['id']),
commission=commission
)
order.status = status
return order, executed_price
def get_balances(self):
log.debug('retrieving wallets balances')
try:
self.ask_request()
response = self._request('balances', None)
balances = response.json()
except Exception as e:
raise ExchangeRequestError(error=e)
if 'message' in balances:
raise ExchangeRequestError(
error='unable to fetch balance {}'.format(balances['message'])
)
std_balances = dict()
for balance in balances:
currency = balance['currency'].lower()
std_balances[currency] = float(balance['available'])
return std_balances
@property
def account(self):
account = Account()
account.settled_cash = None
account.accrued_interest = None
account.buying_power = None
account.equity_with_loan = None
account.total_positions_value = None
account.total_positions_exposure = None
account.regt_equity = None
account.regt_margin = None
account.initial_margin_requirement = None
account.maintenance_margin_requirement = None
account.available_funds = None
account.excess_liquidity = None
account.cushion = None
account.day_trades_remaining = None
account.leverage = None
account.net_leverage = None
account.net_liquidation = None
return account
@property
def time_skew(self):
# TODO: research the time skew conditions
return pd.Timedelta('0s')
def | (self):
# TODO: fetch account data and keep in cache
return None
def get_candles(self, data_frequency, assets, bar_count=None,
start_dt=None, end_dt=None):
"""
Retrieve OHLVC candles from Bitfinex
:param data_frequency:
:param assets:
:param bar_count:
:return:
Available Frequencies
---------------------
'1m', '5m', '15m', '30m', '1h', '3h', '6h', '12h', '1D', '7D', '14D',
'1M'
"""
freq_match = re.match(r'([0-9].*)(m|h|d)', data_frequency, re.M | re.I)
if freq_match:
number = int(freq_match.group(1))
unit = freq_match.group(2)
if unit == 'd':
converted_unit = 'D'
else:
converted_unit = unit
frequency = '{}{}'.format(number, converted_unit)
allowed_frequencies = ['1m', '5m', '15m', '30m', '1h', '3h', '6h',
'12h', '1D', '7D', '14D', '1M']
if frequency not in allowed_frequencies:
raise InvalidHistoryFrequencyError(
frequency=data_frequency
)
elif data_frequency == 'minute':
frequency = '1m'
elif data_frequency == 'daily':
frequency = '1D'
else:
raise InvalidHistoryFrequencyError(
frequency=data_frequency
)
# Making sure that assets are iterable
asset_list = [assets] if isinstance(assets, TradingPair) else assets
ohlc_map = dict()
for asset in asset_list:
symbol = self._get_v2_symbol(asset)
url = '{url}/v2/candles/trade:{frequency}:{symbol}'.format(
url=self.url,
frequency=frequency,
symbol=symbol
)
if bar_count:
is_list = True
url += '/hist?limit={}'.format(int(bar_count))
def get_ms(date):
epoch = datetime.datetime.utcfromtimestamp(0)
epoch = epoch.replace(tzinfo=pytz.UTC)
return (date - epoch).total_seconds() * 1000.0
if start_dt is not None:
start_ms = get_ms(start_dt)
url += '&start={0:f}'.format(start_ms)
if end_dt is not None:
end_ms = get_ms(end_dt)
url += '&end={0:f}'.format(end_ms)
else:
is_list = False
url += '/last'
try:
self.ask_request()
response = requests.get(url)
except Exception as e:
raise ExchangeRequestError(error=e)
if 'error' in response.content:
raise ExchangeRequestError(
error='Unable to retrieve candles: {}'.format(
response.content)
)
candles = response.json()
def ohlc_from_candle(candle):
last_traded = pd.Timestamp.utcfromtimestamp(
candle[0] / 1000.0)
last_traded = last_traded.replace(tzinfo=pytz.UTC)
ohlc = dict(
open=np.float64(candle[1]),
high=np.float64(candle[3]),
low=np.float64(candle[4]),
close=np.float64(candle[2]),
volume=np.float64(candle[5]),
price=np.float64(candle[2]),
last_traded=last_traded
)
return ohlc
if is_list:
ohlc_bars = []
                # We want to list candles from old to new
for candle in reversed(candles):
ohlc = ohlc_from_candle(candle)
ohlc_bars.append(ohlc)
ohlc_map[asset] = ohlc_bars
else:
ohlc = ohlc_from_candle(candles)
ohlc_map[asset] = ohlc
return ohlc_map[assets] \
if isinstance(assets, TradingPair) else ohlc_map
def create_order(self, asset, amount, is_buy, style):
"""
Creating order on the exchange.
:param asset:
:param amount:
:param is_buy:
:param style:
:return:
"""
exchange_symbol = self.get_symbol(asset)
if isinstance(style, ExchangeLimitOrder) \
or isinstance(style, ExchangeStopLimitOrder):
price = style.get_limit_price(is_buy)
order_type = 'limit'
elif isinstance(style, ExchangeStopOrder):
price = style.get_stop_price(is_buy)
order_type = 'stop'
else:
raise InvalidOrderStyle(exchange=self.name,
style=style.__class__.__name__)
req = dict(
symbol=exchange_symbol,
amount=str(float(abs(amount))),
price="{:.20f}".format(float(price)),
side='buy' if is_buy else 'sell',
type='exchange ' + order_type, # TODO: support margin trades
exchange=self.name,
is_hidden=False,
is_postonly=False,
use_all_available=0,
ocoorder=False,
buy_price_oco=0,
sell_price_oco=0
)
date = pd.Timestamp.utcnow()
try:
self.ask_request()
response = self._request('order/new', req)
order_status = response.json()
except Exception as e:
raise ExchangeRequestError(error=e)
if 'message' in order_status:
raise ExchangeRequestError(
error='unable to create Bitfinex order {}'.format(
order_status['message'])
)
order_id = str(order_status['id'])
order = Order(
dt=date,
asset=asset,
amount=amount,
| get_account | identifier_name |
compile.py | ('#'):
typ, arg = 'channel', arg[1:]
elif ':' in arg:
typ, arg = arg.split(':', 1)
else:
typ = 'str'
data = {}
if '(' in typ and typ.endswith(')'):
typ, typarg = typ.split('(', 1)
typarg = typarg[:-1]
data['type-argument'] = typarg
# make sure the type is a known argument type
if not typ in ['flag', 'literal']:
warn('type does not take argument: {}'.format(typ))
# make sure the type is known
if not typ in ['str', 'int', 'flag', 'literal', 'channel']:
warn('unknown type: {}'.format(typ))
data['type'] = typ
ret['name'] = check_name(arg)
return data
def parse_arg(arg_orig):
b, arg = unpack_brackets(arg_orig)
ret = {}
if not b:
# literal
return (['left', 'right'], {'type': 'literal', 'type-argument': arg})
elif b == '<':
typ = parse_inner_arg(arg, ret)
ret.update(typ)
return (['left', 'right'], ret)
elif b == '[':
ret['type'] = 'optional'
ret['inner'] = parse_inner_arg(arg, ret)
return (['left'], ret)
elif b == '(':
ret['type'] = 'optional'
ret['inner'] = parse_inner_arg(arg, ret)
return (['right'], ret)
else:
warn('cannot parse argument: {}'.format(arg_orig))
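# Illustrative sketch (not part of the original file): a bare token parses as
# a literal and <...> as a required named argument. This assumes
# unpack_brackets (defined earlier in the file) returns an empty bracket for
# bare tokens.
def _demo_parse_arg():
    print(parse_arg('NICK'))    # -> (['left', 'right'], {'type': 'literal', 'type-argument': 'NICK'})
    print(parse_arg('<nick>'))  # -> (['left', 'right'], {'name': 'nick', 'type': 'str'})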
def check_name(name):
name = name.strip()
if not name: # names should have length
warn('zero-length name')
if name.lower() != name: # names should be lower-case
warn('name not lowcased: {}'.format(name))
if len(name.split()) > 1: # names should have no whitespace
warn('name has whitespace: {}'.format(name))
# names should be [a-z][0-9] and - only
if not all(c.isalpha() or c.isdigit() or c == '-' for c in name):
warn('name has invalid characters: {}'.format(name))
return name
def check_verb(verb):
if not verb.upper() == verb: # verbs should be upper case
warn('verb not upcased: {}'.format(verb))
if verb.isnumeric():
# numerics must be 000 formatted
if verb != '{:03d}'.format(int(verb)):
warn('invalid numeric format: {}'.format(verb))
verb = int(verb)
# numerics must be within this range
if verb <= 0 or verb > 999:
warn('invalid numeric code: {}'.format(verb))
return verb
def parse_format(fmt, data):
data['format'] = fmt
# do our own tokenizing, to force balanced parens but handle : outside
tokens = []
expectstack = []
expectmap = {'(': ')', '[': ']', '<': '>'}
gather = ''
split_on_space = True
for c in fmt: | split_on_space = False
continue
if split_on_space and c.isspace():
if gather:
tokens.append(gather)
gather = ''
else:
gather += c
if gather:
tokens.append(gather)
if expectstack:
warn('unbalanced brackets, expecting: {}'.format(expectstack))
# there should be at least a verb
if not tokens:
warn('no verb found')
verb = tokens[0]
args = tokens[1:]
data['verb'] = check_verb(verb)
if isinstance(data['verb'], int):
data['type'] = 'numeric'
else:
data['type'] = 'text'
associativity = set(['left', 'right'])
data['arguments'] = []
argnames = []
for a in args:
assoc, arg = parse_arg(a)
associativity = associativity.intersection(assoc)
if 'name' in arg:
# arguments must be unique
if arg['name'] in argnames:
warn('non-unique argument name: {}'.format(arg['name']))
argnames.append(arg['name'])
data['arguments'].append(arg)
# rectify associativities
if not associativity:
warn('mixed associativities')
associativity = list(associativity)
associativity.sort()
data['associativity'] = associativity[0]
# numerics all have targets
if data['type'] == 'numeric':
if len(data['arguments']) < 1 or data['arguments'][0].get('name') != 'target' or data['arguments'][0].get('type') != 'str':
print(data['arguments'][0])
warn('numerics need a <target> argument')
# a bunch of literals next to each other is always an error
last_type = None
for arg in data['arguments']:
if arg['type'] == 'literal' and last_type == 'literal':
warn('two successive literals, you need a :')
break
last_type = arg['type']
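# Illustrative sketch (not part of the original file): what parse_format
# derives from a simple text command; the trailing : glues the rest of the
# line into one argument.
def _demo_parse_format():
    data = {}
    parse_format('PRIVMSG <target> :<message>', data)
    print(data['verb'], data['type'])              # -> PRIVMSG text
    print([a['name'] for a in data['arguments']])  # -> ['target', 'message']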
section_names = []
def check_section(title, data):
required = ['name']
# must have these fields
for k in required:
if not k in data:
warn('required field `{}` missing'.format(k))
return None
# validate name
data['name'] = check_name(data['name'])
# section names must be unique
if data['name'] in section_names:
warn('non-unique section name: {}'.format(data['name']))
section_names.append(data['name'])
# add title
data['title'] = title
return data
message_names = []
message_verbs = {}
def check_message(fmt, data):
required = ['name']
# must have these fields
for k in required:
if not k in data:
warn('required field `{}` missing'.format(k))
return None
# fill in computed details from format
parse_format(fmt, data)
# validate name
data['name'] = check_name(data['name'])
# message names must be unique
if data['name'] in message_names:
warn('non-unique message name: {}'.format(data['name']))
message_names.append(data['name'])
# message verbs must be unique
if data['verb'] in message_verbs:
warn('non-unique verb: {}'.format(data['verb']))
message_verbs[data['verb']] = data['name']
    # related is a comma-separated list
if 'related' in data:
data['related'] = [check_verb(r.strip()) for r in data['related'].split(',')]
# only refer to section by name
data['section'] = data['section']['name']
return data
def check_version(ver, data):
if not '.' in ver:
warn('invalid version format')
return (0, 0)
maj, min = ver.split('.', 1)
if not maj.isnumeric() or not min.isnumeric():
warn('invalid version format')
return (0, 0)
return (int(maj), int(min))
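# Illustrative sketch (not part of the original file): well-formed versions
# become (major, minor) tuples; malformed input warns and yields (0, 0).
def _demo_check_version():
    print(check_version('1.2', {}))   # -> (1, 2)
    print(check_version('oops', {}))  # -> (0, 0)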
def check_whole(data):
# make sure all related verbs actually exist
# and resolve them into names
for msg in data['messages']:
resolved_rel = []
for rel in msg.get('related', []):
if not rel in message_verbs:
warn('unknown related verb for {}: {}'.format(msg['verb'], rel))
else:
resolved_rel.append(message_verbs[rel])
if resolved_rel:
msg['related'] = resolved_rel
return data
def create_description(f, fname):
lineno = 0
lastheaderno = 0
room_for_header = True
sections = []
messages = []
version = None
header = None
gather = {}
fields = {
'Version': [],
'Section': ['name', 'url'],
'Message': ['name', 'related', 'documentation'],
}
warnings = 0
def local_warn(s):
nonlocal warnings
warnings += 1
if 'verb' in gather:
print('{}:{}: (verb {}) {}'.format(fname, lastheaderno, gather['verb'], s))
else:
print('{}:{}: {}'.format(fname, lastheaderno, s))
global warn
warn = local_warn
def emit():
nonlocal header, gather, sections, messages, version
if header is not None:
if header[0] == 'Version':
if version:
warn('only one version allowed')
version = check_version(header[1], gather)
elif header[0] == 'Section':
section = check_section(header[1], gather)
if section:
sections.append(section)
elif header[0] == 'Message':
if sections:
gather['section'] = sections[-1]
message = check_message(header[1], gather)
if message:
messages.append(message)
else:
| if c in expectmap:
expectstack.append(expectmap[c])
if expectstack and c == expectstack[-1]:
expectstack.pop()
if c == ':' and not expectstack: | random_line_split |
compile.py | '):
typ, arg = 'channel', arg[1:]
elif ':' in arg:
typ, arg = arg.split(':', 1)
else:
typ = 'str'
data = {}
if '(' in typ and typ.endswith(')'):
typ, typarg = typ.split('(', 1)
typarg = typarg[:-1]
data['type-argument'] = typarg
# make sure the type is a known argument type
if not typ in ['flag', 'literal']:
warn('type does not take argument: {}'.format(typ))
# make sure the type is known
if not typ in ['str', 'int', 'flag', 'literal', 'channel']:
warn('unknown type: {}'.format(typ))
data['type'] = typ
ret['name'] = check_name(arg)
return data
def parse_arg(arg_orig):
b, arg = unpack_brackets(arg_orig)
ret = {}
if not b:
# literal
return (['left', 'right'], {'type': 'literal', 'type-argument': arg})
elif b == '<':
typ = parse_inner_arg(arg, ret)
ret.update(typ)
return (['left', 'right'], ret)
elif b == '[':
ret['type'] = 'optional'
ret['inner'] = parse_inner_arg(arg, ret)
return (['left'], ret)
elif b == '(':
ret['type'] = 'optional'
ret['inner'] = parse_inner_arg(arg, ret)
return (['right'], ret)
else:
warn('cannot parse argument: {}'.format(arg_orig))
def check_name(name):
name = name.strip()
if not name: # names should have length
warn('zero-length name')
if name.lower() != name: # names should be lower-case
warn('name not lowcased: {}'.format(name))
if len(name.split()) > 1: # names should have no whitespace
|
# names should be [a-z][0-9] and - only
if not all(c.isalpha() or c.isdigit() or c == '-' for c in name):
warn('name has invalid characters: {}'.format(name))
return name
def check_verb(verb):
if not verb.upper() == verb: # verbs should be upper case
warn('verb not upcased: {}'.format(verb))
if verb.isnumeric():
# numerics must be 000 formatted
if verb != '{:03d}'.format(int(verb)):
warn('invalid numeric format: {}'.format(verb))
verb = int(verb)
# numerics must be within this range
if verb <= 0 or verb > 999:
warn('invalid numeric code: {}'.format(verb))
return verb
def parse_format(fmt, data):
data['format'] = fmt
# do our own tokenizing, to force balanced parens but handle : outside
tokens = []
expectstack = []
expectmap = {'(': ')', '[': ']', '<': '>'}
gather = ''
split_on_space = True
for c in fmt:
if c in expectmap:
expectstack.append(expectmap[c])
if expectstack and c == expectstack[-1]:
expectstack.pop()
if c == ':' and not expectstack:
split_on_space = False
continue
if split_on_space and c.isspace():
if gather:
tokens.append(gather)
gather = ''
else:
gather += c
if gather:
tokens.append(gather)
if expectstack:
warn('unbalanced brackets, expecting: {}'.format(expectstack))
# there should be at least a verb
if not tokens:
warn('no verb found')
verb = tokens[0]
args = tokens[1:]
data['verb'] = check_verb(verb)
if isinstance(data['verb'], int):
data['type'] = 'numeric'
else:
data['type'] = 'text'
associativity = set(['left', 'right'])
data['arguments'] = []
argnames = []
for a in args:
assoc, arg = parse_arg(a)
associativity = associativity.intersection(assoc)
if 'name' in arg:
# arguments must be unique
if arg['name'] in argnames:
warn('non-unique argument name: {}'.format(arg['name']))
argnames.append(arg['name'])
data['arguments'].append(arg)
# rectify associativities
if not associativity:
warn('mixed associativities')
associativity = list(associativity)
associativity.sort()
data['associativity'] = associativity[0]
# numerics all have targets
if data['type'] == 'numeric':
if len(data['arguments']) < 1 or data['arguments'][0].get('name') != 'target' or data['arguments'][0].get('type') != 'str':
print(data['arguments'][0])
warn('numerics need a <target> argument')
# a bunch of literals next to each other is always an error
last_type = None
for arg in data['arguments']:
if arg['type'] == 'literal' and last_type == 'literal':
warn('two successive literals, you need a :')
break
last_type = arg['type']
section_names = []
def check_section(title, data):
required = ['name']
# must have these fields
for k in required:
if not k in data:
warn('required field `{}` missing'.format(k))
return None
# validate name
data['name'] = check_name(data['name'])
# section names must be unique
if data['name'] in section_names:
warn('non-unique section name: {}'.format(data['name']))
section_names.append(data['name'])
# add title
data['title'] = title
return data
message_names = []
message_verbs = {}
def check_message(fmt, data):
required = ['name']
# must have these fields
for k in required:
if not k in data:
warn('required field `{}` missing'.format(k))
return None
# fill in computed details from format
parse_format(fmt, data)
# validate name
data['name'] = check_name(data['name'])
# message names must be unique
if data['name'] in message_names:
warn('non-unique message name: {}'.format(data['name']))
message_names.append(data['name'])
# message verbs must be unique
if data['verb'] in message_verbs:
warn('non-unique verb: {}'.format(data['verb']))
message_verbs[data['verb']] = data['name']
    # related is a comma-separated list
if 'related' in data:
data['related'] = [check_verb(r.strip()) for r in data['related'].split(',')]
# only refer to section by name
data['section'] = data['section']['name']
return data
def check_version(ver, data):
if not '.' in ver:
warn('invalid version format')
return (0, 0)
maj, min = ver.split('.', 1)
if not maj.isnumeric() or not min.isnumeric():
warn('invalid version format')
return (0, 0)
return (int(maj), int(min))
def check_whole(data):
# make sure all related verbs actually exist
# and resolve them into names
for msg in data['messages']:
resolved_rel = []
for rel in msg.get('related', []):
if not rel in message_verbs:
warn('unknown related verb for {}: {}'.format(msg['verb'], rel))
else:
resolved_rel.append(message_verbs[rel])
if resolved_rel:
msg['related'] = resolved_rel
return data
def create_description(f, fname):
lineno = 0
lastheaderno = 0
room_for_header = True
sections = []
messages = []
version = None
header = None
gather = {}
fields = {
'Version': [],
'Section': ['name', 'url'],
'Message': ['name', 'related', 'documentation'],
}
warnings = 0
def local_warn(s):
nonlocal warnings
warnings += 1
if 'verb' in gather:
print('{}:{}: (verb {}) {}'.format(fname, lastheaderno, gather['verb'], s))
else:
print('{}:{}: {}'.format(fname, lastheaderno, s))
global warn
warn = local_warn
def emit():
nonlocal header, gather, sections, messages, version
if header is not None:
if header[0] == 'Version':
if version:
warn('only one version allowed')
version = check_version(header[1], gather)
elif header[0] == 'Section':
section = check_section(header[1], gather)
if section:
sections.append(section)
elif header[0] == 'Message':
if sections:
gather['section'] = sections[-1]
message = check_message(header[1], gather)
if message:
messages.append(message)
else:
| warn('name has whitespace: {}'.format(name)) | conditional_block |
compile.py | '):
typ, arg = 'channel', arg[1:]
elif ':' in arg:
typ, arg = arg.split(':', 1)
else:
typ = 'str'
data = {}
if '(' in typ and typ.endswith(')'):
typ, typarg = typ.split('(', 1)
typarg = typarg[:-1]
data['type-argument'] = typarg
# make sure the type is a known argument type
if not typ in ['flag', 'literal']:
warn('type does not take argument: {}'.format(typ))
# make sure the type is known
if not typ in ['str', 'int', 'flag', 'literal', 'channel']:
warn('unknown type: {}'.format(typ))
data['type'] = typ
ret['name'] = check_name(arg)
return data
def parse_arg(arg_orig):
b, arg = unpack_brackets(arg_orig)
ret = {}
if not b:
# literal
return (['left', 'right'], {'type': 'literal', 'type-argument': arg})
elif b == '<':
typ = parse_inner_arg(arg, ret)
ret.update(typ)
return (['left', 'right'], ret)
elif b == '[':
ret['type'] = 'optional'
ret['inner'] = parse_inner_arg(arg, ret)
return (['left'], ret)
elif b == '(':
ret['type'] = 'optional'
ret['inner'] = parse_inner_arg(arg, ret)
return (['right'], ret)
else:
warn('cannot parse argument: {}'.format(arg_orig))
# fall back to treating the token as a literal so callers can still unpack
return (['left', 'right'], {'type': 'literal', 'type-argument': arg_orig})
def check_name(name):
name = name.strip()
if not name: # names should have length
warn('zero-length name')
if name.lower() != name: # names should be lower-case
warn('name not lowcased: {}'.format(name))
if len(name.split()) > 1: # names should have no whitespace
warn('name has whitespace: {}'.format(name))
# names should be [a-z][0-9] and - only
if not all(c.isalpha() or c.isdigit() or c == '-' for c in name):
warn('name has invalid characters: {}'.format(name))
return name
def check_verb(verb):
if not verb.upper() == verb: # verbs should be upper case
warn('verb not upcased: {}'.format(verb))
if verb.isnumeric():
# numerics must be 000 formatted
if verb != '{:03d}'.format(int(verb)):
warn('invalid numeric format: {}'.format(verb))
verb = int(verb)
# numerics must be within this range
if verb <= 0 or verb > 999:
warn('invalid numeric code: {}'.format(verb))
return verb
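# check_verb at a glance (hypothetical REPL session, with warn() printing):
# check_verb('PRIVMSG') -> 'PRIVMSG'
# check_verb('005') -> 5
# check_verb('05') -> 5, after warning 'invalid numeric format: 05'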
def | (fmt, data):
data['format'] = fmt
# do our own tokenizing, to force balanced parens but handle : outside
tokens = []
expectstack = []
expectmap = {'(': ')', '[': ']', '<': '>'}
gather = ''
split_on_space = True
for c in fmt:
if c in expectmap:
expectstack.append(expectmap[c])
if expectstack and c == expectstack[-1]:
expectstack.pop()
if c == ':' and not expectstack:
split_on_space = False
continue
if split_on_space and c.isspace():
if gather:
tokens.append(gather)
gather = ''
else:
gather += c
if gather:
tokens.append(gather)
if expectstack:
warn('unbalanced brackets, expecting: {}'.format(expectstack))
# there should be at least a verb
if not tokens:
warn('no verb found')
verb = tokens[0]
args = tokens[1:]
data['verb'] = check_verb(verb)
if isinstance(data['verb'], int):
data['type'] = 'numeric'
else:
data['type'] = 'text'
associativity = set(['left', 'right'])
data['arguments'] = []
argnames = []
for a in args:
assoc, arg = parse_arg(a)
associativity = associativity.intersection(assoc)
if 'name' in arg:
# arguments must be unique
if arg['name'] in argnames:
warn('non-unique argument name: {}'.format(arg['name']))
argnames.append(arg['name'])
data['arguments'].append(arg)
# rectify associativities
if not associativity:
warn('mixed associativities')
associativity = list(associativity)
associativity.sort()
data['associativity'] = associativity[0]
# numerics all have targets
if data['type'] == 'numeric':
if len(data['arguments']) < 1 or data['arguments'][0].get('name') != 'target' or data['arguments'][0].get('type') != 'str':
warn('numerics need a <target> argument')
# a bunch of literals next to each other is always an error
last_type = None
for arg in data['arguments']:
if arg['type'] == 'literal' and last_type == 'literal':
warn('two successive literals, you need a :')
break
last_type = arg['type']
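# Minimal sketch of driving parse_format directly; the format string is an
# invented example, not one lifted from the real description files:
# data = {}
# parse_format('KICK <channel> <nick> (reason)', data)
# # data['verb'] == 'KICK', data['type'] == 'text', three arguments, and
# # associativity resolves to 'right' because of the (reason) group.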
section_names = []
def check_section(title, data):
required = ['name']
# must have these fields
for k in required:
if not k in data:
warn('required field `{}` missing'.format(k))
return None
# validate name
data['name'] = check_name(data['name'])
# section names must be unique
if data['name'] in section_names:
warn('non-unique section name: {}'.format(data['name']))
section_names.append(data['name'])
# add title
data['title'] = title
return data
message_names = []
message_verbs = {}
def check_message(fmt, data):
required = ['name']
# must have these fields
for k in required:
if not k in data:
warn('required field `{}` missing'.format(k))
return None
# fill in computed details from format
parse_format(fmt, data)
# validate name
data['name'] = check_name(data['name'])
# message names must be unique
if data['name'] in message_names:
warn('non-unique message name: {}'.format(data['name']))
message_names.append(data['name'])
# message verbs must be unique
if data['verb'] in message_verbs:
warn('non-unique verb: {}'.format(data['verb']))
message_verbs[data['verb']] = data['name']
# related is a comma-separated list
if 'related' in data:
data['related'] = [check_verb(r.strip()) for r in data['related'].split(',')]
# only refer to section by name
if 'section' in data:
data['section'] = data['section']['name']
else:
warn('message defined before any section')
return data
def check_version(ver, data):
if not '.' in ver:
warn('invalid version format')
return (0, 0)
maj, min = ver.split('.', 1)
if not maj.isnumeric() or not min.isnumeric():
warn('invalid version format')
return (0, 0)
return (int(maj), int(min))
def check_whole(data):
# make sure all related verbs actually exist
# and resolve them into names
for msg in data['messages']:
resolved_rel = []
for rel in msg.get('related', []):
if not rel in message_verbs:
warn('unknown related verb for {}: {}'.format(msg['verb'], rel))
else:
resolved_rel.append(message_verbs[rel])
if resolved_rel:
msg['related'] = resolved_rel
return data
def create_description(f, fname):
lineno = 0
lastheaderno = 0
room_for_header = True
sections = []
messages = []
version = None
header = None
gather = {}
fields = {
'Version': [],
'Section': ['name', 'url'],
'Message': ['name', 'related', 'documentation'],
}
warnings = 0
def local_warn(s):
nonlocal warnings
warnings += 1
if 'verb' in gather:
print('{}:{}: (verb {}) {}'.format(fname, lastheaderno, gather['verb'], s))
else:
print('{}:{}: {}'.format(fname, lastheaderno, s))
global warn
warn = local_warn
def emit():
nonlocal header, gather, sections, messages, version
if header is not None:
if header[0] == 'Version':
if version:
warn('only one version allowed')
version = check_version(header[1], gather)
elif header[0] == 'Section':
section = check_section(header[1], gather)
if section:
sections.append(section)
elif header[0] == 'Message':
if sections:
gather['section'] = sections[-1]
message = check_message(header[1], gather)
if message:
messages.append(message)
else:
| parse_format | identifier_name |
compile.py | '):
typ, arg = 'channel', arg[1:]
elif ':' in arg:
typ, arg = arg.split(':', 1)
else:
typ = 'str'
data = {}
if '(' in typ and typ.endswith(')'):
typ, typarg = typ.split('(', 1)
typarg = typarg[:-1]
data['type-argument'] = typarg
# make sure the type is a known argument type
if not typ in ['flag', 'literal']:
warn('type does not take argument: {}'.format(typ))
# make sure the type is known
if not typ in ['str', 'int', 'flag', 'literal', 'channel']:
warn('unknown type: {}'.format(typ))
data['type'] = typ
ret['name'] = check_name(arg)
return data
def parse_arg(arg_orig):
|
def check_name(name):
name = name.strip()
if not name: # names should have length
warn('zero-length name')
if name.lower() != name: # names should be lower-case
warn('name not lowcased: {}'.format(name))
if len(name.split()) > 1: # names should have no whitespace
warn('name has whitespace: {}'.format(name))
# names should be [a-z][0-9] and - only
if not all(c.isalpha() or c.isdigit() or c == '-' for c in name):
warn('name has invalid characters: {}'.format(name))
return name
def check_verb(verb):
if not verb.upper() == verb: # verbs should be upper case
warn('verb not upcased: {}'.format(verb))
if verb.isnumeric():
# numerics must be 000 formatted
if verb != '{:03d}'.format(int(verb)):
warn('invalid numeric format: {}'.format(verb))
verb = int(verb)
# numerics must be within this range
if verb <= 0 or verb > 999:
warn('invalid numeric code: {}'.format(verb))
return verb
def parse_format(fmt, data):
data['format'] = fmt
# do our own tokenizing, to force balanced parens but handle : outside
tokens = []
expectstack = []
expectmap = {'(': ')', '[': ']', '<': '>'}
gather = ''
split_on_space = True
for c in fmt:
if c in expectmap:
expectstack.append(expectmap[c])
if expectstack and c == expectstack[-1]:
expectstack.pop()
if c == ':' and not expectstack:
split_on_space = False
continue
if split_on_space and c.isspace():
if gather:
tokens.append(gather)
gather = ''
else:
gather += c
if gather:
tokens.append(gather)
if expectstack:
warn('unbalanced brackets, expecting: {}'.format(expectstack))
# there should be at least a verb
if not tokens:
warn('no verb found')
verb = tokens[0]
args = tokens[1:]
data['verb'] = check_verb(verb)
if isinstance(data['verb'], int):
data['type'] = 'numeric'
else:
data['type'] = 'text'
associativity = set(['left', 'right'])
data['arguments'] = []
argnames = []
for a in args:
assoc, arg = parse_arg(a)
associativity = associativity.intersection(assoc)
if 'name' in arg:
# arguments must be unique
if arg['name'] in argnames:
warn('non-unique argument name: {}'.format(arg['name']))
argnames.append(arg['name'])
data['arguments'].append(arg)
# rectify associativities
if not associativity:
warn('mixed associativities')
associativity = list(associativity)
associativity.sort()
data['associativity'] = associativity[0]
# numerics all have targets
if data['type'] == 'numeric':
if len(data['arguments']) < 1 or data['arguments'][0].get('name') != 'target' or data['arguments'][0].get('type') != 'str':
warn('numerics need a <target> argument')
# a bunch of literals next to each other is always an error
last_type = None
for arg in data['arguments']:
if arg['type'] == 'literal' and last_type == 'literal':
warn('two successive literals, you need a :')
break
last_type = arg['type']
section_names = []
def check_section(title, data):
required = ['name']
# must have these fields
for k in required:
if not k in data:
warn('required field `{}` missing'.format(k))
return None
# validate name
data['name'] = check_name(data['name'])
# section names must be unique
if data['name'] in section_names:
warn('non-unique section name: {}'.format(data['name']))
section_names.append(data['name'])
# add title
data['title'] = title
return data
message_names = []
message_verbs = {}
def check_message(fmt, data):
required = ['name']
# must have these fields
for k in required:
if not k in data:
warn('required field `{}` missing'.format(k))
return None
# fill in computed details from format
parse_format(fmt, data)
# validate name
data['name'] = check_name(data['name'])
# message names must be unique
if data['name'] in message_names:
warn('non-unique message name: {}'.format(data['name']))
message_names.append(data['name'])
# message verbs must be unique
if data['verb'] in message_verbs:
warn('non-unique verb: {}'.format(data['verb']))
message_verbs[data['verb']] = data['name']
# related is a comma-separated list
if 'related' in data:
data['related'] = [check_verb(r.strip()) for r in data['related'].split(',')]
# only refer to section by name
if 'section' in data:
data['section'] = data['section']['name']
else:
warn('message defined before any section')
return data
def check_version(ver, data):
if not '.' in ver:
warn('invalid version format')
return (0, 0)
maj, min = ver.split('.', 1)
if not maj.isnumeric() or not min.isnumeric():
warn('invalid version format')
return (0, 0)
return (int(maj), int(min))
def check_whole(data):
# make sure all related verbs actually exist
# and resolve them into names
for msg in data['messages']:
resolved_rel = []
for rel in msg.get('related', []):
if not rel in message_verbs:
warn('unknown related verb for {}: {}'.format(msg['verb'], rel))
else:
resolved_rel.append(message_verbs[rel])
if resolved_rel:
msg['related'] = resolved_rel
return data
def create_description(f, fname):
lineno = 0
lastheaderno = 0
room_for_header = True
sections = []
messages = []
version = None
header = None
gather = {}
fields = {
'Version': [],
'Section': ['name', 'url'],
'Message': ['name', 'related', 'documentation'],
}
warnings = 0
def local_warn(s):
nonlocal warnings
warnings += 1
if 'verb' in gather:
print('{}:{}: (verb {}) {}'.format(fname, lastheaderno, gather['verb'], s))
else:
print('{}:{}: {}'.format(fname, lastheaderno, s))
global warn
warn = local_warn
def emit():
nonlocal header, gather, sections, messages, version
if header is not None:
if header[0] == 'Version':
if version:
warn('only one version allowed')
version = check_version(header[1], gather)
elif header[0] == 'Section':
section = check_section(header[1], gather)
if section:
sections.append(section)
elif header[0] == 'Message':
if sections:
gather['section'] = sections[-1]
message = check_message(header[1], gather)
if message:
messages.append(message)
else:
| b, arg = unpack_brackets(arg_orig)
ret = {}
if not b:
# literal
return (['left', 'right'], {'type': 'literal', 'type-argument': arg})
elif b == '<':
typ = parse_inner_arg(arg, ret)
ret.update(typ)
return (['left', 'right'], ret)
elif b == '[':
ret['type'] = 'optional'
ret['inner'] = parse_inner_arg(arg, ret)
return (['left'], ret)
elif b == '(':
ret['type'] = 'optional'
ret['inner'] = parse_inner_arg(arg, ret)
return (['right'], ret)
else:
warn('cannot parse argument: {}'.format(arg_orig)) | identifier_body |
splayTree.py | .value = value
self.parent = None
self.left = None
self.right = None
def search(self, value):
n = self._find(value)
n._splay()
return n
def insert(self, value):
"""
Inserts a new node with the specified value into the tree, which is then
splayed around it.
O(n), amortized O(log n).
"""
insertion_point = self._find(value)
n = SplayNode(value)
# value already in the tree; add at leftmost position in right subtree
if value == insertion_point.value:
if insertion_point.right is None:
insertion_point.right = n
n.parent = insertion_point
else:
insertion_point = insertion_point.right
while insertion_point.left is not None:
insertion_point = insertion_point.left
insertion_point.left = n
n.parent = insertion_point
# value belongs to the left
elif value < insertion_point.value:
insertion_point.left = n
n.parent = insertion_point
# value belongs to the right
else:
insertion_point.right = n
n.parent = insertion_point
n._splay()
return n # return new root
def delete(self, value):
"""
Searches for the specified value. If found, splays the tree around it;
removes it from the tree; finds its immediate predecessor; splays the
left subtree around that node; and attaches it to the right subtree. If
not found, splays the tree around its nearest parent. Returns the new
root.
O(n), amortized O(log n).
"""
n = self._find(value) # find and splay relevant node
n._splay()
if n.value == value: # only if value actually found
left, right = n._uproot()
# there is a left child: splay around its maximum, connect to right
if left is not None:
while left.right is not None:
left = left.right
left._splay()
left.right = right
if right is not None:
right.parent = left
n = left
# there is no left child: all we need is the right
else:
n = right
return n # new root of the entire tree
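# Worked micro-example of delete (invented values): build 3/5/8, delete 5.
# After splaying 5 out, its predecessor 3 is splayed up on the left side
# and the right subtree (8) is reattached under it:
# t = SplayNode(5); t = t.insert(3); t = t.insert(8)
# t = t.delete(5)
# str(t) == '(3(8))'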
def contains_deprecated(self, value):
"""
Returns whether or not the specified value exists in this tree. Does
not splay the tree, since it does not return a node.
O(n), amortized O(log n).
"""
n = self._find(value)
return n.value == value
def contains(self, value):
"""
Returns whether or not the specified value exists in this tree. Splays
the tree, and returns a tuple of (bool, newRoot).
O(n), amortized O(log n).
"""
n = self.search(value)
return (n.value==value, n)
def getRoot(self):
"""
Returns the root of the tree this node is in. Helpful if the root was
changed but pointers were not updated.
O(n), amortized O(log n).
"""
n = self
while n.parent is not None:
n = n.parent
return n
def __iter__(self):
"""
Generates a sorted traversal of the values in the tree.
O(n^2), amortized O(n log n), but on average much better because one
long _successor call typically causes many short ones.
"""
n = self.getRoot()
while n.left is not None:
n = n.left
while True:
yield n.value
n = n._successor()
if n is None:
break
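# Usage sketch for the in-order generator above:
# t = SplayNode(2); t = t.insert(1); t = t.insert(3)
# list(t) == [1, 2, 3] # works from any node, thanks to getRoot()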
def __str__(self):
# recur in children
left = str(self.left) if self.left is not None else ""
right = str(self.right) if self.right is not None else ""
# combine with this node's string
return "(" + left + str(self.value) + right + ")"
def _find(self, value):
"""
Finds the given value in the tree rooted at this tree, or its would-be
parent if not found. Runs in time linear in the height of the tree.
Does not splay the tree. Will raise an error if value is not comparable
with the values already in the tree.
O(n), amortized O(log n)
"""
# case 1: look deeper, left
if self.value > value and self.left is not None:
return self.left._find(value)
# case 2: look deeper, right
if self.value < value and self.right is not None:
return self.right._find(value)
# case 3: found it, or nothing to find
else:
return self
def _splay(self):
"""
Splay the tree around the node until it is the root.
O(n), amortized O(log n)
"""
if self.parent is not None: # case 1: already the root -> do nothing
grandparent = self.parent.parent
if grandparent is None: # case 2: one rotation to root
self._zig()
else: # case 3: multiple rotations to root
rightOfLeft = self.parent.right is self and grandparent.left is self.parent
leftOfRight = self.parent.left is self and grandparent.right is self.parent
if rightOfLeft or leftOfRight: # case 3a: zigzag
self._zig()
self._zig()
else: # case 3b: zigzig
self.parent._zig()
self._zig()
self._splay() # recur until case 1 or 2 applies
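# Micro-trace of the zig-zig case (hand-built chain, invented values):
# t = SplayNode(3); t.left = SplayNode(2); t.left.parent = t
# t.left.left = SplayNode(1); t.left.left.parent = t.left
# n = t.left.left; n._splay() # parent._zig() then self._zig()
# str(n) == '(1(2(3)))' # the left spine flips into a right spine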
def _zig(self):
"""
Perform one zig (or zag) operation to rotate the node upward.
O(1)
"""
if self.parent is not None:
if self is self.parent.left:
self._rotateClockwise()
elif self is self.parent.right:
self._rotateCounterclockwise()
def _rotateClockwise(self):
"""
Perform one AVL-style rotation clockwise. Requires a rightward parent.
O(1)
"""
p, g, r = self.parent, self.parent.parent, self.right
# connect right child to parent
if r is not None:
r.parent = p
p.left = r
# move parent down + right
p.parent = self
self.right = p
# move this node up + right
self.parent = g
if g is not None:
if p is g.left:
g.left = self
elif p is g.right:
g.right = self
def _rotateCounterclockwise(self):
"""
Perform one AVL-style rotation counterclockwise. Requires a leftward parent.
O(1)
"""
p, g, l = self.parent, self.parent.parent, self.left
# connect left child to parent
if l is not None:
|
p.right = l
# move parent down + left
p.parent = self
self.left = p
# move this node up + left
self.parent = g
if g is not None:
if p is g.left:
g.left = self
elif p is g.right:
g.right = self
def _uproot(self):
"""
Detaches the root node from its children and returns them.
O(1)
"""
left, right = self.left, self.right
if left is not None:
left.parent = None
if right is not None:
right.parent = None
return left, right
def _successor(self):
"""
Gets the successor to this node. Useful for making an inorder
traversal, e.g. to print as sorted.
"""
if self.right is None:
# get first rightward ancestor
m = self
n = m.parent
while n is not None and m is n.right:
m = n
n = n.parent
else:
# get leftmost of right child
n = self.right
while n.left is not None:
n = n.left
return n
class SplayTree():
def __init__(self, typing=None):
self.typing = typing # may delay typing and infer upon first addition
self.root = None
self._size = 0
@property # treeInstance.size may be accessed like an attribute, but not set
def size(self):
return self._size
def insert(self, value):
"""inserts value into tree. sets type if this hasn't been done yet."""
if self.typing is None: # first insertion: set type of this tree
self.typing = type(value)
else: # perform type check
if type(value) != self.typing:
raise TypeError("Type " + str(type(value)) + " is incompatible" +
" with tree of type " + str(self.typing) + ".")
# TODO allow different yet comparable types
# if no error:
if self.root is None:
self.root = SplayNode(value)
else:
self.root = self | l.parent = p | conditional_block |
splayTree.py | self.left = None
self.right = None
def search(self, value):
n = self._find(value)
n._splay()
return n
def insert(self, value):
"""
Inserts a new node with the specified value into the tree, which is then
splayed around it.
O(n), amortized O(log n).
"""
insertion_point = self._find(value)
n = SplayNode(value)
# value already in the tree; add at leftmost position in right subtree
if value == insertion_point.value:
if insertion_point.right is None:
insertion_point.right = n
n.parent = insertion_point
else:
insertion_point = insertion_point.right
while insertion_point.left is not None:
insertion_point = insertion_point.left
insertion_point.left = n
n.parent = insertion_point
# value belongs to the left
elif value < insertion_point.value:
insertion_point.left = n
n.parent = insertion_point
# value belongs to the right
else:
insertion_point.right = n
n.parent = insertion_point
n._splay()
return n # return new root
def delete(self, value):
"""
Searches for the specified value. If found, splays the tree around it;
removes it from the tree; finds its immediate predecessor; splays the
left subtree around that node; and attaches it to the right subtree. If
not found, splays the tree around its nearest parent. Returns the new
root.
O(n), amortized O(log n).
"""
n = self._find(value) # find and splay relevant node
n._splay()
if n.value == value: # only if value actually found
left, right = n._uproot()
# there is a left child: splay around its maximum, connect to right
if left is not None:
while left.right is not None:
left = left.right
left._splay()
left.right = right
if right is not None:
right.parent = left
n = left
# there is no left child: all we need is the right
else:
n = right
return n # new root of the entire tree
def contains_deprecated(self, value):
"""
Returns whether or not the specified value exists in this tree. Does
not splay the tree, since it does not return a node.
O(n), amortized O(log n).
"""
n = self._find(value)
return n.value == value
def contains(self, value):
"""
Returns whether or not the specified value exists in this tree. Splays
the tree, and returns a tuple of (bool, newRoot).
O(n), amortized O(log n).
"""
n = self.search(value)
return (n.value==value, n)
def getRoot(self):
"""
Returns the root of the tree this node is in. Helpful if the root was
changed but pointers were not updated.
O(n), amortized O(log n).
"""
n = self
while n.parent is not None:
n = n.parent
return n
def __iter__(self):
"""
Generates a sorted traversal of the values in the tree.
O(n^2), amortized O(n log n), but on average much better because one
long _successor call typically causes many short ones.
"""
n = self.getRoot()
while n.left is not None:
n = n.left
while True:
yield n.value
n = n._successor()
if n is None:
break
def __str__(self):
# recur in children
left = str(self.left) if self.left is not None else ""
right = str(self.right) if self.right is not None else ""
# combine with this node's string
return "(" + left + str(self.value) + right + ")"
def _find(self, value):
"""
Finds the given value in the tree rooted at this tree, or its would-be
parent if not found. Runs in time linear in the height of the tree.
Does not splay the tree. Will raise an error if value is not comparable
with the values already in the tree.
O(n), amortized O(log n)
"""
# case 1: look deeper, left
if self.value > value and self.left is not None:
return self.left._find(value)
# case 2: look deeper, right
if self.value < value and self.right is not None:
return self.right._find(value)
# case 3: found it, or nothing to find
else:
return self
def _splay(self):
"""
Splay the tree around the node until it is the root.
O(n), amortized O(log n)
"""
if self.parent is not None: # case 1: already the root -> do nothing
grandparent = self.parent.parent
if grandparent is None: # case 2: one rotation to root
self._zig()
else: # case 3: multiple rotations to root
rightOfLeft = self.parent.right is self and grandparent.left is self.parent
leftOfRight = self.parent.left is self and grandparent.right is self.parent
if rightOfLeft or leftOfRight: # case 3a: zigzag
self._zig()
self._zig()
else: # case 3b: zigzig
self.parent._zig()
self._zig()
self._splay() # recur until case 1 or 2 applies
def _zig(self):
"""
Perform one zig (or zag) operation to rotate the node upward.
O(1)
"""
if self.parent is not None:
if self is self.parent.left:
self._rotateClockwise()
elif self is self.parent.right:
self._rotateCounterclockwise()
def _rotateClockwise(self):
"""
Perform one AVL-style rotation clockwise. Requires a rightward parent.
O(1)
"""
p, g, r = self.parent, self.parent.parent, self.right
# connect right child to parent
if r is not None:
r.parent = p
p.left = r
# move parent down + right
p.parent = self
self.right = p
# move this node up + right
self.parent = g
if g is not None:
if p is g.left:
g.left = self
elif p is g.right:
g.right = self
def _rotateCounterclockwise(self):
"""
Perform one AVL-style rotation counterclockwise. Requires a leftward parent.
O(1)
"""
p, g, l = self.parent, self.parent.parent, self.left
# connect left child to parent
if l is not None:
l.parent = p
p.right = l
# move parent down + left
p.parent = self
self.left = p
# move this node up + left
self.parent = g
if g is not None:
if p is g.left:
g.left = self
elif p is g.right:
g.right = self
def _uproot(self):
"""
Detaches the root node from its children and returns them.
O(1)
"""
left, right = self.left, self.right
if left is not None:
left.parent = None
if right is not None:
right.parent = None
return left, right
def _successor(self):
"""
Gets the successor to this node. Useful for making an inorder
traversal, e.g. to print as sorted.
"""
if self.right is None:
# get first rightward ancestor
m = self
n = m.parent
while n is not None and m is n.right:
m = n
n = n.parent
else:
# get leftmost of right child
n = self.right
while n.left is not None:
n = n.left
return n
class SplayTree():
def __init__(self, typing=None):
self.typing = typing # may delay typing and infer upon first addition
self.root = None
self._size = 0
@property # treeInstance.size may be accessed like an attribute, but not set
def size(self):
return self._size
def insert(self, value):
| """inserts value into tree. sets type if this hasn't been done yet."""
if self.typing is None: # first insertion: set type of this tree
self.typing = type(value)
else: # perform type check
if type(value) != self.typing:
raise TypeError("Type " + str(type(value)) + " is incompatible" +
" with tree of type " + str(self.typing) + ".")
# TODO allow different yet comparable types
# if no error:
if self.root is None:
self.root = SplayNode(value)
else:
self.root = self.root.insert(value)
self._size += 1 | identifier_body |
|
splayTree.py | .value = value
self.parent = None
self.left = None
self.right = None
def search(self, value):
n = self._find(value)
n._splay()
return n
| Inserts a new node with the specified value into the tree, which is then
splayed around it.
O(n), amortized O(log n).
"""
insertion_point = self._find(value)
n = SplayNode(value)
# value already in the tree; add at leftmost position in right subtree
if value == insertion_point.value:
if insertion_point.right is None:
insertion_point.right = n
n.parent = insertion_point
else:
insertion_point = insertion_point.right
while insertion_point.left is not None:
insertion_point = insertion_point.left
insertion_point.left = n
n.parent = insertion_point
# value belongs to the left
elif value < insertion_point.value:
insertion_point.left = n
n.parent = insertion_point
# value belongs to the right
else:
insertion_point.right = n
n.parent = insertion_point
n._splay()
return n # return new root
def delete(self, value):
"""
Searches for the specified value. If found, splays the tree around it;
removes it from the tree; finds its immediate predecessor; splays the
left subtree around that node; and attaches it to the right subtree. If
not found, splays the tree around its nearest parent. Returns the new
root.
O(n), amortized O(log n).
"""
n = self._find(value) # find and splay relevant node
n._splay()
if n.value == value: # only if value actually found
left, right = n._uproot()
# there is a left child: splay around its maximum, connect to right
if left is not None:
while left.right is not None:
left = left.right
left._splay()
left.right = right
if right is not None:
right.parent = left
n = left
# there is no left child: all we need is the right
else:
n = right
return n # new root of the entire tree
def contains_deprecated(self, value):
"""
Returns whether or not the specified value exists in this tree. Does
not splay the tree, since it does not return a node.
O(n), amortized O(log n).
"""
n = self._find(value)
return n.value == value
def contains(self, value):
"""
Returns whether or not the specified value exists in this tree. Splays
the tree, and returns a tuple of (bool, newRoot).
O(n), amortized O(log n).
"""
n = self.search(value)
return (n.value==value, n)
def getRoot(self):
"""
Returns the root of the tree this node is in. Helpful if the root was
changed but pointers were not updated.
O(n), amortized O(log n).
"""
n = self
while n.parent is not None:
n = n.parent
return n
def __iter__(self):
"""
Generates a sorted traversal of the values in the tree.
O(n^2), amortized O(n log n), but on average much better because one
long _successor call typically causes many short ones.
"""
n = self.getRoot()
while n.left is not None:
n = n.left
while True:
yield n.value
n = n._successor()
if n is None:
break
def __str__(self):
# recur in children
left = str(self.left) if self.left is not None else ""
right = str(self.right) if self.right is not None else ""
# combine with this node's string
return "(" + left + str(self.value) + right + ")"
def _find(self, value):
"""
Finds the given value in the tree rooted at this tree, or its would-be
parent if not found. Runs in time linear in the height of the tree.
Does not splay the tree. Will raise an error if value is not comparable
with the values already in the tree.
O(n), amortized O(log n)
"""
# case 1: look deeper, left
if self.value > value and self.left is not None:
return self.left._find(value)
# case 2: look deeper, right
if self.value < value and self.right is not None:
return self.right._find(value)
# case 3: found it, or nothing to find
else:
return self
def _splay(self):
"""
Splay the tree around the node until it is the root.
O(n), amortized O(log n)
"""
if self.parent is not None: # case 1: already the root -> do nothing
grandparent = self.parent.parent
if grandparent is None: # case 2: one rotation to root
self._zig()
else: # case 3: multiple rotations to root
rightOfLeft = self.parent.right is self and grandparent.left is self.parent
leftOfRight = self.parent.left is self and grandparent.right is self.parent
if rightOfLeft or leftOfRight: # case 3a: zigzag
self._zig()
self._zig()
else: # case 3b: zigzig
self.parent._zig()
self._zig()
self._splay() # recur until case 1 or 2 applies
def _zig(self):
"""
Perform one zig (or zag) operation to rotate the node upward.
O(1)
"""
if self.parent is not None:
if self is self.parent.left:
self._rotateClockwise()
elif self is self.parent.right:
self._rotateCounterclockwise()
def _rotateClockwise(self):
"""
Perform one AVL-style rotation clockwise. Requires a rightward parent.
O(1)
"""
p, g, r = self.parent, self.parent.parent, self.right
# connect right child to parent
if r is not None:
r.parent = p
p.left = r
# move parent down + right
p.parent = self
self.right = p
# move this node up + right
self.parent = g
if g is not None:
if p is g.left:
g.left = self
elif p is g.right:
g.right = self
def _rotateCounterclockwise(self):
"""
Perform one AVL-style rotation counterclockwise. Requires a leftward parent.
O(1)
"""
p, g, l = self.parent, self.parent.parent, self.left
# connect left child to parent
if l is not None:
l.parent = p
p.right = l
# move parent down + left
p.parent = self
self.left = p
# move this node up + left
self.parent = g
if g is not None:
if p is g.left:
g.left = self
elif p is g.right:
g.right = self
def _uproot(self):
"""
Detaches the root node from its children and returns them.
O(1)
"""
left, right = self.left, self.right
if left is not None:
left.parent = None
if right is not None:
right.parent = None
return left, right
def _successor(self):
"""
Gets the successor to this node. Useful for making an inorder
traversal, e.g. to print as sorted.
"""
if self.right is None:
# get first rightward ancestor
m = self
n = m.parent
while n is not None and m is n.right:
m = n
n = n.parent
else:
# get leftmost of right child
n = self.right
while n.left is not None:
n = n.left
return n
class SplayTree():
def __init__(self, typing=None):
self.typing = typing # may delay typing and infer upon first addition
self.root = None
self._size = 0
@property # treeInstance.size may be accessed like an attribute, but not set
def size(self):
return self._size
def insert(self, value):
"""inserts value into tree. sets type if this hasn't been done yet."""
if self.typing is None: # first insertion: set type of this tree
self.typing = type(value)
else: # perform type check
if type(value) != self.typing:
raise TypeError("Type " + str(type(value)) + " is incompatible" +
" with tree of type " + str(self.typing) + ".")
# TODO allow different yet comparable types
# if no error:
if self.root is None:
self.root = SplayNode(value)
else:
self.root = self | def insert(self, value):
""" | random_line_split |
splayTree.py | = value
self.parent = None
self.left = None
self.right = None
def search(self, value):
n = self._find(value)
n._splay()
return n
def insert(self, value):
"""
Inserts a new node with the specified value into the tree, which is then
splayed around it.
O(n), amortized O(log n).
"""
insertion_point = self._find(value)
n = SplayNode(value)
# value already in the tree; add at leftmost position in right subtree
if value == insertion_point.value:
if insertion_point.right is None:
insertion_point.right = n
n.parent = insertion_point
else:
insertion_point = insertion_point.right
while insertion_point.left is not None:
insertion_point = insertion_point.left
insertion_point.left = n
n.parent = insertion_point
# value belongs to the left
elif value < insertion_point.value:
insertion_point.left = n
n.parent = insertion_point
# value belongs to the right
else:
insertion_point.right = n
n.parent = insertion_point
n._splay()
return n # return new root
def | (self, value):
"""
Searches for the specified value. If found, splays the tree around it;
removes it from the tree; finds its immediate predecessor; splays the
left subtree around that node; and attaches it to the right subtree. If
not found, splays the tree around its nearest parent. Returns the new
root.
O(n), amortized O(log n).
"""
n = self._find(value) # find and splay relevant node
n._splay()
if n.value == value: # only if value actually found
left, right = n._uproot()
# there is a left child: splay around its maximum, connect to right
if left is not None:
while left.right is not None:
left = left.right
left._splay()
left.right = right
if right is not None:
right.parent = left
n = left
# there is no left child: all we need is the right
else:
n = right
return n # new root of the entire tree
def contains_deprecated(self, value):
"""
Returns whether or not the specified value exists in this tree. Does
not splay the tree, since it does not return a node.
O(n), amortized O(log n).
"""
n = self._find(value)
return n.value == value
def contains(self, value):
"""
Returns whether or not the specified value exists in this tree. Splays
the tree, and returns a tuple of (bool, newRoot).
O(n), amortized O(log n).
"""
n = self.search(value)
return (n.value==value, n)
def getRoot(self):
"""
Returns the root of the tree this node is in. Helpful if the root was
changed but pointers were not updated.
O(n), amortized O(log n).
"""
n = self
while n.parent is not None:
n = n.parent
return n
def __iter__(self):
"""
Generates a sorted traversal of the values in the tree.
O(n^2), amortized O(n log n), but on average much better because one
long _successor call typically causes many short ones.
"""
n = self.getRoot()
while n.left is not None:
n = n.left
while True:
yield n.value
n = n._successor()
if n is None:
break
def __str__(self):
# recur in children
left = str(self.left) if self.left is not None else ""
right = str(self.right) if self.right is not None else ""
# combine with this node's string
return "(" + left + str(self.value) + right + ")"
def _find(self, value):
"""
Finds the given value in the tree rooted at this tree, or its would-be
parent if not found. Runs in time linear in the height of the tree.
Does not splay the tree. Will raise an error if value is not comparable
with the values already in the tree.
O(n), amortized O(log n)
"""
# case 1: look deeper, left
if self.value > value and self.left is not None:
return self.left._find(value)
# case 2: look deeper, right
if self.value < value and self.right is not None:
return self.right._find(value)
# case 3: found it, or nothing to find
else:
return self
def _splay(self):
"""
Splay the tree around the node until it is the root.
O(n), amortized O(log n)
"""
if self.parent is not None: # case 1: already the root -> do nothing
grandparent = self.parent.parent
if grandparent is None: # case 2: one rotation to root
self._zig()
else: # case 3: multiple rotations to root
rightOfLeft = self.parent.right is self and grandparent.left is self.parent
leftOfRight = self.parent.left is self and grandparent.right is self.parent
if rightOfLeft or leftOfRight: # case 3a: zigzag
self._zig()
self._zig()
else: # case 3b: zigzig
self.parent._zig()
self._zig()
self._splay() # recur until case 1 or 2 applies
def _zig(self):
"""
Perform one zig (or zag) operation to rotate the node upward.
O(1)
"""
if self.parent is not None:
if self is self.parent.left:
self._rotateClockwise()
elif self is self.parent.right:
self._rotateCounterclockwise()
def _rotateClockwise(self):
"""
Perform one AVL-style rotation clockwise. Requires a rightward parent.
O(1)
"""
p, g, r = self.parent, self.parent.parent, self.right
# connect right child to parent
if r is not None:
r.parent = p
p.left = r
# move parent down + right
p.parent = self
self.right = p
# move this node up + right
self.parent = g
if g is not None:
if p is g.left:
g.left = self
elif p is g.right:
g.right = self
def _rotateCounterclockwise(self):
"""
Perform one AVL-style rotation counterclockwise. Requires a leftward parent.
O(1)
"""
p, g, l = self.parent, self.parent.parent, self.left
# connect left child to parent
if l is not None:
l.parent = p
p.right = l
# move parent down + left
p.parent = self
self.left = p
# move this node up + left
self.parent = g
if g is not None:
if p is g.left:
g.left = self
elif p is g.right:
g.right = self
def _uproot(self):
"""
Detaches the root node from its children and returns them.
O(1)
"""
left, right = self.left, self.right
if left is not None:
left.parent = None
if right is not None:
right.parent = None
return left, right
def _successor(self):
"""
Gets the successor to this node. Useful for making an inorder
traversal, e.g. to print as sorted.
"""
if self.right is None:
# get first rightward ancestor
m = self
n = m.parent
while n is not None and m is n.right:
m = n
n = n.parent
else:
# get leftmost of right child
n = self.right
while n.left is not None:
n = n.left
return n
class SplayTree():
def __init__(self, typing=None):
self.typing = typing # may delay typing and infer upon first addition
self.root = None
self._size = 0
@property # treeInstance.size may be accessed like an attribute, but not set
def size(self):
return self._size
def insert(self, value):
"""inserts value into tree. sets type if this hasn't been done yet."""
if self.typing is None: # first insertion: set type of this tree
self.typing = type(value)
else: # perform type check
if type(value) != self.typing:
raise TypeError("Type " + str(type(value)) + " is incompatible" +
" with tree of type " + str(self.typing) + ".")
# TODO allow different yet comparable types
# if no error:
if self.root is None:
self.root = SplayNode(value)
else:
self.root = self | delete | identifier_name |
catalog.py | from sqlalchemy.orm import sessionmaker
from database_setup import Base, Category, Item, User
import random
import string
from oauth2client.client import flow_from_clientsecrets, FlowExchangeError
import httplib2
import json
import requests
CLIENT_ID = json.loads(open(
'client_secrets.json',
'r').read())['web']['client_id']
APPLICATION_NAME = "Catalog App"
app = Flask(__name__)
engine = create_engine('sqlite:///catalogwithusers.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# load static global list of categories. Not editable in this version.
categories = session.query(Category).order_by(Category.name)
# Google sign-in Oauth2 success response - initialize login session
@app.route('/gconnect', methods=['POST'])
def gconnect():
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps(
'Invalid state parameter'), 401)
response.headers['Content-Type'] = 'application/json'
return response
code = request.data
try:
# Upgrade the authorization code into a credentials object
oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(json.dumps(
'Failed to upgrade the authorization code'), 401)
response.headers['Content-Type'] = 'application/json'
return response
access_token = credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/'
'tokeninfo?access_token=%s' % access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
if result.get('error') is not None:
response = make_response(json.dumps(result.get('error')), 500)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is used for the intended user
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
response = make_response(json.dumps(
"Token's user ID doesn't match given user ID.", 401))
response.headers['Content-Type'] = 'application/json'
return response
if result['issued_to'] != CLIENT_ID:
response = make_response(json.dumps(
"Token's client ID does not match app's."), 401)
print "Token's client ID does not match app's."
response.headers['Content-Type'] = 'application/json'
return response
stored_credentials = login_session.get('credentials')
stored_gplus_id = login_session.get('gplus_id')
if stored_credentials is not None and gplus_id == stored_gplus_id:
response = make_response(json.dumps(
'Current user is already connected.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
# Store the access token in the session for later use
login_session['credentials'] = access_token
login_session['gplus_id'] = gplus_id
# Get user info
userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {
'access_token': credentials.access_token,
'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
login_session['username'] = data['name']
login_session['email'] = data['email']
# create new user if user doesn't already exist
user_id = getUserID(login_session['email'])
if not user_id:
user_id = createUser(login_session)
login_session['user_id'] = user_id
else:
login_session['user_id'] = user_id
output = "<p>You are now logged in as " + login_session['username']+"<p>"
return output
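# Shape of the tokeninfo payload gconnect relies on (illustrative, trimmed
# to the fields actually read above; values are made up):
# {"issued_to": "<client id>", "user_id": "1234567890", "error": null}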
# Logout - revoke current user token and reset login_session
@app.route('/logout/', methods=['POST'])
def logout():
# only logout a user who has already logged in
credentials = login_session.get('credentials')
if credentials is None:
return 'Current user is not logged in.'
# revoke current token
url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % credentials
h = httplib2.Http()
result = h.request(url, 'GET')[0]
if result['status'] == '200':
# reset user session
del login_session['credentials']
del login_session['gplus_id']
del login_session['username']
del login_session['email']
del login_session['user_id']
return 'Successfully logged out.'
else:
return 'Failed to revoke token for given user.'
# main catalog - latest 10 items in descending datetime order
@app.route('/')
@app.route('/catalog/')
def catalog():
state = ''.join(random.choice(
string.ascii_uppercase + string.digits)
for x in xrange(32))
login_session['state'] = state
items = session.query(Item).order_by(Item.created.desc()).limit(10)
if 'username' not in login_session:
return render_template(
'publiccatalog.html',
categories=categories,
items=items, STATE=state)
return render_template(
'catalog.html',
categories=categories,
items=items,
STATE=state)
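# The 32-character state value is the usual anti-forgery nonce: it is kept
# in login_session, rendered into the page, and must be echoed back to
# /gconnect, where it is compared before the code exchange. Hypothetical
# client-side round trip:
# requests.post('/gconnect?state=' + state, data=auth_code)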
# single category listing - all items in category
@app.route('/catalog/<category>/')
def showCategory(category):
cat = session.query(Category).filter_by(name=category).one_or_none()
if cat is not None:
catItems = session.query(Item).filter_by(
category_id=cat.id).order_by(Item.name)
if 'username' not in login_session:
return render_template(
'publiccategory.html',
category=category,
categories=categories,
items=catItems)
return render_template(
'category.html',
category=category,
categories=categories,
items=catItems)
return redirect(url_for('catalog'))
# new item creation
@app.route('/catalog/new/', methods=['GET', 'POST'])
def newItem():
if 'username' not in login_session:
flash('Not authorized to create new item.')
return redirect('/catalog/')
if request.method == 'POST':
newItem = Item(
name=request.form['name'],
description=request.form['description'],
category_id=int(request.form['category']),
user_id=login_session['user_id'])
session.add(newItem)
session.commit()
flash('New item created!')
return redirect(url_for('catalog'))
else:
return render_template('newItem.html', categories=categories)
# single item listing
@app.route('/catalog/<category>/<item>/')
def showItem(category, item):
showItem = session.query(Item).filter_by(name=item).one_or_none()
if showItem is not None:
creator = getUserInfo(showItem.user_id)
if 'username' in login_session:
if creator.id == login_session['user_id']:
return render_template('item.html', item=showItem)
return render_template('publicitem.html', item=showItem)
return redirect(url_for('catalog'))
# JSON API endpoint for single item name and description
@app.route('/catalog/<category>/<item>/api/')
def itemApi(category, item):
apiItem = session.query(Item).filter_by(name=item).one_or_none()
if apiItem is not None:
return jsonify(item=apiItem.serialize)
return redirect(url_for('catalog'))
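# Example call against the endpoint above (host/port and the category and
# item names are invented):
# curl http://localhost:5000/catalog/Snowboarding/Goggles/api/
# -> {"item": {"name": "Goggles", "description": "..."}}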
# edit item
@app.route('/catalog/<item>/edit/', methods=['GET', 'POST'])
def editItem(item):
editItem = session.query(Item).filter_by(name=item).one_or_none()
if editItem is not None:
creator = getUserInfo(editItem.user_id)
if 'username' in login_session:
if creator.id == login_session['user_id']:
if request.method == 'POST':
editItem.name = request.form['name']
editItem.description = request.form['description']
editItem.category_id = request.form['category']
session.add(editItem)
session.commit()
flash('Item edited!')
return redirect(
url_for(
'showItem',
category=editItem.category.name,
item=editItem.name))
else:
return render_template(
'editItem.html',
item=editItem,
categories=categories)
flash('Not authorized to edit item.')
return redirect(url_for('catalog'))
# delete item
@app.route('/catalog/<item>/delete/', methods=['GET', 'POST'])
def | (item):
delItem = session.query(Item).filter_by(name=item).one_or_none()
if delItem is not None:
creator = getUserInfo(delItem.user_id)
if 'username' in login_session:
if creator.id == login_session['user_id']:
if request.method == 'POST':
session.delete(delItem)
session.commit()
flash('Item deleted!')
return redirect(url_for('catalog'))
else:
return render_template('deleteItem.html', item=delItem)
flash('Not authorized to delete item.')
return redirect(url_for('catalog'))
# function to retrieve user ID from email address
def getUserID(email):
try:
user = session.query(User).filter_by(email=email).one()
return user.id
except:
return None
# function to retrieve User from user ID
def getUserInfo(user_id):
user = session.query(User).filter_by(id=user_id).one()
return user | deleteItem | identifier_name |
catalog.py |
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Category, Item, User
import random
import string
from oauth2client.client import flow_from_clientsecrets, FlowExchangeError
import httplib2
import json
import requests
CLIENT_ID = json.loads(open(
'client_secrets.json',
'r').read())['web']['client_id']
APPLICATION_NAME = "Catalog App"
app = Flask(__name__)
engine = create_engine('sqlite:///catalogwithusers.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# load static global list of categories. Not editable in this version.
categories = session.query(Category).order_by(Category.name)
# Google sign-in Oauth2 success response - initialize login session
@app.route('/gconnect', methods=['POST'])
def gconnect():
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps(
'Invalid state parameter'), 401)
response.headers['Content-Type'] = 'application/json'
return response
code = request.data
try:
# Upgrade the authorization code into a credentials object
oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code) | return response
access_token = credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/'
'tokeninfo?access_token=%s' % access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
if result.get('error') is not None:
response = make_response(json.dumps(result.get('error')), 500)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is used for the intended user
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
response = make_response(json.dumps(
"Token's user ID doesn't match given user ID.", 401))
response.headers['Content-Type'] = 'application/json'
return response
if result['issued_to'] != CLIENT_ID:
response = make_response(json.dumps(
"Token's client ID does not match app's."), 401)
print "Token's client ID does not match app's."
response.headers['Content-Type'] = 'application/json'
return response
stored_credentials = login_session.get('credentials')
stored_gplus_id = login_session.get('gplus_id')
if stored_credentials is not None and gplus_id == stored_gplus_id:
response = make_response(json.dumps(
'Current user is already connected.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
# Store the access token in the session for later use
login_session['credentials'] = access_token
login_session['gplus_id'] = gplus_id
# Get user info
userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {
'access_token': credentials.access_token,
'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
login_session['username'] = data['name']
login_session['email'] = data['email']
# create new user if user doesn't already exist
user_id = getUserID(login_session['email'])
if not user_id:
user_id = createUser(login_session)
login_session['user_id'] = user_id
else:
login_session['user_id'] = user_id
output = "<p>You are now logged in as " + login_session['username']+"<p>"
return output
# Logout - revoke current user token and reset login_session
@app.route('/logout/', methods=['POST'])
def logout():
# only logout a user who has already logged in
credentials = login_session.get('credentials')
if credentials is None:
return 'Current user is not logged in.'
# revoke current token
url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % credentials
h = httplib2.Http()
result = h.request(url, 'GET')[0]
if result['status'] == '200':
# reset user session
del login_session['credentials']
del login_session['gplus_id']
del login_session['username']
del login_session['email']
del login_session['user_id']
return 'Successfully logged out.'
else:
return 'Failed to revoke token for given user.'
# main catalog - latest 10 items in descending datetime order
@app.route('/')
@app.route('/catalog/')
def catalog():
state = ''.join(random.choice(
string.ascii_uppercase + string.digits)
for x in xrange(32))
login_session['state'] = state
items = session.query(Item).order_by(Item.created.desc()).limit(10)
if 'username' not in login_session:
return render_template(
'publiccatalog.html',
categories=categories,
items=items, STATE=state)
return render_template(
'catalog.html',
categories=categories,
items=items,
STATE=state)
# single category listing - all items in category
@app.route('/catalog/<category>/')
def showCategory(category):
cat = session.query(Category).filter_by(name=category).one_or_none()
if cat is not None:
catItems = session.query(Item).filter_by(
category_id=cat.id).order_by(Item.name)
if 'username' not in login_session:
return render_template(
'publiccategory.html',
category=category,
categories=categories,
items=catItems)
return render_template(
'category.html',
category=category,
categories=categories,
items=catItems)
return redirect(url_for('catalog'))
# new item creation
@app.route('/catalog/new/', methods=['GET', 'POST'])
def newItem():
if 'username' not in login_session:
flash('Not authorized to create new item.')
return redirect('/catalog/')
if request.method == 'POST':
newItem = Item(
name=request.form['name'],
description=request.form['description'],
category_id=int(request.form['category']),
user_id=login_session['user_id'])
session.add(newItem)
session.commit()
flash('New item created!')
return redirect(url_for('catalog'))
else:
return render_template('newItem.html', categories=categories)
# single item listing
@app.route('/catalog/<category>/<item>/')
def showItem(category, item):
showItem = session.query(Item).filter_by(name=item).one_or_none()
if showItem is not None:
creator = getUserInfo(showItem.user_id)
if 'username' in login_session:
if creator.id == login_session['user_id']:
return render_template('item.html', item=showItem)
return render_template('publicitem.html', item=showItem)
return redirect(url_for('catalog'))
# JSON API endpoint for single item name and description
@app.route('/catalog/<category>/<item>/api/')
def itemApi(category, item):
apiItem = session.query(Item).filter_by(name=item).one_or_none()
if apiItem is not None:
return jsonify(item=apiItem.serialize)
return redirect(url_for('catalog'))
# edit item
@app.route('/catalog/<item>/edit/', methods=['GET', 'POST'])
def editItem(item):
editItem = session.query(Item).filter_by(name=item).one_or_none()
if editItem is not None:
creator = getUserInfo(editItem.user_id)
if 'username' in login_session:
if creator.id == login_session['user_id']:
if request.method == 'POST':
editItem.name = request.form['name']
editItem.description = request.form['description']
editItem.category_id = request.form['category']
session.add(editItem)
session.commit()
flash('Item edited!')
return redirect(
url_for(
'showItem',
category=editItem.category.name,
item=editItem.name))
else:
return render_template(
'editItem.html',
item=editItem,
categories=categories)
flash('Not authorized to edit item.')
return redirect(url_for('catalog'))
# delete item
@app.route('/catalog/<item>/delete/', methods=['GET', 'POST'])
def deleteItem(item):
delItem = session.query(Item).filter_by(name=item).one_or_none()
if delItem is not None:
creator = getUserInfo(delItem.user_id)
if 'username' in login_session:
if creator.id == login_session['user_id']:
if request.method == 'POST':
session.delete(delItem)
session.commit()
flash('Item deleted!')
return redirect(url_for('catalog'))
else:
return render_template('deleteItem.html', item=delItem)
flash('Not authorized to delete item.')
return redirect(url_for('catalog'))
# function to retrieve user ID from email address
def getUserID(email):
try:
user = session.query(User).filter_by(email=email).one()
return user.id
except:
return None
# function to retrieve User from user ID
def getUserInfo(user_id):
user = session.query(User).filter_by(id=user_id).one()
return user | except FlowExchangeError:
response = make_response(json.dumps(
'Failed to upgrade the authorization code'), 401)
response.headers['Content-Type'] = 'application/json' | random_line_split |
catalog.py |
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Category, Item, User
import random
import string
from oauth2client.client import flow_from_clientsecrets, FlowExchangeError
import httplib2
import json
import requests
CLIENT_ID = json.loads(open(
'client_secrets.json',
'r').read())['web']['client_id']
APPLICATION_NAME = "Catalog App"
app = Flask(__name__)
engine = create_engine('sqlite:///catalogwithusers.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# load static global list of categories. Not editable in this version.
categories = session.query(Category).order_by(Category.name)
# Google sign-in Oauth2 success response - initialize login session
@app.route('/gconnect', methods=['POST'])
def gconnect():
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps(
'Invalid state parameter'), 401)
response.headers['Content-Type'] = 'application/json'
return response
code = request.data
try:
# Upgrade the authorization code into a credentials object
oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(json.dumps(
'Failed to upgrade the authorization code'), 401)
response.headers['Content-Type'] = 'application/json'
return response
access_token = credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/'
'tokeninfo?access_token=%s' % access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
    if result.get('error') is not None:
        response = make_response(json.dumps(result.get('error')), 500)
        response.headers['Content-Type'] = 'application/json'
        return response
# Verify that the access token is used for the intended user
gplus_id = credentials.id_token['sub']
    if result['user_id'] != gplus_id:
        response = make_response(json.dumps(
            "Token's user ID doesn't match given user ID."), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
if result['issued_to'] != CLIENT_ID:
response = make_response(json.dumps(
"Token's client ID does not match app's."), 401)
print "Token's client ID does not match app's."
response.headers['Content-Type'] = 'application/json'
return response
stored_credentials = login_session.get('credentials')
stored_gplus_id = login_session.get('gplus_id')
    if stored_credentials is not None and gplus_id == stored_gplus_id:
        response = make_response(json.dumps(
            'Current user is already connected.'), 200)
        response.headers['Content-Type'] = 'application/json'
        return response
# Store the access token in the session for later use
login_session['credentials'] = access_token
login_session['gplus_id'] = gplus_id
# Get user info
userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {
'access_token': credentials.access_token,
'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
login_session['username'] = data['name']
login_session['email'] = data['email']
# create new user if user doesn't already exist
user_id = getUserID(login_session['email'])
if not user_id:
user_id = createUser(login_session)
login_session['user_id'] = user_id
else:
login_session['user_id'] = user_id
    output = "<p>You are now logged in as " + login_session['username'] + "</p>"
return output
# Logout - revoke current user token and reset login_session
@app.route('/logout/', methods=['POST'])
def logout():
# only logout a user who has already logged in
credentials = login_session.get('credentials')
if credentials is None:
return 'Current user is not logged in.'
# revoke current token
url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % credentials
h = httplib2.Http()
result = h.request(url, 'GET')[0]
if result['status'] == '200':
# reset user session
del login_session['credentials']
del login_session['gplus_id']
del login_session['username']
del login_session['email']
del login_session['user_id']
return 'Successfully logged out.'
else:
return 'Failed to revoke token for given user.'
# main catalog - latest 10 items in descending datetime order
@app.route('/')
@app.route('/catalog/')
def catalog():
state = ''.join(random.choice(
string.ascii_uppercase + string.digits)
for x in xrange(32))
login_session['state'] = state
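    # the state token is rendered into the page and must be echoed back by the
    # client on /gconnect, guarding the OAuth callback against forged requests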
items = session.query(Item).order_by(Item.created.desc()).limit(10)
if 'username' not in login_session:
return render_template(
'publiccatalog.html',
categories=categories,
items=items, STATE=state)
return render_template(
'catalog.html',
categories=categories,
items=items,
STATE=state)
# single category listing - all items in category
@app.route('/catalog/<category>/')
def showCategory(category):
cat = session.query(Category).filter_by(name=category).one_or_none()
if cat is not None:
catItems = session.query(Item).filter_by(
category_id=cat.id).order_by(Item.name)
if 'username' not in login_session:
return render_template(
'publiccategory.html',
category=category,
categories=categories,
items=catItems)
return render_template(
'category.html',
category=category,
categories=categories,
items=catItems)
return redirect(url_for('catalog'))
# new item creation
@app.route('/catalog/new/', methods=['GET', 'POST'])
def newItem():
if 'username' not in login_session:
flash('Not authorized to create new item.')
return redirect('/catalog/')
if request.method == 'POST':
newItem = Item(
name=request.form['name'],
description=request.form['description'],
category_id=int(request.form['category']),
user_id=login_session['user_id'])
session.add(newItem)
session.commit()
flash('New item created!')
return redirect(url_for('catalog'))
else:
return render_template('newItem.html', categories=categories)
# single item listing
@app.route('/catalog/<category>/<item>/')
def showItem(category, item):
    showItem = session.query(Item).filter_by(name=item).one_or_none()
    if showItem is not None:
        creator = getUserInfo(showItem.user_id)
if 'username' in login_session:
if creator.id == login_session['user_id']:
return render_template('item.html', item=showItem)
return render_template('publicitem.html', item=showItem)
return redirect(url_for('catalog'))
# JSON API endpoint for single item name and description
@app.route('/catalog/<category>/<item>/api/')
def itemApi(category, item):
apiItem = session.query(Item).filter_by(name=item).one_or_none()
if apiItem is not None:
return jsonify(item=apiItem.serialize)
return redirect(url_for('catalog'))
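# Example response, assuming Item.serialize (defined in database_setup) exposes
# the name and description columns:
#   {"item": {"name": "Snowboard", "description": "Best for any terrain"}}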
# edit item
@app.route('/catalog/<item>/edit/', methods=['GET', 'POST'])
def editItem(item):
editItem = session.query(Item).filter_by(name=item).one_or_none()
if editItem is not None:
creator = getUserInfo(editItem.user_id)
if 'username' in login_session:
            if creator.id == login_session['user_id']:
if request.method == 'POST':
editItem.name = request.form['name']
editItem.description = request.form['description']
editItem.category_id = request.form['category']
session.add(editItem)
session.commit()
flash('Item edited!')
return redirect(
url_for(
'showItem',
category=editItem.category.name,
item=editItem.name))
else:
return render_template(
'editItem.html',
item=editItem,
categories=categories)
flash('Not authorized to edit item.')
return redirect(url_for('catalog'))
# delete item
@app.route('/catalog/<item>/delete/', methods=['GET', 'POST'])
def deleteItem(item):
delItem = session.query(Item).filter_by(name=item).one_or_none()
if delItem is not None:
creator = getUserInfo(delItem.user_id)
if 'username' in login_session:
            if creator.id == login_session['user_id']:
                if request.method == 'POST':
                    session.delete(delItem)
                    session.commit()
                    flash('Item deleted!')
                    return redirect(url_for('catalog'))
else:
return render_template('deleteItem.html', item=delItem)
    flash('Not authorized to delete item.')
return redirect(url_for('catalog'))
# function to retrieve user ID from email address
def getUserID(email):
    try:
        user = session.query(User).filter_by(email=email).one()
        return user.id
    except Exception:
        return None
# function to retrieve User from user ID
def getUserInfo(user_id):
    user = session.query(User).filter_by(id=user_id).one()
    return user
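# NOTE: createUser() is called from gconnect() above, but its definition did
# not survive in this capture; a minimal sketch consistent with that usage:
def createUser(login_session):
    newUser = User(name=login_session['username'], email=login_session['email'])
    session.add(newUser)
    session.commit()
    user = session.query(User).filter_by(email=login_session['email']).one()
    return user.id
# NOTE: the file's closing block was also cut off; a typical one for this kind
# of app (secret key and port are placeholders):
if __name__ == '__main__':
    app.secret_key = 'super_secret_key'
    app.run(host='0.0.0.0', port=8000, debug=True)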
consumer.go
// NOTE: the opening of this file (package clause, imports, and the Config and
// helper types such as ZK, Claims, Notifier and newGUID) was cut off in the
// capture. normalize() below is rejoined from its first surviving statement,
// assuming the usual LogNotifier default.
func (c *Config) normalize() {
	if c.Notifier == nil {
		c.Notifier = &LogNotifier{Logger}
}
if c.CommitEvery < 10*time.Millisecond {
c.CommitEvery = 0
}
	if c.DefaultOffsetMode != sarama.OffsetOldest && c.DefaultOffsetMode != sarama.OffsetNewest {
c.DefaultOffsetMode = sarama.OffsetOldest
}
if c.ZKSessionTimeout == 0 {
c.ZKSessionTimeout = time.Second
}
}
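// For example, a zero-valued Config normalizes to the LogNotifier, periodic
// commits disabled, OffsetOldest as the default offset mode, and a 1s
// ZooKeeper session timeout.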
type Consumer struct {
id, group, topic string
client sarama.Client
consumer sarama.Consumer
config *Config
zoo *ZK
messages chan *sarama.ConsumerMessage
errors chan *sarama.ConsumerError
read map[int32]int64
rLock sync.Mutex
acked map[int32]int64
aLock sync.Mutex
partIDs []int32
pLock sync.Mutex
notifier Notifier
closer tomb.Tomb
ownClient bool
}
// NewConsumer creates a new consumer instance.
// You MUST call Close() to avoid leaks.
func NewConsumer(addrs, zookeepers []string, group, topic string, config *Config) (*Consumer, error) {
if config == nil {
config = new(Config)
}
client, err := sarama.NewClient(addrs, config.Config)
if err != nil {
return nil, err
}
c, err := NewConsumerFromClient(client, zookeepers, group, topic, config)
if err != nil {
client.Close()
return nil, err
}
c.ownClient = true
return c, nil
}
// NewConsumerFromClient creates a new consumer for a given topic, reusing an existing client.
// You MUST call Close() to avoid leaks.
func NewConsumerFromClient(client sarama.Client, zookeepers []string, group, topic string, config *Config) (*Consumer, error) {
if config == nil {
config = new(Config)
}
config.normalize()
// Validate configuration
if err := config.Validate(); err != nil {
return nil, err
} else if topic == "" {
return nil, sarama.ConfigurationError("Empty topic")
} else if group == "" {
return nil, sarama.ConfigurationError("Empty group")
}
// Generate unique consumer ID
id := config.customID
if id == "" {
prefix := config.IDPrefix
if prefix == "" {
prefix = group
}
id = newGUID(prefix)
}
// Create sarama consumer instance
scsmr, err := sarama.NewConsumerFromClient(client)
if err != nil {
return nil, err
}
// Connect to zookeeper
zoo, err := NewZK(zookeepers, config.ZKSessionTimeout)
if err != nil {
scsmr.Close()
return nil, err
}
// Initialize consumer
consumer := &Consumer{
id: id,
group: group,
topic: topic,
zoo: zoo,
config: config,
client: client,
consumer: scsmr,
read: make(map[int32]int64),
acked: make(map[int32]int64),
partIDs: make([]int32, 0),
messages: make(chan *sarama.ConsumerMessage),
errors: make(chan *sarama.ConsumerError),
}
// Register consumer group and consumer itself
if err := consumer.register(); err != nil {
consumer.closeAll()
return nil, err
}
consumer.closer.Go(consumer.signalLoop)
if config.CommitEvery > 0 {
consumer.closer.Go(consumer.commitLoop)
}
return consumer, nil
}
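// A minimal usage sketch (hypothetical broker and zookeeper addresses; error
// handling elided). With a nil Config, messages must be Ack'd explicitly and
// the outstanding offsets are committed on Close():
//
//	consumer, err := NewConsumer([]string{"localhost:9092"}, []string{"localhost:2181"}, "my-group", "my-topic", nil)
//	if err != nil {
//		panic(err)
//	}
//	defer consumer.Close()
//	for msg := range consumer.Messages() {
//		// process msg, then mark it so the next commit persists its offset
//		consumer.Ack(msg)
//	}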
// Messages returns the read channel for the messages that are returned by the broker
func (c *Consumer) Messages() <-chan *sarama.ConsumerMessage { return c.messages }
// Errors returns the read channel for any errors that occurred while consuming a partition.
// You have to read this channel to prevent the consumer from deadlocking.
func (c *Consumer) Errors() <-chan *sarama.ConsumerError { return c.errors }
// Claims exposes the currently claimed partition IDs.
func (c *Consumer) Claims() []int32 {
c.pLock.Lock()
ids := c.partIDs
c.pLock.Unlock()
return ids
}
// ID exposes the consumer ID
func (c *Consumer) ID() string { return c.id }
// Group exposes the group name
func (c *Consumer) Group() string { return c.group }
// Topic exposes the group topic
func (c *Consumer) Topic() string { return c.topic }
// Offset manually retrieves the stored offset for a partition ID.
func (c *Consumer) Offset(partitionID int32) (int64, error) {
return c.zoo.Offset(c.group, c.topic, partitionID)
}
// Ack marks a consumer message as processed and stores the offset
// for the next Commit() call.
func (c *Consumer) Ack(msg *sarama.ConsumerMessage) {
c.aLock.Lock()
if msg.Offset > c.acked[msg.Partition] {
c.acked[msg.Partition] = msg.Offset
}
c.aLock.Unlock()
}
// Commit persists ack'd offsets
func (c *Consumer) Commit() error {
snap := c.resetAcked()
if len(snap) < 1 {
return nil
}
for partitionID, offset := range snap {
// fmt.Printf("$,%s,%d,%d\n", c.id, partitionID, offset+1)
if err := c.zoo.Commit(c.group, c.topic, partitionID, offset+1); err != nil {
return err
}
}
return nil
}
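// Note the offset+1 above: by Kafka convention the stored value is the offset
// of the *next* message to consume, so a restarted consumer resumes after the
// last acked message instead of re-reading it.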
// Close closes the consumer instance.
// Also triggers a final Commit() call.
func (c *Consumer) Close() error {
c.closer.Kill(nil)
return c.closer.Wait()
}
// LOOPS
// Main signal loop
func (c *Consumer) signalLoop() error {
claims := make(Claims)
for {
// Check if shutdown was requested
select {
case <-c.closer.Dying():
return c.shutdown(claims)
default:
}
// Start a rebalance cycle
watch, err := c.rebalance(claims)
if err != nil {
c.config.Notifier.RebalanceError(c, err)
c.reset(claims)
continue
}
// Start a goroutine for each partition
done := make(chan struct{})
errs := make(chan struct{}, len(claims))
wait := new(sync.WaitGroup)
for _, pcsm := range claims {
wait.Add(1)
go c.consumeLoop(done, errs, wait, pcsm)
}
// Wait for signals
select {
case <-c.closer.Dying(): // on Close()
close(done)
wait.Wait()
return c.shutdown(claims)
case <-watch: // on rebalance signal
close(done)
wait.Wait()
case <-errs: // on consume errors
close(done)
wait.Wait()
}
}
}
// Commit loop, triggers periodic commits configured in CommitEvery
func (c *Consumer) commitLoop() error {
for {
select {
case <-c.closer.Dying():
return nil
case <-time.After(c.config.CommitEvery):
}
if err := c.Commit(); err != nil {
c.config.Notifier.CommitError(c, err)
}
}
}
// Message consumer loop for a single partition consumer
func (c *Consumer) consumeLoop(done, errs chan struct{}, wait *sync.WaitGroup, pcsm sarama.PartitionConsumer) {
defer wait.Done()
for {
select {
case msg := <-pcsm.Messages():
// fmt.Printf("*,%s,%d,%d\n", c.id, msg.Partition, msg.Offset)
select {
case c.messages <- msg:
// fmt.Printf("+,%s,%d,%d\n", c.id, msg.Partition, msg.Offset)
c.rLock.Lock()
c.read[msg.Partition] = msg.Offset + 1
c.rLock.Unlock()
if c.config.AutoAck {
c.Ack(msg)
}
case <-done:
// fmt.Printf("@,%s\n", c.id)
return
}
case msg := <-pcsm.Errors():
if msg.Err == sarama.ErrOffsetOutOfRange {
offset, err := c.client.GetOffset(c.topic, msg.Partition, sarama.EarliestOffset)
if err == nil {
c.rLock.Lock()
c.read[msg.Partition] = offset
c.rLock.Unlock()
}
errs <- struct{}{}
}
select {
case c.errors <- msg:
// fmt.Printf("!,%s,%d,%s\n", c.id, msg.Partition, msg.Error())
case <-done:
// fmt.Printf("@,%s\n", c.id)
return
}
case <-done:
// fmt.Printf("@,%s\n", c.id)
return
}
}
}
// PRIVATE
// Shutdown the consumer, triggered by the main loop
func (c *Consumer) shutdown(claims Claims) error {
err := c.reset(claims)
c.closeAll()
return err
}
// Close all connections and channels
func (c *Consumer) closeAll() {
	close(c.messages)
	close(c.errors)
	c.zoo.Close()
	c.consumer.Close()
	if c.ownClient {
		c.client.Close()
	}
}
// Rebalance cycle, triggered by the main loop
func (c *Consumer) rebalance(claims Claims) (<-chan zk.Event, error) {
	c.config.Notifier.RebalanceStart(c)
	// Commit and release existing claims
	if err := c.reset(claims); err != nil {
		return nil, err
	}
	// Fetch consumer list
	consumerIDs, watch, err := c.zoo.Consumers(c.group, c.topic)
	if err != nil {
		return nil, err
	}
	// Fetch partitions list
	partitions, err := c.partitions()
	if err != nil {
		return nil, err
	}
	// Determine partitions and claim if changed
	partitions = partitions.Select(c.id, consumerIDs)
	// Make new claims
	for _, part := range partitions {
		pcsm, err := c.claim(part.ID)
		if err != nil {
			return nil, err
		}
		claims[part.ID] = pcsm
	}
	c.pLock.Lock()
	c.partIDs = claims.PartitionIDs()
	c.pLock.Unlock()
	c.config.Notifier.RebalanceOK(c)
	return watch, nil
}
// Commits offset and releases all claims
func (c *Consumer) reset(claims Claims) (err error) {
	// Commit BEFORE releasing locks on partitions
	err = c.Commit()
	// Close all existing consumers (async)
	wait := sync.WaitGroup{}
	for _, pcsm := range claims {
		wait.Add(1)
		go func(c sarama.PartitionConsumer) {
			defer wait.Done()
			c.Close()
		}(pcsm)
	}
	wait.Wait()
	// Release claimed partitions, ignore errors
	for partitionID := range claims {
		c.zoo.Release(c.group, c.topic, partitionID, c.id)
		delete(claims, partitionID)
	}
	return
}
// Claims a partition
func (c *Consumer) claim(partitionID int32) (sarama.PartitionConsumer, error) {
	err := c.zoo.Claim(c.group, c.topic, partitionID, c.id)
	if err != nil {
		return nil, err
	}
	offset, err := c.Offset(partitionID)
	if err != nil {
		return nil, err
	} else if offset < 1 {
		offset = c.config.DefaultOffsetMode
	}
	c.rLock.Lock()
	last := c.read[partitionID]
	c.rLock.Unlock()
	if offset < last {
		offset = last
	}
	// fmt.Printf(">,%s,%d,%d\n", c.id, partitionID, offset)
	return c.consumer.ConsumePartition(c.topic, partitionID, offset)
}
// Registers consumer with zookeeper
func (c *Consumer) register() error {
	if err := c.zoo.RegisterGroup(c.group, c.topic); err != nil {
		return err
	}
	if err := c.zoo.RegisterConsumer(c.group, c.id, c.topic); err != nil {
		return err
	}
	return nil
}
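// resetAcked swaps out the acked-offsets map and returns the old snapshot.
// NOTE: reconstructed; the original definition did not survive the capture,
// and this shape is inferred from the Ack() and Commit() usage above.
func (c *Consumer) resetAcked() map[int32]int64 {
	c.aLock.Lock()
	snap := c.acked
	c.acked = make(map[int32]int64)
	c.aLock.Unlock()
	return snap
}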
meetingPlanner.py
import datetime
#NOTE: the top of this file was cut off in the capture. DateData below is a
#reconstruction inferred from how it is used (a set of absent users plus a
#cached degree count); treat it as an assumed shape, not the original text.
class DateData:
    def __init__(self):
        self.usersAbsent=set()
        self.degree=0
#the DateGraph class, which hosts our graph of date nodes (one DateData per date)
class DateGraph:
def __init__(self,begDate,endDate):
listDates = getDateRange(begDate,endDate)
#list that holds dates in the graph and maps each date to date data
self.dates={}
#at initialization (initializes data for every date in the range)
for x in listDates:
self.dates[x]=DateData()
#list of users that are all present
self.completelyPresent=set()
#list of users that are completely absent
self.completelyAbsent=set()
#adding a specific node based on user-timedate key pair
    def addUserDate(self,user,timedate):
        #date that the user is absent must be a valid date (guaranteed to exist upon initialization of the graph)
        dataToConsider = self.dates[timedate]
        dataToConsider.usersAbsent.add(user)
        dataToConsider.degree=len(dataToConsider.usersAbsent)
#adding a user that is completely absent
def addAbsentUser(self,user):
self.completelyAbsent.add(user)
#adding a user that is completely present
def addPresentUser(self,user):
self.completelyPresent.add(user)
#getting all the users that are absent on a specific date
def getAbsentUsers(self,timedate):
absentees=[]
for x in self.dates[timedate].usersAbsent:
absentees.append(x)
for x in self.completelyAbsent:
absentees.append(x)
return absentees
#counts the degree of a node
    def countDegree(self,timedate):
        #the degree is simply the number of users marked absent on this date
        return len(self.dates[timedate].usersAbsent)
#method to get the best dates in the graph
def getBestDates(self):
#goes through all the dates and gets a list of the best dates (based on least people missing)
#i do one pass to find the date with the least people absent (least degree)
#then i do another pass to add other dates with the same degree and add them to the list
bestDate=None
bestDates=[]
for x in self.dates:
            if (bestDate is None):
                bestDate=x
            else:
if (self.countDegree(bestDate)>self.countDegree(x)):
#then x has the least people absent
bestDate=x
#second pass to add all the dates with this least associativity
for x in self.dates:
if (self.countDegree(x)==self.countDegree(bestDate)):
bestDates.append(x)
#returning the list of best dates, ready to be printed out to the user
return bestDates
#function to print out the list of best dates and the users attending
def printBestDates(self):
listDates = self.getBestDates()
if (len(listDates)>1):
print("Best Dates and Absentees:\n")
print("------------------------------------------------------\n")
else :
print("Best Date and Absentees:\n")
print("------------------------------------------------------\n")
#now iterating through the dates given and printing out the date and the absentees
for x in listDates:
print(x.strftime("%b/%d/%Y\n"))
print("Absent:")
listAbsent = self.getAbsentUsers(x)
if (len(listAbsent)==0):
print("NOBODY ABSENT!!!")
else:
                for i,y in enumerate(listAbsent):
                    if (i!=len(listAbsent)-1):
                        print(" "+y+",")
                    else:
                        print(" "+y)
print("\n")
print("------------------------------------------------------\n")
#some functions to help with date stuff
#gets all the dates in the given range (returns a list of datetimes)
def getDateRange(begDate,endDate):
testDate=datetime.date(begDate.year,begDate.month,begDate.day)
dates=[]
while(testDate <=endDate):
dates.append(testDate)
testDate+=datetime.timedelta(days=1)
return dates
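#e.g. getDateRange(date(2020,1,30), date(2020,2,2)) yields the four
#datetime.date objects Jan 30, Jan 31, Feb 1 and Feb 2 (endpoints inclusive)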
#function to check if a date is in the range specified by the user
def isValidDate(enteredDate, begDate,endDate):
if (enteredDate<=endDate and enteredDate>=begDate):
return True
else:
return False
#function to turn a string in the form "mm/dd/yyyy" into a datetime.date
def getDateTime(enteredDate):
dateTime =datetime.datetime.strptime(enteredDate,"%m/%d/%Y")
return datetime.date(dateTime.year,dateTime.month,dateTime.day)
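#e.g. getDateTime("02/29/2020") -> datetime.date(2020, 2, 29); a malformed
#string makes strptime raise ValueError, which callers below catch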
#When entering availability, a person can be marked ALL ABSENT or ALL PRESENT
#in addition to listing individual dates or ranges. A fully absent person is
#tracked separately rather than attached to every date node, and a fully
#present person needs no entries in the graph at all.
def main():
#creating a graph to use
while True:
dateRange = input("Please enter a date range for the event in the format mm/dd/yyyy:mm/dd/yyyy\n")
#removing whitespace from the entire date, if any
dateRange=dateRange.replace(" ","")
if (len(dateRange)!=21):
print("Date range could not be recognized! Please enter a valid date range in the format mm/dd/yyyy:mm/dd/yyyy\n")
continue
#splitting up ending date string and beginning date string
dateRange=dateRange.split(":")
begDate=dateRange[0]
endDate=dateRange[1]
#turning our date strings into actual date objects for future usefulness
#exception checking
try:
begDate=getDateTime(begDate)
endDate=getDateTime(endDate)
        except ValueError:
print("The date range could not be recognized! Please try again in the format mm/dd/yyyy:mm/dd/yyyy\n")
continue
if (endDate<begDate):
print("The ending date is earlier than the starting date! Please enter a valid date range!")
continue
#creating our date graph with the given date range
graph = DateGraph(begDate,endDate)
#list of all usernames entered in this operation
users=set()
print("Now you will be prompted to enter information for each user associated with this event: \n")
break
while True:
user=input("please enter the person's name. Enter - if you want to stop.\n")
user=user.replace(" ","")
if (user=="-"):
break
else :
users.add(user)
#this is so user cannot select allAbsent or allpresent after picking a date
userSelectedDate=False
while True:
dates=None
if (userSelectedDate):
dates=input("Please enter an individual date in the form mm/dd/yyyy or a range in the form mm/dd/yyyy:mm/dd/yyyy, or - if you wish to stop entering dates.")
else:
dates=input("Please enter either ALL ABSENT, ALL PRESENT, a date range of the form mm/dd/yyyy:mm/dd/yyyy in which the individual WOULD BE ABSENT, or an individual date of the form mm/dd/yyyy in which the individual WOULD BE ABSENT and hit enter. When you wish to stop please enter - .\n")
dates=dates.replace(" ","")
dates=dates.lower()
if (dates=="allabsent"):
if (userSelectedDate):
print("I am sorry, it seems there was an invalid input! Please try again: \n")
else:
#we have to check off all absent
graph.addAbsentUser(user)
break
elif (dates=="-"):
#then we stop
break
elif (dates=="allpresent"):
if (userSelectedDate):
print("I am sorry, it seems there was an invalid input! Please try again: \n")
else:
#we have to check off all present
#in other words, do nothing, because all present doesnt really matter
break
elif (len(dates)==21 and dates[10]==":"):
#we have a date range
#we should check if start date is valid and end date is valid
dates=dates.split(":")
start = dates[0]
end = dates[1]
#exception handling
try:
start = getDateTime(start)
end=getDateTime(end)
                    except ValueError:
print("The date range could not be recognized. Please try again in the format mm/dd/yyyy:mm/dd/yyyy \n")
continue
if (isValidDate(start,begDate,endDate) and isValidDate(end,begDate,endDate)):
#then i need to add all the dates in this range to the dictionary
datesList = getDateRange(start,end)
for x in datesList:
#I need to add the date user pair to the graph
graph.addUserDate(user,x)
userSelectedDate=True
                    else:
                        print("One or both dates fall outside the event's range! Please try again.\n")
                #NOTE: the rest of main() was truncated in the capture; the
                #single-date branch and the final report below are a
                #reconstruction consistent with the prompts above, not the
                #original code.
                elif (len(dates)==10):
                    #we have an individual date
                    try:
                        single=getDateTime(dates)
                    except ValueError:
                        print("The date could not be recognized! Please try again in the format mm/dd/yyyy\n")
                        continue
                    if (isValidDate(single,begDate,endDate)):
                        graph.addUserDate(user,single)
                        userSelectedDate=True
                    else:
                        print("The date falls outside the event's range! Please try again.\n")
                else:
                    print("I am sorry, it seems there was an invalid input! Please try again: \n")
    #report the best dates once everyone has been entered
    graph.printBestDates()
if __name__=="__main__":
    main()
meetingPlanner.py | class, which hosts our graph of DateNode nodes
class DateGraph:
def __init__(self,begDate,endDate):
listDates = getDateRange(begDate,endDate)
#list that holds dates in the graph and maps each date to date data
self.dates={}
#at initialization (initializes data for every date in the range)
for x in listDates:
self.dates[x]=DateData()
#list of users that are all present
self.completelyPresent=set()
#list of users that are completely absent
self.completelyAbsent=set()
#adding a specific node based on user-timedate key pair
def addUserDate(self,user,timedate):
#date that the user is absent must be a valid date (guarunteed to exist upon initialization of the graph)
dataToConsider = self.dates[timedate]
dataToConsider.usersAbsent.add(user)
dataToConsider.degree=len(dataToConsider.usersAbsent)
#adding a user that is completely absent
def addAbsentUser(self,user):
self.completelyAbsent.add(user)
#adding a user that is completely present
def addPresentUser(self,user):
self.completelyPresent.add(user)
#getting all the users that are absent on a specific date
def getAbsentUsers(self,timedate):
absentees=[]
for x in self.dates[timedate].usersAbsent:
absentees.append(x)
for x in self.completelyAbsent:
absentees.append(x)
return absentees
#counts the degree of a node
def countDegree(self,timedate):
#count the degree here
#we just count the number of users associated to this date
count=0
for person in self.dates[timedate].usersAbsent:
count+=1
return count
#method to get the best dates in the graph
def getBestDates(self):
#goes through all the dates and gets a list of the best dates (based on least people missing)
#i do one pass to find the date with the least people absent (least degree)
#then i do another pass to add other dates with the same degree and add them to the list
bestDate=None
bestDates=[]
for x in self.dates:
if (bestDate==None):
bestDate=x
else :
if (self.countDegree(bestDate)>self.countDegree(x)):
#then x has the least people absent
bestDate=x
#second pass to add all the dates with this least associativity
for x in self.dates:
if (self.countDegree(x)==self.countDegree(bestDate)):
bestDates.append(x)
#returning the list of best dates, ready to be printed out to the user
return bestDates
#function to print out the list of best dates and the users attending
def printBestDates(self):
listDates = self.getBestDates()
if (len(listDates)>1):
print("Best Dates and Absentees:\n")
print("------------------------------------------------------\n")
else :
print("Best Date and Absentees:\n")
print("------------------------------------------------------\n")
#now iterating through the dates given and printing out the date and the absentees
for x in listDates:
print(x.strftime("%b/%d/%Y\n"))
print("Absent:")
listAbsent = self.getAbsentUsers(x)
if (len(listAbsent)==0):
print("NOBODY ABSENT!!!")
else:
for y in listAbsent:
if (y!=len(listAbsent)-1):
print(" "+y+",")
else:
print(" "+y)
print("\n")
print("------------------------------------------------------\n")
#some functions to help with date stuff
#gets all the dates in the given range (returns a list of datetimes)
def getDateRange(begDate,endDate):
testDate=datetime.date(begDate.year,begDate.month,begDate.day)
dates=[]
while(testDate <=endDate):
dates.append(testDate)
testDate+=datetime.timedelta(days=1)
return dates
#function to check if a date is in the range specified by the user
def isValidDate(enteredDate, begDate,endDate):
if (enteredDate<=endDate and enteredDate>=begDate):
return True
else:
return False
#function to turn a string in form of "mm/dd/yy" into a datetime
def getDateTime(enteredDate):
#print("date"+enteredDate)
#print("month"+enteredDate[0:2]+"\n")
#print("day"+enteredDate[3:5]+"\n")
#print("year"+enteredDate[6:10]+"\n")
#date=datetime.date(int(enteredDate[6:9]),int(enteredDate[0:1]),int(enteredDate[3:4]))
#return date
dateTime =datetime.datetime.strptime(enteredDate,"%m/%d/%Y")
return datetime.date(dateTime.year,dateTime.month,dateTime.day)
#when user is entering data about other users, they can optionally say ALL ABSENT Except ..., ALL ABSENT, NOT ABSENT, NOT ABSENT except..., in addition to just listing dates
#if a user is all absent, then they can just be excluded from the graph basically.
#likewise, if a user is not absent, they can also be excluded from the graph
def main():
#creating a graph to use
while True:
dateRange = input("Please enter a date range for the event in the format mm/dd/yyyy:mm/dd/yyyy\n")
#removing whitespace from the entire date, if any
dateRange=dateRange.replace(" ","")
if (len(dateRange)!=21):
print("Date range could not be recognized! Please enter a valid date range in the format mm/dd/yyyy:mm/dd/yyyy\n")
continue
#splitting up ending date string and beginning date string
dateRange=dateRange.split(":")
begDate=dateRange[0]
endDate=dateRange[1]
#turning our date strings into actual date objects for future usefulness
#exception checking
try:
begDate=getDateTime(begDate)
endDate=getDateTime(endDate)
except :
print("The date range could not be recognized! Please try again in the format mm/dd/yyyy:mm/dd/yyyy\n")
continue
if (endDate<begDate):
print("The ending date is earlier than the starting date! Please enter a valid date range!")
continue
#creating our date graph with the given date range
graph = DateGraph(begDate,endDate)
#list of all usernames entered in this operation
users=set()
print("Now you will be prompted to enter information for each user associated with this event: \n")
break
while True:
user=input("please enter the person's name. Enter - if you want to stop.\n")
user=user.replace(" ","")
if (user=="-"):
break
else :
users.add(user)
#this is so user cannot select allAbsent or allpresent after picking a date
userSelectedDate=False
        while True:
            dates=None
            if (userSelectedDate):
                dates=input("Please enter an individual date in the form mm/dd/yyyy or a range in the form mm/dd/yyyy:mm/dd/yyyy, or - if you wish to stop entering dates.")
            else:
                dates=input("Please enter either ALL ABSENT, ALL PRESENT, a date range of the form mm/dd/yyyy:mm/dd/yyyy in which the individual WOULD BE ABSENT, or an individual date of the form mm/dd/yyyy in which the individual WOULD BE ABSENT and hit enter. When you wish to stop please enter - .\n")
            dates=dates.replace(" ","")
            dates=dates.lower()
            if (dates=="allabsent"):
                if (userSelectedDate):
                    print("I am sorry, it seems there was an invalid input! Please try again: \n")
                else:
                    #we have to check off all absent
                    graph.addAbsentUser(user)
                    break
            elif (dates=="-"):
                #then we stop
                break
            elif (dates=="allpresent"):
                if (userSelectedDate):
                    print("I am sorry, it seems there was an invalid input! Please try again: \n")
                else:
                    #we have to check off all present
                    #in other words, do nothing, because all present doesn't really matter
                    break
            elif (len(dates)==21 and dates[10]==":"):
                #we have a date range
                #we should check if start date is valid and end date is valid
                dates=dates.split(":")
                start = dates[0]
                end = dates[1]
                #exception handling
                try:
                    start = getDateTime(start)
                    end=getDateTime(end)
                except:
                    print("The date range could not be recognized. Please try again in the format mm/dd/yyyy:mm/dd/yyyy \n")
                    continue
                if (isValidDate(start,begDate,endDate) and isValidDate(end,begDate,endDate)):
                    #then I need to add all the dates in this range to the dictionary
                    datesList = getDateRange(start,end)
                    for x in datesList:
                        #I need to add the date user pair to the graph
                        graph.addUserDate(user,x)
                    userSelectedDate=True
                else:
meetingPlanner.py
#DateGraph class, which hosts our graph of DateNode nodes
class DateGraph:
def __init__(self,begDate,endDate):
listDates = getDateRange(begDate,endDate)
#list that holds dates in the graph and maps each date to date data
self.dates={}
#at initialization (initializes data for every date in the range)
for x in listDates:
self.dates[x]=DateData()
#list of users that are all present
self.completelyPresent=set()
#list of users that are completely absent
self.completelyAbsent=set()
#adding a specific node based on user-timedate key pair
def addUserDate(self,user,timedate):
        #date that the user is absent must be a valid date (guaranteed to exist upon initialization of the graph)
dataToConsider = self.dates[timedate]
dataToConsider.usersAbsent.add(user)
dataToConsider.degree=len(dataToConsider.usersAbsent)
#adding a user that is completely absent
def addAbsentUser(self,user):
self.completelyAbsent.add(user)
#adding a user that is completely present
def addPresentUser(self,user):
self.completelyPresent.add(user)
#getting all the users that are absent on a specific date
    def getAbsentUsers(self,timedate):
absentees=[]
for x in self.dates[timedate].usersAbsent:
absentees.append(x)
for x in self.completelyAbsent:
absentees.append(x)
return absentees
#counts the degree of a node
def countDegree(self,timedate):
#count the degree here
#we just count the number of users associated to this date
count=0
for person in self.dates[timedate].usersAbsent:
count+=1
return count
#method to get the best dates in the graph
def getBestDates(self):
#goes through all the dates and gets a list of the best dates (based on least people missing)
        #I do one pass to find the date with the least people absent (least degree)
        #then I do another pass to add other dates with the same degree and add them to the list
bestDate=None
bestDates=[]
for x in self.dates:
if (bestDate==None):
bestDate=x
else :
if (self.countDegree(bestDate)>self.countDegree(x)):
#then x has the least people absent
bestDate=x
        #second pass to add all the dates with this same lowest degree
for x in self.dates:
if (self.countDegree(x)==self.countDegree(bestDate)):
bestDates.append(x)
#returning the list of best dates, ready to be printed out to the user
return bestDates
#function to print out the list of best dates and the users attending
def printBestDates(self):
listDates = self.getBestDates()
if (len(listDates)>1):
print("Best Dates and Absentees:\n")
print("------------------------------------------------------\n")
else :
print("Best Date and Absentees:\n")
print("------------------------------------------------------\n")
#now iterating through the dates given and printing out the date and the absentees
for x in listDates:
print(x.strftime("%b/%d/%Y\n"))
print("Absent:")
listAbsent = self.getAbsentUsers(x)
if (len(listAbsent)==0):
print("NOBODY ABSENT!!!")
else:
                for i,y in enumerate(listAbsent):
                    if (i!=len(listAbsent)-1):
                        print(" "+y+",")
                    else:
                        print(" "+y)
print("\n")
print("------------------------------------------------------\n")
#some functions to help with date stuff
#gets all the dates in the given range (returns a list of datetimes)
def getDateRange(begDate,endDate):
testDate=datetime.date(begDate.year,begDate.month,begDate.day)
dates=[]
while(testDate <=endDate):
dates.append(testDate)
testDate+=datetime.timedelta(days=1)
return dates
#function to check if a date is in the range specified by the user
def isValidDate(enteredDate, begDate,endDate):
if (enteredDate<=endDate and enteredDate>=begDate):
return True
else:
return False
#function to turn a string in form of "mm/dd/yyyy" into a datetime
def getDateTime(enteredDate):
#print("date"+enteredDate)
#print("month"+enteredDate[0:2]+"\n")
#print("day"+enteredDate[3:5]+"\n")
#print("year"+enteredDate[6:10]+"\n")
#date=datetime.date(int(enteredDate[6:9]),int(enteredDate[0:1]),int(enteredDate[3:4]))
#return date
dateTime =datetime.datetime.strptime(enteredDate,"%m/%d/%Y")
return datetime.date(dateTime.year,dateTime.month,dateTime.day)
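#Illustrative usage sketch (added for clarity; not part of the original script).
#Assumes the DateGraph class and the date helpers defined above; the user name
#"alice" and the dates are made-up values.
def _exampleUsage():
    beg = getDateTime("01/05/2024")
    end = getDateTime("01/09/2024")
    graph = DateGraph(beg, end)
    graph.addUserDate("alice", getDateTime("01/06/2024"))
    graph.printBestDates()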
#when user is entering data about other users, they can optionally say ALL ABSENT Except ..., ALL ABSENT, NOT ABSENT, NOT ABSENT except..., in addition to just listing dates
#if a user is all absent, then they can just be excluded from the graph basically.
#likewise, if a user is not absent, they can also be excluded from the graph
def main():
#creating a graph to use
while True:
dateRange = input("Please enter a date range for the event in the format mm/dd/yyyy:mm/dd/yyyy\n")
#removing whitespace from the entire date, if any
dateRange=dateRange.replace(" ","")
if (len(dateRange)!=21):
print("Date range could not be recognized! Please enter a valid date range in the format mm/dd/yyyy:mm/dd/yyyy\n")
continue
#splitting up ending date string and beginning date string
dateRange=dateRange.split(":")
begDate=dateRange[0]
endDate=dateRange[1]
#turning our date strings into actual date objects for future usefulness
#exception checking
try:
begDate=getDateTime(begDate)
endDate=getDateTime(endDate)
except :
print("The date range could not be recognized! Please try again in the format mm/dd/yyyy:mm/dd/yyyy\n")
continue
if (endDate<begDate):
print("The ending date is earlier than the starting date! Please enter a valid date range!")
continue
#creating our date graph with the given date range
graph = DateGraph(begDate,endDate)
#list of all usernames entered in this operation
users=set()
print("Now you will be prompted to enter information for each user associated with this event: \n")
break
while True:
user=input("please enter the person's name. Enter - if you want to stop.\n")
user=user.replace(" ","")
if (user=="-"):
break
else :
users.add(user)
#this is so user cannot select allAbsent or allpresent after picking a date
userSelectedDate=False
while True:
dates=None
if (userSelectedDate):
dates=input("Please enter an individual date in the form mm/dd/yyyy or a range in the form mm/dd/yyyy:mm/dd/yyyy, or - if you wish to stop entering dates.")
else:
dates=input("Please enter either ALL ABSENT, ALL PRESENT, a date range of the form mm/dd/yyyy:mm/dd/yyyy in which the individual WOULD BE ABSENT, or an individual date of the form mm/dd/yyyy in which the individual WOULD BE ABSENT and hit enter. When you wish to stop please enter - .\n")
dates=dates.replace(" ","")
dates=dates.lower()
if (dates=="allabsent"):
if (userSelectedDate):
print("I am sorry, it seems there was an invalid input! Please try again: \n")
else:
#we have to check off all absent
graph.addAbsentUser(user)
break
elif (dates=="-"):
#then we stop
break
elif (dates=="allpresent"):
if (userSelectedDate):
print("I am sorry, it seems there was an invalid input! Please try again: \n")
else:
#we have to check off all present
                    #in other words, do nothing, because all present doesn't really matter
break
elif (len(dates)==21 and dates[10]==":"):
#we have a date range
#we should check if start date is valid and end date is valid
dates=dates.split(":")
start = dates[0]
end = dates[1]
#exception handling
try:
start = getDateTime(start)
end=getDateTime(end)
except:
print("The date range could not be recognized. Please try again in the format mm/dd/yyyy:mm/dd/yyyy \n")
continue
if (isValidDate(start,begDate,endDate) and isValidDate(end,begDate,endDate)):
#then i need to add all the dates in this range to the dictionary
datesList = getDateRange(start,end)
for x in datesList:
#I need to add the date user pair to the graph
graph.addUserDate(user,x)
userSelectedDate=True
                else:
meetingPlanner.py
#DateGraph class, which hosts our graph of DateNode nodes
class DateGraph:
def __init__(self,begDate,endDate):
listDates = getDateRange(begDate,endDate)
#list that holds dates in the graph and maps each date to date data
self.dates={}
#at initialization (initializes data for every date in the range)
for x in listDates:
self.dates[x]=DateData()
#list of users that are all present
self.completelyPresent=set()
#list of users that are completely absent
self.completelyAbsent=set()
#adding a specific node based on user-timedate key pair
def addUserDate(self,user,timedate):
        #date that the user is absent must be a valid date (guaranteed to exist upon initialization of the graph)
dataToConsider = self.dates[timedate]
dataToConsider.usersAbsent.add(user)
dataToConsider.degree=len(dataToConsider.usersAbsent)
#adding a user that is completely absent
def addAbsentUser(self,user):
self.completelyAbsent.add(user)
#adding a user that is completely present
def addPresentUser(self,user):
self.completelyPresent.add(user)
#getting all the users that are absent on a specific date
def getAbsentUsers(self,timedate):
absentees=[]
for x in self.dates[timedate].usersAbsent:
absentees.append(x)
for x in self.completelyAbsent:
absentees.append(x)
return absentees
#counts the degree of a node
def countDegree(self,timedate):
#count the degree here
#we just count the number of users associated to this date
count=0
for person in self.dates[timedate].usersAbsent:
count+=1
return count
#method to get the best dates in the graph
def getBestDates(self):
#goes through all the dates and gets a list of the best dates (based on least people missing)
        #I do one pass to find the date with the least people absent (least degree)
        #then I do another pass to add other dates with the same degree and add them to the list
bestDate=None
bestDates=[]
for x in self.dates:
if (bestDate==None):
bestDate=x
else :
if (self.countDegree(bestDate)>self.countDegree(x)):
#then x has the least people absent
bestDate=x
        #second pass to add all the dates with this same lowest degree
for x in self.dates:
if (self.countDegree(x)==self.countDegree(bestDate)):
bestDates.append(x)
#returning the list of best dates, ready to be printed out to the user
return bestDates
#function to print out the list of best dates and the users attending
def printBestDates(self):
listDates = self.getBestDates()
if (len(listDates)>1):
print("Best Dates and Absentees:\n")
print("------------------------------------------------------\n")
else :
print("Best Date and Absentees:\n")
print("------------------------------------------------------\n")
#now iterating through the dates given and printing out the date and the absentees
for x in listDates:
print(x.strftime("%b/%d/%Y\n"))
print("Absent:")
listAbsent = self.getAbsentUsers(x)
if (len(listAbsent)==0):
print("NOBODY ABSENT!!!")
else:
                for i,y in enumerate(listAbsent):
                    if (i!=len(listAbsent)-1):
                        print(" "+y+",")
                    else:
                        print(" "+y)
            print("\n")
print("------------------------------------------------------\n")
#some functions to help with date stuff
#gets all the dates in the given range (returns a list of datetimes)
def getDateRange(begDate,endDate):
testDate=datetime.date(begDate.year,begDate.month,begDate.day)
dates=[]
while(testDate <=endDate):
dates.append(testDate)
testDate+=datetime.timedelta(days=1)
return dates
#function to check if a date is in the range specified by the user
def isValidDate(enteredDate, begDate,endDate):
if (enteredDate<=endDate and enteredDate>=begDate):
return True
else:
return False
#function to turn a string in form of "mm/dd/yyyy" into a datetime
def getDateTime(enteredDate):
#print("date"+enteredDate)
#print("month"+enteredDate[0:2]+"\n")
#print("day"+enteredDate[3:5]+"\n")
#print("year"+enteredDate[6:10]+"\n")
#date=datetime.date(int(enteredDate[6:9]),int(enteredDate[0:1]),int(enteredDate[3:4]))
#return date
dateTime =datetime.datetime.strptime(enteredDate,"%m/%d/%Y")
return datetime.date(dateTime.year,dateTime.month,dateTime.day)
#when user is entering data about other users, they can optionally say ALL ABSENT Except ..., ALL ABSENT, NOT ABSENT, NOT ABSENT except..., in addition to just listing dates
#if a user is all absent, then they can just be excluded from the graph basically.
#likewise, if a user is not absent, they can also be excluded from the graph
def main():
#creating a graph to use
while True:
dateRange = input("Please enter a date range for the event in the format mm/dd/yyyy:mm/dd/yyyy\n")
#removing whitespace from the entire date, if any
dateRange=dateRange.replace(" ","")
if (len(dateRange)!=21):
print("Date range could not be recognized! Please enter a valid date range in the format mm/dd/yyyy:mm/dd/yyyy\n")
continue
#splitting up ending date string and beginning date string
dateRange=dateRange.split(":")
begDate=dateRange[0]
endDate=dateRange[1]
#turning our date strings into actual date objects for future usefulness
#exception checking
try:
begDate=getDateTime(begDate)
endDate=getDateTime(endDate)
except :
print("The date range could not be recognized! Please try again in the format mm/dd/yyyy:mm/dd/yyyy\n")
continue
if (endDate<begDate):
print("The ending date is earlier than the starting date! Please enter a valid date range!")
continue
#creating our date graph with the given date range
graph = DateGraph(begDate,endDate)
#list of all usernames entered in this operation
users=set()
print("Now you will be prompted to enter information for each user associated with this event: \n")
break
while True:
user=input("please enter the person's name. Enter - if you want to stop.\n")
user=user.replace(" ","")
if (user=="-"):
break
else :
users.add(user)
#this is so user cannot select allAbsent or allpresent after picking a date
userSelectedDate=False
while True:
dates=None
if (userSelectedDate):
dates=input("Please enter an individual date in the form mm/dd/yyyy or a range in the form mm/dd/yyyy:mm/dd/yyyy, or - if you wish to stop entering dates.")
else:
dates=input("Please enter either ALL ABSENT, ALL PRESENT, a date range of the form mm/dd/yyyy:mm/dd/yyyy in which the individual WOULD BE ABSENT, or an individual date of the form mm/dd/yyyy in which the individual WOULD BE ABSENT and hit enter. When you wish to stop please enter - .\n")
dates=dates.replace(" ","")
dates=dates.lower()
if (dates=="allabsent"):
if (userSelectedDate):
print("I am sorry, it seems there was an invalid input! Please try again: \n")
else:
#we have to check off all absent
graph.addAbsentUser(user)
break
elif (dates=="-"):
#then we stop
break
elif (dates=="allpresent"):
if (userSelectedDate):
print("I am sorry, it seems there was an invalid input! Please try again: \n")
else:
#we have to check off all present
                    #in other words, do nothing, because all present doesn't really matter
break
elif (len(dates)==21 and dates[10]==":"):
#we have a date range
#we should check if start date is valid and end date is valid
dates=dates.split(":")
start = dates[0]
end = dates[1]
#exception handling
try:
start = getDateTime(start)
end=getDateTime(end)
except:
print("The date range could not be recognized. Please try again in the format mm/dd/yyyy:mm/dd/yyyy \n")
continue
if (isValidDate(start,begDate,endDate) and isValidDate(end,begDate,endDate)):
#then i need to add all the dates in this range to the dictionary
datesList = getDateRange(start,end)
for x in datesList:
#I need to add the date user pair to the graph
graph.addUserDate(user,x)
userSelectedDate=True
                else:
supervisor.go
// SupervisorStrategyRestForOne If one child process terminates and is to be restarted, the 'rest' of the child processes (that is, the child
// processes after the terminated child process in the start order)
// are terminated. Then the terminated child process and all
// child processes after it are restarted
SupervisorStrategyRestForOne = SupervisorStrategyType("rest_for_one")
// SupervisorStrategySimpleOneForOne A simplified one_for_one supervisor, where all
// child processes are dynamically added instances
// of the same process type, that is, running the same code.
SupervisorStrategySimpleOneForOne = SupervisorStrategyType("simple_one_for_one")
// Restart types:
// SupervisorStrategyRestartPermanent child process is always restarted
SupervisorStrategyRestartPermanent = SupervisorStrategyRestart("permanent")
// SupervisorStrategyRestartTemporary child process is never restarted
// (not even when the supervisor restart strategy is rest_for_one
// or one_for_all and a sibling death causes the temporary process
// to be terminated)
SupervisorStrategyRestartTemporary = SupervisorStrategyRestart("temporary")
// SupervisorStrategyRestartTransient child process is restarted only if
// it terminates abnormally, that is, with an exit reason other
// than normal, shutdown.
SupervisorStrategyRestartTransient = SupervisorStrategyRestart("transient")
supervisorChildStateStart = 0
supervisorChildStateRunning = 1
supervisorChildStateDisabled = -1
)
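// Illustrative configuration (not taken from this file) showing how the
// strategy and restart constants above are typically combined; myWorker is
// a hypothetical ProcessBehavior implementation:
//
//	spec := SupervisorSpec{
//		Name: "mySup",
//		Children: []SupervisorChildSpec{
//			{Name: "worker", Child: &myWorker{}},
//		},
//		Strategy: SupervisorStrategy{
//			Type:      SupervisorStrategyOneForOne,
//			Restart:   SupervisorStrategyRestartPermanent,
//			Intensity: 5,
//			Period:    10,
//		},
//	}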
type supervisorChildState int
// SupervisorSpec
type SupervisorSpec struct {
Name string
Children []SupervisorChildSpec
Strategy SupervisorStrategy
restarts []int64
}
// SupervisorChildSpec
type SupervisorChildSpec struct {
Name string
Child ProcessBehavior
Options ProcessOptions
Args []etf.Term
state supervisorChildState // for internal usage
process Process
}
// Supervisor is implementation of ProcessBehavior interface
type Supervisor struct{}
type messageStartChild struct {
name string
args []etf.Term
}
// ProcessInit
func (sv *Supervisor) ProcessInit(p Process, args ...etf.Term) (ProcessState, error) {
behavior, ok := p.Behavior().(SupervisorBehavior)
if !ok {
return ProcessState{}, fmt.Errorf("ProcessInit: not a SupervisorBehavior")
}
spec, err := behavior.Init(args...)
if err != nil {
return ProcessState{}, err
}
lib.Log("[%s] SUPERVISOR %q with restart strategy: %s[%s] ", p.NodeName(), p.Name(), spec.Strategy.Type, spec.Strategy.Restart)
p.SetTrapExit(true)
return ProcessState{
Process: p,
State: &spec,
}, nil
}
// ProcessLoop
func (sv *Supervisor) ProcessLoop(ps ProcessState, started chan<- bool) string {
spec := ps.State.(*SupervisorSpec)
if spec.Strategy.Type != SupervisorStrategySimpleOneForOne {
startChildren(ps, spec)
}
waitTerminatingProcesses := []etf.Pid{}
chs := ps.ProcessChannels()
started <- true
for {
select {
case ex := <-chs.GracefulExit:
if ex.From == ps.Self() {
// stop supervisor gracefully
for i := range spec.Children {
p := spec.Children[i].process
if p != nil && p.IsAlive() {
p.Exit(ex.Reason)
}
}
return ex.Reason
}
waitTerminatingProcesses = handleMessageExit(ps, ex, spec, waitTerminatingProcesses)
case <-ps.Context().Done():
return "kill"
case direct := <-chs.Direct:
value, err := handleDirect(ps, spec, direct.Message)
ps.PutSyncReply(direct.Ref, value, err)
case <-chs.Mailbox:
// do nothing
}
}
}
// StartChild dynamically starts a child process using the child spec with the given name, as defined by the Init call.
func (sv *Supervisor) StartChild(supervisor Process, name string, args ...etf.Term) (Process, error) {
message := messageStartChild{
name: name,
args: args,
}
value, err := supervisor.Direct(message)
if err != nil {
return nil, err
}
process, ok := value.(Process)
if !ok {
return nil, fmt.Errorf("internal error: can't start child %#v", value)
}
return process, nil
}
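// Hypothetical call site for StartChild (illustration only; "worker" must
// match a child spec name returned by the supervisor's Init callback):
//
//	child, err := (&Supervisor{}).StartChild(supProcess, "worker", etf.Atom("arg"))
//	if err != nil {
//		lib.Warning("can't start child: %s", err)
//	}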
func startChildren(supervisor Process, spec *SupervisorSpec) {
spec.restarts = append(spec.restarts, time.Now().Unix())
if len(spec.restarts) > int(spec.Strategy.Intensity) {
period := time.Now().Unix() - spec.restarts[0]
if period <= int64(spec.Strategy.Period) {
lib.Warning("Supervisor %q. Restart intensity is exceeded (%d restarts for %d seconds)",
spec.Name, spec.Strategy.Intensity, spec.Strategy.Period)
supervisor.Kill()
return
}
spec.restarts = spec.restarts[1:]
}
for i := range spec.Children {
switch spec.Children[i].state {
case supervisorChildStateDisabled:
spec.Children[i].process = nil
case supervisorChildStateRunning:
continue
case supervisorChildStateStart:
spec.Children[i].state = supervisorChildStateRunning
process := startChild(supervisor, spec.Children[i].Name, spec.Children[i].Child, spec.Children[i].Options, spec.Children[i].Args...)
spec.Children[i].process = process
default:
panic("Incorrect supervisorChildState")
}
}
}
func startChild(supervisor Process, name string, child ProcessBehavior, opts ProcessOptions, args ...etf.Term) Process {
opts.GroupLeader = supervisor
if leader := supervisor.GroupLeader(); leader != nil {
opts.GroupLeader = leader
}
// Child process shouldn't ignore supervisor termination (via TrapExit).
// Using the supervisor's Context makes the child terminate if the supervisor is terminated.
opts.Context = supervisor.Context()
process, err := supervisor.Spawn(name, opts, child, args...)
if err != nil {
panic(err.Error())
}
supervisor.Link(process.Self())
return process
}
func | (supervisor Process, spec *SupervisorSpec, message interface{}) (interface{}, error) {
switch m := message.(type) {
case MessageDirectChildren:
children := []etf.Pid{}
for i := range spec.Children {
if spec.Children[i].process == nil {
continue
}
children = append(children, spec.Children[i].process.Self())
}
return children, nil
case messageStartChild:
childSpec, err := lookupSpecByName(m.name, spec.Children)
if err != nil {
return nil, err
}
childSpec.state = supervisorChildStateStart
if len(m.args) > 0 {
childSpec.Args = m.args
}
		// Dynamically started child can't be registered with a name.
childSpec.Name = ""
process := startChild(supervisor, childSpec.Name, childSpec.Child, childSpec.Options, childSpec.Args...)
childSpec.process = process
spec.Children = append(spec.Children, childSpec)
return process, nil
default:
}
return nil, lib.ErrUnsupportedRequest
}
func handleMessageExit(p Process, exit ProcessGracefulExitRequest, spec *SupervisorSpec, wait []etf.Pid) []etf.Pid {
terminated := exit.From
reason := exit.Reason
isChild := false
// We should make sure if it was an exit message from the supervisor's child
for i := range spec.Children {
child := spec.Children[i].process
if child == nil {
continue
}
if child.Self() == terminated {
isChild = true
break
}
}
if !isChild && reason != "restart" {
return wait
}
if len(wait) > 0 {
for i := range wait {
if wait[i] == terminated {
wait[i] = wait[0]
wait = wait[1:]
break
}
}
if len(wait) == 0 {
			// it was the last one. let's restart all terminated children
			// which don't have the supervisorChildStateDisabled state
startChildren(p, spec)
}
return wait
}
switch spec.Strategy.Type {
case SupervisorStrategyOneForAll:
for i := range spec.Children {
if spec.Children[i].state != supervisorChildStateRunning {
continue
}
child := spec.Children[i].process
if child == nil {
continue
}
spec.Children[i].process = nil
if haveToDisableChild(spec.Strategy.Restart, reason) {
spec.Children[i].state = supervisorChildStateDisabled
break
}
if spec.Children[i].state == supervisorChildStateDisabled {
continue
}
spec.Children[i].state = supervisorChildStateStart
if child.Self() == terminated {
if len(spec.Children) == i+1 && len(wait) == 0 {
				// it was the last one. nothing to wait for
startChildren(p, spec)
}
continue
}
child.Exit("restart")
wait = append(wait, child.Self())
}
case SupervisorStrategyRestForOne:
isRest := false
for i := range spec.Children {
			child := spec.Children[i].process
			// … (source truncated here; the listing resumes below, mid-ProcessLoop)
supervisor.go | if p != nil && p.IsAlive() {
p.Exit(ex.Reason)
}
}
return ex.Reason
}
waitTerminatingProcesses = handleMessageExit(ps, ex, spec, waitTerminatingProcesses)
case <-ps.Context().Done():
return "kill"
case direct := <-chs.Direct:
value, err := handleDirect(ps, spec, direct.Message)
ps.PutSyncReply(direct.Ref, value, err)
case <-chs.Mailbox:
// do nothing
}
}
}
// StartChild dynamically starts a child process using the child spec with the given name, as defined by the Init call.
func (sv *Supervisor) StartChild(supervisor Process, name string, args ...etf.Term) (Process, error) {
message := messageStartChild{
name: name,
args: args,
}
value, err := supervisor.Direct(message)
if err != nil {
return nil, err
}
process, ok := value.(Process)
if !ok {
return nil, fmt.Errorf("internal error: can't start child %#v", value)
}
return process, nil
}
func startChildren(supervisor Process, spec *SupervisorSpec) {
spec.restarts = append(spec.restarts, time.Now().Unix())
if len(spec.restarts) > int(spec.Strategy.Intensity) {
period := time.Now().Unix() - spec.restarts[0]
if period <= int64(spec.Strategy.Period) {
lib.Warning("Supervisor %q. Restart intensity is exceeded (%d restarts for %d seconds)",
spec.Name, spec.Strategy.Intensity, spec.Strategy.Period)
supervisor.Kill()
return
}
spec.restarts = spec.restarts[1:]
}
for i := range spec.Children {
switch spec.Children[i].state {
case supervisorChildStateDisabled:
spec.Children[i].process = nil
case supervisorChildStateRunning:
continue
case supervisorChildStateStart:
spec.Children[i].state = supervisorChildStateRunning
process := startChild(supervisor, spec.Children[i].Name, spec.Children[i].Child, spec.Children[i].Options, spec.Children[i].Args...)
spec.Children[i].process = process
default:
panic("Incorrect supervisorChildState")
}
}
}
func startChild(supervisor Process, name string, child ProcessBehavior, opts ProcessOptions, args ...etf.Term) Process {
opts.GroupLeader = supervisor
if leader := supervisor.GroupLeader(); leader != nil {
opts.GroupLeader = leader
}
// Child process shouldn't ignore supervisor termination (via TrapExit).
// Using the supervisor's Context makes the child terminate if the supervisor is terminated.
opts.Context = supervisor.Context()
process, err := supervisor.Spawn(name, opts, child, args...)
if err != nil {
panic(err.Error())
}
supervisor.Link(process.Self())
return process
}
func handleDirect(supervisor Process, spec *SupervisorSpec, message interface{}) (interface{}, error) {
switch m := message.(type) {
case MessageDirectChildren:
children := []etf.Pid{}
for i := range spec.Children {
if spec.Children[i].process == nil {
continue
}
children = append(children, spec.Children[i].process.Self())
}
return children, nil
case messageStartChild:
childSpec, err := lookupSpecByName(m.name, spec.Children)
if err != nil {
return nil, err
}
childSpec.state = supervisorChildStateStart
if len(m.args) > 0 {
childSpec.Args = m.args
}
		// Dynamically started child can't be registered with a name.
childSpec.Name = ""
process := startChild(supervisor, childSpec.Name, childSpec.Child, childSpec.Options, childSpec.Args...)
childSpec.process = process
spec.Children = append(spec.Children, childSpec)
return process, nil
default:
}
return nil, lib.ErrUnsupportedRequest
}
func handleMessageExit(p Process, exit ProcessGracefulExitRequest, spec *SupervisorSpec, wait []etf.Pid) []etf.Pid {
terminated := exit.From
reason := exit.Reason
isChild := false
// We should make sure if it was an exit message from the supervisor's child
for i := range spec.Children {
child := spec.Children[i].process
if child == nil {
continue
}
if child.Self() == terminated {
isChild = true
break
}
}
if !isChild && reason != "restart" {
return wait
}
if len(wait) > 0 {
for i := range wait {
if wait[i] == terminated {
wait[i] = wait[0]
wait = wait[1:]
break
}
}
if len(wait) == 0 {
			// it was the last one. let's restart all terminated children
			// which don't have the supervisorChildStateDisabled state
startChildren(p, spec)
}
return wait
}
switch spec.Strategy.Type {
case SupervisorStrategyOneForAll:
for i := range spec.Children {
if spec.Children[i].state != supervisorChildStateRunning {
continue
}
child := spec.Children[i].process
if child == nil {
continue
}
spec.Children[i].process = nil
if haveToDisableChild(spec.Strategy.Restart, reason) {
spec.Children[i].state = supervisorChildStateDisabled
break
}
if spec.Children[i].state == supervisorChildStateDisabled {
continue
}
spec.Children[i].state = supervisorChildStateStart
if child.Self() == terminated {
if len(spec.Children) == i+1 && len(wait) == 0 {
				// it was the last one. nothing to wait for
startChildren(p, spec)
}
continue
}
child.Exit("restart")
wait = append(wait, child.Self())
}
case SupervisorStrategyRestForOne:
isRest := false
for i := range spec.Children {
child := spec.Children[i].process
if child == nil {
continue
}
if child.Self() == terminated {
isRest = true
spec.Children[i].process = nil
if haveToDisableChild(spec.Strategy.Restart, reason) {
spec.Children[i].state = supervisorChildStateDisabled
break
} else {
spec.Children[i].state = supervisorChildStateStart
}
if len(spec.Children) == i+1 && len(wait) == 0 {
				// it was the last one. nothing to wait for
startChildren(p, spec)
}
continue
}
if isRest && spec.Children[i].state == supervisorChildStateRunning {
child.Exit("restart")
spec.Children[i].process = nil
wait = append(wait, child.Self())
if haveToDisableChild(spec.Strategy.Restart, "restart") {
spec.Children[i].state = supervisorChildStateDisabled
} else {
spec.Children[i].state = supervisorChildStateStart
}
}
}
case SupervisorStrategyOneForOne:
for i := range spec.Children {
child := spec.Children[i].process
if child == nil {
continue
}
if child.Self() == terminated {
spec.Children[i].process = nil
if haveToDisableChild(spec.Strategy.Restart, reason) {
spec.Children[i].state = supervisorChildStateDisabled
} else {
spec.Children[i].state = supervisorChildStateStart
}
startChildren(p, spec)
break
}
}
case SupervisorStrategySimpleOneForOne:
for i := range spec.Children {
child := spec.Children[i].process
if child == nil {
continue
}
if child.Self() == terminated {
if haveToDisableChild(spec.Strategy.Restart, reason) {
				// won't be restarted due to restart strategy
spec.Children[i] = spec.Children[0]
spec.Children = spec.Children[1:]
break
}
process := startChild(p, spec.Children[i].Name, spec.Children[i].Child, spec.Children[i].Options, spec.Children[i].Args...)
spec.Children[i].process = process
break
}
}
}
// check if all children are disabled. stop this process with reason "normal"
shouldStop := true
for i := range spec.Children {
if spec.Children[i].state == supervisorChildStateDisabled {
continue
}
shouldStop = false
break
}
if shouldStop {
p.Exit("normal")
}
return wait
}
func haveToDisableChild(strategy SupervisorStrategyRestart, reason string) bool {
switch strategy {
case SupervisorStrategyRestartTransient:
if reason == "shutdown" || reason == "normal" {
return true
}
case SupervisorStrategyRestartTemporary:
return true
}
return false
}
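// A quick illustration of the decision table implemented above:
//
//	haveToDisableChild(SupervisorStrategyRestartTransient, "normal")   // true: normal exit, not restarted
//	haveToDisableChild(SupervisorStrategyRestartTransient, "crashed")  // false: abnormal exit, restarted
//	haveToDisableChild(SupervisorStrategyRestartTemporary, "shutdown") // true: never restarted
//	haveToDisableChild(SupervisorStrategyRestartPermanent, "normal")   // false: always restarted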
func lookupSpecByName(specName string, spec []SupervisorChildSpec) (SupervisorChildSpec, error) {
	for i := range spec {
		if spec[i].Name == specName {
			return spec[i], nil
		}
	}
	return SupervisorChildSpec{}, fmt.Errorf("unknown child")
}
supervisor.go
}
// Supervisor is implementation of ProcessBehavior interface
type Supervisor struct{}
type messageStartChild struct {
name string
args []etf.Term
}
// ProcessInit
func (sv *Supervisor) ProcessInit(p Process, args ...etf.Term) (ProcessState, error) {
behavior, ok := p.Behavior().(SupervisorBehavior)
if !ok {
return ProcessState{}, fmt.Errorf("ProcessInit: not a SupervisorBehavior")
}
spec, err := behavior.Init(args...)
if err != nil {
return ProcessState{}, err
}
lib.Log("[%s] SUPERVISOR %q with restart strategy: %s[%s] ", p.NodeName(), p.Name(), spec.Strategy.Type, spec.Strategy.Restart)
p.SetTrapExit(true)
return ProcessState{
Process: p,
State: &spec,
}, nil
}
// ProcessLoop
func (sv *Supervisor) ProcessLoop(ps ProcessState, started chan<- bool) string {
spec := ps.State.(*SupervisorSpec)
if spec.Strategy.Type != SupervisorStrategySimpleOneForOne {
startChildren(ps, spec)
}
waitTerminatingProcesses := []etf.Pid{}
chs := ps.ProcessChannels()
started <- true
for {
select {
case ex := <-chs.GracefulExit:
if ex.From == ps.Self() {
// stop supervisor gracefully
for i := range spec.Children {
p := spec.Children[i].process
if p != nil && p.IsAlive() {
p.Exit(ex.Reason)
}
}
return ex.Reason
}
waitTerminatingProcesses = handleMessageExit(ps, ex, spec, waitTerminatingProcesses)
case <-ps.Context().Done():
return "kill"
case direct := <-chs.Direct:
value, err := handleDirect(ps, spec, direct.Message)
ps.PutSyncReply(direct.Ref, value, err)
case <-chs.Mailbox:
// do nothing
}
}
}
// StartChild dynamically starts a child process using the child spec with the given name, as defined by the Init call.
func (sv *Supervisor) StartChild(supervisor Process, name string, args ...etf.Term) (Process, error) {
message := messageStartChild{
name: name,
args: args,
}
value, err := supervisor.Direct(message)
if err != nil {
return nil, err
}
process, ok := value.(Process)
if !ok {
return nil, fmt.Errorf("internal error: can't start child %#v", value)
}
return process, nil
}
func startChildren(supervisor Process, spec *SupervisorSpec) {
spec.restarts = append(spec.restarts, time.Now().Unix())
if len(spec.restarts) > int(spec.Strategy.Intensity) {
period := time.Now().Unix() - spec.restarts[0]
if period <= int64(spec.Strategy.Period) {
lib.Warning("Supervisor %q. Restart intensity is exceeded (%d restarts for %d seconds)",
spec.Name, spec.Strategy.Intensity, spec.Strategy.Period)
supervisor.Kill()
return
}
spec.restarts = spec.restarts[1:]
}
for i := range spec.Children {
switch spec.Children[i].state {
case supervisorChildStateDisabled:
spec.Children[i].process = nil
case supervisorChildStateRunning:
continue
case supervisorChildStateStart:
spec.Children[i].state = supervisorChildStateRunning
process := startChild(supervisor, spec.Children[i].Name, spec.Children[i].Child, spec.Children[i].Options, spec.Children[i].Args...)
spec.Children[i].process = process
default:
panic("Incorrect supervisorChildState")
}
}
}
func startChild(supervisor Process, name string, child ProcessBehavior, opts ProcessOptions, args ...etf.Term) Process {
opts.GroupLeader = supervisor
if leader := supervisor.GroupLeader(); leader != nil {
opts.GroupLeader = leader
}
// Child process shouldn't ignore supervisor termination (via TrapExit).
// Using the supervisor's Context makes the child terminate if the supervisor is terminated.
opts.Context = supervisor.Context()
process, err := supervisor.Spawn(name, opts, child, args...)
if err != nil {
panic(err.Error())
}
supervisor.Link(process.Self())
return process
}
func handleDirect(supervisor Process, spec *SupervisorSpec, message interface{}) (interface{}, error) {
switch m := message.(type) {
case MessageDirectChildren:
children := []etf.Pid{}
for i := range spec.Children {
if spec.Children[i].process == nil {
continue
}
children = append(children, spec.Children[i].process.Self())
}
return children, nil
case messageStartChild:
childSpec, err := lookupSpecByName(m.name, spec.Children)
if err != nil {
return nil, err
}
childSpec.state = supervisorChildStateStart
if len(m.args) > 0 {
childSpec.Args = m.args
}
		// Dynamically started child can't be registered with a name.
childSpec.Name = ""
process := startChild(supervisor, childSpec.Name, childSpec.Child, childSpec.Options, childSpec.Args...)
childSpec.process = process
spec.Children = append(spec.Children, childSpec)
return process, nil
default:
}
return nil, lib.ErrUnsupportedRequest
}
func handleMessageExit(p Process, exit ProcessGracefulExitRequest, spec *SupervisorSpec, wait []etf.Pid) []etf.Pid {
terminated := exit.From
reason := exit.Reason
isChild := false
// We should make sure if it was an exit message from the supervisor's child
for i := range spec.Children {
child := spec.Children[i].process
if child == nil {
continue
}
if child.Self() == terminated {
isChild = true
break
}
}
if !isChild && reason != "restart" {
return wait
}
if len(wait) > 0 {
for i := range wait {
if wait[i] == terminated {
wait[i] = wait[0]
wait = wait[1:]
break
}
}
if len(wait) == 0 {
			// it was the last one. let's restart all terminated children
			// which don't have the supervisorChildStateDisabled state
startChildren(p, spec)
}
return wait
}
switch spec.Strategy.Type {
case SupervisorStrategyOneForAll:
for i := range spec.Children {
if spec.Children[i].state != supervisorChildStateRunning {
continue
}
child := spec.Children[i].process
if child == nil {
continue
}
spec.Children[i].process = nil
if haveToDisableChild(spec.Strategy.Restart, reason) {
spec.Children[i].state = supervisorChildStateDisabled
break
}
if spec.Children[i].state == supervisorChildStateDisabled {
continue
}
spec.Children[i].state = supervisorChildStateStart
if child.Self() == terminated {
if len(spec.Children) == i+1 && len(wait) == 0 {
				// it was the last one. nothing to wait for
startChildren(p, spec)
}
continue
}
child.Exit("restart")
wait = append(wait, child.Self())
}
case SupervisorStrategyRestForOne:
isRest := false
for i := range spec.Children {
child := spec.Children[i].process
if child == nil {
continue
}
if child.Self() == terminated {
isRest = true
spec.Children[i].process = nil
if haveToDisableChild(spec.Strategy.Restart, reason) {
spec.Children[i].state = supervisorChildStateDisabled
break
} else {
spec.Children[i].state = supervisorChildStateStart
}
if len(spec.Children) == i+1 && len(wait) == 0 {
				// it was the last one. nothing to wait for
startChildren(p, spec)
}
continue
}
if isRest && spec.Children[i].state == supervisorChildStateRunning {
child.Exit("restart")
spec.Children[i].process = nil
wait = append(wait, child.Self())
if haveToDisableChild(spec.Strategy.Restart, "restart") {
spec.Children[i].state = supervisorChildStateDisabled
} else {
spec.Children[i].state = supervisorChildStateStart
}
}
}
case SupervisorStrategyOneForOne:
for i := range spec.Children {
child := spec.Children[i].process
if child == nil {
continue
}
if child.Self() == terminated {
spec.Children[i].process = nil
if haveToDisableChild(spec.Strategy.Restart, reason) {
spec.Children[i].state = supervisorChildStateDisabled
} else {
spec.Children[i].state = supervisorChildStateStart
}
startChildren(p, spec)
break
}
}
case SupervisorStrategySimpleOneForOne:
for i := range spec.Children {
child := spec.Children[i].process
			if child == nil {
				continue
			}
supervisor.go
// SupervisorStrategyRestForOne If one child process terminates and is to be restarted, the 'rest' of the child processes (that is, the child
// processes after the terminated child process in the start order)
// are terminated. Then the terminated child process and all
// child processes after it are restarted
SupervisorStrategyRestForOne = SupervisorStrategyType("rest_for_one")
// SupervisorStrategySimpleOneForOne A simplified one_for_one supervisor, where all
// child processes are dynamically added instances
// of the same process type, that is, running the same code.
SupervisorStrategySimpleOneForOne = SupervisorStrategyType("simple_one_for_one")
// Restart types:
// SupervisorStrategyRestartPermanent child process is always restarted
SupervisorStrategyRestartPermanent = SupervisorStrategyRestart("permanent")
// SupervisorStrategyRestartTemporary child process is never restarted
// (not even when the supervisor restart strategy is rest_for_one
// or one_for_all and a sibling death causes the temporary process
// to be terminated)
SupervisorStrategyRestartTemporary = SupervisorStrategyRestart("temporary")
// SupervisorStrategyRestartTransient child process is restarted only if
// it terminates abnormally, that is, with an exit reason other
// than normal, shutdown.
SupervisorStrategyRestartTransient = SupervisorStrategyRestart("transient")
supervisorChildStateStart = 0
supervisorChildStateRunning = 1
supervisorChildStateDisabled = -1
)
type supervisorChildState int
// SupervisorSpec
type SupervisorSpec struct {
Name string
Children []SupervisorChildSpec
Strategy SupervisorStrategy
restarts []int64
}
// SupervisorChildSpec
type SupervisorChildSpec struct {
Name string
Child ProcessBehavior
Options ProcessOptions
Args []etf.Term
state supervisorChildState // for internal usage
process Process
}
// Supervisor is implementation of ProcessBehavior interface
type Supervisor struct{}
type messageStartChild struct {
name string
args []etf.Term
}
// ProcessInit
func (sv *Supervisor) ProcessInit(p Process, args ...etf.Term) (ProcessState, error) {
behavior, ok := p.Behavior().(SupervisorBehavior)
if !ok {
return ProcessState{}, fmt.Errorf("ProcessInit: not a SupervisorBehavior")
}
spec, err := behavior.Init(args...)
if err != nil {
return ProcessState{}, err
}
lib.Log("[%s] SUPERVISOR %q with restart strategy: %s[%s] ", p.NodeName(), p.Name(), spec.Strategy.Type, spec.Strategy.Restart)
p.SetTrapExit(true)
return ProcessState{
Process: p,
State: &spec,
}, nil
}
// ProcessLoop
func (sv *Supervisor) ProcessLoop(ps ProcessState, started chan<- bool) string {
spec := ps.State.(*SupervisorSpec)
if spec.Strategy.Type != SupervisorStrategySimpleOneForOne {
startChildren(ps, spec)
}
waitTerminatingProcesses := []etf.Pid{}
chs := ps.ProcessChannels()
started <- true
for {
select {
case ex := <-chs.GracefulExit:
if ex.From == ps.Self() {
// stop supervisor gracefully
for i := range spec.Children {
p := spec.Children[i].process
if p != nil && p.IsAlive() {
p.Exit(ex.Reason)
}
}
return ex.Reason
}
waitTerminatingProcesses = handleMessageExit(ps, ex, spec, waitTerminatingProcesses)
case <-ps.Context().Done():
return "kill"
case direct := <-chs.Direct:
value, err := handleDirect(ps, spec, direct.Message)
ps.PutSyncReply(direct.Ref, value, err)
case <-chs.Mailbox:
// do nothing
}
}
}
// StartChild dynamically starts a child process using the child spec with the given name, as defined by the Init call.
func (sv *Supervisor) StartChild(supervisor Process, name string, args ...etf.Term) (Process, error) {
message := messageStartChild{
name: name,
args: args,
}
value, err := supervisor.Direct(message)
if err != nil {
return nil, err
}
process, ok := value.(Process)
	if !ok {
		return nil, fmt.Errorf("internal error: can't start child %#v", value)
	}
	return process, nil
}
func startChildren(supervisor Process, spec *SupervisorSpec) {
spec.restarts = append(spec.restarts, time.Now().Unix())
if len(spec.restarts) > int(spec.Strategy.Intensity) {
period := time.Now().Unix() - spec.restarts[0]
if period <= int64(spec.Strategy.Period) {
lib.Warning("Supervisor %q. Restart intensity is exceeded (%d restarts for %d seconds)",
spec.Name, spec.Strategy.Intensity, spec.Strategy.Period)
supervisor.Kill()
return
}
spec.restarts = spec.restarts[1:]
}
for i := range spec.Children {
switch spec.Children[i].state {
case supervisorChildStateDisabled:
spec.Children[i].process = nil
case supervisorChildStateRunning:
continue
case supervisorChildStateStart:
spec.Children[i].state = supervisorChildStateRunning
process := startChild(supervisor, spec.Children[i].Name, spec.Children[i].Child, spec.Children[i].Options, spec.Children[i].Args...)
spec.Children[i].process = process
default:
panic("Incorrect supervisorChildState")
}
}
}
func startChild(supervisor Process, name string, child ProcessBehavior, opts ProcessOptions, args ...etf.Term) Process {
opts.GroupLeader = supervisor
if leader := supervisor.GroupLeader(); leader != nil {
opts.GroupLeader = leader
}
// Child process shouldn't ignore supervisor termination (via TrapExit).
// Using the supervisor's Context makes the child terminate if the supervisor is terminated.
opts.Context = supervisor.Context()
process, err := supervisor.Spawn(name, opts, child, args...)
if err != nil {
panic(err.Error())
}
supervisor.Link(process.Self())
return process
}
func handleDirect(supervisor Process, spec *SupervisorSpec, message interface{}) (interface{}, error) {
switch m := message.(type) {
case MessageDirectChildren:
children := []etf.Pid{}
for i := range spec.Children {
if spec.Children[i].process == nil {
continue
}
children = append(children, spec.Children[i].process.Self())
}
return children, nil
case messageStartChild:
childSpec, err := lookupSpecByName(m.name, spec.Children)
if err != nil {
return nil, err
}
childSpec.state = supervisorChildStateStart
if len(m.args) > 0 {
childSpec.Args = m.args
}
		// Dynamically started child can't be registered with a name.
childSpec.Name = ""
process := startChild(supervisor, childSpec.Name, childSpec.Child, childSpec.Options, childSpec.Args...)
childSpec.process = process
spec.Children = append(spec.Children, childSpec)
return process, nil
default:
}
return nil, lib.ErrUnsupportedRequest
}
func handleMessageExit(p Process, exit ProcessGracefulExitRequest, spec *SupervisorSpec, wait []etf.Pid) []etf.Pid {
terminated := exit.From
reason := exit.Reason
isChild := false
// We should make sure if it was an exit message from the supervisor's child
for i := range spec.Children {
child := spec.Children[i].process
if child == nil {
continue
}
if child.Self() == terminated {
isChild = true
break
}
}
if !isChild && reason != "restart" {
return wait
}
if len(wait) > 0 {
for i := range wait {
if wait[i] == terminated {
wait[i] = wait[0]
wait = wait[1:]
break
}
}
if len(wait) == 0 {
			// it was the last one. let's restart all terminated children
			// which don't have the supervisorChildStateDisabled state
startChildren(p, spec)
}
return wait
}
switch spec.Strategy.Type {
case SupervisorStrategyOneForAll:
for i := range spec.Children {
if spec.Children[i].state != supervisorChildStateRunning {
continue
}
child := spec.Children[i].process
if child == nil {
continue
}
spec.Children[i].process = nil
if haveToDisableChild(spec.Strategy.Restart, reason) {
spec.Children[i].state = supervisorChildStateDisabled
break
}
if spec.Children[i].state == supervisorChildStateDisabled {
continue
}
spec.Children[i].state = supervisorChildStateStart
if child.Self() == terminated {
if len(spec.Children) == i+1 && len(wait) == 0 {
				// it was the last one. nothing to wait for
startChildren(p, spec)
}
continue
}
child.Exit("restart")
wait = append(wait, child.Self())
}
case SupervisorStrategyRestForOne:
isRest := false
for i := range spec.Children {
			child := spec.Children[i].process
array.rs
isize{
fn into(self)->usize{ self as usize }
}
*/
#[derive(Debug)]
pub struct Array<T,I=i32>(pub Vec<T>,PhantomData<I>);
// my array helper fn's
impl<T:Clone,I:IndexTrait+Clone> Array<T,I>{
/// TODO - better name. preserves ordering of vec![v;count].
pub fn from_val_n(val:T, n:i32)->Self{
let v=vec![val; n as usize];
Array(v,PhantomData)
}
pub fn from_fn<F:Fn(I)->T>(count:I,f:F)->Self{
let mut v=Vec::new();
v.reserve(count.clone().my_into());
for x in 0..count.my_into() {v.push(f(I::my_from(x)))}
Array(v,PhantomData)
}
pub fn map<B,F:Fn(&T)->B>(&self,f:F)->Array<B,I>{
let mut out=Array::<B,I>::new();
out.reserve(self.len());
for x in self.iter(){
out.push(f(x))
}
out
}
}
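// Illustrative usage of the helpers above (not part of this module); assumes
// the default i32 index type implements IndexTrait as elsewhere in the crate:
//
// let a: Array<i32> = Array::from_fn(4, |i| i * 2); // [0, 2, 4, 6]
// let b = a.map(|x| x + 1); // [1, 3, 5, 7]
// assert_eq!(b.num_elems(), 4);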
impl<T,I:IndexTrait+Clone> Array<T,I>{
pub fn num_elems(&self)->i32{ self.0.len() as i32} // TODO - figure out generic int
pub fn new()->Self{ Array(Vec::new(),PhantomData) }
pub fn reserve(&mut self, additional: I){
self.0.reserve(additional.my_into());
}
pub fn push(&mut self,val:T){self.0.push(val)}
pub fn shrink_to_fit(&mut self){self.0.shrink_to_fit()}
pub fn truncate(&mut self, len: I){
self.0.truncate(len.my_into());
}
pub fn as_slice(&self) -> &[T]{
self.0.as_slice()
}
pub fn as_mut_slice(&mut self) -> &mut [T]{
self.0.as_mut_slice()
}
pub fn swap_remove(&mut self, index: I) -> T{
self.0.swap_remove(index.my_into())
}
pub fn insert(&mut self, index: I, element: T){
self.0.insert(index.my_into(),element)
}
pub fn remove(&mut self, index: I) -> T{
self.0.remove(index.my_into())
}
// aka filter in place
pub fn retain<F:FnMut(&T)->bool>(&mut self, f: F) {
self.0.retain(f)
}
pub fn dedup_by_key<F:FnMut(&mut T)->K, K:PartialEq<K>>(&mut self, key: F) {
self.0.dedup_by_key(key)
}
pub fn dedup_by<F:FnMut(&mut T,&mut T)->bool>(&mut self, same_bucket: F) {
self.0.dedup_by(same_bucket)
}
#[cfg(nightly_vector)]
pub fn place_back(&mut self) -> PlaceBack<T>{
self.0.place_back()
}
pub fn pop(&mut self) -> Option<T>{
self.0.pop()
}
pub fn append(&mut self, other: &mut Vec<T>){
self.0.append(other)
}
#[cfg(UseRangeArgument)]
pub fn drain<R:RangeArgument<I>>(&mut self, range: R) -> Drain<T>
{
self.0.drain(range)
}
pub fn clear(&mut self){
self.0.clear()
}
// pub fn len(&self)->I{
// self.0.len() as Index
// }
// pub fn is_empty(&self)->bool{ self.0.is_empty()}
pub fn split_off(&mut self,at:I)->Array<T>{
Array(self.0.split_off(at.my_into()),PhantomData)
}
}
impl<T:Clone,I:IndexTrait> Array<T,I>{
pub fn resize(&mut self, new_len:I, value:T){
self.0.resize(new_len.my_into(),value)
}
pub fn extend_from_slice(&mut self, other:&[T]){
self.0.extend_from_slice(other)
}
}
impl<T:Default,I:IndexTrait> Array<T,I>{
pub fn resize_default(&mut self, new_len:I){
self.0.resize_default(new_len.my_into())
}
}
impl<T:PartialEq<T>,I:IndexTrait> Array<T,I>{
pub fn dedup(&mut self){
self.0.dedup()
}
pub fn remove_item(&mut self, item:&T)->Option<T>{
self.0.remove_item(item)
}
}
impl<T,INDEX:IndexTrait> Array<T,INDEX>{
/// TODO - figure out how to convert RangeArgument indices
pub fn splice<I:IntoIterator<Item=T>,R:RangeArgument<usize>>(&mut self, range:R, replace_with:I)-> Splice<<I as IntoIterator>::IntoIter>
{
self.0.splice(range,replace_with)
}
pub fn drain_filter<F:FnMut(&mut T)->bool>(&mut self, filter: F) -> DrainFilter<T, F> {
self.0.drain_filter(filter)
}
}
impl<T,INDEX:IndexTrait> Deref for Array<T,INDEX>{
type Target=[T];
fn deref(&self)->&Self::Target { self.0.deref() }
}
impl<T,INDEX:IndexTrait> Array<T,INDEX>{
fn len(&self)->INDEX{INDEX::my_from(self.0.len())}
fn is_empty(&self)->bool{self.0.is_empty()}
fn first(&self)->Option<&T>{self.0.first()}
fn first_mut(&mut self)->Option<&mut T>{self.0.first_mut()}
fn split_first(&self)->Option<(&T,&[T])>{self.0.split_first()}
fn split_first_mut(&mut self)->Option<(&mut T, &mut [T])>{ self.0.split_first_mut() }
fn split_last(&self)->Option<(&T,&[T])>{self.0.split_last()}
fn split_last_mut(&mut self)->Option<(&mut T, &mut[T])>{self.0.split_last_mut()}
fn last(&self)->Option<&T>{self.0.last()}
fn last_mut(&mut self)->Option<&mut T>{self.0.last_mut()}
fn get<I>(&self, index:I)->Option<&<I as SliceIndex<[T]> >::Output>
where I:SliceIndex<[T]>
{
self.0.get(index)
}
fn get_mut<I>(&mut self, index:I)->Option<&mut <I as SliceIndex<[T]>>::Output>
where I:SliceIndex<[T]>
{
self.0.get_mut(index)
}
unsafe fn get_unchecked<I>(&self, index: I) -> &<I as SliceIndex<[T]>>::Output
where
I: SliceIndex<[T]> {self.0.get_unchecked(index)}
unsafe fn get_unchecked_mut<I>(
&mut self,
index: I
) -> &mut <I as SliceIndex<[T]>>::Output
where
I: SliceIndex<[T]>{
self.0.get_unchecked_mut(index)
}
fn as_ptr(&self)->*const T{self.0.as_ptr()}
fn as_mut_ptr(&mut self)->*mut T{self.0.as_mut_ptr()}
fn swap(&mut self, a:INDEX,b:INDEX){
self.0.swap(a.my_into(),b.my_into())
}
fn reverse(&mut self){self.0.reverse()}
fn iter(&self)->Iter<T>{self.0.iter()}
fn iter_mut(&mut self)->IterMut<T>{self.0.iter_mut()}
fn windows(&self,size:INDEX)->Windows<T>{self.0.windows(size.my_into())}
fn chunks(&self,chunk_size:INDEX)->Chunks<T>{self.0.chunks(chunk_size.my_into())}
fn chunks_mut(&mut self,chunk_size:INDEX)->ChunksMut<T>{self.0.chunks_mut(chunk_size.my_into())}
fn split_at(&self, mid: INDEX) -> (&[T], &[T]){
self.0.split_at(mid.my_into())
}
fn split_at_mut(&mut self, mid: INDEX) -> (&mut [T], &mut [T]){
self.0.split_at_mut(mid.my_into())
}
fn split<F>(&self, pred: F) -> Split<T, F>
where F:FnMut(&T)->bool
{
self.0.split(pred)
}
fn split_mut<F>(&mut self, pred: F) -> SplitMut<T, F>
where F: FnMut(&T) -> bool
{
self.0.split_mut(pred)
}
fn rsplit<F>(&self, pred: F) -> RSplit<T, F>
where F: FnMut(&T) -> bool,
{
self.0.rsplit(pred)
}
fn rsplit_mut<F>(&mut self, pred: F) -> RSplitMut<T, F>
where F: FnMut(&T) -> bool
{
self.0.rsplit_mut(pred)
}
fn splitn<F>(&self, n: INDEX, pred: F) -> SplitN<T, F>
where F: FnMut(&T) -> bool
{
self.0.splitn(n.my_into(),pred)
}
fn splitn_mut<F>(&mut self, n: INDEX, pred: F) -> SplitNMut<T, F>
where F:
array.rs
isize{
}
*/
#[derive(Debug)]
pub struct Array<T,I=i32>(pub Vec<T>,PhantomData<I>);
// my array helper fn's
impl<T:Clone,I:IndexTrait+Clone> Array<T,I>{
/// TODO - better name. preserves ordering of vec![v;count].
pub fn from_val_n(val:T, n:i32)->Self{
let v=vec![val; n as usize];
Array(v,PhantomData)
}
pub fn from_fn<F:Fn(I)->T>(count:I,f:F)->Self{
let mut v=Vec::new();
v.reserve(count.clone().my_into());
for x in 0..count.my_into() {v.push(f(I::my_from(x)))}
Array(v,PhantomData)
}
pub fn map<B,F:Fn(&T)->B>(&self,f:F)->Array<B,I>{
let mut out=Array::<B,I>::new();
out.reserve(self.len());
for x in self.iter(){
out.push(f(x))
}
out
}
}
impl<T,I:IndexTrait+Clone> Array<T,I>{
pub fn num_elems(&self)->i32{ self.0.len() as i32} // TODO - figure out generic int
pub fn new()->Self{ Array(Vec::new(),PhantomData) }
pub fn reserve(&mut self, additional: I){
self.0.reserve(additional.my_into());
}
pub fn push(&mut self,val:T){self.0.push(val)}
pub fn shrink_to_fit(&mut self){self.0.shrink_to_fit()}
pub fn truncate(&mut self, len: I){
self.0.truncate(len.my_into());
}
pub fn as_slice(&self) -> &[T]{
self.0.as_slice()
}
pub fn as_mut_slice(&mut self) -> &mut [T]{
self.0.as_mut_slice()
}
pub fn swap_remove(&mut self, index: I) -> T{
self.0.swap_remove(index.my_into())
}
pub fn insert(&mut self, index: I, element: T){
self.0.insert(index.my_into(),element)
}
pub fn remove(&mut self, index: I) -> T{
self.0.remove(index.my_into())
}
// aka filter in place
pub fn retain<F:FnMut(&T)->bool>(&mut self, f: F) {
self.0.retain(f)
}
pub fn dedup_by_key<F:FnMut(&mut T)->K, K:PartialEq<K>>(&mut self, key: F) {
self.0.dedup_by_key(key)
}
pub fn dedup_by<F:FnMut(&mut T,&mut T)->bool>(&mut self, same_bucket: F) {
self.0.dedup_by(same_bucket)
}
#[cfg(nightly_vector)]
pub fn place_back(&mut self) -> PlaceBack<T>{
self.0.place_back()
}
pub fn pop(&mut self) -> Option<T>{
self.0.pop()
}
pub fn append(&mut self, other: &mut Vec<T>){
self.0.append(other)
}
#[cfg(UseRangeArgument)]
pub fn drain<R:RangeArgument<I>>(&mut self, range: R) -> Drain<T>
{
self.0.drain(range)
}
pub fn clear(&mut self){
self.0.clear()
}
// pub fn len(&self)->I{
// self.0.len() as Index
// }
// pub fn is_empty(&self)->bool{ self.0.is_empty()}
pub fn split_off(&mut self,at:I)->Array<T>{
Array(self.0.split_off(at.my_into()),PhantomData)
}
}
impl<T:Clone,I:IndexTrait> Array<T,I>{
pub fn resize(&mut self, new_len:I, value:T){
self.0.resize(new_len.my_into(),value)
}
pub fn extend_from_slice(&mut self, other:&[T]){
self.0.extend_from_slice(other)
}
}
impl<T:Default,I:IndexTrait> Array<T,I>{
pub fn resize_default(&mut self, new_len:I){
self.0.resize_default(new_len.my_into())
}
}
impl<T:PartialEq<T>,I:IndexTrait> Array<T,I>{
pub fn dedup(&mut self){
self.0.dedup()
}
pub fn remove_item(&mut self, item:&T)->Option<T>{
self.0.remove_item(item)
}
}
impl<T,INDEX:IndexTrait> Array<T,INDEX>{
/// TODO - figure out how to convert RangeArgument indices
pub fn splice<I:IntoIterator<Item=T>,R:RangeArgument<usize>>(&mut self, range:R, replace_with:I)-> Splice<<I as IntoIterator>::IntoIter>
{
self.0.splice(range,replace_with)
}
pub fn drain_filter<F:FnMut(&mut T)->bool>(&mut self, filter: F) -> DrainFilter<T, F> {
self.0.drain_filter(filter)
}
}
impl<T,INDEX:IndexTrait> Deref for Array<T,INDEX>{
type Target=[T];
fn deref(&self)->&Self::Target { self.0.deref() }
}
impl<T,INDEX:IndexTrait> Array<T,INDEX>{
fn len(&self)->INDEX{INDEX::my_from(self.0.len())}
fn is_empty(&self)->bool{self.0.is_empty()}
fn first(&self)->Option<&T>{self.0.first()}
fn first_mut(&mut self)->Option<&mut T>{self.0.first_mut()}
fn split_first(&self)->Option<(&T,&[T])>{self.0.split_first()}
fn split_first_mut(&mut self)->Option<(&mut T, &mut [T])>{ self.0.split_first_mut() }
fn split_last(&self)->Option<(&T,&[T])>{self.0.split_last()}
fn split_last_mut(&mut self)->Option<(&mut T, &mut[T])>{self.0.split_last_mut()}
fn last(&self)->Option<&T>{self.0.last()}
fn last_mut(&mut self)->Option<&mut T>{self.0.last_mut()}
fn get<I>(&self, index:I)->Option<&<I as SliceIndex<[T]> >::Output>
where I:SliceIndex<[T]>
{
self.0.get(index)
}
fn get_mut<I>(&mut self, index:I)->Option<&mut <I as SliceIndex<[T]>>::Output>
where I:SliceIndex<[T]>
{
self.0.get_mut(index)
}
unsafe fn get_unchecked<I>(&self, index: I) -> &<I as SliceIndex<[T]>>::Output
where
I: SliceIndex<[T]> {self.0.get_unchecked(index)}
unsafe fn get_unchecked_mut<I>(
&mut self,
index: I
) -> &mut <I as SliceIndex<[T]>>::Output
where
I: SliceIndex<[T]>{
self.0.get_unchecked_mut(index)
}
fn as_ptr(&self)->*const T{self.0.as_ptr()}
fn as_mut_ptr(&mut self)->*mut T{self.0.as_mut_ptr()}
fn swap(&mut self, a:INDEX,b:INDEX){
self.0.swap(a.my_into(),b.my_into())
}
fn reverse(&mut self){self.0.reverse()}
fn iter(&self)->Iter<T>{self.0.iter()}
fn iter_mut(&mut self)->IterMut<T>{self.0.iter_mut()}
fn windows(&self,size:INDEX)->Windows<T>{self.0.windows(size.my_into())}
fn chunks(&self,chunk_size:INDEX)->Chunks<T>{self.0.chunks(chunk_size.my_into())}
fn chunks_mut(&mut self,chunk_size:INDEX)->ChunksMut<T>{self.0.chunks_mut(chunk_size.my_into())}
fn split_at(&self, mid: INDEX) -> (&[T], &[T]){
self.0.split_at(mid.my_into())
}
fn split_at_mut(&mut self, mid: INDEX) -> (&mut [T], &mut [T]){
self.0.split_at_mut(mid.my_into())
}
fn split<F>(&self, pred: F) -> Split<T, F>
where F:FnMut(&T)->bool
{
self.0.split(pred)
}
fn split_mut<F>(&mut self, pred: F) -> SplitMut<T, F>
where F: FnMut(&T) -> bool
{
self.0.split_mut(pred)
}
fn rsplit<F>(&self, pred: F) -> RSplit<T, F>
where F: FnMut(&T) -> bool,
{
self.0.rsplit(pred)
}
fn rsplit_mut<F>(&mut self, pred: F) -> RSplitMut<T, F>
where F: FnMut(&T) -> bool
{
self.0.rsplit_mut(pred)
}
fn splitn<F>(&self, n: INDEX, pred: F) -> SplitN<T, F>
where F: FnMut(&T) -> bool
{
self.0.splitn(n.my_into(),pred)
}
fn splitn_mut<F>(&mut self, n: INDEX, pred: F) -> SplitNMut<T, F>
where F: | as_ptr | identifier_name |
array.rs | {
fn into(self)->usize{ self as usize }
}
*/
#[derive(Debug)]
pub struct Array<T,I=i32>(pub Vec<T>,PhantomData<I>);
// my array helper fn's
impl<T:Clone,I:IndexTrait+Clone> Array<T,I>{
    /// TODO - better name. Preserves the argument ordering of `vec![val; count]`.
pub fn from_val_n(val:T, n:i32)->Self{
let v=vec![val; n as usize];
Array(v,PhantomData)
}
pub fn from_fn<F:Fn(I)->T>(count:I,f:F)->Self{
let mut v=Vec::new();
v.reserve(count.clone().my_into());
for x in 0..count.my_into() {v.push(f(I::my_from(x)))}
Array(v,PhantomData)
}
pub fn map<B,F:Fn(&T)->B>(&self,f:F)->Array<B,I>{
let mut out=Array::<B,I>::new();
out.reserve(self.len());
for x in self.iter(){
out.push(f(x))
}
out
}
}
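// A small usage sketch (not from the original source) of the helpers above,
// assuming `IndexTrait` is implemented for the default index type `i32`:
fn demo_from_fn_and_map(){
    let squares:Array<i32> = Array::from_fn(5, |i| i * i); // [0, 1, 4, 9, 16]
    let doubled:Array<i32> = squares.map(|x| x * 2);       // [0, 2, 8, 18, 32]
    assert_eq!(doubled.num_elems(), 5);
}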
impl<T,I:IndexTrait+Clone> Array<T,I>{
pub fn num_elems(&self)->i32{ self.0.len() as i32} // TODO - figure out generic int
pub fn new()->Self{ Array(Vec::new(),PhantomData) }
pub fn reserve(&mut self, additional: I){
self.0.reserve(additional.my_into());
}
pub fn push(&mut self,val:T){self.0.push(val)}
pub fn shrink_to_fit(&mut self){self.0.shrink_to_fit()}
pub fn truncate(&mut self, len: I) |
pub fn as_slice(&self) -> &[T]{
self.0.as_slice()
}
pub fn as_mut_slice(&mut self) -> &mut [T]{
self.0.as_mut_slice()
}
pub fn swap_remove(&mut self, index: I) -> T{
self.0.swap_remove(index.my_into())
}
pub fn insert(&mut self, index: I, element: T){
self.0.insert(index.my_into(),element)
}
pub fn remove(&mut self, index: I) -> T{
self.0.remove(index.my_into())
}
// aka filter in place
pub fn retain<F:FnMut(&T)->bool>(&mut self, f: F) {
self.0.retain(f)
}
pub fn dedup_by_key<F:FnMut(&mut T)->K, K:PartialEq<K>>(&mut self, key: F) {
self.0.dedup_by_key(key)
}
pub fn dedup_by<F:FnMut(&mut T,&mut T)->bool>(&mut self, same_bucket: F) {
self.0.dedup_by(same_bucket)
}
#[cfg(nightly_vector)]
pub fn place_back(&mut self) -> PlaceBack<T>{
self.0.place_back()
}
pub fn pop(&mut self) -> Option<T>{
self.0.pop()
}
pub fn append(&mut self, other: &mut Vec<T>){
self.0.append(other)
}
#[cfg(UseRangeArgument)]
pub fn drain<R:RangeArgument<I>>(&mut self, range: R) -> Drain<T>
{
self.0.drain(range)
}
pub fn clear(&mut self){
self.0.clear()
}
// pub fn len(&self)->I{
// self.0.len() as Index
// }
// pub fn is_empty(&self)->bool{ self.0.is_empty()}
pub fn split_off(&mut self,at:I)->Array<T>{
Array(self.0.split_off(at.my_into()),PhantomData)
}
}
impl<T:Clone,I:IndexTrait> Array<T,I>{
pub fn resize(&mut self, new_len:I, value:T){
self.0.resize(new_len.my_into(),value)
}
pub fn extend_from_slice(&mut self, other:&[T]){
self.0.extend_from_slice(other)
}
}
impl<T:Default,I:IndexTrait> Array<T,I>{
pub fn resize_default(&mut self, new_len:I){
self.0.resize_default(new_len.my_into())
}
}
impl<T:PartialEq<T>,I:IndexTrait> Array<T,I>{
pub fn dedup(&mut self){
self.0.dedup()
}
pub fn remove_item(&mut self, item:&T)->Option<T>{
self.0.remove_item(item)
}
}
impl<T,INDEX:IndexTrait> Array<T,INDEX>{
    /// TODO - figure out how to convert RangeArgument indices
pub fn splice<I:IntoIterator<Item=T>,R:RangeArgument<usize>>(&mut self, range:R, replace_with:I)-> Splice<<I as IntoIterator>::IntoIter>
{
self.0.splice(range,replace_with)
}
pub fn drain_filter<F:FnMut(&mut T)->bool>(&mut self, filter: F) -> DrainFilter<T, F> {
self.0.drain_filter(filter)
}
}
impl<T,INDEX:IndexTrait> Deref for Array<T,INDEX>{
type Target=[T];
fn deref(&self)->&Self::Target { self.0.deref() }
}
impl<T,INDEX:IndexTrait> Array<T,INDEX>{
fn len(&self)->INDEX{INDEX::my_from(self.0.len())}
fn is_empty(&self)->bool{self.0.is_empty()}
fn first(&self)->Option<&T>{self.0.first()}
fn first_mut(&mut self)->Option<&mut T>{self.0.first_mut()}
fn split_first(&self)->Option<(&T,&[T])>{self.0.split_first()}
fn split_first_mut(&mut self)->Option<(&mut T, &mut [T])>{ self.0.split_first_mut() }
fn split_last(&self)->Option<(&T,&[T])>{self.0.split_last()}
fn split_last_mut(&mut self)->Option<(&mut T, &mut[T])>{self.0.split_last_mut()}
fn last(&self)->Option<&T>{self.0.last()}
fn last_mut(&mut self)->Option<&mut T>{self.0.last_mut()}
fn get<I>(&self, index:I)->Option<&<I as SliceIndex<[T]> >::Output>
where I:SliceIndex<[T]>
{
self.0.get(index)
}
fn get_mut<I>(&mut self, index:I)->Option<&mut <I as SliceIndex<[T]>>::Output>
where I:SliceIndex<[T]>
{
self.0.get_mut(index)
}
unsafe fn get_unchecked<I>(&self, index: I) -> &<I as SliceIndex<[T]>>::Output
where
I: SliceIndex<[T]> {self.0.get_unchecked(index)}
unsafe fn get_unchecked_mut<I>(
&mut self,
index: I
) -> &mut <I as SliceIndex<[T]>>::Output
where
I: SliceIndex<[T]>{
self.0.get_unchecked_mut(index)
}
fn as_ptr(&self)->*const T{self.0.as_ptr()}
fn as_mut_ptr(&mut self)->*mut T{self.0.as_mut_ptr()}
fn swap(&mut self, a:INDEX,b:INDEX){
self.0.swap(a.my_into(),b.my_into())
}
fn reverse(&mut self){self.0.reverse()}
fn iter(&self)->Iter<T>{self.0.iter()}
fn iter_mut(&mut self)->IterMut<T>{self.0.iter_mut()}
fn windows(&self,size:INDEX)->Windows<T>{self.0.windows(size.my_into())}
fn chunks(&self,chunk_size:INDEX)->Chunks<T>{self.0.chunks(chunk_size.my_into())}
fn chunks_mut(&mut self,chunk_size:INDEX)->ChunksMut<T>{self.0.chunks_mut(chunk_size.my_into())}
fn split_at(&self, mid: INDEX) -> (&[T], &[T]){
self.0.split_at(mid.my_into())
}
fn split_at_mut(&mut self, mid: INDEX) -> (&mut [T], &mut [T]){
self.0.split_at_mut(mid.my_into())
}
fn split<F>(&self, pred: F) -> Split<T, F>
where F:FnMut(&T)->bool
{
self.0.split(pred)
}
fn split_mut<F>(&mut self, pred: F) -> SplitMut<T, F>
where F: FnMut(&T) -> bool
{
self.0.split_mut(pred)
}
fn rsplit<F>(&self, pred: F) -> RSplit<T, F>
where F: FnMut(&T) -> bool,
{
self.0.rsplit(pred)
}
fn rsplit_mut<F>(&mut self, pred: F) -> RSplitMut<T, F>
where F: FnMut(&T) -> bool
{
self.0.rsplit_mut(pred)
}
fn splitn<F>(&self, n: INDEX, pred: F) -> SplitN<T, F>
where F: FnMut(&T) -> bool
{
self.0.splitn(n.my_into(),pred)
}
fn splitn_mut<F>(&mut self, n: INDEX, pred: F) -> SplitNMut<T, F>
where F: | {
self.0.truncate(len.my_into());
} | identifier_body |
codemap.rs | <Span> {
self.primary_spans.first().cloned()
}
/// Returns all primary spans.
pub fn primary_spans(&self) -> &[Span] {
&self.primary_spans
}
/// Returns the strings to highlight. We always ensure that there
/// is an entry for each of the primary spans -- for each primary
/// span P, if there is at least one label with span P, we return
/// those labels (marked as primary). But otherwise we return
/// `SpanLabel` instances with empty labels.
pub fn span_labels(&self) -> Vec<SpanLabel> {
let is_primary = |span| self.primary_spans.contains(&span);
let mut span_labels = vec![];
for &(span, ref label) in &self.span_labels {
span_labels.push(SpanLabel {
span: span,
is_primary: is_primary(span),
label: Some(label.clone())
});
}
for &span in &self.primary_spans {
if !span_labels.iter().any(|sl| sl.span == span) {
span_labels.push(SpanLabel {
span: span,
is_primary: true,
label: None
});
}
}
span_labels
}
}
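// Illustrative check (added here, not in the original file) of the guarantee
// documented on `span_labels`: a primary span with no explicit label still
// yields a `SpanLabel` with `label: None`. Assumes `sp != sp2`.
fn demo_span_labels(sp: Span, sp2: Span) {
    let mut msp = MultiSpan::from_span(sp); // `sp` becomes the primary span
    msp.push_span_label(sp2, "secondary note".to_string());
    let labels = msp.span_labels();
    assert!(labels.iter().any(|l| l.is_primary && l.label.is_none()));
    assert!(labels.iter().any(|l| !l.is_primary && l.label.is_some()));
}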
impl From<Span> for MultiSpan {
fn from(span: Span) -> MultiSpan {
MultiSpan::from_span(span)
}
}
// _____________________________________________________________________________
// Loc, LocWithOpt, FileMapAndLine, FileMapAndBytePos
//
/// A source code location used for error reporting
#[derive(Debug)]
pub struct Loc {
/// Information about the original source
pub file: Rc<FileMap>,
/// The (1-based) line number
pub line: usize,
/// The (0-based) column offset
pub col: CharPos
}
/// A source code location used as the result of lookup_char_pos_adj
// Actually, *none* of the clients use the filename *or* file field;
// perhaps they should just be removed.
#[derive(Debug)]
pub struct LocWithOpt {
pub filename: FileName,
pub line: usize,
pub col: CharPos,
pub file: Option<Rc<FileMap>>,
}
// used to be structural records. Better names, anyone?
#[derive(Debug)]
pub struct FileMapAndLine { pub fm: Rc<FileMap>, pub line: usize }
#[derive(Debug)]
pub struct FileMapAndBytePos { pub fm: Rc<FileMap>, pub pos: BytePos }
// _____________________________________________________________________________
// ExpnFormat, NameAndSpan, ExpnInfo, ExpnId
//
/// The source of expansion.
#[derive(Clone, Hash, Debug, PartialEq, Eq)]
pub enum ExpnFormat {
/// e.g. #[derive(...)] <item>
MacroAttribute(Name),
/// e.g. `format!()`
MacroBang(Name),
}
#[derive(Clone, Hash, Debug)]
pub struct NameAndSpan {
/// The format with which the macro was invoked.
pub format: ExpnFormat,
/// Whether the macro is allowed to use #[unstable]/feature-gated
/// features internally without forcing the whole crate to opt-in
/// to them.
pub allow_internal_unstable: bool,
/// The span of the macro definition itself. The macro may not
/// have a sensible definition span (e.g. something defined
/// completely inside libsyntax) in which case this is None.
pub span: Option<Span>
}
impl NameAndSpan {
pub fn name(&self) -> Name {
match self.format {
ExpnFormat::MacroAttribute(s) => s,
ExpnFormat::MacroBang(s) => s,
}
}
}
/// Extra information for tracking spans of macro and syntax sugar expansion
#[derive(Hash, Debug)]
pub struct ExpnInfo {
    /// The location of the actual macro invocation or syntax sugar, e.g.
/// `let x = foo!();` or `if let Some(y) = x {}`
///
/// This may recursively refer to other macro invocations, e.g. if
/// `foo!()` invoked `bar!()` internally, and there was an
/// expression inside `bar!`; the call_site of the expression in
/// the expansion would point to the `bar!` invocation; that
/// call_site span would have its own ExpnInfo, with the call_site
/// pointing to the `foo!` invocation.
pub call_site: Span,
/// Information about the expansion.
pub callee: NameAndSpan
}
// _____________________________________________________________________________
// FileMap, MultiByteChar, FileName, FileLines
//
pub type FileName = String;
/// Identifies an offset of a multi-byte character in a FileMap
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct MultiByteChar {
/// The absolute offset of the character in the CodeMap
pub pos: BytePos,
/// The number of bytes, >=2
pub bytes: usize,
}
/// A single source in the CodeMap.
pub struct FileMap {
/// The name of the file that the source came from, source that doesn't
/// originate from files has names between angle brackets by convention,
/// e.g. `<anon>`
pub name: FileName,
/// The absolute path of the file that the source came from.
pub abs_path: Option<FileName>,
/// The complete source code
pub src: Option<Rc<String>>,
/// The start position of this source in the CodeMap
pub start_pos: BytePos,
/// The end position of this source in the CodeMap
pub end_pos: BytePos,
    /// Locations of line beginnings in the source code
pub lines: RefCell<Vec<BytePos>>,
/// Locations of multi-byte characters in the source code
pub multibyte_chars: RefCell<Vec<MultiByteChar>>,
}
impl fmt::Debug for FileMap {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "FileMap({})", self.name)
}
}
/// An abstraction over the fs operations used by the Parser.
pub trait FileLoader {
/// Query the existence of a file.
fn file_exists(&self, path: &Path) -> bool;
/// Return an absolute path to a file, if possible.
fn abs_path(&self, path: &Path) -> Option<PathBuf>;
    /// Read the contents of a UTF-8 file into memory.
fn read_file(&self, path: &Path) -> io::Result<String>;
}
/// A FileLoader that uses std::fs to load real files.
pub struct RealFileLoader;
impl FileLoader for RealFileLoader {
fn file_exists(&self, path: &Path) -> bool {
fs::metadata(path).is_ok()
}
fn abs_path(&self, path: &Path) -> Option<PathBuf> {
if path.is_absolute() {
Some(path.to_path_buf())
} else {
env::current_dir()
.ok()
.map(|cwd| cwd.join(path))
}
}
fn read_file(&self, path: &Path) -> io::Result<String> {
let mut src = String::new();
fs::File::open(path)?.read_to_string(&mut src)?;
Ok(src)
}
}
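// A hypothetical second implementation of the `FileLoader` trait above,
// serving sources from an in-memory map (useful for tests). The type and
// field names are invented for illustration.
struct MapFileLoader {
    files: std::collections::HashMap<PathBuf, String>,
}
impl FileLoader for MapFileLoader {
    fn file_exists(&self, path: &Path) -> bool {
        self.files.contains_key(path)
    }
    fn abs_path(&self, path: &Path) -> Option<PathBuf> {
        Some(path.to_path_buf()) // in-memory paths are used verbatim
    }
    fn read_file(&self, path: &Path) -> io::Result<String> {
        self.files.get(path).cloned().ok_or_else(|| {
            io::Error::new(io::ErrorKind::NotFound, "no such in-memory file")
        })
    }
}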
// _____________________________________________________________________________
// CodeMap
//
pub struct CodeMap {
pub files: RefCell<Vec<Rc<FileMap>>>,
expansions: RefCell<Vec<ExpnInfo>>,
file_loader: Box<FileLoader>
}
impl CodeMap {
pub fn new() -> CodeMap {
CodeMap {
files: RefCell::new(Vec::new()),
expansions: RefCell::new(Vec::new()),
file_loader: Box::new(RealFileLoader)
}
}
pub fn with_file_loader(file_loader: Box<FileLoader>) -> CodeMap {
CodeMap {
files: RefCell::new(Vec::new()),
expansions: RefCell::new(Vec::new()),
file_loader: file_loader
}
}
pub fn file_exists(&self, path: &Path) -> bool {
self.file_loader.file_exists(path)
}
pub fn load_file(&self, path: &Path) -> io::Result<Rc<FileMap>> {
let src = self.file_loader.read_file(path)?;
let abs_path = self.file_loader.abs_path(path).map(|p| p.to_str().unwrap().to_string());
Ok(self.new_filemap(path.to_str().unwrap().to_string(), abs_path, src))
}
fn next_start_pos(&self) -> usize {
let files = self.files.borrow();
match files.last() {
None => 0,
// Add one so there is some space between files. This lets us distinguish
// positions in the codemap, even in the presence of zero-length files.
Some(last) => last.end_pos.to_usize() + 1,
}
}
/// Creates a new filemap without setting its line information. If you don't
/// intend to set the line information yourself, you should use new_filemap_and_lines.
pub fn new_filemap(&self, filename: FileName, abs_path: Option<FileName>,
mut src: String) -> Rc<FileMap> {
let start_pos = self.next_start_pos();
let mut files = self.files.borrow_mut();
        // Remove UTF-8 BOM if any.
if src.starts_with("\u{feff}") | {
src.drain(..3);
} | conditional_block |
|
codemap.rs | Span { lo: BytePos(lo), hi: self.hi }
}
/// Returns `self` if `self` is not the dummy span, and `other` otherwise.
pub fn substitute_dummy(self, other: Span) -> Span {
if self.source_equal(&DUMMY_SPAN) { other } else { self }
}
pub fn contains(self, other: Span) -> bool {
self.lo <= other.lo && other.hi <= self.hi
}
/// Return true if the spans are equal with regards to the source text.
///
/// Use this instead of `==` when either span could be generated code,
/// and you only care that they point to the same bytes of source text.
pub fn source_equal(&self, other: &Span) -> bool {
self.lo == other.lo && self.hi == other.hi
}
/// Returns `Some(span)`, a union of `self` and `other`, on overlap.
pub fn merge(self, other: Span) -> Option<Span> {
if (self.lo <= other.lo && self.hi > other.lo) ||
(self.lo >= other.lo && self.lo < other.hi) {
Some(Span {
lo: cmp::min(self.lo, other.lo),
hi: cmp::max(self.hi, other.hi),
})
} else {
None
}
}
/// Returns `Some(span)`, where the start is trimmed by the end of `other`
pub fn trim_start(self, other: Span) -> Option<Span> {
if self.hi > other.hi {
Some(Span { lo: cmp::max(self.lo, other.hi), .. self })
} else {
None
}
}
}
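// A worked example (illustrative only) of the overlap rules above, assuming
// `Span` is `Copy + PartialEq` with plain `lo`/`hi` byte positions:
fn demo_merge_trim() {
    let a = Span { lo: BytePos(0), hi: BytePos(10) };
    let b = Span { lo: BytePos(5), hi: BytePos(20) };
    // The spans overlap, so `merge` returns their union [0, 20).
    assert!(a.merge(b) == Some(Span { lo: BytePos(0), hi: BytePos(20) }));
    // `trim_start` clips the start of `b` to the end of `a`: [10, 20).
    assert!(b.trim_start(a) == Some(Span { lo: BytePos(10), hi: BytePos(20) }));
}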
#[derive(Clone, PartialEq, Eq, Hash, Debug, Copy)]
pub struct Spanned<T> {
pub node: T,
pub span: Span,
}
/// A collection of spans. Spans have two orthogonal attributes:
///
/// - they can be *primary spans*. In this case they are the locus of
/// the error, and would be rendered with `^^^`.
/// - they can have a *label*. In this case, the label is written next
/// to the mark in the snippet when we render.
#[derive(Clone)]
pub struct MultiSpan {
primary_spans: Vec<Span>,
span_labels: Vec<(Span, String)>,
}
#[derive(Clone, Debug)]
pub struct SpanLabel {
/// The span we are going to include in the final snippet.
pub span: Span,
/// Is this a primary span? This is the "locus" of the message,
/// and is indicated with a `^^^^` underline, versus `----`.
pub is_primary: bool,
/// What label should we attach to this span (if any)?
pub label: Option<String>,
}
impl MultiSpan {
pub fn new() -> MultiSpan {
MultiSpan {
primary_spans: vec![],
span_labels: vec![]
}
}
pub fn from_span(primary_span: Span) -> MultiSpan {
MultiSpan {
primary_spans: vec![primary_span],
span_labels: vec![]
}
}
pub fn from_spans(vec: Vec<Span>) -> MultiSpan {
MultiSpan {
primary_spans: vec,
span_labels: vec![]
}
}
pub fn push_span_label(&mut self, span: Span, label: String) {
self.span_labels.push((span, label));
}
/// Selects the first primary span (if any)
pub fn primary_span(&self) -> Option<Span> {
self.primary_spans.first().cloned()
}
/// Returns all primary spans.
pub fn primary_spans(&self) -> &[Span] {
&self.primary_spans
}
/// Returns the strings to highlight. We always ensure that there
/// is an entry for each of the primary spans -- for each primary
/// span P, if there is at least one label with span P, we return
/// those labels (marked as primary). But otherwise we return
/// `SpanLabel` instances with empty labels.
pub fn span_labels(&self) -> Vec<SpanLabel> {
let is_primary = |span| self.primary_spans.contains(&span);
let mut span_labels = vec![];
for &(span, ref label) in &self.span_labels {
span_labels.push(SpanLabel {
span: span,
is_primary: is_primary(span),
label: Some(label.clone())
});
}
for &span in &self.primary_spans {
if !span_labels.iter().any(|sl| sl.span == span) {
span_labels.push(SpanLabel {
span: span,
is_primary: true,
label: None
});
}
}
span_labels
}
}
impl From<Span> for MultiSpan {
fn from(span: Span) -> MultiSpan {
MultiSpan::from_span(span)
}
}
// _____________________________________________________________________________
// Loc, LocWithOpt, FileMapAndLine, FileMapAndBytePos
//
/// A source code location used for error reporting
#[derive(Debug)]
pub struct Loc {
/// Information about the original source
pub file: Rc<FileMap>,
/// The (1-based) line number
pub line: usize,
/// The (0-based) column offset
pub col: CharPos
}
/// A source code location used as the result of lookup_char_pos_adj
// Actually, *none* of the clients use the filename *or* file field;
// perhaps they should just be removed.
#[derive(Debug)]
pub struct LocWithOpt {
pub filename: FileName,
pub line: usize,
pub col: CharPos,
pub file: Option<Rc<FileMap>>,
}
// used to be structural records. Better names, anyone?
#[derive(Debug)]
pub struct FileMapAndLine { pub fm: Rc<FileMap>, pub line: usize }
#[derive(Debug)]
pub struct FileMapAndBytePos { pub fm: Rc<FileMap>, pub pos: BytePos }
// _____________________________________________________________________________
// ExpnFormat, NameAndSpan, ExpnInfo, ExpnId
//
/// The source of expansion.
#[derive(Clone, Hash, Debug, PartialEq, Eq)]
pub enum ExpnFormat {
/// e.g. #[derive(...)] <item>
MacroAttribute(Name),
/// e.g. `format!()`
MacroBang(Name),
}
#[derive(Clone, Hash, Debug)]
pub struct NameAndSpan {
/// The format with which the macro was invoked.
pub format: ExpnFormat,
/// Whether the macro is allowed to use #[unstable]/feature-gated
/// features internally without forcing the whole crate to opt-in
/// to them.
pub allow_internal_unstable: bool,
/// The span of the macro definition itself. The macro may not
/// have a sensible definition span (e.g. something defined
/// completely inside libsyntax) in which case this is None.
pub span: Option<Span>
}
impl NameAndSpan {
pub fn name(&self) -> Name {
match self.format {
ExpnFormat::MacroAttribute(s) => s,
ExpnFormat::MacroBang(s) => s,
}
}
}
/// Extra information for tracking spans of macro and syntax sugar expansion
#[derive(Hash, Debug)]
pub struct ExpnInfo {
    /// The location of the actual macro invocation or syntax sugar, e.g.
/// `let x = foo!();` or `if let Some(y) = x {}`
///
/// This may recursively refer to other macro invocations, e.g. if
/// `foo!()` invoked `bar!()` internally, and there was an
/// expression inside `bar!`; the call_site of the expression in
/// the expansion would point to the `bar!` invocation; that
/// call_site span would have its own ExpnInfo, with the call_site
/// pointing to the `foo!` invocation.
pub call_site: Span,
/// Information about the expansion.
pub callee: NameAndSpan
}
// _____________________________________________________________________________
// FileMap, MultiByteChar, FileName, FileLines
//
pub type FileName = String;
/// Identifies an offset of a multi-byte character in a FileMap
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct MultiByteChar {
/// The absolute offset of the character in the CodeMap
pub pos: BytePos,
/// The number of bytes, >=2
pub bytes: usize,
}
/// A single source in the CodeMap.
pub struct FileMap {
/// The name of the file that the source came from, source that doesn't | /// The absolute path of the file that the source came from.
pub abs_path: Option<FileName>,
/// The complete source code
pub src: Option<Rc<String>>,
/// The start position of this source in the CodeMap
pub start_pos: BytePos,
/// The end position of this source in the CodeMap
pub end_pos: BytePos,
    /// Locations of line beginnings in the source code
pub lines: RefCell<Vec<BytePos>>,
/// Locations of multi-byte characters in the source code
pub multibyte_chars: RefCell<Vec<MultiByteChar>>,
}
impl fmt | /// originate from files has names between angle brackets by convention,
/// e.g. `<anon>`
pub name: FileName, | random_line_split |
codemap.rs | Span { lo: BytePos(lo), hi: self.hi }
}
/// Returns `self` if `self` is not the dummy span, and `other` otherwise.
pub fn substitute_dummy(self, other: Span) -> Span {
if self.source_equal(&DUMMY_SPAN) { other } else { self }
}
pub fn contains(self, other: Span) -> bool {
self.lo <= other.lo && other.hi <= self.hi
}
/// Return true if the spans are equal with regards to the source text.
///
/// Use this instead of `==` when either span could be generated code,
/// and you only care that they point to the same bytes of source text.
pub fn source_equal(&self, other: &Span) -> bool {
self.lo == other.lo && self.hi == other.hi
}
/// Returns `Some(span)`, a union of `self` and `other`, on overlap.
pub fn merge(self, other: Span) -> Option<Span> {
if (self.lo <= other.lo && self.hi > other.lo) ||
(self.lo >= other.lo && self.lo < other.hi) {
Some(Span {
lo: cmp::min(self.lo, other.lo),
hi: cmp::max(self.hi, other.hi),
})
} else {
None
}
}
/// Returns `Some(span)`, where the start is trimmed by the end of `other`
pub fn | (self, other: Span) -> Option<Span> {
if self.hi > other.hi {
Some(Span { lo: cmp::max(self.lo, other.hi), .. self })
} else {
None
}
}
}
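// Quick illustrative check (not in the original) of `contains`: a span
// contains another iff it covers it on both ends.
fn demo_contains() {
    let outer = Span { lo: BytePos(0), hi: BytePos(100) };
    let inner = Span { lo: BytePos(10), hi: BytePos(20) };
    assert!(outer.contains(inner));
    assert!(!inner.contains(outer));
}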
#[derive(Clone, PartialEq, Eq, Hash, Debug, Copy)]
pub struct Spanned<T> {
pub node: T,
pub span: Span,
}
/// A collection of spans. Spans have two orthogonal attributes:
///
/// - they can be *primary spans*. In this case they are the locus of
/// the error, and would be rendered with `^^^`.
/// - they can have a *label*. In this case, the label is written next
/// to the mark in the snippet when we render.
#[derive(Clone)]
pub struct MultiSpan {
primary_spans: Vec<Span>,
span_labels: Vec<(Span, String)>,
}
#[derive(Clone, Debug)]
pub struct SpanLabel {
/// The span we are going to include in the final snippet.
pub span: Span,
/// Is this a primary span? This is the "locus" of the message,
/// and is indicated with a `^^^^` underline, versus `----`.
pub is_primary: bool,
/// What label should we attach to this span (if any)?
pub label: Option<String>,
}
impl MultiSpan {
pub fn new() -> MultiSpan {
MultiSpan {
primary_spans: vec![],
span_labels: vec![]
}
}
pub fn from_span(primary_span: Span) -> MultiSpan {
MultiSpan {
primary_spans: vec![primary_span],
span_labels: vec![]
}
}
pub fn from_spans(vec: Vec<Span>) -> MultiSpan {
MultiSpan {
primary_spans: vec,
span_labels: vec![]
}
}
pub fn push_span_label(&mut self, span: Span, label: String) {
self.span_labels.push((span, label));
}
/// Selects the first primary span (if any)
pub fn primary_span(&self) -> Option<Span> {
self.primary_spans.first().cloned()
}
/// Returns all primary spans.
pub fn primary_spans(&self) -> &[Span] {
&self.primary_spans
}
/// Returns the strings to highlight. We always ensure that there
/// is an entry for each of the primary spans -- for each primary
/// span P, if there is at least one label with span P, we return
/// those labels (marked as primary). But otherwise we return
/// `SpanLabel` instances with empty labels.
pub fn span_labels(&self) -> Vec<SpanLabel> {
let is_primary = |span| self.primary_spans.contains(&span);
let mut span_labels = vec![];
for &(span, ref label) in &self.span_labels {
span_labels.push(SpanLabel {
span: span,
is_primary: is_primary(span),
label: Some(label.clone())
});
}
for &span in &self.primary_spans {
if !span_labels.iter().any(|sl| sl.span == span) {
span_labels.push(SpanLabel {
span: span,
is_primary: true,
label: None
});
}
}
span_labels
}
}
impl From<Span> for MultiSpan {
fn from(span: Span) -> MultiSpan {
MultiSpan::from_span(span)
}
}
// _____________________________________________________________________________
// Loc, LocWithOpt, FileMapAndLine, FileMapAndBytePos
//
/// A source code location used for error reporting
#[derive(Debug)]
pub struct Loc {
/// Information about the original source
pub file: Rc<FileMap>,
/// The (1-based) line number
pub line: usize,
/// The (0-based) column offset
pub col: CharPos
}
/// A source code location used as the result of lookup_char_pos_adj
// Actually, *none* of the clients use the filename *or* file field;
// perhaps they should just be removed.
#[derive(Debug)]
pub struct LocWithOpt {
pub filename: FileName,
pub line: usize,
pub col: CharPos,
pub file: Option<Rc<FileMap>>,
}
// used to be structural records. Better names, anyone?
#[derive(Debug)]
pub struct FileMapAndLine { pub fm: Rc<FileMap>, pub line: usize }
#[derive(Debug)]
pub struct FileMapAndBytePos { pub fm: Rc<FileMap>, pub pos: BytePos }
// _____________________________________________________________________________
// ExpnFormat, NameAndSpan, ExpnInfo, ExpnId
//
/// The source of expansion.
#[derive(Clone, Hash, Debug, PartialEq, Eq)]
pub enum ExpnFormat {
/// e.g. #[derive(...)] <item>
MacroAttribute(Name),
/// e.g. `format!()`
MacroBang(Name),
}
#[derive(Clone, Hash, Debug)]
pub struct NameAndSpan {
/// The format with which the macro was invoked.
pub format: ExpnFormat,
/// Whether the macro is allowed to use #[unstable]/feature-gated
/// features internally without forcing the whole crate to opt-in
/// to them.
pub allow_internal_unstable: bool,
/// The span of the macro definition itself. The macro may not
/// have a sensible definition span (e.g. something defined
/// completely inside libsyntax) in which case this is None.
pub span: Option<Span>
}
impl NameAndSpan {
pub fn name(&self) -> Name {
match self.format {
ExpnFormat::MacroAttribute(s) => s,
ExpnFormat::MacroBang(s) => s,
}
}
}
/// Extra information for tracking spans of macro and syntax sugar expansion
#[derive(Hash, Debug)]
pub struct ExpnInfo {
    /// The location of the actual macro invocation or syntax sugar, e.g.
/// `let x = foo!();` or `if let Some(y) = x {}`
///
/// This may recursively refer to other macro invocations, e.g. if
/// `foo!()` invoked `bar!()` internally, and there was an
/// expression inside `bar!`; the call_site of the expression in
/// the expansion would point to the `bar!` invocation; that
/// call_site span would have its own ExpnInfo, with the call_site
/// pointing to the `foo!` invocation.
pub call_site: Span,
/// Information about the expansion.
pub callee: NameAndSpan
}
// _____________________________________________________________________________
// FileMap, MultiByteChar, FileName, FileLines
//
pub type FileName = String;
/// Identifies an offset of a multi-byte character in a FileMap
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct MultiByteChar {
/// The absolute offset of the character in the CodeMap
pub pos: BytePos,
/// The number of bytes, >=2
pub bytes: usize,
}
/// A single source in the CodeMap.
pub struct FileMap {
/// The name of the file that the source came from, source that doesn't
/// originate from files has names between angle brackets by convention,
/// e.g. `<anon>`
pub name: FileName,
/// The absolute path of the file that the source came from.
pub abs_path: Option<FileName>,
/// The complete source code
pub src: Option<Rc<String>>,
/// The start position of this source in the CodeMap
pub start_pos: BytePos,
/// The end position of this source in the CodeMap
pub end_pos: BytePos,
    /// Locations of line beginnings in the source code
pub lines: RefCell<Vec<BytePos>>,
/// Locations of multi-byte characters in the source code
pub multibyte_chars: RefCell<Vec<MultiByteChar>>,
}
| trim_start | identifier_name |
lstm.py |
print(row)
# print(sql_select_Query)
# sql_select_Query="SELECT concat( year,Month) as Date , unit_price as data FROM oildata"
df = pd.read_sql(sql_select_Query, connection);
columnsNamesArr = df.columns.values
listOfColumnNames = list(columnsNamesArr)
print(listOfColumnNames)
print(len(listOfColumnNames))
for y in range(1, len(listOfColumnNames)):
df1=df.iloc[:,[0,y]]
df1[listOfColumnNames[0]] = pd.to_datetime(df.iloc[:, 0], format='%Y-%m')
print( df1[listOfColumnNames[y]][:2])
# df['Date'] = pd.to_datetime(df['Date'])
df1.set_index(listOfColumnNames[0], inplace=True)
#
data = df1.sort_index(ascending=True, axis=0)
from pmdarima.arima import auto_arima
# split into train and test sets
train_size = int(len(df1) * 0.80)
test_size = len(df1) - train_size
train = data[listOfColumnNames[y]][:train_size]
valid = data[listOfColumnNames[y]][train_size:]
valid.is_copy = False  # silence pandas' SettingWithCopyWarning (deprecated attribute)
print(len(train), len(valid))
# training = train[listOfColumnNames[y]]
# validation = valid[listOfColumnNames[y]]
#
# model = auto_arima(training, start_p=1, start_q=1,max_p=3, max_q=3, m=12,start_P=0, seasonal=False,d=1, D=1, trace=True,error_action='ignore',suppress_warnings=True)
# # model = auto_arima(training,seasonal=True,trace=True,error_action='ignore',suppress_warnings=True)
# model.fit(training)
# forecast = model.predict(n_periods=test_size)
# # rms=np.sqrt(np.mean(np.power((np.array(valid['Close'])-np.array(forecast['Prediction'])),2)))
# last_row = df.iloc[-1]
# print(last_row)
#
# last_date = pd.DataFrame()
#
# last_date['Predictions'] = 0
#
# # last_date['Date'] = pd.date_range(last_row['Date'], periods = 12, freq ='M')
# # last_date.set_index('Date',inplace=True)
# # print(last_date)
# # print( last_date.index)
# forecast = pd.DataFrame(forecast,index = valid.index,columns=['Prediction'])
# plt.plot(train['data'])
# plt.plot(valid['data'])
# print(forecast)
# plt.plot(forecast['Prediction'])
# plt.show()
#
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM
from keras.callbacks import ModelCheckpoint, EarlyStopping
print(data)
train_size = int(len(df1) * 0.80)
test_size = len(df1) - train_size
train = data[0:train_size]
valid = data[train_size:]
# converting dataset into x_train and y_train
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(data)
x_train, y_train = [], []
for i in range(6, len(train)):
x_train.append(scaled_data[i - 6:i, 0])
y_train.append(scaled_data[i, 0])
x_train, y_train = np.array(x_train), np.array(y_train)
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
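# Illustrative refactor (not part of the original script) of the windowing
# loop above: each sample is the previous 6 scaled values and the target is
# the next one, reshaped to the (samples, time_steps, features) layout that
# Keras LSTM layers expect. `make_windows` is an invented helper name.
def make_windows(series_2d, window=6):
    X, y = [], []
    for i in range(window, len(series_2d)):
        X.append(series_2d[i - window:i, 0])
        y.append(series_2d[i, 0])
    X, y = np.array(X), np.array(y)
    return np.reshape(X, (X.shape[0], X.shape[1], 1)), y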
def get_val():
|
valX, valY = get_val()
# create and fit the LSTM network
from pandas import DataFrame
train1 = DataFrame()
val1 = DataFrame()
# for i in range(5):
model = Sequential()
model.add(LSTM(units=300, return_sequences=True, input_shape=(x_train.shape[1], 1)))
model.add(LSTM(units=25))
model.add(Dropout(0.15))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
history_callback = model.fit(x_train, y_train, epochs=80, batch_size=12, validation_data=(valX, valY), verbose=1)
loss_history = history_callback.history["loss"]
train1[str(i)] = pd.Series(history_callback.history['loss'])  # NOTE: `i` is left over from the windowing loop; the multi-run `for i in range(5)` above is commented out
val1[str(i)] = pd.Series(history_callback.history['val_loss'])
# plot train and validation loss across multiple runs
plt.plot(train1, color='blue', label='train')
plt.plot(val1, color='orange', label='validation')
plt.title('model train vs validation loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.show()
# predict the validation window, using the past 6 steps of data
inputs = data[len(data) - len(valid) - 6:].values
inputs = inputs.reshape(-1, 1)
inputs = scaler.transform(inputs)
X_test = []
for i in range(6, inputs.shape[0]):
X_test.append(inputs[i - 6:i, 0])
X_test = np.array(X_test)
print("-----------------")
print(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
closing_price = model.predict(X_test)
print("----------------+++==-")
print(len(closing_price))
closing_price = scaler.inverse_transform(closing_price)
rms = np.sqrt(np.mean(np.power((valid - closing_price), 2)))
print(rms)
train = data[:train_size]
valid = data[train_size:]
valid['Predictions'] = closing_price
# last_date['Predictions']=closing_price
plt.plot(train[listOfColumnNames[y]])
plt.plot(valid[[listOfColumnNames[y], 'Predictions']])
plt.show()
# save the model to disk.=
import pickle
dateTimeObj = datetime.now()
date_time = dateTimeObj.strftime("%m-%d-%Y_%H-%M-%S")
filename = "Query_"+str(id)+"_p_value_"+str(y)+"_"+date_time
pickle.dump(model, open(filename, 'wb'))
print(valid[[listOfColumnNames[y], 'Predictions']])
mySql_insert_query = "INSERT INTO prediction_model (sql_id, p_value_"+str(y)+") VALUES ("+str(id)+",'"+filename+"')ON DUPLICATE KEY UPDATE p_value_"+str(y)+"='"+filename+"';"
cursor.execute(mySql_insert_query)
connection.commit()
connection.commit()
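# A safer variant (sketch only) of the INSERT above: bind values with driver
# placeholders instead of string concatenation. Column names cannot be bound,
# so the p_value_<y> column is still formatted in, assuming `y` is a trusted
# integer. `upsert_model` is an invented helper name.
def upsert_model(cursor, connection, sql_id, col_index, model_file):
    col = "p_value_%d" % col_index
    query = ("INSERT INTO prediction_model (sql_id, " + col + ") "
             "VALUES (%s, %s) ON DUPLICATE KEY UPDATE " + col + " = %s")
    cursor.execute(query, (sql_id, model_file, model_file))
    connection.commit()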
###############
# train = df
# print(train)
# from keras.preprocessing.sequence import TimeseriesGenerator
# scaler.fit(train)
# train = scaler.transform(train)
# n_input = 6
# n_features = 1
# generator = TimeseriesGenerator(train, train, length=n_input, batch_size=12)
# model.fit_generator(generator,epochs=30)
# pred_list = []
# batch = train[-n_input:].reshape((1, n_input, n_features))
# for i in range(n_input):
# pred_list.append(model.predict(batch)[0])
# batch = np.append(batch[:,1:,:],[[pred_list[i]]],axis=1)
#
#
# from pandas.tseries.offsets import DateOffset
# add_dates = [df.index[-1] + DateOffset(months=x) for x in range(0,7) ]
# future_dates = pd.DataFrame(index=add_dates[1:],columns=df.columns)
#
# df_predict = pd.DataFrame(scaler.inverse_transform(pred_list),
# index=future_dates[-n_input:].index, columns=['Prediction'])
#
# df_proj = pd.concat([df,df_predict], axis=1)
#
# print(df_proj)
#
# plt.figure(figsize=(20, 5))
# plt.plot(df_proj.index, df_proj['data'])
# plt.plot(df_proj.index, df_proj['Prediction'], color='r')
# plt.legend(loc='best', fontsize='xx-large')
# plt.xticks(fontsize=18)
# plt.yticks(fontsize=16)
# plt.show()
# #
# # scaler = MinMaxScaler(feature_range=(0, 1))
# # train = scaler.fit_transform(dataset)
# # scaler.fit(train)
# # train = scaler.transform(train)
# # n_input = 6
# # n_features = 1
# # from keras.preprocessing.sequence import TimeseriesGenerator
# #
# # pred_list = []
# #
# # batch = train[-n_input:].reshape((1, n_input, n_features))
# #
# # for i in range(n_input):
# # pred_list.append(model.predict(batch)[0])
# # batch = np.append(batch[:, | X1, y1 = [], []
print(train_size + 6)
print(len(df))
for i in range(train_size + 6, len(df)):
X1.append(scaled_data[i - 6:i, 0])
y1.append(scaled_data[i, 0])
X1, y1 = np.array(X1), np.array(y1)
print(X1)
print(len(X1))
X1 = np.reshape(X1, (X1.shape[0], X1.shape[1], 1))
return X1, y1 | identifier_body |
lstm.py | )
# # print( last_date.index)
# forecast = pd.DataFrame(forecast,index = valid.index,columns=['Prediction'])
# plt.plot(train['data'])
# plt.plot(valid['data'])
# print(forecast)
# plt.plot(forecast['Prediction'])
# plt.show()
#
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM
from keras.callbacks import ModelCheckpoint, EarlyStopping
print(data)
train_size = int(len(df1) * 0.80)
test_size = len(df1) - train_size
train = data[0:train_size]
valid = data[train_size:]
# converting dataset into x_train and y_train
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(data)
x_train, y_train = [], []
for i in range(6, len(train)):
x_train.append(scaled_data[i - 6:i, 0])
y_train.append(scaled_data[i, 0])
x_train, y_train = np.array(x_train), np.array(y_train)
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
def get_val():
X1, y1 = [], []
print(train_size + 6)
print(len(df))
for i in range(train_size + 6, len(df)):
X1.append(scaled_data[i - 6:i, 0])
y1.append(scaled_data[i, 0])
X1, y1 = np.array(X1), np.array(y1)
print(X1)
print(len(X1))
X1 = np.reshape(X1, (X1.shape[0], X1.shape[1], 1))
return X1, y1
valX, valY = get_val()
# create and fit the LSTM network
from pandas import DataFrame
train1 = DataFrame()
val1 = DataFrame()
# for i in range(5):
model = Sequential()
model.add(LSTM(units=300, return_sequences=True, input_shape=(x_train.shape[1], 1)))
model.add(LSTM(units=25))
model.add(Dropout(0.15))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
history_callback = model.fit(x_train, y_train, epochs=80, batch_size=12, validation_data=(valX, valY), verbose=1)
loss_history = history_callback.history["loss"]
train1[str(i)] = pd.Series(history_callback.history['loss'])  # NOTE: `i` is left over from the windowing loop; the multi-run `for i in range(5)` above is commented out
val1[str(i)] = pd.Series(history_callback.history['val_loss'])
# plot train and validation loss across multiple runs
plt.plot(train1, color='blue', label='train')
plt.plot(val1, color='orange', label='validation')
plt.title('model train vs validation loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.show()
# predict the validation window, using the past 6 steps of data
inputs = data[len(data) - len(valid) - 6:].values
inputs = inputs.reshape(-1, 1)
inputs = scaler.transform(inputs)
X_test = []
for i in range(6, inputs.shape[0]):
X_test.append(inputs[i - 6:i, 0])
X_test = np.array(X_test)
print("-----------------")
print(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
closing_price = model.predict(X_test)
print("----------------+++==-")
print(len(closing_price))
closing_price = scaler.inverse_transform(closing_price)
rms = np.sqrt(np.mean(np.power((valid - closing_price), 2)))
print(rms)
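# Cross-check sketch (not in the original): the manual numpy RMSE above
# should agree with sklearn's mean_squared_error.
def rmse(y_true, y_pred):
    from sklearn.metrics import mean_squared_error
    return np.sqrt(mean_squared_error(y_true, y_pred))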
train = data[:train_size]
valid = data[train_size:]
valid['Predictions'] = closing_price
# last_date['Predictions']=closing_price
plt.plot(train[listOfColumnNames[y]])
plt.plot(valid[[listOfColumnNames[y], 'Predictions']])
plt.show()
# save the model to disk.=
import pickle
dateTimeObj = datetime.now()
date_time = dateTimeObj.strftime("%m-%d-%Y_%H-%M-%S")
filename = "Query_"+str(id)+"_p_value_"+str(y)+"_"+date_time
pickle.dump(model, open(filename, 'wb'))
print(valid[[listOfColumnNames[y], 'Predictions']])
mySql_insert_query = "INSERT INTO prediction_model (sql_id, p_value_"+str(y)+") VALUES ("+str(id)+",'"+filename+"')ON DUPLICATE KEY UPDATE p_value_"+str(y)+"='"+filename+"';"
cursor.execute(mySql_insert_query)
connection.commit()
connection.commit()
###############
# train = df
# print(train)
# from keras.preprocessing.sequence import TimeseriesGenerator
# scaler.fit(train)
# train = scaler.transform(train)
# n_input = 6
# n_features = 1
# generator = TimeseriesGenerator(train, train, length=n_input, batch_size=12)
# model.fit_generator(generator,epochs=30)
# pred_list = []
# batch = train[-n_input:].reshape((1, n_input, n_features))
# for i in range(n_input):
# pred_list.append(model.predict(batch)[0])
# batch = np.append(batch[:,1:,:],[[pred_list[i]]],axis=1)
#
#
# from pandas.tseries.offsets import DateOffset
# add_dates = [df.index[-1] + DateOffset(months=x) for x in range(0,7) ]
# future_dates = pd.DataFrame(index=add_dates[1:],columns=df.columns)
#
# df_predict = pd.DataFrame(scaler.inverse_transform(pred_list),
# index=future_dates[-n_input:].index, columns=['Prediction'])
#
# df_proj = pd.concat([df,df_predict], axis=1)
#
# print(df_proj)
#
# plt.figure(figsize=(20, 5))
# plt.plot(df_proj.index, df_proj['data'])
# plt.plot(df_proj.index, df_proj['Prediction'], color='r')
# plt.legend(loc='best', fontsize='xx-large')
# plt.xticks(fontsize=18)
# plt.yticks(fontsize=16)
# plt.show()
# #
# # scaler = MinMaxScaler(feature_range=(0, 1))
# # train = scaler.fit_transform(dataset)
# # scaler.fit(train)
# # train = scaler.transform(train)
# # n_input = 6
# # n_features = 1
# # from keras.preprocessing.sequence import TimeseriesGenerator
# #
# # pred_list = []
# #
# # batch = train[-n_input:].reshape((1, n_input, n_features))
# #
# # for i in range(n_input):
# # pred_list.append(model.predict(batch)[0])
# # batch = np.append(batch[:,1:,:],[[pred_list[i]]],axis=1)
# #
# # df_predict = pd.DataFrame(scaler.inverse_transform(pred_list),index=df[-n_input:].index, columns=['Prediction'])
# # df_test = pd.concat([df,df_predict], axis=1)
# #
# #
# # generator = TimeseriesGenerator(train, train, length=n_input, batch_size=6)
# # model.fit_generator(generator,epochs=25)
# # pred_list = []
# # batch = train[-n_input:].reshape((1, n_input, n_features))
# # for i in range(n_input):
# # pred_list.append(model.predict(batch)[0])
# # batch = np.append(batch[:,1:,:],[[pred_list[i]]],axis=1)
# #
# # from pandas.tseries.offsets import DateOffset
# # add_dates = [df.index[-1] + DateOffset(months=x) for x in range(0,7) ]
# # future_dates = pd.DataFrame(index=add_dates[1:],columns=df.columns)
# #
# #
# # df_predict = pd.DataFrame(scaler.inverse_transform(pred_list),
# # index=future_dates[-n_input:].index, columns=['Prediction'])
# #
# # valid = pd.concat([df,df_predict], axis=1)
# #
# # print(valid)
#
# plt.plot(df['data'])
# plt.plot(valid['Prediction'])
# plt.show()
# return training data
def get_train():
X1, y1 = list(), list()
for i in range(6, len(train)):
X1.append(scaled_data[i - 6:i, 0])
y1.append(scaled_data[i, 0])
X1, y1 = np.array(X1), np.array(y1)
print(X1)
print(len(X1))
X1 = np.reshape(X1, (X1.shape[0], X1.shape[1], 1))
return X1, y1
# return validation data
def get_val():
X1, y1 = [], []
print(train_size + 6)
print(len(df))
for i in range(train_size + 6, len(df)):
| X1.append(scaled_data[i - 6:i, 0])
y1.append(scaled_data[i, 0]) | conditional_block |
|
lstm.py | ,max_p=3, max_q=3, m=12,start_P=0, seasonal=False,d=1, D=1, trace=True,error_action='ignore',suppress_warnings=True)
# # model = auto_arima(training,seasonal=True,trace=True,error_action='ignore',suppress_warnings=True)
# model.fit(training)
# forecast = model.predict(n_periods=test_size)
# # rms=np.sqrt(np.mean(np.power((np.array(valid['Close'])-np.array(forecast['Prediction'])),2)))
# last_row = df.iloc[-1]
# print(last_row)
#
# last_date = pd.DataFrame()
#
# last_date['Predictions'] = 0
#
# # last_date['Date'] = pd.date_range(last_row['Date'], periods = 12, freq ='M')
# # last_date.set_index('Date',inplace=True)
# # print(last_date)
# # print( last_date.index)
# forecast = pd.DataFrame(forecast,index = valid.index,columns=['Prediction'])
# plt.plot(train['data'])
# plt.plot(valid['data'])
# print(forecast)
# plt.plot(forecast['Prediction'])
# plt.show()
#
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM
from keras.callbacks import ModelCheckpoint, EarlyStopping
print(data)
train_size = int(len(df1) * 0.80)
test_size = len(df1) - train_size
train = data[0:train_size]
valid = data[train_size:]
# converting dataset into x_train and y_train
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(data)
x_train, y_train = [], []
for i in range(6, len(train)):
x_train.append(scaled_data[i - 6:i, 0])
y_train.append(scaled_data[i, 0])
x_train, y_train = np.array(x_train), np.array(y_train)
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
def get_val():
X1, y1 = [], []
print(train_size + 6)
print(len(df))
for i in range(train_size + 6, len(df)):
X1.append(scaled_data[i - 6:i, 0])
y1.append(scaled_data[i, 0])
X1, y1 = np.array(X1), np.array(y1)
print(X1)
print(len(X1))
X1 = np.reshape(X1, (X1.shape[0], X1.shape[1], 1))
return X1, y1
valX, valY = get_val()
# create and fit the LSTM network
from pandas import DataFrame
train1 = DataFrame()
val1 = DataFrame()
# for i in range(5):
model = Sequential()
model.add(LSTM(units=300, return_sequences=True, input_shape=(x_train.shape[1], 1)))
model.add(LSTM(units=25))
model.add(Dropout(0.15))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
history_callback = model.fit(x_train, y_train, epochs=80, batch_size=12, validation_data=(valX, valY), verbose=1)
loss_history = history_callback.history["loss"]
train1[str(i)] = pd.Series(history_callback.history['loss'])  # NOTE: `i` is left over from the windowing loop; the multi-run `for i in range(5)` above is commented out
val1[str(i)] = pd.Series(history_callback.history['val_loss'])
# plot train and validation loss across multiple runs
plt.plot(train1, color='blue', label='train')
plt.plot(val1, color='orange', label='validation')
plt.title('model train vs validation loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.show()
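# ModelCheckpoint and EarlyStopping are imported above but never wired up. A
# minimal sketch of passing them to fit(); the checkpoint file name is
# illustrative.
def fit_with_callbacks(model, x, y, val_data):
    callbacks = [
        EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True),
        ModelCheckpoint('best_lstm.h5', monitor='val_loss', save_best_only=True),
    ]
    return model.fit(x, y, epochs=80, batch_size=12,
                     validation_data=val_data, callbacks=callbacks, verbose=1)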
# predict the validation window, using the past 6 steps of data
inputs = data[len(data) - len(valid) - 6:].values
inputs = inputs.reshape(-1, 1)
inputs = scaler.transform(inputs)
X_test = []
for i in range(6, inputs.shape[0]):
X_test.append(inputs[i - 6:i, 0])
X_test = np.array(X_test)
print("-----------------")
print(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
closing_price = model.predict(X_test)
print("----------------+++==-")
print(len(closing_price))
closing_price = scaler.inverse_transform(closing_price)
rms = np.sqrt(np.mean(np.power((valid - closing_price), 2)))
print(rms)
train = data[:train_size]
valid = data[train_size:]
valid['Predictions'] = closing_price
# last_date['Predictions']=closing_price
plt.plot(train[listOfColumnNames[y]])
plt.plot(valid[[listOfColumnNames[y], 'Predictions']])
plt.show()
# save the model to disk.=
import pickle
dateTimeObj = datetime.now()
date_time = dateTimeObj.strftime("%m-%d-%Y_%H-%M-%S")
filename = "Query_"+str(id)+"_p_value_"+str(y)+"_"+date_time
pickle.dump(model, open(filename, 'wb'))
print(valid[[listOfColumnNames[y], 'Predictions']])
mySql_insert_query = "INSERT INTO prediction_model (sql_id, p_value_"+str(y)+") VALUES ("+str(id)+",'"+filename+"')ON DUPLICATE KEY UPDATE p_value_"+str(y)+"='"+filename+"';"
cursor.execute(mySql_insert_query)
connection.commit()
connection.commit()
###############
# train = df
# print(train)
# from keras.preprocessing.sequence import TimeseriesGenerator
# scaler.fit(train)
# train = scaler.transform(train)
# n_input = 6
# n_features = 1
# generator = TimeseriesGenerator(train, train, length=n_input, batch_size=12)
# model.fit_generator(generator,epochs=30)
# pred_list = []
# batch = train[-n_input:].reshape((1, n_input, n_features))
# for i in range(n_input):
# pred_list.append(model.predict(batch)[0])
# batch = np.append(batch[:,1:,:],[[pred_list[i]]],axis=1)
#
#
# from pandas.tseries.offsets import DateOffset
# add_dates = [df.index[-1] + DateOffset(months=x) for x in range(0,7) ]
# future_dates = pd.DataFrame(index=add_dates[1:],columns=df.columns)
#
# df_predict = pd.DataFrame(scaler.inverse_transform(pred_list),
# index=future_dates[-n_input:].index, columns=['Prediction'])
#
# df_proj = pd.concat([df,df_predict], axis=1)
#
# print(df_proj)
#
# plt.figure(figsize=(20, 5))
# plt.plot(df_proj.index, df_proj['data'])
# plt.plot(df_proj.index, df_proj['Prediction'], color='r')
# plt.legend(loc='best', fontsize='xx-large')
# plt.xticks(fontsize=18)
# plt.yticks(fontsize=16)
# plt.show()
# #
# # scaler = MinMaxScaler(feature_range=(0, 1))
# # train = scaler.fit_transform(dataset)
# # scaler.fit(train)
# # train = scaler.transform(train)
# # n_input = 6
# # n_features = 1
# # from keras.preprocessing.sequence import TimeseriesGenerator
# #
# # pred_list = []
# #
# # batch = train[-n_input:].reshape((1, n_input, n_features))
# #
# # for i in range(n_input):
# # pred_list.append(model.predict(batch)[0])
# # batch = np.append(batch[:,1:,:],[[pred_list[i]]],axis=1)
# #
# # df_predict = pd.DataFrame(scaler.inverse_transform(pred_list),index=df[-n_input:].index, columns=['Prediction'])
# # df_test = pd.concat([df,df_predict], axis=1)
# #
# #
# # generator = TimeseriesGenerator(train, train, length=n_input, batch_size=6)
# # model.fit_generator(generator,epochs=25)
# # pred_list = []
# # batch = train[-n_input:].reshape((1, n_input, n_features))
# # for i in range(n_input):
# # pred_list.append(model.predict(batch)[0])
# # batch = np.append(batch[:,1:,:],[[pred_list[i]]],axis=1)
# #
# # from pandas.tseries.offsets import DateOffset
# # add_dates = [df.index[-1] + DateOffset(months=x) for x in range(0,7) ]
# # future_dates = pd.DataFrame(index=add_dates[1:],columns=df.columns)
# #
# #
# # df_predict = pd.DataFrame(scaler.inverse_transform(pred_list),
# # index=future_dates[-n_input:].index, columns=['Prediction'])
# #
# # valid = pd.concat([df,df_predict], axis=1)
# #
# # print(valid)
#
# plt.plot(df['data'])
# plt.plot(valid['Prediction'])
# plt.show()
# return training data
def | get_train | identifier_name |