# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61970.Core.PowerSystemResource import PowerSystemResource
class PowerCutZone(PowerSystemResource):
"""An area or zone of the power system which is used for load shedding purposes.
"""
def __init__(self, cutLevel1=0.0, cutLevel2=0.0, EnergyConsumers=None, *args, **kw_args):
"""Initialises a new 'PowerCutZone' instance.
@param cutLevel1: First level (amount) of load to cut as a percentage of total zone load
@param cutLevel2: Second level (amount) of load to cut as a percentage of total zone load
@param EnergyConsumers: An energy consumer is assigned to a power cut zone
"""
#: First level (amount) of load to cut as a percentage of total zone load
self.cutLevel1 = cutLevel1
#: Second level (amount) of load to cut as a percentage of total zone load
self.cutLevel2 = cutLevel2
self._EnergyConsumers = []
self.EnergyConsumers = [] if EnergyConsumers is None else EnergyConsumers
super(PowerCutZone, self).__init__(*args, **kw_args)
_attrs = ["cutLevel1", "cutLevel2"]
_attr_types = {"cutLevel1": float, "cutLevel2": float}
_defaults = {"cutLevel1": 0.0, "cutLevel2": 0.0}
_enums = {}
_refs = ["EnergyConsumers"]
_many_refs = ["EnergyConsumers"]
def getEnergyConsumers(self):
"""An energy consumer is assigned to a power cut zone
"""
return self._EnergyConsumers
def setEnergyConsumers(self, value):
for x in self._EnergyConsumers:
x.PowerCutZone = None
for y in value:
y._PowerCutZone = self
self._EnergyConsumers = value
EnergyConsumers = property(getEnergyConsumers, setEnergyConsumers)
def addEnergyConsumers(self, *EnergyConsumers):
for obj in EnergyConsumers:
obj.PowerCutZone = self
def removeEnergyConsumers(self, *EnergyConsumers):
for obj in EnergyConsumers:
obj.PowerCutZone = None
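# A minimal usage sketch (not part of the original file). It assumes an
# EnergyConsumer-like object whose PowerCutZone property mirrors this
# association, as setEnergyConsumers() above requires:
#
#     zone = PowerCutZone(cutLevel1=10.0, cutLevel2=25.0)
#     zone.addEnergyConsumers(consumer_a, consumer_b)  # attaches both consumers
#     assert consumer_a.PowerCutZone is zone
#     zone.removeEnergyConsumers(consumer_a)           # detaches consumer_a only
#     zone.EnergyConsumers = []                        # detaches any remaining consumers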
| rwl/PyCIM | CIM14/IEC61970/LoadModel/PowerCutZone.py | Python | mit | 3,088 |
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history www.reportlab.co.uk/rl-cgi/viewcvs.cgi/rlextra/graphics/Csrc/renderPM/renderP.py
__version__=''' $Id: renderPM.py 3091 2007-05-23 16:12:00Z rgbecker $ '''
"""Usage:
from reportlab.graphics import renderPM
renderPM.drawToFile(drawing,filename,fmt='GIF',configPIL={....})
Other functions let you create a PM drawing as string or into a PM buffer.
Execute the script to see some test drawings."""
from reportlab.graphics.shapes import *
from reportlab.graphics.renderbase import StateTracker, getStateDelta, renderScaledDrawing
from reportlab.pdfbase.pdfmetrics import getFont, unicode2T1
from math import sin, cos, pi, ceil
from reportlab.lib.utils import getStringIO, open_and_read
from reportlab import rl_config
class RenderPMError(Exception):
pass
import string, os, sys
try:
import _renderPM
except ImportError, errMsg:
raise ImportError, "No module named _renderPM\n" + \
(str(errMsg)!='No module named _renderPM' and "it may be the wrong version or badly installed!" or
"see http://www.reportlab.org/rl_addons.html")
from types import TupleType, ListType, StringType
_SeqTypes = (TupleType,ListType)
def _getImage():
try:
from PIL import Image
except ImportError:
import Image
return Image
def Color2Hex(c):
#assert isinstance(colorobj, colors.Color) #these checks don't work well RGB
if c: return ((0xFF&int(255*c.red)) << 16) | ((0xFF&int(255*c.green)) << 8) | (0xFF&int(255*c.blue))
return c
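# Worked example (a sketch; assumes a reportlab colors.Color-like object with
# red/green/blue components in 0..1):
#     c = colors.Color(1.0, 0.5, 0.0)
#     Color2Hex(c) -> (0xFF << 16) | (0x7F << 8) | 0x00 == 0xFF7F00
# Falsy inputs (e.g. None meaning "no color") are passed through unchanged.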
# the main entry point for users...
def draw(drawing, canvas, x, y, showBoundary=rl_config._unset_):
"""As it says"""
R = _PMRenderer()
R.draw(renderScaledDrawing(drawing), canvas, x, y, showBoundary=showBoundary)
from reportlab.graphics.renderbase import Renderer
class _PMRenderer(Renderer):
"""This draws onto a pix map image. It needs to be a class
rather than a function, as some image-specific state tracking is
needed outside of the state info in the SVG model."""
def __init__(self):
self._tracker = StateTracker()
def pop(self):
self._tracker.pop()
self.applyState()
def push(self,node):
deltas = getStateDelta(node)
self._tracker.push(deltas)
self.applyState()
def applyState(self):
s = self._tracker.getState()
self._canvas.ctm = s['ctm']
self._canvas.strokeWidth = s['strokeWidth']
self._canvas.strokeColor = Color2Hex(s['strokeColor'])
self._canvas.lineCap = s['strokeLineCap']
self._canvas.lineJoin = s['strokeLineJoin']
da = s['strokeDashArray']
da = da and (0,da) or None
self._canvas.dashArray = da
self._canvas.fillColor = Color2Hex(s['fillColor'])
self._canvas.setFont(s['fontName'], s['fontSize'])
def initState(self,x,y):
deltas = STATE_DEFAULTS.copy()
deltas['transform'] = self._canvas._baseCTM[0:4]+(x,y)
self._tracker.push(deltas)
self.applyState()
def drawNode(self, node):
"""This is the recursive method called for each node
in the tree"""
#apply state changes
self.push(node)
#draw the object, or recurse
self.drawNodeDispatcher(node)
# restore the state
self.pop()
def drawRect(self, rect):
c = self._canvas
if rect.rx == rect.ry == 0:
#plain old rectangle, draw clockwise (x-axis to y-axis) direction
c.rect(rect.x,rect.y, rect.width, rect.height)
else:
c.roundRect(rect.x,rect.y, rect.width, rect.height, rect.rx, rect.ry)
def drawLine(self, line):
self._canvas.line(line.x1,line.y1,line.x2,line.y2)
def drawImage(self, image):
if image.path and os.path.exists(image.path):
if type(image.path) is type(''):
im = _getImage().open(image.path).convert('RGB')
else:
im = image.path.convert('RGB')
srcW, srcH = im.size
dstW, dstH = image.width, image.height
if dstW is None: dstW = srcW
if dstH is None: dstH = srcH
self._canvas._aapixbuf(
image.x, image.y, dstW, dstH,
im.tostring(), srcW, srcH, 3,
)
def drawCircle(self, circle):
c = self._canvas
c.circle(circle.cx,circle.cy, circle.r)
c.fillstrokepath()
def drawPolyLine(self, polyline, _doClose=0):
P = polyline.points
assert len(P) >= 2, 'Polyline must have 1 or more points'
c = self._canvas
c.pathBegin()
c.moveTo(P[0], P[1])
for i in range(2, len(P), 2):
c.lineTo(P[i], P[i+1])
if _doClose:
c.pathClose()
c.pathFill()
c.pathStroke()
def drawEllipse(self, ellipse):
c=self._canvas
c.ellipse(ellipse.cx, ellipse.cy, ellipse.rx,ellipse.ry)
c.fillstrokepath()
def drawPolygon(self, polygon):
self.drawPolyLine(polygon,_doClose=1)
def drawString(self, stringObj):
canv = self._canvas
fill = canv.fillColor
if fill is not None:
S = self._tracker.getState()
text_anchor = S['textAnchor']
fontName = S['fontName']
fontSize = S['fontSize']
text = stringObj.text
x = stringObj.x
y = stringObj.y
if not text_anchor in ['start','inherited']:
textLen = stringWidth(text, fontName,fontSize)
if text_anchor=='end':
x = x-textLen
elif text_anchor=='middle':
x = x - textLen/2
else:
raise ValueError, 'bad value for textAnchor '+str(text_anchor)
canv.drawString(x,y,text,_fontInfo=(fontName,fontSize))
def drawPath(self, path):
c = self._canvas
if path is EmptyClipPath:
del c._clipPaths[-1]
if c._clipPaths:
P = c._clipPaths[-1]
icp = P.isClipPath
P.isClipPath = 1
self.drawPath(P)
P.isClipPath = icp
else:
c.clipPathClear()
return
c.pathBegin()
drawFuncs = (c.moveTo, c.lineTo, c.curveTo, c.pathClose)
from reportlab.graphics.shapes import _renderPath
isClosed = _renderPath(path, drawFuncs)
if path.isClipPath:
c.clipPathSet()
c._clipPaths.append(path)
else:
if isClosed: c.pathFill()
c.pathStroke()
def _setFont(gs,fontName,fontSize):
try:
gs.setFont(fontName,fontSize)
except _renderPM.Error, errMsg:
if errMsg.args[0]!="Can't find font!": raise
#here's where we try to add a font to the canvas
try:
f = getFont(fontName)
if _renderPM._version<='0.98': #added reader arg in 0.99
_renderPM.makeT1Font(fontName,f.face.findT1File(),f.encoding.vector)
else:
_renderPM.makeT1Font(fontName,f.face.findT1File(),f.encoding.vector,open_and_read)
except:
s1, s2 = map(str,sys.exc_info()[:2])
raise RenderPMError, "Can't setFont(%s) missing the T1 files?\nOriginally %s: %s" % (fontName,s1,s2)
gs.setFont(fontName,fontSize)
def _convert2pilp(im):
Image = _getImage()
return im.convert("P", dither=Image.NONE, palette=Image.ADAPTIVE)
def _saveAsPICT(im,fn,fmt,transparent=None):
im = _convert2pilp(im)
cols, rows = im.size
#s = _renderPM.pil2pict(cols,rows,im.tostring(),im.im.getpalette(),transparent is not None and Color2Hex(transparent) or -1)
s = _renderPM.pil2pict(cols,rows,im.tostring(),im.im.getpalette())
if not hasattr(fn,'write'):
open(os.path.splitext(fn)[0]+'.'+string.lower(fmt),'wb').write(s)
if os.name=='mac':
from reportlab.lib.utils import markfilename
markfilename(fn,ext='PICT')
else:
fn.write(s)
BEZIER_ARC_MAGIC = 0.5522847498 #constant for drawing circular arcs w/ Beziers
class PMCanvas:
def __init__(self,w,h,dpi=72,bg=0xffffff,configPIL=None):
'''configPIL dict is passed to image save method'''
scale = dpi/72.0
w = int(w*scale+0.5)
h = int(h*scale+0.5)
self.__dict__['_gs'] = _renderPM.gstate(w,h,bg=bg)
self.__dict__['_bg'] = bg
self.__dict__['_baseCTM'] = (scale,0,0,scale,0,0)
self.__dict__['_clipPaths'] = []
self.__dict__['configPIL'] = configPIL
self.__dict__['_dpi'] = dpi
self.ctm = self._baseCTM
def _drawTimeResize(self,w,h,bg=None):
if bg is None: bg = self._bg
self._drawing.width, self._drawing.height = w, h
A = {'ctm':None, 'strokeWidth':None, 'strokeColor':None, 'lineCap':None, 'lineJoin':None, 'dashArray':None, 'fillColor':None}
gs = self._gs
fN,fS = gs.fontName, gs.fontSize
for k in A.keys():
A[k] = getattr(gs,k)
del gs, self._gs
gs = self.__dict__['_gs'] = _renderPM.gstate(w,h,bg=bg)
for k in A.keys():
setattr(self,k,A[k])
gs.setFont(fN,fS)
def toPIL(self):
im = _getImage().new('RGB', size=(self._gs.width, self._gs.height))
im.fromstring(self._gs.pixBuf)
return im
def saveToFile(self,fn,fmt=None):
im = self.toPIL()
if fmt is None:
if type(fn) is not StringType:
raise ValueError, "Invalid type '%s' for fn when fmt is None" % type(fn)
fmt = os.path.splitext(fn)[1]
if fmt.startswith('.'): fmt = fmt[1:]
configPIL = self.configPIL or {}
fmt = string.upper(fmt)
if fmt in ('GIF','TIFFP'):
im = _convert2pilp(im)
if fmt=='TIFFP': fmt='TIFF'
if fmt in ('PCT','PICT'):
return _saveAsPICT(im,fn,fmt,transparent=configPIL.get('transparent',None))
elif fmt in ['PNG','TIFF','BMP', 'PPM', 'TIF']:
if fmt=='TIF': fmt = 'TIFF'
if fmt=='PNG':
try:
from PIL import PngImagePlugin
except ImportError:
import PngImagePlugin
elif fmt=='BMP':
try:
from PIL import BmpImagePlugin
except ImportError:
import BmpImagePlugin
elif fmt in ('JPG','JPEG'):
fmt = 'JPEG'
elif fmt in ('GIF',):
pass
else:
raise RenderPMError,"Unknown image kind %s" % fmt
if fmt=='TIFF':
tc = configPIL.get('transparent',None)
if tc:
from PIL import ImageChops, Image
T = 768*[0]
for o, c in zip((0,256,512), tc.bitmap_rgb()):
T[o+c] = 255
#if type(fn) is type(''): ImageChops.invert(im.point(T).convert('L').point(255*[0]+[255])).save(fn+'_mask.gif','GIF')
im = Image.merge('RGBA', im.split()+(ImageChops.invert(im.point(T).convert('L').point(255*[0]+[255])),))
#if type(fn) is type(''): im.save(fn+'_masked.gif','GIF')
for a,d in ('resolution',self._dpi),('resolution unit','inch'):
configPIL[a] = configPIL.get(a,d)
apply(im.save,(fn,fmt),configPIL)
if not hasattr(fn,'write') and os.name=='mac':
from reportlab.lib.utils import markfilename
markfilename(fn,ext=fmt)
def saveToString(self,fmt='GIF'):
s = getStringIO()
self.saveToFile(s,fmt=fmt)
return s.getvalue()
def _saveToBMP(self,f):
'''
Niki Spahiev, <[email protected]>, asserts that this is a respectable way to get BMP without PIL
f is a file like object to which the BMP is written
'''
import struct
gs = self._gs
pix, width, height = gs.pixBuf, gs.width, gs.height
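        # '=2sLLLLLLhh24x' packs the 14-byte BITMAPFILEHEADER ('BM', file size,
        # reserved dword, offset 54 to the pixel data) followed by the start of
        # a 40-byte BITMAPINFOHEADER (size 40, width, height, 1 plane, 24 bpp);
        # the 24 pad bytes zero the remaining info-header fields. Rows are then
        # written bottom-up, as the BMP format stores them.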
f.write(struct.pack('=2sLLLLLLhh24x','BM',len(pix)+54,0,54,40,width,height,1,24))
rowb = width * 3
for o in range(len(pix),0,-rowb):
f.write(pix[o-rowb:o])
f.write( '\0' * 14 )
def setFont(self,fontName,fontSize,leading=None):
_setFont(self._gs,fontName,fontSize)
def __setattr__(self,name,value):
setattr(self._gs,name,value)
def __getattr__(self,name):
return getattr(self._gs,name)
def fillstrokepath(self,stroke=1,fill=1):
if fill: self.pathFill()
if stroke: self.pathStroke()
def _bezierArcSegmentCCW(self, cx,cy, rx,ry, theta0, theta1):
"""compute the control points for a bezier arc with theta1-theta0 <= 90.
Points are computed for an arc with angle theta increasing in the
counter-clockwise (CCW) direction. returns a tuple with starting point
and 3 control points of a cubic bezier curve for the curvto opertator"""
# Requires theta1 - theta0 <= 90 for a good approximation
assert abs(theta1 - theta0) <= 90
cos0 = cos(pi*theta0/180.0)
sin0 = sin(pi*theta0/180.0)
x0 = cx + rx*cos0
y0 = cy + ry*sin0
cos1 = cos(pi*theta1/180.0)
sin1 = sin(pi*theta1/180.0)
x3 = cx + rx*cos1
y3 = cy + ry*sin1
dx1 = -rx * sin0
dy1 = ry * cos0
#from pdfgeom
halfAng = pi*(theta1-theta0)/(2.0 * 180.0)
k = abs(4.0 / 3.0 * (1.0 - cos(halfAng) ) /(sin(halfAng)) )
x1 = x0 + dx1 * k
y1 = y0 + dy1 * k
dx2 = -rx * sin1
dy2 = ry * cos1
x2 = x3 - dx2 * k
y2 = y3 - dy2 * k
return ((x0,y0), ((x1,y1), (x2,y2), (x3,y3)) )
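    # Sanity check for the kappa above: for a quarter arc (theta1-theta0 == 90),
    # halfAng = pi/4 and k = (4/3)*(1 - cos(pi/4))/sin(pi/4) = 4*(sqrt(2)-1)/3
    # ~= 0.5522847498, which is exactly BEZIER_ARC_MAGIC defined earlier.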
def bezierArcCCW(self, cx,cy, rx,ry, theta0, theta1):
"""return a set of control points for Bezier approximation to an arc
with angle increasing counter clockwise. No requirement on |theta1-theta0| <= 90
However, it must be true that theta1-theta0 > 0."""
# I believe this is also clockwise
# pretty much just like Robert Kern's pdfgeom.BezierArc
angularExtent = theta1 - theta0
# break down the arc into fragments of <=90 degrees
if abs(angularExtent) <= 90.0: # we just need one fragment
angleList = [(theta0,theta1)]
else:
Nfrag = int( ceil( abs(angularExtent)/90.) )
fragAngle = float(angularExtent)/ Nfrag # this could be negative
angleList = []
for ii in range(Nfrag):
a = theta0 + ii * fragAngle
                b = a + fragAngle # hmm... I wonder if this is precise enough
angleList.append((a,b))
ctrlpts = []
for (a,b) in angleList:
if not ctrlpts: # first time
[(x0,y0), pts] = self._bezierArcSegmentCCW(cx,cy, rx,ry, a,b)
ctrlpts.append(pts)
else:
[(tmpx,tmpy), pts] = self._bezierArcSegmentCCW(cx,cy, rx,ry, a,b)
ctrlpts.append(pts)
return ((x0,y0), ctrlpts)
def addEllipsoidalArc(self, cx,cy, rx, ry, ang1, ang2):
"""adds an ellisesoidal arc segment to a path, with an ellipse centered
on cx,cy and with radii (major & minor axes) rx and ry. The arc is
drawn in the CCW direction. Requires: (ang2-ang1) > 0"""
((x0,y0), ctrlpts) = self.bezierArcCCW(cx,cy, rx,ry,ang1,ang2)
self.lineTo(x0,y0)
for ((x1,y1), (x2,y2),(x3,y3)) in ctrlpts:
self.curveTo(x1,y1,x2,y2,x3,y3)
def drawCentredString(self, x, y, text, text_anchor='middle'):
if self.fillColor is not None:
textLen = stringWidth(text, self.fontName,self.fontSize)
if text_anchor=='end':
x -= textLen
elif text_anchor=='middle':
x -= textLen/2.
self.drawString(x,y,text)
    def drawRightString(self, x, y, text):
        self.drawCentredString(x, y, text, text_anchor='end')
def drawString(self, x, y, text, _fontInfo=None):
gs = self._gs
if _fontInfo:
fontName, fontSize = _fontInfo
else:
fontSize = gs.fontSize
fontName = gs.fontName
try:
gfont=getFont(gs.fontName)
except:
gfont = None
font = getFont(fontName)
if font._dynamicFont:
if isinstance(text,unicode): text = text.encode('utf8')
gs.drawString(x,y,text)
else:
fc = font
if not isinstance(text,unicode):
try:
text = text.decode('utf8')
except UnicodeDecodeError,e:
i,j = e.args[2:4]
raise UnicodeDecodeError(*(e.args[:4]+('%s\n%s-->%s<--%s' % (e.args[4],text[i-10:i],text[i:j],text[j:j+10]),)))
FT = unicode2T1(text,[font]+font.substitutionFonts)
n = len(FT)
nm1 = n-1
wscale = 0.001*fontSize
for i in xrange(n):
f, t = FT[i]
if f!=fc:
_setFont(gs,f.fontName,fontSize)
fc = f
gs.drawString(x,y,t)
if i!=nm1:
x += wscale*sum(map(f.widths.__getitem__,map(ord,t)))
if font!=fc:
_setFont(gs,fontName,fontSize)
def line(self,x1,y1,x2,y2):
if self.strokeColor is not None:
self.pathBegin()
self.moveTo(x1,y1)
self.lineTo(x2,y2)
self.pathStroke()
def rect(self,x,y,width,height,stroke=1,fill=1):
self.pathBegin()
self.moveTo(x, y)
self.lineTo(x+width, y)
self.lineTo(x+width, y + height)
self.lineTo(x, y + height)
self.pathClose()
self.fillstrokepath(stroke=stroke,fill=fill)
def roundRect(self, x, y, width, height, rx,ry):
"""rect(self, x, y, width, height, rx,ry):
Draw a rectangle if rx or rx and ry are specified the corners are
rounded with ellipsoidal arcs determined by rx and ry
(drawn in the counter-clockwise direction)"""
if rx==0: rx = ry
if ry==0: ry = rx
x2 = x + width
y2 = y + height
self.pathBegin()
self.moveTo(x+rx,y)
self.addEllipsoidalArc(x2-rx, y+ry, rx, ry, 270, 360 )
self.addEllipsoidalArc(x2-rx, y2-ry, rx, ry, 0, 90)
self.addEllipsoidalArc(x+rx, y2-ry, rx, ry, 90, 180)
self.addEllipsoidalArc(x+rx, y+ry, rx, ry, 180, 270)
self.pathClose()
self.fillstrokepath()
def circle(self, cx, cy, r):
"add closed path circle with center cx,cy and axes r: counter-clockwise orientation"
self.ellipse(cx,cy,r,r)
def ellipse(self, cx,cy,rx,ry):
"""add closed path ellipse with center cx,cy and axes rx,ry: counter-clockwise orientation
(remember y-axis increases downward) """
self.pathBegin()
# first segment
x0 = cx + rx # (x0,y0) start pt
y0 = cy
x3 = cx # (x3,y3) end pt of arc
y3 = cy-ry
x1 = cx+rx
y1 = cy-ry*BEZIER_ARC_MAGIC
x2 = x3 + rx*BEZIER_ARC_MAGIC
y2 = y3
self.moveTo(x0, y0)
self.curveTo(x1,y1,x2,y2,x3,y3)
# next segment
x0 = x3
y0 = y3
x3 = cx-rx
y3 = cy
x1 = cx-rx*BEZIER_ARC_MAGIC
y1 = cy-ry
x2 = x3
y2 = cy- ry*BEZIER_ARC_MAGIC
self.curveTo(x1,y1,x2,y2,x3,y3)
# next segment
x0 = x3
y0 = y3
x3 = cx
y3 = cy+ry
x1 = cx-rx
y1 = cy+ry*BEZIER_ARC_MAGIC
x2 = cx -rx*BEZIER_ARC_MAGIC
y2 = cy+ry
self.curveTo(x1,y1,x2,y2,x3,y3)
#last segment
x0 = x3
y0 = y3
x3 = cx+rx
y3 = cy
x1 = cx+rx*BEZIER_ARC_MAGIC
y1 = cy+ry
x2 = cx+rx
y2 = cy+ry*BEZIER_ARC_MAGIC
self.curveTo(x1,y1,x2,y2,x3,y3)
self.pathClose()
def saveState(self):
'''do nothing for compatibility'''
pass
def setFillColor(self,aColor):
self.fillColor = Color2Hex(aColor)
def setStrokeColor(self,aColor):
self.strokeColor = Color2Hex(aColor)
restoreState = saveState
# compatibility routines
def setLineCap(self,cap):
self.lineCap = cap
def setLineJoin(self,join):
self.lineJoin = join
def setLineWidth(self,width):
self.strokeWidth = width
def drawToPMCanvas(d, dpi=72, bg=0xffffff, configPIL=None, showBoundary=rl_config._unset_):
d = renderScaledDrawing(d)
c = PMCanvas(d.width, d.height, dpi=dpi, bg=bg, configPIL=configPIL)
draw(d, c, 0, 0, showBoundary=showBoundary)
return c
def drawToPIL(d, dpi=72, bg=0xffffff, configPIL=None, showBoundary=rl_config._unset_):
return drawToPMCanvas(d, dpi=dpi, bg=bg, configPIL=configPIL, showBoundary=showBoundary).toPIL()
def drawToPILP(d, dpi=72, bg=0xffffff, configPIL=None, showBoundary=rl_config._unset_):
Image = _getImage()
im = drawToPIL(d, dpi=dpi, bg=bg, configPIL=configPIL, showBoundary=showBoundary)
return im.convert("P", dither=Image.NONE, palette=Image.ADAPTIVE)
def drawToFile(d,fn,fmt='GIF', dpi=72, bg=0xffffff, configPIL=None, showBoundary=rl_config._unset_):
'''create a pixmap and draw drawing, d to it then save as a file
configPIL dict is passed to image save method'''
c = drawToPMCanvas(d, dpi=dpi, bg=bg, configPIL=configPIL, showBoundary=showBoundary)
c.saveToFile(fn,fmt)
def drawToString(d,fmt='GIF', dpi=72, bg=0xffffff, configPIL=None, showBoundary=rl_config._unset_):
s = getStringIO()
drawToFile(d,s,fmt=fmt, dpi=dpi, bg=bg, configPIL=configPIL)
return s.getvalue()
save = drawToFile
def test():
def ext(x):
if x=='tiff': x='tif'
return x
#grab all drawings from the test module and write out.
#make a page of links in HTML to assist viewing.
import os
from reportlab.graphics import testshapes
getAllTestDrawings = testshapes.getAllTestDrawings
drawings = []
if not os.path.isdir('pmout'):
os.mkdir('pmout')
htmlTop = """<html><head><title>renderPM output results</title></head>
<body>
<h1>renderPM results of output</h1>
"""
htmlBottom = """</body>
</html>
"""
html = [htmlTop]
i = 0
#print in a loop, with their doc strings
for (drawing, docstring, name) in getAllTestDrawings(doTTF=hasattr(_renderPM,'ft_get_face')):
fnRoot = 'renderPM%d' % i
if 1 or i==10:
w = int(drawing.width)
h = int(drawing.height)
html.append('<hr><h2>Drawing %s %d</h2>\n<pre>%s</pre>' % (name, i, docstring))
for k in ['gif','tiff', 'png', 'jpg', 'pct']:
if k in ['gif','png','jpg','pct']:
html.append('<p>%s format</p>\n' % string.upper(k))
try:
filename = '%s.%s' % (fnRoot, ext(k))
fullpath = os.path.join('pmout', filename)
if os.path.isfile(fullpath):
os.remove(fullpath)
if k=='pct':
from reportlab.lib.colors import white
drawToFile(drawing,fullpath,fmt=k,configPIL={'transparent':white})
else:
drawToFile(drawing,fullpath,fmt=k)
if k in ['gif','png','jpg']:
html.append('<img src="%s" border="1"><br>\n' % filename)
print 'wrote',fullpath
except AttributeError:
print 'Problem drawing %s file'%k
raise
if os.environ.get('RL_NOEPSPREVIEW','0')=='1': drawing.__dict__['preview'] = 0
drawing.save(formats=['eps','pdf'],outDir='pmout',fnRoot=fnRoot)
i = i + 1
#if i==10: break
html.append(htmlBottom)
htmlFileName = os.path.join('pmout', 'index.html')
open(htmlFileName, 'w').writelines(html)
if sys.platform=='mac':
from reportlab.lib.utils import markfilename
markfilename(htmlFileName,ext='HTML')
print 'wrote %s' % htmlFileName
if __name__=='__main__':
test()
| alexissmirnov/donomo | donomo_archive/lib/reportlab/graphics/renderPM.py | Python | bsd-3-clause | 24,436 |
# External Package dependencies
#import requests
from selenium.webdriver import Firefox, Ie, PhantomJS, Chrome, Safari, DesiredCapabilities
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
# Python built-in Packages
import types
import os
import sys
import inspect
import importlib
import configparser
import pprint
from pathlib import Path, PurePath
#TODO: convert os.path to pathlib.Path/PurePath
#TODO: convert request['django']['path'] to request.django.path
# Internal Modules
from . import abstract, browser, interface
class TestException(Exception):
"""Test exceptions."""
class Test(object):
"""
A Test instance should have the following attributes that are often necessary to execute a test:
DEFAULT Attributes
    1. data - a mutant dictionary whose values can be retrieved both by key reference and by dot notation
2. request - a copy of parameters passed to Test instance initialization
OPTIONAL Attributes
1. configparser - by calling self.setup_config_parser()
2. browser - by calling self.launch_browser()
"""
def __init__(self, request=None):
self.data = abstract.MutantDictionary()
self.request = request
self.pp = pprint.PrettyPrinter(indent=4)
def setup_notebooks(self, parent, subfolder=None):
return abstract.Utils().import_parent_folders(parent, subfolder)
def setup_django_models(self, request=None):
request = request or self.request
if 'django' in request:
self.models = abstract.MutantDictionary()
self.get_django_models(request)
def setup_config_parser(self, request=None):
"""Creates a ConfigParser, attach a reference of it to the Test instance
and returns the ConfigParser instance."""
def data(self, key, section=None):
section = section or self.sections()[0]
return self.get(section, key)
request = request or self.request
if isinstance(request, str):
# convert it into a dictionary
request = {
'configparser': request
}
# then replace self.request if it's not defined yet
try:
self.request.update(request)
except AttributeError:
self.request = request
if 'configparser' in request:
self.configparser = configparser.ConfigParser()
self.configparser.read(request['configparser'])
setattr(self.configparser, 'data', types.MethodType(data, self.configparser))
return self.configparser
raise TestException("A configuration file was not specified.")
def setup_proxy(self, request=None):
request = request or self.request
if 'proxy' in request:
pass
"""uncomment this to enable browsermobproxy
from browsermobproxy import Server
self.server = Server(request['proxy'])
self.server.start()
self.proxy = self.server.create_proxy()
selenium_proxy = self.proxy.selenium_proxy()"""
else:
selenium_proxy = None
return selenium_proxy
# def enable_api(self, request=None):
# self.api = abstract.MutantDictionary()
# self.api.session = requests.Session()
# requests.packages.urllib3.disable_warnings()
# self.api.session.headers = {'content-type': 'application/json'}
# self.api.session.verify = False
# self.api.codes = requests.codes
# self.api._requests = requests
# self.api.pp = self.pp
# self.register_modules("api", [abstract, interface])
# return self.api
def launch_browser(self, request=None):
request = request or self.request
if isinstance(request, str):
request = {
'browser': request
}
try:
self.request.update(request)
request = self.request
except AttributeError:
self.request = request
if 'browser' in request:
capabilities_map = {
"Firefox": DesiredCapabilities.FIREFOX,
"IExplorer": DesiredCapabilities.INTERNETEXPLORER,
"Chrome": DesiredCapabilities.CHROME,
"PhantomJS": DesiredCapabilities.PHANTOMJS,
"Safari": DesiredCapabilities.SAFARI,
}
caps = capabilities_map[request['browser']]
############################################
### Firefox
############################################
if request['browser'] == 'Firefox':
firefox_profile = FirefoxProfile()
if 'firefox' in request:
if 'preferences' in request['firefox']:
for preference in request['firefox']['preferences']:
firefox_profile.set_preference(*preference)
if 'extensions' in request['firefox']:
for extension in request['firefox']['extensions']:
extension = PurePath(request['firefox']['extensions_path'], extension)
firefox_profile.add_extension(str(extension))
if 'mime' in request['firefox']:
import shutil
shutil.copy2(request['firefox']['mime'], firefox_profile.profile_dir)
if 'capabilities' in request['firefox']:
caps.update(request['firefox']['capabilities'])
selenium_proxy = self.setup_proxy()
class Mixin(Firefox, browser.BrowsingActions): pass
self.browser = Mixin(firefox_profile, proxy=selenium_proxy, capabilities=caps)
############################################
### Internet Explorer
############################################
elif request['browser'] == 'IExplorer':
# Not a good idea => caps['nativeEvents'] = False
iedriver_server = os.path.join(request['iexplorer']['server_path'],
request['iexplorer']['server_file'])
class Mixin(Ie, browser.BrowsingActions): pass
self.browser = Mixin(iedriver_server, capabilities=caps)
############################################
### GhostDriver, PhantomJS
############################################
elif request['browser'] == 'PhantomJS':
service_args = ["--ignore-ssl-errors=yes"]
caps['phantomjs.page.settings.userAgent'] = (
'Mozilla/5.0 (Windows NT'
' 6.1; Win64; x64; rv:16.0) Gecko/20121026 Firefox/16.0'
)
class Mixin(PhantomJS, browser.BrowsingActions): pass
self.browser = Mixin(service_args=service_args,
desired_capabilities=caps)
# If you don't do this, you'll get the pain:
# https://github.com/angular/protractor/issues/585
self.browser.set_window_size(1024, 768)
############################################
### Chrome
############################################
elif request['browser'] == 'Chrome':
chromedriver_server = os.path.join(request['chrome']['server_path'],
request['chrome']['server_file'])
os.environ["webdriver.chrome.driver"] = chromedriver_server
class Mixin(Chrome, browser.BrowsingActions): pass
self.browser = Mixin(chromedriver_server)
############################################
### Safari
############################################
elif request['browser'] == 'Safari':
selenium_server = os.path.join(request['safari']['server_path'],
request['safari']['server_file'])
class Mixin(Safari, browser.BrowsingActions): pass
self.browser = Mixin(selenium_server)
return self.browser
print("Please specify which browser to launch.")
assert 'browser' in request
def register_functions(self, attr_name, *functions):
"""
:param attr_name: The name of the attribute that will be attached to the test object.
:param functions: A sequence of functions that will be attached
to the test object using the attribute mentioned above.
:return: None
"""
for func in functions:
_attribute = getattr(self, attr_name)
setattr(_attribute, func.__name__, types.MethodType(func, _attribute))
def register_modules(self, attr_name, *modules):
"""Register module defined functions to an attribute of the test object."""
for mod in modules:
for func_name, func in inspect.getmembers(mod, inspect.isfunction):
_attribute = getattr(self, attr_name)
setattr(_attribute, func_name, types.MethodType(func, _attribute))
def register_classes(self, *args):
"""This generates high level classes which inherits all the capabilities of the humble browser.
For example, if you have class 'Angora' inside the module you passed into this method,
you can then use it like 'test.Angora.get_element(locator)' because the 'Angora' attribute of 'test'
is a mutated 'browser' object."""
class SubBrowser(self.browser.__class__):
def __init__(self, browser):
self.__dict__.update(browser.__dict__)
for mod in args:
for cls_name, cls in inspect.getmembers(mod, inspect.isclass):
if not hasattr(self, cls_name):
setattr(self, cls_name, SubBrowser(self.browser))
_attribute = getattr(self, cls_name)
for func_name, func in inspect.getmembers(cls, inspect.isfunction):
setattr(_attribute, func_name, types.MethodType(func, _attribute))
def reload_modules(self, *modules):
"""This is useful when you have edited a module and you want this edited version to be updated in your
currently running jupyter notebook."""
for module in modules:
importlib.reload(module)
def get_django_models(self, request):
# Setup Django
if str(request['django']['path']) not in sys.path:
sys.path.append(request['django']['path'])
if not 'DJANGO_SETTINGS_MODULE' in os.environ:
os.environ['DJANGO_SETTINGS_MODULE'] = "{}.settings".format(request['django']['name'])
import django
django.setup()
for app in request['django']['apps'].keys():
app_models = importlib.import_module("{}.models".format(app))
for model in request['django']['apps'][app]:
self.models[model] = getattr(app_models, model)
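# A minimal usage sketch (not part of the original module; assumes Firefox and
# its driver are installed locally, using the request-dict keys handled in
# launch_browser above):
#
#     test = Test({'browser': 'Firefox'})
#     browser = test.launch_browser()
#     browser.get('https://example.com')
#     browser.quit()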
| ldiary/marigoso | marigoso/test.py | Python | mit | 11,244 |
#!/usr/bin/env python3
#
# Copyright (c) 2016-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
# @file
# A Happy command line utility through which a virtual node joins the Internet.
#
# The command is executed by instantiating and running HappyInternet class.
#
from __future__ import absolute_import
from __future__ import print_function
import getopt
import sys
import happy.HappyInternet
from happy.Utils import *
if __name__ == "__main__":
options = happy.HappyInternet.option()
try:
opts, args = getopt.getopt(sys.argv[1:], "hn:qadf:s:e:",
["help", "node=", "quiet", "add", "delete", "interface=", "isp=", "seed="])
except getopt.GetoptError as err:
print(happy.HappyInternet.HappyInternet.__doc__)
print(hred(str(err)))
sys.exit(hred("%s: Failed to parse arguments." % (__file__)))
for o, a in opts:
if o in ("-h", "--help"):
print(happy.HappyInternet.HappyInternet.__doc__)
sys.exit(0)
elif o in ("-q", "--quiet"):
options["quiet"] = True
elif o in ("-a", "--add"):
options["add"] = True
elif o in ("-d", "--delete"):
options["delete"] = True
elif o in ("-f", "--interface"):
options["iface"] = a
elif o in ("-s", "--isp"):
options["isp"] = a
elif o in ("-e", "--seed"):
options["seed"] = a
elif o in ("-n", "--node"):
options["node_id"] = a
else:
assert False, "unhandled option"
cmd = happy.HappyInternet.HappyInternet(options)
cmd.start()
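# Illustrative invocations (flag names taken from the getopt spec above; the
# node, interface, isp, and seed values are placeholders):
#     happy-internet --add --node node01 --interface eth0 --isp isp0 --seed 0
#     happy-internet --delete --node node01 --interface eth0 --isp isp0 --seed 0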
| openweave/happy | bin/happy-internet.py | Python | apache-2.0 | 2,244 |
import pandas as pd
import MySQLdb
import re
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
class MPAADataFrame:
def __init__(self, username, password):
"""
Pass in username and password
"""
self.df = parseMPAARatings(username, password)
self.pluckReason()
self.pluckRating()
self.createRatingValue()
def pluckReason(self):
"""
Plucks the reason for a rating from the
pandas dataframe 'info' key and returns
"""
def safeSub(pat, x):
try:
return re.sub(pat, '', x)
except:
print x
return x
pat = re.compile('^(Rated)\s*(\sG|PG\s|PG-13\s|PG- 13|R\s|NC-17\s)\s*(for)*\s*')
vmatch = np.vectorize(lambda x: safeSub(pat,x))
self.reason = np.array([r for r in vmatch(self.df['info'].values)])
def pluckRating(self):
"""
Plucks the G, PG, PG-13, R rating from the
pandas dataframe 'info' key and returns it
"""
def safeSearch(pat, x):
try:
return re.search(pat, x).group()
except:
print x
return x
pat = re.compile('(\sG|PG\s|PG-13\s|PG- 13|R\s|NC-17\s)')
vmatch = np.vectorize(lambda x: safeSearch(pat,x))
self.rating = np.array([r.replace(' ', '') for r in vmatch(self.df['info'].values)])
def createRatingValue(self):
"""
Maps MPAA rating to an integer
"""
rating_key = {'G': 0, 'PG': 1, 'PG-13': 2, 'R': 3, 'NC-17': 4}
self.rating_value = [rating_key[key] for key in self.rating]
def testNaiveBayes(self):
"""
        Builds a count-vectorized, TF-IDF weighted multinomial naive Bayes
        pipeline and fits it on the first two thirds of the rating reasons
"""
self.clf = Pipeline([('vect', CountVectorizer(encoding='ISO-8859-1')),
('tfidf', TfidfTransformer()),
('classifier', MultinomialNB()),
])
self.n_train = int(round(len(self.reason) * .67))
self.clf.fit(self.reason[:self.n_train], self.rating_value[:self.n_train])
def parseMPAARatings(username, password, dbase="imdb.db"):
"""
Parses the SQL IMDb and puts the title and MPAA rating and
justification into a returned pandas dataframe
"""
# connect to the database
db = MySQLdb.connect(host="localhost", user=username,
passwd=password, db=dbase)
# use pandas to get rows i want
df = pd.read_sql('SELECT movie_info.id, title, info FROM movie_info \
LEFT OUTER JOIN title ON movie_id = title.id WHERE info_type_id=97;',
con=db)
print 'loaded dataframe from MySQL. records:', len(df)
db.close()
return df
def addRatings(df, username, password, dbase='imdb.db'):
    """
    Plucks the MPAA rating from each row's 'info' field (the same regex used
    by MPAADataFrame.pluckRating) and adds the results back to movie_info
    """
    pat = re.compile('(\sG|PG\s|PG-13\s|PG- 13|R\s|NC-17\s)')
    rating = [m.group().replace(' ', '') if m else x
              for x, m in ((x, re.search(pat, x)) for x in df['info'].values)]
db = MySQLdb.connect(host="localhost", user=username,
passwd=password, db=dbase)
c = db.cursor()
for i in df['id'].values:
query = """UPDATE movie_info SET rating = '{:s}' WHERE id = {:d};""".format(rating[i-1], i)
c.execute(query)
db.commit()
db.close()
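# End-to-end sketch (assumes a local MySQL IMDb database and valid
# credentials; not part of the original module):
#
#     mpaa = MPAADataFrame('user', 'secret')
#     mpaa.testNaiveBayes()
#     # score the classifier on the held-out final third of the reasons
#     print mpaa.clf.score(mpaa.reason[mpaa.n_train:],
#                          mpaa.rating_value[mpaa.n_train:])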
| hillnich/imdbML | imdbML.py | Python | gpl-2.0 | 3,507 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Most of this code was obtained from the Python documentation online.
"""Decorator utility functions.
decorators:
- synchronized
- propertyx
- accepts
- returns
- singleton
- attrs
- deprecated
"""
import functools
import warnings
import threading
import sys
def synchronized(lock=None):
"""Decorator that synchronizes a method or a function with a mutex lock.
Example usage:
@synchronized()
def operation(self, a, b):
...
"""
if lock is None:
lock = threading.Lock()
def wrapper(function):
def new_function(*args, **kwargs):
lock.acquire()
try:
return function(*args, **kwargs)
finally:
lock.release()
return new_function
return wrapper
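# Usage sketch: passing the same lock object serializes every function that
# shares it ('balance_lock', 'deposit' and 'withdraw' are hypothetical names):
#
#     balance_lock = threading.Lock()
#
#     @synchronized(balance_lock)
#     def deposit(account, amount):
#         account.balance += amount
#
#     @synchronized(balance_lock)
#     def withdraw(account, amount):
#         account.balance -= amount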
def propertyx(function):
"""Decorator to easily create properties in classes.
Example:
class Angle(object):
def __init__(self, rad):
self._rad = rad
@property
def rad():
def fget(self):
return self._rad
def fset(self, angle):
if isinstance(angle, Angle):
angle = angle.rad
self._rad = float(angle)
Arguments:
- `function`: The function to be decorated.
"""
keys = ('fget', 'fset', 'fdel')
func_locals = {'doc': function.__doc__}
def probe_func(frame, event, arg):
if event == 'return':
locals = frame.f_locals
func_locals.update(dict((k, locals.get(k)) for k in keys))
sys.settrace(None)
return probe_func
sys.settrace(probe_func)
function()
return property(**func_locals)
def accepts(*types):
"""Decorator to ensure that the decorated function accepts the given types as arguments.
Example:
@accepts(int, (int,float))
@returns((int,float))
def func(arg1, arg2):
return arg1 * arg2
"""
def check_accepts(f):
assert len(types) == f.func_code.co_argcount
def new_f(*args, **kwds):
for (a, t) in zip(args, types):
assert isinstance(a, t),\
"arg %r does not match %s" % (a, t)
return f(*args, **kwds)
new_f.func_name = f.func_name
return new_f
return check_accepts
def returns(rtype):
"""Decorator to ensure that the decorated function returns the given
type as argument.
Example:
@accepts(int, (int,float))
@returns((int,float))
def func(arg1, arg2):
return arg1 * arg2
"""
def check_returns(f):
def new_f(*args, **kwds):
result = f(*args, **kwds)
assert isinstance(result, rtype),\
"return value %r does not match %s" % (result, rtype)
return result
new_f.func_name = f.func_name
return new_f
return check_returns
def singleton(cls):
"""Decorator to ensures a class follows the singleton pattern.
Example:
@singleton
class MyClass:
...
"""
instances = {}
def getinstance():
if cls not in instances:
instances[cls] = cls()
return instances[cls]
return getinstance
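# Usage sketch: every call to the decorated class yields the same instance.
#
#     @singleton
#     class Registry:
#         pass
#
#     assert Registry() is Registry()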
def attrs(**kwds):
"""Decorator to add attributes to a function.
Example:
@attrs(versionadded="2.2",
author="Guido van Rossum")
def mymethod(f):
...
"""
def decorate(f):
for k in kwds:
setattr(f, k, kwds[k])
return f
return decorate
def deprecated(func):
"""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.
## Usage examples ##
@deprecated
def my_func():
pass
@other_decorators_must_be_upper
@deprecated
def my_func():
pass
"""
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.warn_explicit(
"Call to deprecated function %(funcname)s." % {
'funcname': func.__name__,
},
category=DeprecationWarning,
filename=func.func_code.co_filename,
lineno=func.func_code.co_firstlineno + 1
)
return func(*args, **kwargs)
return new_func
| austinwagner/sublime-sourcepawn | watchdog/utils/decorators.py | Python | mit | 4,060 |
"""appsync module initialization; sets value for base decorator."""
from .models import appsync_backends
from ..core.models import base_decorator
mock_appsync = base_decorator(appsync_backends)
| spulec/moto | moto/appsync/__init__.py | Python | apache-2.0 | 195 |
#! /usr/bin/env python
from __future__ import print_function
from openturns import *
TESTPREAMBLE()
try:
# The 1D interface
dim = 2
a = NumericalPoint(dim, -1.0)
b = NumericalPoint(dim, 2.0)
domain = Domain(Interval(a, b))
p1 = NumericalPoint(dim, 0.5)
p2 = NumericalPoint(dim, 2.5)
print("Domain=", domain)
# Accessors
print("Dimension=", domain.getDimension())
# Contains
print("is point ", p1, " inside ", domain, "? ", domain.contains(p1))
print("is point ", p2, " inside ", domain, "? ", domain.contains(p2))
except:
import sys
print("t_Domain_std.py", sys.exc_info()[0], sys.exc_info()[1])
| dubourg/openturns | python/test/t_Domain_std.py | Python | gpl-3.0 | 665 |
import pytest
from semantic_release import ci_checks
from semantic_release.errors import CiVerificationError
def test_circle_should_pass_if_branch_is_master_and_no_pr(monkeypatch):
monkeypatch.setenv('CIRCLE_BRANCH', 'master')
monkeypatch.setenv('CI_PULL_REQUEST', '')
assert ci_checks.circle('master')
def test_circle_should_pass_if_branch_is_correct_and_no_pr(monkeypatch):
monkeypatch.setenv('CIRCLE_BRANCH', 'other-branch')
monkeypatch.setenv('CI_PULL_REQUEST', '')
assert ci_checks.circle('other-branch')
def test_circle_should_raise_ci_verification_error_for_wrong_branch(monkeypatch):
monkeypatch.setenv('CIRCLE_BRANCH', 'other-branch')
monkeypatch.setenv('CI_PULL_REQUEST', '')
with pytest.raises(CiVerificationError):
ci_checks.circle('master')
def test_circle_should_raise_ci_verification_error_for_pr(monkeypatch):
monkeypatch.setenv('CIRCLE_BRANCH', 'other-branch')
monkeypatch.setenv('CI_PULL_REQUEST', 'http://the-url-of-the-pr')
with pytest.raises(CiVerificationError):
ci_checks.circle('master')
| wlonk/python-semantic-release | tests/ci_checks/test_circle.py | Python | mit | 1,089 |
import sys
def setup(core, object):
object.setStfFilename('static_item_n')
object.setStfName('item_wookiee_gloves_02_01')
object.setDetailFilename('static_item_d')
object.setDetailName('item_wookiee_gloves_02_01')
object.setIntAttribute('cat_stat_mod_bonus.@stat_n:agility_modified', 3)
object.setStringAttribute('class_required', 'Trader')
return | agry/NGECore2 | scripts/object/tangible/wearables/wookiee/item_trader_gloves_02_01.py | Python | lgpl-3.0 | 355 |
# If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
# Find the sum of all the multiples of 3 or 5 below 1000.
def process(limit):
numberOf3 = (limit - 1) / 3
numberOf5 = (limit - 1) / 5
sumOfAll3 = sum([i * 3 for i in xrange(1, numberOf3 + 1)])
sumOfAll5 = sum([i * 5 for i in xrange(1, numberOf5 + 1) if (i % 3 != 0)])
return sumOfAll3 + sumOfAll5
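# The same answer follows in O(1) from triangular numbers T(n) = n*(n+1)/2
# with inclusion-exclusion (multiples of 15 would otherwise be counted twice):
#     3*T(333) + 5*T(199) - 15*T(66) = 166833 + 99500 - 33165 = 233168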
def main():
print process(1000)
if __name__ == '__main__':
main()
| zifter/projecteuler | 1-99/t1.py | Python | mit | 530 |
# -*- coding: utf-8 -*-
from nose.tools import eq_, ok_, assert_raises
from dao.dinpy.dinpy import *
from dao.dinpy.dinpy import DinpySyntaxError
from dao import *
from dao.builtins.io import prin
from dao.term import Var
from dao.dinpy.dexpr import _VarSymbol
from dao import special
from dao.builtins.rule import replace_def, remove, append_def, insert_def, \
abolish, retractall, retract
from dao.dinpy.dinpy import AtForm, varcache
from dao.builtins import arith
from dao.special import UserFunction, begin, quote
from dao.solve import set_run_mode, noninteractive, DaoUncaughtThrow, to_sexpression
set_run_mode(noninteractive)
def preparse_to_sexpression(exp):
return to_sexpression(preparse(exp))
a, b, c = var.a.b.c
a, b, c = preparse([a, b, c])
i, j = preparse([v.i, v.j])
n = preparse(v.n)
x, y = preparse([v.x, v.y])
class TestVarDeclare:
def test1(self):
ok_(isinstance(a, Var))
ok_(isinstance(b, Var))
ok_(isinstance(c, Var))
ok_(isinstance(i, Var))
class Test_v_var:
def test_v(self):
x = v.a
eq_(x.__class__, _VarSymbol)
eq_(preparse(x), varcache('a'))
def test_var(self):
x = var.a.b.c
eq_(preparse(list(x)),
(varcache('a'),varcache('b'),varcache('c')))
class TestAssign:
def test_assign1(self):
eq_(preparse(v.i<<1), preparse(special.set(i, 1)))
def test_assign2(self):
eq_(preparse_to_sexpression(put.i.j<<v.i+1), preparse_to_sexpression(special.set_list([i,j], arith.add(i, 1))))
def test_assign3(self):
eq_(preparse_to_sexpression(put.i.j<<(1,2)), preparse_to_sexpression(special.set_list([i,j], (1,2))))
class TestDo:
def test_do1(self):
eq_(preparse(do[v.i<<1]), preparse(special.begin(special.set(i, 1))))
class TestLet:
def test_let1(self):
eq_(preparse(let(v.i << 1).do[1,2]), special.let(((i,1),), 1, 2))
def test_eval_let1(self):
eq_(eval(let(v.i << 1).do[v.i]), 1)
def test_let2(self):
let1 = let(v.a<<1).do[prin(1)]
eq_(preparse(let1), special.let([(a,1)], prin(1)))
def test_let3(self):
let1 = let(v.a << v.b << 1).do[prin(1)]
eq_(preparse(let1), special.let(((b,1), (a,b)), prin(1)))
def test_let4(self):
let1 = let( v.a/ v.b << (1,2)).do[prin(1)]
def test_eval_let4(self):
let1 = let( v.a/ v.b << (1,2)).do[v.a+v.b]
eq_(eval(let1), 3)
class TestIff:
def test_iff1(self):
assert_raises(DinpySyntaxError, preparse, iff(v.i==1)[2])
eq_(preparse(iff(v.i==1).do[2]), special.iff([(arith.eq(i,1), 2)]))
def test_iff2(self):
eq_(preparse(iff(1) .do[2]
.elsif(3) .do[4].
els [5]),
special.iff([(1, 2),(3, 4)], 5))
def test_eval_iff2(self):
eq_(eval(iff(0) .do[1]
.elsif(1) .do[2]
.els [3]),
2)
eq_(eval(iff(0) .do[1]
.elsif(0) .do[2]
.els [3]),
3)
class TestProtect:
def test_eval_protect(self):
eq_(eval(protect [ prin(1) ] .always[ prin(2) ]), None)
def test_eval_protect2(self): assert_raises(DaoUncaughtThrow, eval,
protect [ prin(1), throw(1).do[2] ] .always[ prin(2) ])
def test_eval_protect3(self):
eq_(eval(
catch(1)
.do[
protect [ prin(1), throw(1).do[2], prin(3) ]
.always[ prin(2) ]
]), 2)
class TestLoop:
def test_loop(self):
eq_(preparse(loop[prin(1)]), special.LoopForm((prin(1),)))
def test_eval_loop(self):
i = v.i
eq_(eval(begin(i<<0, loop[prin(i), ++i, iff(i==3).do[exit >>i]], i)), 3)
def test_loop_times(self):
eq_(preparse(loop(10)[prin(1)]), special.LoopTimesForm(10, (prin(1),), 'a'))
def test_eval_loop_times(self):
eq_(eval(loop(3)[prin(1)]), None)
def test_loop_when(self):
eq_(preparse(loop[prin(1)].when(1)), special.LoopWhenForm((prin(1),), 1))
def test_eval_loop_when(self):
eq_(eval(do[ v.i<<0, loop[prin(v.i), ++v.i].when(v.i<3), v.i]), 3)
def test_when_loop(self):
eq_(preparse(when(1).loop[prin(1)]), special.WhenLoopForm(1, (prin(1),)))
def test_when_loop2(self):
eq_(preparse(when(v.i!=0).loop[ prin(v.i)]), special.WhenLoopForm(preparse(v.i!=0), (prin(i),)))
def test_loop_until(self):
eq_(preparse(loop[prin(1)].until(v.i==1)),
special.LoopUntilForm((prin(1),), arith.eq(i, 1)))
def test_eval_loop_until(self):
eq_(eval(do[ v.i<<0, loop[prin(v.i), ++v.i].until(v.i==3), v.i]), 3)
class TestCase:
def test_Case1(self):
x = preparse(v.x)
eq_(preparse(case(x).of(1)[prin(1)].of(2,3)[prin(4)].els[prin(5)]),
special.CaseForm(x,{1:(prin(1),), 2:(prin(4),), 3:(prin(4),)}, (prin(5),)))
def test_eval_Case1(self):
x = preparse(v.x)
eq_(eval(case(2).of(1)[prin(1)].of(2,3)[prin(quote((2,3))), quote((2,3))].els[prin(5)]),
(2,3))
def test_eval_Case2(self):
x = preparse(v.x)
eq_(eval(case(3).of(1)[prin(1), 1].of(2)[prin(2), 2].els[prin(3), 3]),
3)
def test_eval_Case3(self):
x = preparse(v.x)
eq_(eval(begin(v.x<<quote((1,2)), case(x).of((1,2), (3,4))[prin(x), x].of(2,3)[prin((2,3)), (2,3)].els[prin(5)])),
(1,2))
class TestEach:
def test_slice(self):
i = preparse(v.i); j = preparse(v.j)
eq_(preparse(each(v.i,v.j)[1:3][1:3].loop[prin(v.i)]),
special.EachForm((i,j), zip(range(1,3),range(1,3)),(prin(i),)))
def test_eval_slice(self):
eq_(eval(each(v.i,v.j)[1:3][1:3].loop[prin(v.i, v.j), quote((v.i, v.j))]),
None)
def test_getitem1(self):
i = preparse(v.i); j = preparse(v.j)
eq_(preparse(each(v.i,v.j)[zip(range(2), range(2))].loop[prin(v.i, v.j)]),
special.EachForm((i,j), tuple(zip(range(2),range(2))),(prin(i,j),)))
def test_eval_getitem1(self):
eq_(eval(each(v.i,v.j)[zip(range(2), range(2))].loop[prin(v.i, v.j), quote((v.i, v.j))]),
None)
def test_getitem2(self):
i = preparse(v.i); j = preparse(v.j)
eq_(preparse(each(v.i,v.j)[range(2)][range(2)].loop[prin(v.i, v.j)]),
special.EachForm((i,j), zip(range(2),range(2)),(prin(i,j),)))
def test_eval_getitem2(self):
eq_(eval(each(v.i,v.j)[range(2)][range(2)].loop[prin(v.i, v.j), quote((v.i, v.j))]),
None)
class TestExitNext:
def test_exit1(self):
eq_(preparse(exit/'loop'), special.exit(None, 'loop'))
def test_exit2(self):
eq_(preparse(exit/'loop'*2>>v.i), special.exit(i, 'loop', 2))
def test_next1(self):
eq_(preparse(next/'loop'), special.next('loop'))
def test_next2(self):
eq_(preparse(next/'loop'*2), special.next('loop', 2))
class TestBlockLabel:
def test_label(self):
eq_(preparse(label.a%loop[0]), special.LoopForm((0,), 'a'))
def test_block(self):
eq_(preparse(block.a[1]), special.block('a', 1))
def test_block2(self):
eq_(preparse(block.a[ v.i << 1 ]), special.block('a', special.set(i,1)))
class TestFun:
def test_at(self):
at1 = at(i)[1](j)[2][3](x,y)[4]
eq_(preparse(at1),
AtForm((((i,),((1,),)), ((j,),((2,),(3,),)),((x,y),((4,),)))))
def test_at2(self):
eq_(preparse(at[prin(1)]), AtForm(((None,((prin(1),),)),)))
def test1(self):
eq_(preparse(fun. a(x)[prin(1)]), replace_def(a, (x,), ((prin(1),),), UserFunction))
def test_eval_a_x(self):
eq_(eval(do[fun. a(x)[prin(x), x], a(1)]), 1)
def test2(self):
eq_(preparse(fun. a(x)[prin(1)]), replace_def(a, (x,), ((prin(1),),), UserFunction))
def test_eval_a_x2(self):
        #assert 0, 'replace_def rethink'
x = v.x
eq_(eval(do[fun. a(x)[prin(x), x], a(1),
fun. a(x)[prin(-x), -x], a(1)]), -1)
eq_(eval(do[fun. a(x)[prin(x), x], a(1),
fun. a(x, i)[prin(-x, i), -x], a(3), a(1, 2)]), -1)
def test3(self):
eq_(preparse(fun. a(x)>= [prin(1)]),
append_def(a, (x,), [(prin(1),)], special.UserFunction))
def test4(self):
eq_(preparse(fun. a(x)>= at[prin(1)]),
append_def(a, (x,), ((prin(1),),), special.UserFunction))
def test41(self):
eq_(preparse(fun. a(x)>= at[prin(1)][prin(2)]),
append_def(a, (x,), ((prin(1),),(prin(2),),), special.UserFunction))
def test42(self):
eq_(preparse(fun. a(x)<= at[prin(1)]),
insert_def(a, (x,), ((prin(1),),), special.UserFunction))
def test5(self):
eq_(preparse(fun. a== at()[prin(1)]),
special.set(a, special.FunctionForm(((), prin(1)))))
def test6(self):
eq_(preparse(fun. a>= at()[prin(1)]),
special.begin(append_def(a, (), ((prin(1),),), special.UserFunction)))
def test61(self):
eq_(preparse(fun. a<= at()[prin(1)]),
special.begin(insert_def(a, (), ((prin(1),),), special.UserFunction)))
def test7(self):
eq_(preparse(-fun. a/3),abolish(a,3))
def test8(self):
eq_(preparse(-fun. a(x)), remove(a,(x,), special.UserFunction))
def test9(self):
eq_(preparse(fun()[prin(1)]), special.FunctionForm(((), prin(1))))
def test_eval_letr_fun(self):
eq_(eval(letr (v.f2 << fun(v.x)[ iff(v.x<1).do[v.x].els[v.f2(v.x-1)]]).do[v.f2(2)]), 0)
class TestMacro:
def test5(self):
eq_(preparse(macro. a== at()[prin(1)]),
special.set(a, special.MacroForm(((), prin(1)))))
class TestSample:
def test_hello(self):
from dao.dinpy.samples import hello
def test_interactive(self):
from dao.solve import set_run_mode, interactive
set_run_mode(mode=interactive)
from dao.dinpy.samples import interactive
def test_parse_interactive(self):
from dao.solve import set_run_mode, interactive
set_run_mode(mode=interactive)
from dao.dinpy.samples import parse_interactive
def test_sample(self):
from dao.dinpy.samples import sample
| chaosim/dao | dao/dinpy/tests/testdinpy.py | Python | gpl-3.0 | 9,844 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import base64
import json
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
from behave import *
@step('I share first element in the history list')
def step_impl(context):
context.execute_steps(u'''
given I open History dialog
''')
history = context.browser.find_element_by_id("HistoryPopup")
entries = history.find_elements_by_xpath('.//li[not(@data-clone-template)]')
assert len(entries) > 0, "There are no entries in the history"
item = entries[0]
item.find_elements_by_xpath('.//*[@data-share-item]')[0].click()
@then('the json to share is shown with url "{url}" and contains the following headers')
def step_impl(context, url):
# Wait for modal to appear
WebDriverWait(context.browser, 10).until(
expected_conditions.visibility_of_element_located(
(By.ID, 'ShareRequestForm')))
output = context.browser.execute_script("return restman.ui.editors.get('#ShareRequestEditor').getValue();")
snippet = json.loads(output)
    assert url == snippet["url"], "URL: \"{}\" not in output.\nOutput: {}".format(url, output)
for row in context.table:
assert row['key'] in snippet['headers'], "Header {} is not in output".format(row['key'])
        assert row['value'] == snippet['headers'][row['key']], "Header value is not correct. Expected: {}; Actual: {}".format(row['value'], snippet['headers'][row['key']])
@step('I click on import request')
def step_impl(context):
context.execute_steps(u'''
given I open History dialog
''')
# Click on import
context.browser.find_element_by_id('ImportHistory').click()
WebDriverWait(context.browser, 10).until(
expected_conditions.visibility_of_element_located(
(By.ID, 'ImportRequestForm')))
@step('I write a shared request for "{url}"')
def step_impl(context, url):
req = json.dumps({
"method": "POST",
"url": url,
"headers": {
"Content-Type": "application/json",
"X-Test-Header": "shared_request"
},
"body": {
"type": "form",
"content": {
"SomeKey": "SomeValue11233",
"SomeOtherKey": "SomeOtherValue019",
}
}
})
context.browser.execute_script("return restman.ui.editors.setValue('#ImportRequestEditor', atob('{}'));".format(base64.b64encode(req)))
@step('I click on load import request')
def step_impl(context):
# Import request
context.browser.find_element_by_xpath("//*[@id='ImportRequestForm']//input[@value='Import']").click()
| jsargiot/restman | tests/steps/share.py | Python | mit | 2,709 |
# Given a linked list and a value x, partition it such that
# all nodes less than x come before nodes greater than or equal to x.
#
# You should preserve the original relative order of the nodes in each
# of the two partitions.
#
# For example,
# Given 1->4->3->2->5->2 and x = 3,
# return 1->2->2->4->3->5.
from node.sllist import ListNode
class Solution:
# @param head, a ListNode
# @param x, an integer
# @return a ListNode
def partition(self, head, x):
less_head = None
less_tail = None
more_head = None
more_tail = None
p = head
while p is not None:
if p.val < x:
if less_head is None:
less_head = ListNode(p.val)
less_tail = less_head
else:
less_tail.next = ListNode(p.val)
less_tail = less_tail.next
else:
if more_head is None:
more_head = ListNode(p.val)
more_tail = more_head
else:
more_tail.next = ListNode(p.val)
more_tail = more_tail.next
p = p.next
if less_head is None:
return more_head
else:
less_tail.next = more_head
return less_head
def test():
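    # A small sanity check using the example from the header comment.
    # It assumes ListNode(val) exposes .val and .next, as Solution.partition
    # above already relies on.
    head = None
    for val in reversed([1, 4, 3, 2, 5, 2]):
        node = ListNode(val)
        node.next = head
        head = node
    result = Solution().partition(head, 3)
    values = []
    while result is not None:
        values.append(result.val)
        result = result.next
    assert values == [1, 2, 2, 4, 3, 5], values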
if __name__ == '__main__':
test()
| feigaochn/leetcode | p86_partition_list.py | Python | mit | 1,393 |
from will.plugin import WillPlugin
from will.decorators import respond_to, periodic, hear, randomly, route, rendered_template
class BirthdayPlugin(WillPlugin):
@periodic(month="4", day="9", minute="0", hour="0")
def happy_birthday_from_will(self):
self.say("@steven Happy Birthday!!!! :)")
| skoczen/my-will | plugins/birthdays.py | Python | mit | 309 |
# -*- coding: utf-8 -*-
"""Operating system independent (generic) preprocessor plugins."""
from dfvfs.helpers import file_system_searcher
from plaso.lib import definitions
from plaso.preprocessors import interface
from plaso.preprocessors import manager
class DetermineOperatingSystemPlugin(
interface.FileSystemArtifactPreprocessorPlugin):
"""Plugin to determine the operating system."""
# pylint: disable=abstract-method
# This plugin does not use an artifact definition and therefore does not
# use _ParsePathSpecification.
# We need to check for both forward and backward slashes since the path
# specification will be dfVFS back-end dependent.
_WINDOWS_LOCATIONS = set([
'/windows/system32', '\\windows\\system32', '/winnt/system32',
'\\winnt\\system32', '/winnt35/system32', '\\winnt35\\system32',
'\\wtsrv\\system32', '/wtsrv/system32'])
def __init__(self):
"""Initializes a plugin to determine the operating system."""
super(DetermineOperatingSystemPlugin, self).__init__()
self._find_specs = [
file_system_searcher.FindSpec(
case_sensitive=False, location='/etc',
location_separator='/'),
file_system_searcher.FindSpec(
case_sensitive=False, location='/System/Library',
location_separator='/'),
file_system_searcher.FindSpec(
case_sensitive=False, location='\\Windows\\System32',
location_separator='\\'),
file_system_searcher.FindSpec(
case_sensitive=False, location='\\WINNT\\System32',
location_separator='\\'),
file_system_searcher.FindSpec(
case_sensitive=False, location='\\WINNT35\\System32',
location_separator='\\'),
file_system_searcher.FindSpec(
case_sensitive=False, location='\\WTSRV\\System32',
location_separator='\\')]
# pylint: disable=unused-argument
def Collect(self, mediator, artifact_definition, searcher, file_system):
"""Collects values using a file artifact definition.
Args:
mediator (PreprocessMediator): mediates interactions between preprocess
plugins and other components, such as storage and knowledge base.
artifact_definition (artifacts.ArtifactDefinition): artifact definition.
searcher (dfvfs.FileSystemSearcher): file system searcher to preprocess
the file system.
file_system (dfvfs.FileSystem): file system to be preprocessed.
Raises:
PreProcessFail: if the preprocessing fails.
"""
locations = []
for path_spec in searcher.Find(find_specs=self._find_specs):
relative_path = searcher.GetRelativePath(path_spec)
if relative_path:
locations.append(relative_path.lower())
operating_system = definitions.OPERATING_SYSTEM_FAMILY_UNKNOWN
if self._WINDOWS_LOCATIONS.intersection(set(locations)):
operating_system = definitions.OPERATING_SYSTEM_FAMILY_WINDOWS_NT
elif '/system/library' in locations:
operating_system = definitions.OPERATING_SYSTEM_FAMILY_MACOS
elif '/etc' in locations:
operating_system = definitions.OPERATING_SYSTEM_FAMILY_LINUX
if operating_system != definitions.OPERATING_SYSTEM_FAMILY_UNKNOWN:
mediator.SetValue('operating_system', operating_system)
manager.PreprocessPluginsManager.RegisterPlugins([
DetermineOperatingSystemPlugin])
| log2timeline/plaso | plaso/preprocessors/generic.py | Python | apache-2.0 | 3,399 |
from datetime import timedelta
from dockerrotate.main import parse_arguments
def test_timestamp_parsing():
assert parse_arguments(['containers', '--created', '1h']).created == timedelta(hours=1)
assert parse_arguments(['containers', '--created', '23m']).created == timedelta(minutes=23)
assert parse_arguments(['containers', '--created', '2d']).created == timedelta(days=2)
assert parse_arguments(['containers', '--created', '0h']).created == timedelta()
assert parse_arguments(['containers', '--created', '0']).created == timedelta()
| locationlabs/docker-rotate | tests/test_argparse.py | Python | apache-2.0 | 559 |
# -*- coding: utf-8 -
#
# This file is part of couchdb-requests released under the MIT license.
# See the NOTICE for more information.
#
from .exceptions import MultipleResultsFound, NoResultFound
class View(object):
"""
An iterable object representing a query.
Do not construct directly. Use :meth:`couchdbreq.Database.view`,
:meth:`couchdbreq.Database.all_docs` or :meth:`couchdbreq.view.View.filter`.
"""
UNDEFINED_VALUE = object()
def __init__(self, db, view_path, schema=None, params=None):
"""
Do not construct directly. Use :meth:`couchdbreq.Database.view`,
:meth:`couchdbreq.Database.all_docs` or :meth:`couchdbreq.view.View.filter`.
"""
self._params = params
self._db = db
self._view_path = view_path
self._schema = schema
def _iterator(self, **params):
mparams = {}
for k, v in self._params.iteritems():
            # UNDEFINED_VALUE is a sentinel object: compare by identity.
            if v is View.UNDEFINED_VALUE:
continue
mparams[k] = v
for k, v in params.iteritems():
            if v is View.UNDEFINED_VALUE:
continue
mparams[k] = v
keys = None
if 'keys' in mparams:
keys = mparams.pop('keys')
        if keys is not None:
resp = self._db._res.post(self._view_path, payload={ 'keys': keys }, params=mparams)
else:
resp = self._db._res.get(self._view_path, params=mparams)
schema = self._schema
for row in resp.json_body['rows']:
if schema is not None:
yield schema.wrap_row(row)
else:
yield row
def first(self, is_null_exception=False):
"""
Return the first result of this query or None if the result doesn’t contain any rows.
:param is_null_exception: If True then raise :class:`couchdbreq.exceptions.NoResultFound` if no
results are found.
:return: A dict representing the row result or None
"""
try:
return self._iterator(limit=1).next()
except StopIteration:
if is_null_exception:
raise NoResultFound()
return None
def one(self, is_null_exception=False):
"""
Return exactly one result or raise an exception if multiple results are found.
:param is_null_exception: If True then raise :class:`couchdbreq.exceptions.NoResultFound` if no
results are found.
:return: A dict representing the row result or None
"""
row1 = None
for row in self._iterator(limit=2):
if row1:
raise MultipleResultsFound()
row1 = row
if not row1 and is_null_exception:
raise NoResultFound()
return row1
def all(self):
"""
Get a list of all rows
:return: :py:class:`list`
"""
return list(self._iterator())
def count(self):
"""
Return the number of results
:return: :py:class:`int`
"""
# FIXME: Implement better
count = 0
for _ in self._iterator():
count += 1
return count
def __nonzero__(self):
return bool(self.count())
def __iter__(self):
return self._iterator()
def filter(self,
startkey=UNDEFINED_VALUE, endkey=UNDEFINED_VALUE,
keys=UNDEFINED_VALUE, key=UNDEFINED_VALUE,
startkey_docid=UNDEFINED_VALUE, endkey_docid=UNDEFINED_VALUE,
skip=UNDEFINED_VALUE, limit=UNDEFINED_VALUE,
inclusive_end=UNDEFINED_VALUE):
"""
Return a new View object with updated query parameters.
The original View object remains unchanged.
:return: A new :class:`couchdbreq.view.View` object
"""
params = self._params.copy()
        if startkey is not View.UNDEFINED_VALUE:
            params['startkey'] = startkey
        if endkey is not View.UNDEFINED_VALUE:
            params['endkey'] = endkey
        if keys is not View.UNDEFINED_VALUE:
            params['keys'] = keys
        if key is not View.UNDEFINED_VALUE:
            params['key'] = key
        if startkey_docid is not View.UNDEFINED_VALUE:
            params['startkey_docid'] = startkey_docid
        if endkey_docid is not View.UNDEFINED_VALUE:
            params['endkey_docid'] = endkey_docid
        if skip is not View.UNDEFINED_VALUE:
            params['skip'] = skip
        if limit is not View.UNDEFINED_VALUE:
            params['limit'] = limit
        if inclusive_end is not View.UNDEFINED_VALUE:
            params['inclusive_end'] = inclusive_end
return View(self._db, self._view_path, self._schema, params=params)
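# Usage sketch (illustrative only -- `db` stands for an existing
# couchdbreq.Database instance and the design document/view names are made up):
#
#     view = db.view('designdoc/by_name')
#     first_ten = view.filter(limit=10)
#     for row in first_ten:
#         print(row)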
| adamlofts/couchdb-requests | couchdbreq/view.py | Python | mit | 4,866 |
#!/usr/bin/python
import os
import multiprocessing
import shutil
# Provide access to the helper scripts
def modify_path():
scripts_dir = os.path.dirname(__file__)
    while 'Scripts' not in os.listdir(scripts_dir):
scripts_dir = os.path.abspath(os.path.join(scripts_dir, '..'))
scripts_dir = os.path.join(scripts_dir, 'Scripts')
    if scripts_dir not in os.environ['PATH']:
os.environ['PATH'] += os.pathsep + scripts_dir
print '\nPATH = {}\n'.format(os.environ['PATH'])
# Move new files and folders to 'Refs'
def move(old_snap):
new_snap = os.listdir(os.curdir)
if not os.path.exists('Refs'):
os.mkdir('Refs')
for f in new_snap:
if not f in old_snap:
fname = os.path.basename(f)
new_name = os.path.join(os.curdir, 'Refs', fname)
if os.path.isfile(new_name):
os.remove(new_name)
if os.path.isdir(new_name):
shutil.rmtree(new_name)
os.rename(f, new_name)
if __name__ == '__main__':
# Enable multithreading for ccx
os.environ['OMP_NUM_THREADS'] = str(multiprocessing.cpu_count())
# Explicitly move to example's directory
os.chdir(os.path.dirname(__file__))
# Run the example
modify_path()
snap = os.listdir(os.curdir)
os.system("cgx -b pre.fbl")
os.system("ccx Naht")
os.system("cgx -b post.fbl")
os.system("cgx -b plots.fbl")
move(snap)
| mkraska/CalculiX-Examples | Thermal/Thermografie/test.py | Python | mit | 1,441 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-11-06 14:00
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| sutiialex/djangogirls | blog/migrations/0001_initial.py | Python | apache-2.0 | 1,051 |
# Nemubot is a smart and modulable IM bot.
# Copyright (C) 2012-2016 Mercier Pierre-Olivier
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import xml.parsers.expat
from nemubot.tools.xmlparser import node as module_state
class ModuleStatesFile:
def __init__(self):
self.root = None
self.stack = list()
def startElement(self, name, attrs):
cur = module_state.ModuleState(name)
for name in attrs.keys():
cur.setAttribute(name, attrs[name])
self.stack.append(cur)
def characters(self, content):
self.stack[len(self.stack)-1].content += content
def endElement(self, name):
child = self.stack.pop()
size = len(self.stack)
if size > 0:
self.stack[size - 1].content = self.stack[size - 1].content.strip()
self.stack[size - 1].addChild(child)
else:
self.root = child
class XMLParser:
def __init__(self, knodes):
self.knodes = knodes
self.stack = list()
self.child = 0
def parse_file(self, path):
p = xml.parsers.expat.ParserCreate()
p.StartElementHandler = self.startElement
p.CharacterDataHandler = self.characters
p.EndElementHandler = self.endElement
with open(path, "rb") as f:
p.ParseFile(f)
return self.root
def parse_string(self, s):
p = xml.parsers.expat.ParserCreate()
p.StartElementHandler = self.startElement
p.CharacterDataHandler = self.characters
p.EndElementHandler = self.endElement
p.Parse(s, 1)
return self.root
@property
def root(self):
if len(self.stack):
return self.stack[0][0]
else:
return None
@property
def current(self):
if len(self.stack):
return self.stack[-1][0]
else:
return None
def display_stack(self):
return " in ".join([str(type(s).__name__) for s,c in reversed(self.stack)])
def startElement(self, name, attrs):
if not self.current or not hasattr(self.current, "startElement") or not self.current.startElement(name, attrs):
if name not in self.knodes:
raise TypeError(name + " is not a known type to decode")
else:
self.stack.append((self.knodes[name](**attrs), self.child))
self.child = 0
else:
self.child += 1
def characters(self, content):
if self.current and hasattr(self.current, "characters"):
self.current.characters(content)
def endElement(self, name):
if hasattr(self.current, "endElement"):
self.current.endElement(None)
if self.child:
self.child -= 1
# Don't remove root
elif len(self.stack) > 1:
last, self.child = self.stack.pop()
if hasattr(self.current, "addChild"):
if self.current.addChild(name, last):
return
raise TypeError(name + " tag not expected in " + self.display_stack())
def saveDocument(self, f=None, header=True, short_empty_elements=False):
if f is None:
import io
f = io.StringIO()
import xml.sax.saxutils
gen = xml.sax.saxutils.XMLGenerator(f, "utf-8", short_empty_elements=short_empty_elements)
if header:
gen.startDocument()
self.root.saveElement(gen)
if header:
gen.endDocument()
return f
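    # Usage sketch (illustrative): serialize a parser whose root has been
    # populated back to text.
    #
    #     xml_text = parser.saveDocument().getvalue()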
def parse_file(filename):
p = xml.parsers.expat.ParserCreate()
mod = ModuleStatesFile()
p.StartElementHandler = mod.startElement
p.EndElementHandler = mod.endElement
p.CharacterDataHandler = mod.characters
with open(filename, "rb") as f:
p.ParseFile(f)
return mod.root
def parse_string(string):
p = xml.parsers.expat.ParserCreate()
mod = ModuleStatesFile()
p.StartElementHandler = mod.startElement
p.EndElementHandler = mod.endElement
p.CharacterDataHandler = mod.characters
p.Parse(string, 1)
return mod.root
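# Usage sketch: both module-level helpers return the root ModuleState node,
# e.g. (element and attribute names here are arbitrary):
#
#     root = parse_string("<module name='demo'><option>42</option></module>")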
| nbr23/nemubot | nemubot/tools/xmlparser/__init__.py | Python | agpl-3.0 | 4,752 |
from datetime import datetime
#####################
# Account Test Data #
#####################
account = {
'name': 'Test Account Name',
'type': 'Checking',
'bank_name': 'Bank of Catonsville',
'account_num': '1234567890'
}
account_put = {
'name': 'Savings Account',
'type': 'Savings'
}
db_account = {
'id': 'acct_testaccountname',
'name': 'Test Account Name',
'type': 'Checking',
'bank_name': 'Bank of Catonsville',
'account_num': '1234567890',
'bal_uncleared': 2635.63,
'bal_cleared': -40.92,
'bal_reconciled': 1021.61,
'budget_monitored': True
}
db_account_2 = {
'id': 'acct_toaccountname',
'name': 'To Account Name',
'type': 'Savings',
'bank_name': 'Bank of Catonsville',
'account_num': '0987654321',
'bal_uncleared': 100.00,
'bal_cleared': 100.00,
'bal_reconciled': 200.00,
'budget_monitored': False
}
db_account_3 = {
'id': 'acct_to2accountname',
'name': 'To 2 Account Name',
'type': 'Savings',
'bank_name': 'Bank of Catonsville',
'account_num': '0987654320',
'bal_uncleared': 500.00,
'bal_cleared': 500.00,
'bal_reconciled': 600.00,
'budget_monitored': False
}
#########################
# Transaction Test Data #
#########################
transaction = {
'date': '2014-08-10',
'type': 'EFT',
'payee': 'Giant',
# need: category/account, split -> consider fields.Nested
'reconciled': '',
'amount': -52.08,
'memo': ''
}
transaction_transfer = {
'date': '2014-08-10',
'type': 'XFER',
'payee': 'Move to Savings',
'reconciled': '',
'amount': -100.00,
'memo': '',
'cat_or_acct_id': 'acct_toaccountname'
}
transaction_put_amount = { # id = 53f69e77137a001e344259cb (Amazon.com)
'amount': -14.01,
'memo': 'Birthday present'
}
transaction_put_reconciled = { # id = 53f69e77137a001e344259cb (Amazon.com)
'reconciled': 'C'
}
transaction_put_amountreconciled = { # id = 53f69e77137a001e344259cb (Amazon.com)
'amount': -14.01,
'reconciled': 'C'
}
db_transactions= [
{
'id': '53f69e77137a001e344259c7',
'date': datetime(2014,7,31),
'type': 'DEP',
'payee': 'Sandy Spring Bank',
'reconciled': 'R',
'amount': 1145.06,
'memo': 'Sandy\'s Salary',
'cat_or_acct_id': '1'
},
{
'id': '53f69e77137a001e344259c8',
'date': datetime(2014,8,1),
'type': 'EFT',
'payee': 'Costco',
'reconciled': 'R',
'amount': -123.45,
'memo': 'Test transaction memo',
'cat_or_acct_id': '2'
},
{
'id': '53f69e77137a001e344259c9',
'date': datetime(2014,8,6),
'type': 'EFT',
'payee': 'Exxon',
'reconciled': 'C',
'amount': -40.92,
'memo': '',
'cat_or_acct_id': '2'
},
{
'id': '53f69e77137a001e344259ca',
'date': datetime(2014,8,18),
'type': 'DEP',
'payee': 'U.S. Government',
'reconciled': '',
'amount': 2649.52,
'memo': 'Kyle\'s Salary',
'cat_or_acct_id': '1'
},
{
'id': '53f69e77137a001e344259cb',
'date': datetime(2014,8,12),
'type': 'EFT',
'payee': 'Amazon.com',
'reconciled': '',
'amount': -13.89,
'memo': '',
'cat_or_acct_id': '2'
}
]
db_transfer_transactions_fromAcct= [
{
'id': '53f69e77137a001e344259c7',
'date': datetime(2014,7,31),
'type': 'XFER',
'payee': 'To Savings',
'reconciled': 'C',
'amount': -100.00,
'memo': '',
'cat_or_acct_id': 'acct_toaccountname'
},
{
'id': '53f69e77137a001e344259c8',
'date': datetime(2014,7,31),
'type': 'XFER',
'payee': 'To Savings',
'reconciled': 'C',
'amount': -100.00,
'memo': '',
'cat_or_acct_id': 'somecategoryidstring'
}
]
db_transfer_transactions_toAcct= [
{
'id': '53f69e77137a001e344259c7',
'date': datetime(2014,7,31),
'type': 'XFER',
'payee': 'To Savings',
'reconciled': 'R',
'amount': 100.00,
'memo': '',
'cat_or_acct_id': 'acct_testaccountname'
}
]
###################
# Payee Test Data #
###################
payee = { 'name': 'Costco' }
payee_put = { 'name': 'Newegg.com' }
db_payees = [
{
'id': '53f69e77137a001e344259f1',
'name': 'Costco'
},
{
'id': '53f69e77137a001e344259f2',
'name': 'Amazon.com'
},
{
'id': '53f69e77137a001e344259f3',
'name': 'U.S. Government'
},
{
'id': '53f69e77137a001e344259f4',
'name': 'Exxon'
},
{
'id': '53f69e77137a001e344259f5',
'name': 'Sandy Spring Bank'
}
]
######################
# Category Test Data #
######################
category_1 = {
'name': 'Tithe',
'parent_id': None
}
category_2 = {
'name': 'Gas & Electric',
'parent_id': '1234567890'
}
category_put = { 'parent_id': '1234567890' }
db_categories = [
{
'id': '53f69e77137a001e344259f1',
'name': 'Auto',
'budget_tracked': False,
'parent_id': None
},
{
'id': '53f69e77137a001e344259fa',
'name': 'Gas',
'budget_tracked': True,
'parent_id': '53f69e77137a001e344259f1' # Parent = Auto
},
{
'id': '53f69e77137a001e344259fb',
'name': 'Service',
'budget_tracked': True,
'parent_id': '53f69e77137a001e344259f1' # Parent = Auto
},
{
'id': '53f69e77137a001e344259f2',
'name': 'Dining & Entertainment',
'budget_tracked': True,
'parent_id': None
},
{
'id': '53f69e77137a001e344259f3',
'name': 'Tithe',
'budget_tracked': True,
'parent_id': None
}
] | kschoelz/abacuspb | test/test_data.py | Python | gpl-2.0 | 5,915 |
#coding: utf-8
from functools import partial, update_wrapper
from django.utils.datastructures import MultiValueDictKeyError
from django.core.paginator import Paginator, EmptyPage
from django.http import Http404
from . import settings
from .utils import unicode_urlencode, get_instance_from_path
class SimplePaginator(object):
"""
    Class-based decorator.
    The SimplePagination decorator must be used along with the 'render_to'
    decorator from the django-annoying application:
http://bitbucket.org/offline/django-annoying/wiki/Home
"""
def __init__(self, key=None, style=None, per_page=None,
frame_size=None, template=None, anchor=None):
"""
Decorator parameters
key - Name of the variable with objects that we paginate.
style - name of pagination backend.
        per_page - number of objects to show per page.
        frame_size - max number of page links to show.
"""
self.style = style or settings.PAGINATION_STYLE
self.anchor = anchor
self.backend = get_instance_from_path(settings.PAGINATION_BACKENDS[self.style])
self.key = key or self.backend.KEY
self.per_page = per_page or self.backend.PER_PAGE
self.frame_size = frame_size or self.backend.FRAME_SIZE
self.template = template or self.backend.TEMPLATE or 'paginator_%s.html' % self.style
self.user_per_page_allowed = self.backend.USER_PER_PAGE_ALLOWED
self.user_per_page_max = self.backend.USER_PER_PAGE_MAX
def __call__(self, function):
"""
Receive decorated function and return
function decorated with decorate method
"""
decorated = partial(self.decorate, function)
return update_wrapper(decorated, self.decorate)
def decorate(self, function, request, *args, **kwargs):
# execute view
output = function(request, *args, **kwargs)
# only try to paginate if view returned dictionary,
# in all other cases just return view output.
if not isinstance(output, dict):
return output
params = request.GET.copy()
try:
current_page = int(params.pop('page')[0])
except (ValueError, KeyError):
current_page = 1
        # We don't modify self.per_page because this is a decorator
        # and it is initialized only once.
per_page = self.per_page
        # per_page may be overridden from GET parameters only if this
        # is allowed in the settings or backend, and it must be less
        # than or equal to self.user_per_page_max.
if self.user_per_page_allowed and 'per_page' in params:
try:
user_per_page = int(params['per_page'])
if user_per_page <= self.user_per_page_max:
per_page = user_per_page
else:
per_page = self.user_per_page_max
params['per_page'] = self.user_per_page_max
except (ValueError, KeyError):
params['per_page'] = self.per_page
elif 'per_page' in params:
params.pop('per_page')
        # We paginate the value of self.key; the original object is replaced
        # with only the items that belong on the current page.
try:
paginate_qs = output.pop(self.key)
except KeyError:
raise KeyError("Key '%s' not found in view's returned dictionary" % self.key)
        # create Django's built-in Paginator object
        paginator = Paginator(paginate_qs, per_page)
        try:
            # check that the requested page exists
page = paginator.page(current_page)
except EmptyPage:
raise Http404()
        # replace the paginated items with only the items we should see.
        output[self.key] = page.object_list
        # extra data that we may need to build links
        data = {}
        data['current_page'] = current_page # active page number
        data['per_page'] = per_page # items per page
        data['params'] = unicode_urlencode(params) # GET parameters
        data['anchor'] = self.anchor # anchor
data['number_of_pages'] = number_of_pages = paginator.num_pages # number of pages
data['template'] = self.template
data['count'] = paginator.count
# execute the pagination function
data.update(self.backend.paginate(self.frame_size, number_of_pages, current_page))
        # The view's output now has an extra key 'paginator' with all pagination data.
output['paginator'] = data
return output
paginate = SimplePaginator
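# Usage sketch (hypothetical view; requires the 'render_to' decorator from
# django-annoying, as noted in the class docstring):
#
#     @render_to('article_list.html')
#     @paginate(key='object_list', per_page=20)
#     def article_list(request):
#         return {'object_list': Article.objects.all()}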
def simple_paginate(queryset, request, *args, **kwargs):
    """
    http://habrahabr.ru/blogs/django/76961/#comment_2239477
    Wrapper that allows pagination without the 'render_to' decorator.
    """
    @paginate(*args, **kwargs)
    def inner(request, queryset):
        return {'object_list': queryset}
    data = inner(request, queryset)
    # data now holds
    # {'object_list': <the paginated object_list>, 'paginator': <our paginator data>}.
    # Alternatively this could return two values, or accept a context
    # dictionary and update it.
    data['paginator']['object_list'] = data['object_list']
    return data['paginator']
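# Usage sketch without render_to (illustrative model/template names):
#
#     def article_list(request):
#         context = simple_paginate(Article.objects.all(), request, per_page=20)
#         return render(request, 'article_list.html', context)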
| quantum13/hgh | apps/simplepagination/__init__.py | Python | gpl-2.0 | 5,458 |
from __future__ import annotations
from dataclasses import dataclass
import os
from ..typecheck import *
from ..import core
from .import dap
if TYPE_CHECKING:
from .session import Session
@dataclass
class SourceLocation:
source: dap.Source
line: int|None = None
column: int|None = None
@staticmethod
def from_path(file: str, line: int|None, column: int|None) -> SourceLocation:
return SourceLocation(dap.Source(os.path.basename(file), file), line, column)
@property
def name(self) -> str:
name = os.path.basename(self.source.name or '??')
if self.column and self.line:
return f'{name}@{self.line}:{self.column}'
if self.line:
return f'{name}@{self.line}'
return name
class Variable:
def __init__(self, session: Session, name: str, value: str|None, variablesReference: int|None, containerVariablesReference: int|None = None, evaluateName: str|None = None) -> None:
self.session = session
self.name = name
self.evaluateName = evaluateName
self.value = value
self.variablesReference = variablesReference
self.containerVariablesReference = containerVariablesReference
self.fetched: core.Future[list[Variable]]|None = None
@staticmethod
def from_variable(session: Session, containerVariablesReference: int, variable: dap.Variable):
return Variable(
session,
variable.name,
variable.value,
variable.variablesReference,
containerVariablesReference,
variable.evaluateName,
)
@staticmethod
def from_scope(session: Session, scope: dap.Scope):
return Variable(
session,
scope.name,
None,
scope.variablesReference,
)
@staticmethod
def from_evaluate(session: Session, name: str, evaluate: dap.EvaluateResponse):
return Variable(
session,
name,
evaluate.result,
evaluate.variablesReference,
)
async def fetch(self):
assert self.variablesReference
return await self.session.get_variables(self.variablesReference)
async def children(self) -> list[Variable]:
if not self.has_children:
return []
if not self.fetched:
self.fetched = core.run(self.fetch())
children = await self.fetched
return children
@property
def has_children(self) -> bool:
return bool(self.variablesReference)
| dmilith/SublimeText3-dmilith | Packages/Debugger/modules/dap/variable.py | Python | mit | 2,214 |
__author__ = 'regu0004'
| its-dirg/id_token_verify | src/__init__.py | Python | apache-2.0 | 24 |
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import unittest
from webkitpy.common.net import layouttestresults_unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.layout_tests.layout_package.json_results_generator import strip_json_wrapper
from webkitpy.layout_tests.port.base import Port
from webkitpy.tool.commands.rebaseline_server import TestConfig, RebaselineServer
from webkitpy.tool.servers import rebaseline_server
class RebaselineTestTest(unittest.TestCase):
def test_text_rebaseline_update(self):
self._assertRebaseline(
test_files=(
'fast/text-expected.txt',
'platform/mac/fast/text-expected.txt',
),
results_files=(
'fast/text-actual.txt',
),
test_name='fast/text.html',
baseline_target='mac',
baseline_move_to='none',
expected_success=True,
expected_log=[
'Rebaselining fast/text...',
' Updating baselines for mac',
' Updated text-expected.txt',
])
def test_text_rebaseline_new(self):
self._assertRebaseline(
test_files=(
'fast/text-expected.txt',
),
results_files=(
'fast/text-actual.txt',
),
test_name='fast/text.html',
baseline_target='mac',
baseline_move_to='none',
expected_success=True,
expected_log=[
'Rebaselining fast/text...',
' Updating baselines for mac',
' Updated text-expected.txt',
])
def test_text_rebaseline_move_no_op_1(self):
self._assertRebaseline(
test_files=(
'fast/text-expected.txt',
'platform/win/fast/text-expected.txt',
),
results_files=(
'fast/text-actual.txt',
),
test_name='fast/text.html',
baseline_target='mac',
baseline_move_to='mac-leopard',
expected_success=True,
expected_log=[
'Rebaselining fast/text...',
' Updating baselines for mac',
' Updated text-expected.txt',
])
def test_text_rebaseline_move_no_op_2(self):
self._assertRebaseline(
test_files=(
'fast/text-expected.txt',
'platform/mac/fast/text-expected.checksum',
),
results_files=(
'fast/text-actual.txt',
),
test_name='fast/text.html',
baseline_target='mac',
baseline_move_to='mac-leopard',
expected_success=True,
expected_log=[
'Rebaselining fast/text...',
' Moving current mac baselines to mac-leopard',
' No current baselines to move',
' Updating baselines for mac',
' Updated text-expected.txt',
])
def test_text_rebaseline_move(self):
self._assertRebaseline(
test_files=(
'fast/text-expected.txt',
'platform/mac/fast/text-expected.txt',
),
results_files=(
'fast/text-actual.txt',
),
test_name='fast/text.html',
baseline_target='mac',
baseline_move_to='mac-leopard',
expected_success=True,
expected_log=[
'Rebaselining fast/text...',
' Moving current mac baselines to mac-leopard',
' Moved text-expected.txt',
' Updating baselines for mac',
' Updated text-expected.txt',
])
def test_text_rebaseline_move_only_images(self):
self._assertRebaseline(
test_files=(
'fast/image-expected.txt',
'platform/mac/fast/image-expected.txt',
'platform/mac/fast/image-expected.png',
'platform/mac/fast/image-expected.checksum',
),
results_files=(
'fast/image-actual.png',
'fast/image-actual.checksum',
),
test_name='fast/image.html',
baseline_target='mac',
baseline_move_to='mac-leopard',
expected_success=True,
expected_log=[
'Rebaselining fast/image...',
' Moving current mac baselines to mac-leopard',
' Moved image-expected.checksum',
' Moved image-expected.png',
' Updating baselines for mac',
' Updated image-expected.checksum',
' Updated image-expected.png',
])
def test_text_rebaseline_move_already_exist(self):
self._assertRebaseline(
test_files=(
'fast/text-expected.txt',
'platform/mac-leopard/fast/text-expected.txt',
'platform/mac/fast/text-expected.txt',
),
results_files=(
'fast/text-actual.txt',
),
test_name='fast/text.html',
baseline_target='mac',
baseline_move_to='mac-leopard',
expected_success=False,
expected_log=[
'Rebaselining fast/text...',
' Moving current mac baselines to mac-leopard',
' Already had baselines in mac-leopard, could not move existing mac ones',
])
def test_image_rebaseline(self):
self._assertRebaseline(
test_files=(
'fast/image-expected.txt',
'platform/mac/fast/image-expected.png',
'platform/mac/fast/image-expected.checksum',
),
results_files=(
'fast/image-actual.png',
'fast/image-actual.checksum',
),
test_name='fast/image.html',
baseline_target='mac',
baseline_move_to='none',
expected_success=True,
expected_log=[
'Rebaselining fast/image...',
' Updating baselines for mac',
' Updated image-expected.checksum',
' Updated image-expected.png',
])
def test_gather_baselines(self):
example_json = layouttestresults_unittest.LayoutTestResultsTest.example_full_results_json
results_json = json.loads(strip_json_wrapper(example_json))
server = RebaselineServer()
server._test_config = get_test_config()
server._gather_baselines(results_json)
self.assertEqual(
results_json['tests']['svg/dynamic-updates/SVGFEDropShadowElement-dom-stdDeviation-attr.html']['state'],
'needs_rebaseline')
self.assertNotIn('prototype-chocolate.html', results_json['tests'])
def _assertRebaseline(self, test_files, results_files, test_name, baseline_target,
baseline_move_to, expected_success, expected_log):
log = []
test_config = get_test_config(test_files, results_files)
success = rebaseline_server._rebaseline_test(
test_name,
baseline_target,
baseline_move_to,
test_config,
log=log.append)
self.assertEqual(expected_log, log)
self.assertEqual(expected_success, success)
class GetActualResultFilesTest(unittest.TestCase):
def test(self):
test_config = get_test_config(result_files=(
'fast/text-actual.txt',
'fast2/text-actual.txt',
'fast/text2-actual.txt',
'fast/text-notactual.txt',
))
self.assertItemsEqual(
('text-actual.txt',),
rebaseline_server._get_actual_result_files(
'fast/text.html', test_config))
class GetBaselinesTest(unittest.TestCase):
def test_no_baselines(self):
self._assertBaselines(
test_files=(),
test_name='fast/missing.html',
expected_baselines={})
def test_text_baselines(self):
self._assertBaselines(
test_files=(
'fast/text-expected.txt',
'platform/mac/fast/text-expected.txt',
),
test_name='fast/text.html',
expected_baselines={
'mac': {'.txt': True},
'base': {'.txt': False},
})
def test_image_and_text_baselines(self):
self._assertBaselines(
test_files=(
'fast/image-expected.txt',
'platform/mac/fast/image-expected.png',
'platform/mac/fast/image-expected.checksum',
'platform/win/fast/image-expected.png',
'platform/win/fast/image-expected.checksum',
),
test_name='fast/image.html',
expected_baselines={
'base': {'.txt': True},
'mac': {'.checksum': True, '.png': True},
'win': {'.checksum': False, '.png': False},
})
def test_extra_baselines(self):
self._assertBaselines(
test_files=(
'fast/text-expected.txt',
'platform/nosuchplatform/fast/text-expected.txt',
),
test_name='fast/text.html',
expected_baselines={'base': {'.txt': True}})
def _assertBaselines(self, test_files, test_name, expected_baselines):
actual_baselines = rebaseline_server.get_test_baselines(test_name, get_test_config(test_files))
self.assertEqual(expected_baselines, actual_baselines)
def get_test_config(test_files=None, result_files=None):
test_files = test_files or []
result_files = result_files or []
host = MockHost()
port = host.port_factory.get()
layout_tests_directory = port.layout_tests_dir()
results_directory = port.results_directory()
for filename in test_files:
host.filesystem.write_binary_file(host.filesystem.join(layout_tests_directory, filename), '')
for filename in result_files:
host.filesystem.write_binary_file(host.filesystem.join(results_directory, filename), '')
class TestMacPort(Port):
# Abstract method path_to_apache not implemented - pylint: disable=abstract-method
port_name = "mac"
FALLBACK_PATHS = {'': ['mac']}
return TestConfig(
TestMacPort(host, 'mac'),
layout_tests_directory,
results_directory,
('mac', 'mac-leopard', 'win', 'linux'),
host)
| danakj/chromium | third_party/WebKit/Tools/Scripts/webkitpy/tool/servers/rebaseline_server_unittest.py | Python | bsd-3-clause | 12,199 |
## \file
## \ingroup tutorial_pyroot
## \notebook -nodraw
## example of macro to read data from an ascii file and
## create a root file with a Tree.
##
## NOTE: comparing the results of this macro with those of staff.C, you'll
## notice that the resultant file is a couple of bytes smaller, because the
## code below strips all white-spaces, whereas the .C version does not.
##
## \macro_code
##
## \author Wim Lavrijsen
import re, array, os
import ROOT
from ROOT import TFile, TTree, gROOT, AddressOf
## A C/C++ structure is required, to allow memory based access
gROOT.ProcessLine(
"struct staff_t {\
Int_t Category;\
UInt_t Flag;\
Int_t Age;\
Int_t Service;\
Int_t Children;\
Int_t Grade;\
Int_t Step;\
Int_t Hrweek;\
Int_t Cost;\
Char_t Division[4];\
Char_t Nation[3];\
};" );
## Function to read in data from ASCII file and fill the ROOT tree
def staff():
staff = ROOT.staff_t()
    # The input file cernstaff.dat is a copy of the CERN staff database
    # from 1988
f = TFile( 'staff.root', 'RECREATE' )
tree = TTree( 'T', 'staff data from ascii file' )
tree.Branch( 'staff', staff, 'Category/I:Flag:Age:Service:Children:Grade:Step:Hrweek:Cost' )
tree.Branch( 'Divisions', AddressOf( staff, 'Division' ), 'Division/C' )
tree.Branch( 'Nation', AddressOf( staff, 'Nation' ), 'Nation/C' )
# note that the branches Division and Nation cannot be on the first branch
fname = os.path.join(ROOT.gROOT.GetTutorialsDir(), 'tree', 'cernstaff.dat')
for line in open(fname).readlines():
t = list(filter( lambda x: x, re.split( '\s+', line ) ) )
staff.Category = int(t[0]) # assign as integers
staff.Flag = int(t[1])
staff.Age = int(t[2])
staff.Service = int(t[3])
staff.Children = int(t[4])
staff.Grade = int(t[5])
staff.Step = int(t[6])
staff.Hrweek = int(t[7])
staff.Cost = int(t[8])
staff.Division = t[9] # assign as strings
staff.Nation = t[10]
tree.Fill()
tree.Print()
tree.Write()
#### run fill function if invoked on CLI
if __name__ == '__main__':
staff()
| lgiommi/root | tutorials/pyroot/staff.py | Python | lgpl-2.1 | 2,307 |
"""json-rpc protocol handler."""
import json
import logging
import sys
from jsonrpc_pyclient.error import JsonRpcError
# configure logger
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.ERROR)
# get the proper JSONDecodeError exception type
if sys.version_info < (3, 5):
JSONDecodeError = ValueError
else:
JSONDecodeError = json.decoder.JSONDecodeError
class RpcProtocolHandler(object):
"""json-rpc protocol handler."""
request_id = 0
def __init__(self, version='2.0'):
"""
Create RpcProtocolHandler object.
args:
version -- json-rpc server version ('1.0', '2.0')
"""
self._version = version
def build_request(self, method, parameters, is_notification):
"""
Build json-rpc request string.
usage:
build_request('hello', {'name': 'Bob'}, False)
build_request('add', [1, 2], False)
build_request('notification', None, True)
args:
method -- name of method to invoke on server
parameters -- method parameters. If present, parameters must
be a json structured value ({}, []). If the
method doesn't take any parameters, use None.
is_notification -- whether or not the request is a notification
returns:
a tuple of:
json-rpc request string
json-rpc request id (None for notifications)
"""
request = {}
# populate json-rpc request fields
request['method'] = method
if parameters:
request['params'] = parameters
if self._version == '2.0':
request['jsonrpc'] = self._version
if not is_notification:
RpcProtocolHandler.request_id += 1
req_id = RpcProtocolHandler.request_id
request['id'] = req_id
elif self._version == '1.0':
req_id = None
request['id'] = req_id
else:
req_id = None
# convert json object into a string with a linefeed delimiter
request = (json.dumps(request) + '\n')
return request, req_id
def handle_response(self, in_response):
"""
Handle json-rpc response string.
args:
in_response -- json-rpc response string
returns:
json-rpc result field (None if input string isn't valid json)
"""
global _logger
if in_response:
try:
response = json.loads(in_response)
except JSONDecodeError as e:
# The server should practically never return a string
# that isn't properly json-formatted, but in the case
# that something does go wrong (e.g. the network garbles
# the string), we'll log the exception and move on.
_logger.error('invalid json string - %s: %s', in_response, e)
return None
else:
if 'result' in response:
return response['result']
else:
# if there's no result field, that means an error
# occurred, so let's raise an exception.
raise JsonRpcError(response['error']['code'],
response['error']['message'])
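# Round-trip sketch (the transport layer is out of scope here; the response
# string below is hand-written to match what a JSON-RPC 2.0 server would send):
#
#     handler = RpcProtocolHandler('2.0')
#     request, req_id = handler.build_request('add', [1, 2], False)
#     # ... send `request` over the wire, read the raw reply string back ...
#     result = handler.handle_response('{"jsonrpc": "2.0", "result": 3, "id": 1}')
#     # result == 3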
| tvannoy/jsonrpc_pyclient | jsonrpc_pyclient/protocolhandler.py | Python | mit | 3,539 |
# Sketch - A Python-based interactive drawing program
# Copyright (C) 1997, 1998, 1999, 2001, 2002 by Bernhard Herzog
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# A dialog for specifying line properties
#
import operator
from X import LineDoubleDash
from Sketch.const import JoinMiter, JoinBevel, JoinRound,\
CapButt, CapProjecting, CapRound
from Sketch.Lib import util
from Sketch import _, Trafo, SimpleGC, SolidPattern, EmptyPattern, \
StandardDashes, StandardArrows, StandardColors
from Tkinter import Frame, Label, IntVar, LEFT, X, E, W, GROOVE
from tkext import ColorButton, UpdatedCheckbutton, MyOptionMenu2
from sketchdlg import StylePropertyPanel
from lengthvar import create_length_entry
import skpixmaps
pixmaps = skpixmaps.PixmapTk
def create_bitmap_image(tk, name, bitmap):
data = util.xbm_string(bitmap)
tk.call(('image', 'create', 'bitmap', name, '-foreground', 'black',
'-data', data, '-maskdata', data))
return name
_thickness = 3
_width = 90
def draw_dash_bitmap(gc, dashes):
scale = float(_thickness)
if dashes:
dashes = map(operator.mul, dashes, [scale] * len(dashes))
dashes = map(int, map(round, dashes))
for idx in range(len(dashes)):
length = dashes[idx]
if length <= 0:
dashes[idx] = 1
elif length > 255:
dashes[idx] = 255
else:
dashes = [_width + 10, 1]
gc.SetDashes(dashes)
gc.DrawLine(0, _thickness / 2, _width, _thickness / 2)
def create_dash_images(tk, tkwin, dashes):
bitmap = tkwin.CreatePixmap(_width, _thickness, 1)
gc = bitmap.CreateGC(foreground = 1, background = 0,
line_style = LineDoubleDash, line_width = _thickness)
images = []
for dash in dashes:
draw_dash_bitmap(gc, dash)
image = create_bitmap_image(tk, 'dash_' + `len(images)`, bitmap)
images.append((image, dash))
return gc, bitmap, images
_arrow_width = 31
_arrow_height = 25
_mirror = Trafo(-1, 0, 0, 1, 0, 0)
def draw_arrow_bitmap(gc, arrow, which = 2):
gc.gc.foreground = 0
gc.gc.FillRectangle(0, 0, _arrow_width + 1, _arrow_height + 1)
gc.gc.foreground = 1
y = _arrow_height / 2
if which == 1:
gc.PushTrafo()
gc.Concat(_mirror)
gc.DrawLineXY(0, 0, -1000, 0)
if arrow is not None:
arrow.Draw(gc)
if which == 1:
gc.PopTrafo()
def create_arrow_images(tk, tkwin, arrows):
arrows = [None] + arrows
bitmap = tkwin.CreatePixmap(_arrow_width, _arrow_height, 1)
gc = SimpleGC()
gc.init_gc(bitmap, foreground = 1, background = 0, line_width = 3)
gc.Translate(_arrow_width / 2, _arrow_height / 2)
gc.Scale(2)
images1 = []
for arrow in arrows:
draw_arrow_bitmap(gc, arrow, 1)
image = create_bitmap_image(tk, 'arrow1_' + `len(images1)`, bitmap)
images1.append((image, arrow))
images2 = []
for arrow in arrows:
draw_arrow_bitmap(gc, arrow, 2)
image = create_bitmap_image(tk, 'arrow2_' + `len(images2)`, bitmap)
images2.append((image, arrow))
return gc, bitmap, images1, images2
class LinePanel(StylePropertyPanel):
title = _("Line Style")
def __init__(self, master, main_window, doc):
StylePropertyPanel.__init__(self, master, main_window, doc,
name = 'linedlg')
def build_dlg(self):
top = self.top
button_frame = self.create_std_buttons(top)
button_frame.grid(row = 5, columnspan = 2, sticky = 'ew')
color_frame = Frame(top, relief = GROOVE, bd = 2)
color_frame.grid(row = 0, columnspan = 2, sticky = 'ew')
label = Label(color_frame, text = _("Color"))
label.pack(side = LEFT, expand = 1, anchor = E)
self.color_but = ColorButton(color_frame, width = 3, height = 1,
command = self.set_line_color)
self.color_but.SetColor(StandardColors.black)
self.color_but.pack(side = LEFT, expand = 1, anchor = W)
self.var_color_none = IntVar(top)
check = UpdatedCheckbutton(color_frame, text = _("None"),
variable = self.var_color_none,
command = self.do_apply)
check.pack(side = LEFT, expand = 1)
width_frame = Frame(top, relief = GROOVE, bd = 2)
width_frame.grid(row = 1, columnspan = 2, sticky = 'ew')
label = Label(width_frame, text = _("Width"))
label.pack(side = LEFT, expand = 1, anchor = E)
self.var_width = create_length_entry(top, width_frame,
self.set_line_width,
scroll_pad = 0)
tkwin = self.main_window.canvas.tkwin
gc, bitmap, dashlist = create_dash_images(self.top.tk, tkwin,
StandardDashes())
self.opt_dash = MyOptionMenu2(top, dashlist, command = self.set_dash,
entry_type = 'image',
highlightthickness = 0)
self.opt_dash.grid(row = 2, columnspan = 2, sticky = 'ew', ipady = 2)
self.dash_gc = gc
self.dash_bitmap = bitmap
gc, bitmap, arrow1, arrow2 = create_arrow_images(self.top.tk, tkwin,
StandardArrows())
self.opt_arrow1 = MyOptionMenu2(top, arrow1, command = self.set_arrow,
args = 1, entry_type = 'image',
highlightthickness = 0)
self.opt_arrow1.grid(row = 3, column = 0, sticky = 'ew', ipady = 2)
self.opt_arrow2 = MyOptionMenu2(top, arrow2, command = self.set_arrow,
args = 2, entry_type = 'image',
highlightthickness = 0)
self.opt_arrow2.grid(row = 3, column = 1, sticky = 'ew', ipady = 2)
self.arrow_gc = gc
self.arrow_bitmap = bitmap
self.opt_join = MyOptionMenu2(top, [(pixmaps.JoinMiter, JoinMiter),
(pixmaps.JoinRound, JoinRound),
(pixmaps.JoinBevel, JoinBevel)],
command = self.set_line_join,
entry_type = 'bitmap',
highlightthickness = 0)
self.opt_join.grid(row = 4, column = 0, sticky = 'ew')
self.opt_cap = MyOptionMenu2(top,
[(pixmaps.CapButt, CapButt),
(pixmaps.CapRound, CapRound),
(pixmaps.CapProjecting, CapProjecting)],
command = self.set_line_cap,
entry_type = 'bitmap',
highlightthickness = 0)
self.opt_cap.grid(row = 4, column = 1, sticky = 'ew')
self.opt_cap.SetValue(None)
def close_dlg(self):
StylePropertyPanel.close_dlg(self)
self.var_width = None
def init_from_style(self, style):
if style.HasLine():
self.var_color_none.set(0)
self.opt_join.SetValue(style.line_join)
self.opt_cap.SetValue(style.line_cap)
self.color_but.SetColor(style.line_pattern.Color())
self.var_width.set(style.line_width)
self.init_dash(style)
self.init_arrow(style)
else:
self.var_color_none.set(1)
def init_from_doc(self):
self.Update()
def Update(self):
if self.document.HasSelection():
properties = self.document.CurrentProperties()
self.init_from_style(properties)
def do_apply(self):
kw = {}
if not self.var_color_none.get():
color = self.color_but.Color()
kw["line_pattern"] = SolidPattern(color)
kw["line_width"] = self.var_width.get()
kw["line_join"] = self.opt_join.GetValue()
kw["line_cap"] = self.opt_cap.GetValue()
kw["line_dashes"] = self.opt_dash.GetValue()
kw["line_arrow1"] = self.opt_arrow1.GetValue()
kw["line_arrow2"] = self.opt_arrow2.GetValue()
else:
kw["line_pattern"] = EmptyPattern
self.set_properties(_("Set Outline"), 'line', kw)
def set_line_join(self, *args):
self.document.SetProperties(line_join = self.opt_join.GetValue(),
if_type_present = 1)
def set_line_cap(self, *args):
self.document.SetProperties(line_cap = self.opt_cap.GetValue(),
if_type_present = 1)
def set_line_color(self):
self.document.SetLineColor(self.color_but.Color())
def set_line_width(self, *rest):
self.document.SetProperties(line_width = self.var_width.get(),
if_type_present = 1)
def set_dash(self, *args):
self.document.SetProperties(line_dashes = self.opt_dash.GetValue(),
if_type_present = 1)
def init_dash(self, style):
dashes = style.line_dashes
draw_dash_bitmap(self.dash_gc, dashes)
dash_image = create_bitmap_image(self.top.tk, 'dash_image',
self.dash_bitmap)
self.opt_dash.SetValue(dashes, dash_image)
def set_arrow(self, arrow, which):
if which == 1:
self.document.SetProperties(line_arrow1 = arrow,
if_type_present = 1)
else:
self.document.SetProperties(line_arrow2 = arrow,
if_type_present = 1)
def init_arrow(self, style):
arrow = style.line_arrow1
draw_arrow_bitmap(self.arrow_gc, arrow, 1)
arrow_image = create_bitmap_image(self.top.tk, 'arrow1_image',
self.arrow_bitmap)
self.opt_arrow1.SetValue(arrow, arrow_image)
arrow = style.line_arrow2
draw_arrow_bitmap(self.arrow_gc, arrow, 2)
arrow_image = create_bitmap_image(self.top.tk, 'arrow2_image',
self.arrow_bitmap)
self.opt_arrow2.SetValue(arrow, arrow_image)
def update_from_object_cb(self, obj):
if obj is not None:
self.init_from_style(obj.Properties())
| shumik/skencil-c | Sketch/UI/linedlg.py | Python | gpl-2.0 | 11,272 |
# ===================================================================
#
# Copyright (c) 2014, Legrandin <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
import unittest
from binascii import unhexlify
from Cryptodome.Util.py3compat import b, bchr
from Cryptodome.Util.number import bytes_to_long
from Cryptodome.Util.strxor import strxor
from Cryptodome.SelfTest.st_common import list_test_cases
from Cryptodome.SelfTest.loader import load_tests
from Cryptodome.Hash import SHA1
from Cryptodome.PublicKey import RSA
from Cryptodome.Signature import pkcs1_15
from Cryptodome.Signature import PKCS1_v1_5
def load_hash_by_name(hash_name):
return __import__("Cryptodome.Hash." + hash_name, globals(), locals(), ["new"])
class FIPS_PKCS1_Verify_Tests(unittest.TestCase):
def shortDescription(self):
return "FIPS PKCS1 Tests (Verify)"
def test_can_sign(self):
test_public_key = RSA.generate(1024).publickey()
verifier = pkcs1_15.new(test_public_key)
self.assertEqual(verifier.can_sign(), False)
test_vectors_verify = load_tests(("Cryptodome", "SelfTest", "Signature", "test_vectors", "PKCS1-v1.5"),
"SigVer15_186-3.rsp",
"Signature Verification 186-3",
{ 'shaalg' : lambda x: x,
'd' : lambda x: int(x),
'result' : lambda x: x })
for count, tv in enumerate(test_vectors_verify):
if isinstance(tv, basestring):
continue
if hasattr(tv, "n"):
modulus = tv.n
continue
hash_module = load_hash_by_name(tv.shaalg.upper())
hash_obj = hash_module.new(tv.msg)
public_key = RSA.construct([bytes_to_long(x) for x in modulus, tv.e])
verifier = pkcs1_15.new(public_key)
def positive_test(self, hash_obj=hash_obj, verifier=verifier, signature=tv.s):
verifier.verify(hash_obj, signature)
def negative_test(self, hash_obj=hash_obj, verifier=verifier, signature=tv.s):
self.assertRaises(ValueError, verifier.verify, hash_obj, signature)
if tv.result == 'f':
setattr(FIPS_PKCS1_Verify_Tests, "test_negative_%d" % count, negative_test)
else:
setattr(FIPS_PKCS1_Verify_Tests, "test_positive_%d" % count, positive_test)
class FIPS_PKCS1_Sign_Tests(unittest.TestCase):
def shortDescription(self):
return "FIPS PKCS1 Tests (Sign)"
def test_can_sign(self):
test_private_key = RSA.generate(1024)
signer = pkcs1_15.new(test_private_key)
self.assertEqual(signer.can_sign(), True)
test_vectors_sign = load_tests(("Cryptodome", "SelfTest", "Signature", "test_vectors", "PKCS1-v1.5"),
"SigGen15_186-2.txt",
"Signature Generation 186-2",
{ 'shaalg' : lambda x: x })
test_vectors_sign += load_tests(("Cryptodome", "SelfTest", "Signature", "test_vectors", "PKCS1-v1.5"),
"SigGen15_186-3.txt",
"Signature Generation 186-3",
{ 'shaalg' : lambda x: x })
for count, tv in enumerate(test_vectors_sign):
if isinstance(tv, basestring):
continue
if hasattr(tv, "n"):
modulus = tv.n
continue
if hasattr(tv, "e"):
private_key = RSA.construct([bytes_to_long(x) for x in modulus, tv.e, tv.d])
signer = pkcs1_15.new(private_key)
continue
hash_module = load_hash_by_name(tv.shaalg.upper())
hash_obj = hash_module.new(tv.msg)
def new_test(self, hash_obj=hash_obj, signer=signer, result=tv.s):
signature = signer.sign(hash_obj)
self.assertEqual(signature, result)
setattr(FIPS_PKCS1_Sign_Tests, "test_%d" % count, new_test)
class PKCS1_15_NoParams(unittest.TestCase):
"""Verify that PKCS#1 v1.5 signatures pass even without NULL parameters in
the algorithm identifier (PyCrypto/LP bug #1119552)."""
rsakey = """-----BEGIN RSA PRIVATE KEY-----
MIIBOwIBAAJBAL8eJ5AKoIsjURpcEoGubZMxLD7+kT+TLr7UkvEtFrRhDDKMtuII
q19FrL4pUIMymPMSLBn3hJLe30Dw48GQM4UCAwEAAQJACUSDEp8RTe32ftq8IwG8
Wojl5mAd1wFiIOrZ/Uv8b963WJOJiuQcVN29vxU5+My9GPZ7RA3hrDBEAoHUDPrI
OQIhAPIPLz4dphiD9imAkivY31Rc5AfHJiQRA7XixTcjEkojAiEAyh/pJHks/Mlr
+rdPNEpotBjfV4M4BkgGAA/ipcmaAjcCIQCHvhwwKVBLzzTscT2HeUdEeBMoiXXK
JACAr3sJQJGxIQIgarRp+m1WSKV1MciwMaTOnbU7wxFs9DP1pva76lYBzgUCIQC9
n0CnZCJ6IZYqSt0H5N7+Q+2Ro64nuwV/OSQfM6sBwQ==
-----END RSA PRIVATE KEY-----"""
msg = b("This is a test\x0a")
# PKCS1 v1.5 signature of the message computed using SHA-1.
# The digestAlgorithm SEQUENCE does NOT contain the NULL parameter.
signature = "a287a13517f716e72fb14eea8e33a8db4a4643314607e7ca3e3e28"\
"1893db74013dda8b855fd99f6fecedcb25fcb7a434f35cd0a101f8"\
"b19348e0bd7b6f152dfc"
signature = unhexlify(b(signature))
def runTest(self):
verifier = pkcs1_15.new(RSA.importKey(self.rsakey))
hashed = SHA1.new(self.msg)
verifier.verify(hashed, self.signature)
class PKCS1_Legacy_Module_Tests(unittest.TestCase):
"""Verify that the legacy module Cryptodome.Signature.PKCS1_v1_5
behaves as expected. The only difference is that the verify()
method returns True/False and does not raise exceptions."""
def shortDescription(self):
return "Test legacy Cryptodome.Signature.PKCS1_v1_5"
def runTest(self):
key = RSA.importKey(PKCS1_15_NoParams.rsakey)
hashed = SHA1.new(b("Test"))
good_signature = PKCS1_v1_5.new(key).sign(hashed)
verifier = PKCS1_v1_5.new(key.publickey())
self.assertEqual(verifier.verify(hashed, good_signature), True)
# Flip a few bits in the signature
bad_signature = strxor(good_signature, bchr(1) * len(good_signature))
self.assertEqual(verifier.verify(hashed, bad_signature), False)
class PKCS1_All_Hashes_Tests(unittest.TestCase):
def shortDescription(self):
return "Test PKCS#1v1.5 signature in combination with all hashes"
def runTest(self):
key = RSA.generate(1024)
signer = pkcs1_15.new(key)
hash_names = ("MD2", "MD4", "MD5", "RIPEMD160", "SHA1",
"SHA224", "SHA256", "SHA384", "SHA512",
"SHA3_224", "SHA3_256", "SHA3_384", "SHA3_512")
for name in hash_names:
hashed = load_hash_by_name(name).new(b("Test"))
signer.sign(hashed)
from Cryptodome.Hash import BLAKE2b, BLAKE2s
for hash_size in (20, 32, 48, 64):
hashed_b = BLAKE2b.new(digest_bytes=hash_size, data=b("Test"))
signer.sign(hashed_b)
for hash_size in (16, 20, 28, 32):
hashed_s = BLAKE2s.new(digest_bytes=hash_size, data=b("Test"))
signer.sign(hashed_s)
def get_tests(config={}):
tests = []
tests += list_test_cases(FIPS_PKCS1_Verify_Tests)
tests += list_test_cases(FIPS_PKCS1_Sign_Tests)
tests += list_test_cases(PKCS1_15_NoParams)
tests += list_test_cases(PKCS1_Legacy_Module_Tests)
tests += list_test_cases(PKCS1_All_Hashes_Tests)
return tests
if __name__ == '__main__':
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
| chronicwaffle/PokemonGo-DesktopMap | app/pylibs/win32/Cryptodome/SelfTest/Signature/test_pkcs1_15.py | Python | mit | 8,992 |
import os
import unittest
from vsg import vhdlFile
from vsg.tests import utils
sLrmUnit = 'concurrent_assertion_statement'
lFile, eError = vhdlFile.utils.read_vhdlfile(os.path.join(os.path.dirname(__file__), sLrmUnit, 'classification_test_input.vhd'))
oFile = vhdlFile.vhdlFile(lFile)
class test_token(unittest.TestCase):
def test_classification(self):
sTestDir = os.path.join(os.path.dirname(__file__), sLrmUnit)
lExpected = []
utils.read_file(os.path.join(sTestDir, 'classification_results.txt'), lExpected, False)
lActual = []
for oObject in utils.extract_objects(oFile, True):
lActual.append(str(oObject))
self.assertEqual(lExpected, lActual)
| jeremiah-c-leary/vhdl-style-guide | vsg/tests/vhdlFile/test_concurrent_assertion_statement.py | Python | gpl-3.0 | 722 |
# http://www.geeksforgeeks.org/count-triplets-with-sum-smaller-that-a-given-value/
def find_all_triplet(arr, total):
    # 'arr' rather than 'input' to avoid shadowing the built-in of that name.
    arr.sort()
    result = 0
    for i in range(len(arr) - 2):
        j = i + 1
        k = len(arr) - 1
        while j < k:
            if arr[i] + arr[j] + arr[k] >= total:
                k = k - 1
            else:
                # arr is sorted, so for this (i, j) every index from j+1 up
                # to k also yields a sum below total: count all k-j triplets
                # at once, then advance j.
                result += k - j
                j = j + 1
    return result
if __name__ == '__main__':
    arr = [5, 1, 3, 4, 7]
    print(find_all_triplet(arr, 12))  # -> 4: (1,3,4), (1,3,5), (1,3,7), (1,4,5)
| rtkasodariya/interview | python/array/tripletsumlessthantotal.py | Python | apache-2.0 | 531 |
__version__ = "1.7.1"
| xmikos/hangupsbot | hangupsbot/version.py | Python | gpl-3.0 | 22 |
import argparse
import collections
import mock
import pytest
import subprocess
from ..cli import main
from .directory import directory
def test_help(tmpdir, cli):
with cli(
args=['ceph-deploy', 'mon', '--help'],
stdout=subprocess.PIPE,
) as p:
result = p.stdout.read()
assert 'usage: ceph-deploy' in result
assert 'positional arguments:' in result
assert 'optional arguments:' in result
def test_bad_no_conf(tmpdir, cli):
with pytest.raises(cli.Failed) as err:
with cli(
args=['ceph-deploy', 'mon'],
stderr=subprocess.PIPE,
) as p:
result = p.stderr.read()
assert 'usage: ceph-deploy' in result
assert 'too few arguments' in result
assert err.value.status == 2
def test_bad_no_mon(tmpdir, cli):
with tmpdir.join('ceph.conf').open('w'):
pass
with pytest.raises(cli.Failed) as err:
with cli(
args=['ceph-deploy', 'mon'],
stderr=subprocess.PIPE,
) as p:
result = p.stderr.read()
assert 'usage: ceph-deploy mon' in result
assert 'too few arguments' in result
assert err.value.status == 2
from mock import Mock, patch
def make_fake_connection(platform_information=None):
get_connection = Mock()
get_connection.return_value = get_connection
get_connection.remote_module.platform_information = Mock(
return_value=platform_information)
return get_connection
def test_simple(tmpdir, capsys):
with tmpdir.join('ceph.conf').open('w') as f:
f.write("""\
[global]
fsid = 6ede5564-3cf1-44b5-aa96-1c77b0c3e1d0
mon initial members = host1
""")
ns = argparse.Namespace()
ns.pushy = mock.Mock()
conn = mock.NonCallableMock(name='PushyClient')
ns.pushy.return_value = conn
mock_compiled = collections.defaultdict(mock.Mock)
conn.compile.side_effect = mock_compiled.__getitem__
MON_SECRET = 'AQBWDj5QAP6LHhAAskVBnUkYHJ7eYREmKo5qKA=='
def _create_mon(cluster, get_monitor_secret):
secret = get_monitor_secret()
assert secret == MON_SECRET
fake_ip_addresses = lambda x: ['10.0.0.1']
try:
with patch('ceph_deploy.new.net.ip_addresses', fake_ip_addresses):
with mock.patch('ceph_deploy.new.net.get_nonlocal_ip', lambda x: '10.0.0.1'):
with mock.patch('ceph_deploy.new.arg_validators.Hostname', lambda: lambda x: x):
with mock.patch('ceph_deploy.new.hosts'):
with directory(str(tmpdir)):
main(
args=['-v', 'new', '--no-ssh-copykey', 'host1'],
namespace=ns,
)
main(
args=['-v', 'mon', 'create', 'host1'],
namespace=ns,
)
except SystemExit as e:
raise AssertionError('Unexpected exit: %s', e)
out, err = capsys.readouterr()
err = err.lower()
assert 'creating new cluster named ceph' in err
assert 'monitor host1 at 10.0.0.1' in err
assert 'resolving host host1' in err
assert "monitor initial members are ['host1']" in err
assert "monitor addrs are ['10.0.0.1']" in err
| ddiss/ceph-deploy | ceph_deploy/tests/test_cli_mon.py | Python | mit | 3,325 |
#!/usr/bin/env python
# encoding: utf-8
"""A service to sync a local file tree to jottacloud.
Copies and updates files in the cloud by comparing md5 hashes, like the official client.
Run it from crontab at an appropriate interval.
"""
# This file is part of jottacloudclient.
#
# jottacloudclient is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# jottacloudclient is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with jottacloudclient. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2014-2015 Håvard Gulldahl <[email protected]>
#import included batteries
import os, re, os.path, sys, logging, argparse
import math, time
log = logging.getLogger(__name__)
#import pip modules
from clint.textui import progress, puts, colored
#import jottalib
from jottalib.JFS import JFS
from . import jottacloud, __version__
def humanizeFileSize(size):
size = abs(size)
    if size == 0:
return "0B"
units = ['B','KiB','MiB','GiB','TiB','PiB','EiB','ZiB','YiB']
p = math.floor(math.log(size, 2)/10)
return "%.3f%s" % (size/math.pow(1024,p),units[int(p)])
def filescanner(topdir, jottapath, jfs, errorfile, exclude=None, dry_run=False, prune_files=True, prune_folders=True ):
errors = {}
def saferun(cmd, *args):
log.debug('running %s with args %s', cmd, args)
try:
            return cmd(*args)  # apply() is Python 2 only; star-args is equivalent
except Exception as e:
puts(colored.red('Ouch. Something\'s wrong with "%s":' % args[0]))
log.exception('SAFERUN: Got exception when processing %s', args)
errors.update( {args[0]:e} )
return False
_files = 0
try:
for dirpath, onlylocal, onlyremote, bothplaces, onlyremotefolders in jottacloud.compare(topdir, jottapath, jfs, exclude_patterns=exclude):
puts(colored.green("Entering dir: %s" % dirpath))
if len(onlylocal):
_start = time.time()
_uploadedbytes = 0
for f in progress.bar(onlylocal, label="uploading %s new files: " % len(onlylocal)):
if os.path.islink(f.localpath):
log.debug("skipping symlink: %s", f)
continue
log.debug("uploading new file: %s", f)
if not dry_run:
if saferun(jottacloud.new, f.localpath, f.jottapath, jfs) is not False:
_uploadedbytes += os.path.getsize(f.localpath)
_files += 1
_end = time.time()
puts(colored.magenta("Network upload speed %s/sec" % ( humanizeFileSize( (_uploadedbytes / (_end-_start)) ) )))
if prune_files and len(onlyremote):
puts(colored.red("Deleting %s files from JottaCloud because they no longer exist locally " % len(onlyremote)))
for f in progress.bar(onlyremote, label="deleting JottaCloud file: "):
log.debug("deleting cloud file that has disappeared locally: %s", f)
if not dry_run:
if saferun(jottacloud.delete, f.jottapath, jfs) is not False:
_files += 1
if len(bothplaces):
for f in progress.bar(bothplaces, label="comparing %s existing files: " % len(bothplaces)):
log.debug("checking whether file contents has changed: %s", f)
if not dry_run:
if saferun(jottacloud.replace_if_changed, f.localpath, f.jottapath, jfs) is not False:
_files += 1
if prune_folders and len(onlyremotefolders):
puts(colored.red("Deleting %s folders from JottaCloud because they no longer exist locally " % len(onlyremotefolders)))
for f in onlyremotefolders:
if not dry_run:
if saferun(jottacloud.deleteDir, f.jottapath, jfs) is not False:
logging.debug("Deleted remote folder %s", f.jottapath)
except KeyboardInterrupt:
# Ctrl-c pressed, cleaning up
pass
if len(errors) == 0:
puts('Finished syncing %s files to JottaCloud, no errors. yay!' % _files)
else:
puts(('Finished syncing %s files, ' % _files )+
colored.red('with %s errors (read %s for details)' % (len(errors), errorfile, )))
| cowai/jottalib | src/jottalib/scanner.py | Python | gpl-3.0 | 4,824 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-04-25 21:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('pattern', '0011_auto_20170425_1751'),
]
operations = [
migrations.AddField(
model_name='pattern',
name='child',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='pattern.Pattern'),
),
]
| yaxu/patternlib | pattern/migrations/0012_pattern_child.py | Python | gpl-3.0 | 546 |
# -*- coding: utf-8 -*-
# Copyright (C) 2015-2021 Martin Glueck All rights reserved
# Neugasse 2, A--2244 Spannberg, Austria. [email protected]
# #*** <License> ************************************************************#
# This module is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this module. If not, see <http://www.gnu.org/licenses/>.
# #*** </License> ***********************************************************#
#
#++
# Name
# STG.Program
#
# Purpose
# A application program used by a device
#
#--
from Once_Property import Once_Property
from STG._Object_ import _STG_Object_
from STG._Program_Object_ import _Program_Object_
from STG.Parameter import Parameter, Parameter_Ref, Parameter_Type, Absolute_Segment
from STG.Language import Language
import os
from collections import defaultdict
class Static (_STG_Object_) :
"""Find a static reference"""
def __init__ (self, xml) :
super ().__init__ ()
self.xml = xml
self.memories = dict ()
# end def __init__
def find (self, id, tag, cls = None, * args, ** kw) :
result = super ().get \
(self.xml, "//E:%s[@Id='%s']" % (tag, id))
if cls :
result = cls (xml = result, * args, ** kw)
return result
# end def find
def get (self, id, tag, cls = None, * args, ** kw) :
if id not in cls.Table :
cls.Table [id] = self.find (id, tag, cls, * args, ** kw)
return cls.Table [id]
# end def get
def Parameter_Ref (self, id, parent, program) :
return self.get \
( id, "ParameterRef", Parameter_Ref
, static = self, parent = parent, program = program
)
# end def Parameter_Ref
def Parameter (self, id) :
return self.get \
(id, "Parameter", Parameter, static = self)
# end def Parameter_Ref
def Parameter_Type (self, id) :
return self.get \
(id, "ParameterType", Parameter_Type, static = self)
# end def Parameter_Type
def Memory (self, id) :
result = self.get \
(id, "AbsoluteSegment", Absolute_Segment, static = self)
self.memories [id] = result
return result
# end def Code_Segment
# end class Static
class Program (_Program_Object_) :
"""An application program used by an EIB device"""
def __init__ (self, xml) :
super ().__init__ ()
self.xml = xml
self.id = xml.get ("Id")
self.mask = xml.get ("MaskVersion")
self.raw_name = xml.get ("Name")
self.manu_id = int (self.id[2:6], 16)
self.app_number = int (xml.get ("ApplicationNumber"))
self.app_version = int (xml.get ("ApplicationVersion"))
prop_load = self.xpath (xml, "//E:LdCtrlCompareProp[@PropId=78]")
if prop_load :
idata = prop_load [0].get ("InlineData")
data = []
for i in range (len (idata) // 2) :
data.append ("0x" + idata [i*2:i*2+2])
data = ", ".join (data)
else :
data = "-"
self.load_compare = data
self.parameter_refs = dict ()
self.com_object_refs = dict ()
static = Static (self.get (xml, "E:Static"))
for abse in self.xpath (xml, "//E:AbsoluteSegment") :
static.Memory (abse.get ("Id"))
self._visit_element (self, self.get (xml, "E:Dynamic"), static)
self._setup_tables (static)
self._finalize ()
# end def __init__
def _finalize (self) :
self.memory_segments = \
[ m for m in sorted ( Absolute_Segment.Table.values ()
, key = lambda m : m.address
)
]
ram_section = \
[ m for m in self.memory_segments
if (m.size > 1) and m.data is None
]
if ram_section :
self.com_ram_memory = ram_section [0]
self.com_objects_by_number = defaultdict (list)
for cor in self.com_object_refs.values () :
self.com_objects_by_number [cor.number].append (cor)
# end def _finalize
def as_html (self, template = "parameter_overview-grid.jnj") :
from jinja2 import Environment, FileSystemLoader
path = os.path.dirname (__file__)
env = Environment \
(loader = FileSystemLoader (os.path.join (path, "jinja")))
template = env.get_template (template)
return template.render (dict (device = self))
# end def as_html
def eeprom_as_html (self, reference_address = 0) :
p_refs = sorted \
( ( pr for pr in self.parameter_refs.values ()
if pr.parameter.address
)
, key = lambda pr : (pr.parameter.address, pr.parameter.mask)
)
from jinja2 import Environment, FileSystemLoader
path = os.path.dirname (__file__)
env = Environment \
(loader = FileSystemLoader (os.path.join (path, "jinja")))
template = env.get_template ("eeprom_layout.jnj")
return template.render \
( dict ( p_refs = p_refs
, program = self
, ref_addr = reference_address
)
)
# end def eeprom_as_html
@Once_Property
def name (self) :
return Language.Translation (self.id, "Name", self.raw_name)
# end def name
def _setup_tables (self, static) :
adr_tab = self.get (self.xml, "//E:AddressTable")
aso_tab = self.get (self.xml, "//E:AssociationTable")
com_tab = self.get (self.xml, "//E:ComObjectTable")
self.address_table = \
( int (adr_tab.get ("Offset"))
, int (adr_tab.get ("MaxEntries"))
, static.Memory (adr_tab.get ("CodeSegment"))
)
self.assoc_table = \
( int (aso_tab.get ("Offset"))
, int (aso_tab.get ("MaxEntries"))
, static.Memory (aso_tab.get ("CodeSegment"))
)
self.com_table = \
            ( int (com_tab.get ("Offset"))
, static.Memory (com_tab.get ("CodeSegment"))
)
# end def _setup_tables
### pickle interfaces
Value_Attributes = ("id", "mask", "app_number", "app_version", "load_compare")
@property
def pickle_cargo (self) :
result = super ().pickle_cargo
for attr in "address_table", "assoc_table", "com_table" :
value = getattr (self, attr)
value = value [:-1] + (value [-1].id, )
result [attr] = value
return result
# end def pickle_cargo
@classmethod
def From_Pickle (cls, dump) :
for attr in "address_table", "assoc_table", "com_table" :
value = dump [attr]
value = value [:-1] + (Absolute_Segment.Table [value [-1]], )
dump [attr] = value
result = super (Program, cls).From_Pickle (None, dump)
result._finalize ()
return result
# end def From_Pickle
# end class Program
if __name__ == "__main__" :
from STG._Object_ import _STG_Object_
from STG.Language import Language
from STG.Datapoint import Datapoint
import sys
if len (sys.argv) > 2 :
master = Datapoint.Parse (sys.argv [2])
Datapoint.From_Master (master)
root = _STG_Object_.Parse (sys.argv [1])
Language.add (root)
Language.set ("de-DE")
if 1 :
prg = Program (Program.get (root, "//E:ApplicationProgram"))
if len (sys.argv) > 3 :
file = open (sys.argv [3], "w", encoding = "utf-8")
else :
file = sys.stdout
file.write (prg.as_html ())
if len (sys.argv) > 3 :
file.close ()
print (prg.name)
### __END__ STG.Program
| selfbus/development-tools | test-case-generator/STG/Program.py | Python | gpl-3.0 | 8,523 |
"""
Helpers to manipulate deferred DDL statements that might need to be adjusted
or discarded when executing a migration.
"""
class Reference:
"""Base class that defines the reference interface."""
def references_table(self, table):
"""
Return whether or not this instance references the specified table.
"""
return False
def references_column(self, table, column):
"""
Return whether or not this instance references the specified column.
"""
return False
def rename_table_references(self, old_table, new_table):
"""
Rename all references to the old_name to the new_table.
"""
pass
def rename_column_references(self, table, old_column, new_column):
"""
Rename all references to the old_column to the new_column.
"""
pass
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
def __str__(self):
raise NotImplementedError('Subclasses must define how they should be converted to string.')
class Table(Reference):
"""Hold a reference to a table."""
def __init__(self, table, quote_name):
self.table = table
self.quote_name = quote_name
def references_table(self, table):
return self.table == table
def rename_table_references(self, old_table, new_table):
if self.table == old_table:
self.table = new_table
def __str__(self):
return self.quote_name(self.table)
class TableColumns(Table):
"""Base class for references to multiple columns of a table."""
def __init__(self, table, columns):
self.table = table
self.columns = columns
def references_column(self, table, column):
return self.table == table and column in self.columns
def rename_column_references(self, table, old_column, new_column):
if self.table == table:
for index, column in enumerate(self.columns):
if column == old_column:
self.columns[index] = new_column
class Columns(TableColumns):
"""Hold a reference to one or many columns."""
def __init__(self, table, columns, quote_name):
self.quote_name = quote_name
super().__init__(table, columns)
def __str__(self):
return ', '.join(self.quote_name(column) for column in self.columns)
class IndexName(TableColumns):
"""Hold a reference to an index name."""
def __init__(self, table, columns, suffix, create_index_name):
self.suffix = suffix
self.create_index_name = create_index_name
super().__init__(table, columns)
def __str__(self):
return self.create_index_name(self.table, self.columns, self.suffix)
class ForeignKeyName(TableColumns):
"""Hold a reference to a foreign key name."""
def __init__(self, from_table, from_columns, to_table, to_columns, suffix_template, create_fk_name):
self.to_reference = TableColumns(to_table, to_columns)
self.suffix_template = suffix_template
self.create_fk_name = create_fk_name
        super().__init__(from_table, from_columns)
def references_table(self, table):
return super().references_table(table) or self.to_reference.references_table(table)
def references_column(self, table, column):
return (
super().references_column(table, column) or
self.to_reference.references_column(table, column)
)
def rename_table_references(self, old_table, new_table):
super().rename_table_references(old_table, new_table)
self.to_reference.rename_table_references(old_table, new_table)
def rename_column_references(self, table, old_column, new_column):
super().rename_column_references(table, old_column, new_column)
self.to_reference.rename_column_references(table, old_column, new_column)
def __str__(self):
suffix = self.suffix_template % {
'to_table': self.to_reference.table,
'to_column': self.to_reference.columns[0],
}
return self.create_fk_name(self.table, self.columns, suffix)
class Statement(Reference):
"""
Statement template and formatting parameters container.
Allows keeping a reference to a statement without interpolating identifiers
that might have to be adjusted if they're referencing a table or column
that is removed
"""
def __init__(self, template, **parts):
self.template = template
self.parts = parts
def references_table(self, table):
return any(
hasattr(part, 'references_table') and part.references_table(table)
for part in self.parts.values()
)
def references_column(self, table, column):
return any(
hasattr(part, 'references_column') and part.references_column(table, column)
for part in self.parts.values()
)
def rename_table_references(self, old_table, new_table):
for part in self.parts.values():
if hasattr(part, 'rename_table_references'):
part.rename_table_references(old_table, new_table)
def rename_column_references(self, table, old_column, new_column):
for part in self.parts.values():
if hasattr(part, 'rename_column_references'):
part.rename_column_references(table, old_column, new_column)
def __str__(self):
return self.template % self.parts
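# Illustrative sketch (not part of Django's public API): a deferred statement
# keeps its identifiers adjustable until it is rendered. The table, column
# and quote_name below are hypothetical.
if __name__ == '__main__':
    quote_name = lambda name: '"%s"' % name
    stmt = Statement(
        'ALTER TABLE %(table)s ADD CONSTRAINT fk FOREIGN KEY (%(columns)s)',
        table=Table('app_author', quote_name),
        columns=Columns('app_author', ['book_id'], quote_name),
    )
    # Renaming propagates to every part that references the old table name.
    stmt.rename_table_references('app_author', 'app_writer')
    assert str(stmt) == (
        'ALTER TABLE "app_writer" ADD CONSTRAINT fk FOREIGN KEY ("book_id")'
    )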
| tysonclugg/django | django/db/backends/ddl_references.py | Python | bsd-3-clause | 5,519 |
"""A random walk proposal distribution.
Author:
Ilias Bilionis
Date:
1/15/2013
"""
__all__ = ['RandomWalkProposal']
import numpy as np
import math
from . import ProposalDistribution
class RandomWalkProposal(ProposalDistribution):
"""A random walk proposal distribution."""
def __init__(self, dt=1e-3, name='Random Walk Proposal'):
"""Initialize the object."""
super(RandomWalkProposal, self).__init__(dt=dt, name=name)
def __call__(self, x_p, x_n):
"""Evaluate the logarithm of the pdf of the chain."""
k = x_p.shape[0]
y = x_n - x_p
return -0.5 * (k * math.log(2. * math.pi)
+ k * math.log(self.dt)
+ np.dot(y, y) / self.dt ** 2)
def sample(self, x_p, x_n):
"""Sample from the pdf of the chain."""
k = x_p.shape[0]
x_n[:] = x_p + self.dt * np.random.randn(k)
| ebilionis/py-best | best/random/_random_walk_proposal.py | Python | lgpl-3.0 | 914 |
#!/usr/bin/env python
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import os
from superdesk.tests import TestCase
from superdesk.io.feed_parsers.pa_nitf import PAFeedParser
from lxml import etree
class PANITFFileTestCase(TestCase):
vocab = [{'_id': 'genre', 'items': [{'name': 'Current'}]}]
def setUpForChildren(self):
super().setUpForChildren()
with self.app.app_context():
self.app.data.insert('vocabularies', self.vocab)
dirname = os.path.dirname(os.path.realpath(__file__))
fixture = os.path.normpath(os.path.join(dirname, '../fixtures', self.filename))
provider = {'name': 'Test'}
with open(fixture, 'rb') as f:
xml = etree.parse(f)
self.item = PAFeedParser().parse(xml.getroot(), provider)
class PAFileWithNoSubjects(PANITFFileTestCase):
filename = 'pa2.xml'
def test_headline(self):
self.assertEqual(self.item.get('headline'), 'Soccer INT-Teams')
def test_anpa_category(self):
self.assertEqual(self.item.get('anpa_category'), [{'qcode': 'S'}])
class PATestCase(PANITFFileTestCase):
filename = 'pa1.xml'
def test_slugline(self):
self.assertEqual(self.item.get('slugline'), 'Sport Trivia (Oct 14)')
self.assertEqual(self.item.get('headline'), 'PA SPORT TRIVIA (OCTOBER 14)')
self.assertEqual('usable', self.item.get('pubstatus'))
self.assertEqual('af1f7ad5-5619-49de-84cc-2e608538c77fSSS-3-1', self.item.get('guid'))
self.assertEqual(self.item.get('format'), 'HTML')
self.assertEqual(4, len(self.item.get('subject')))
self.assertIn('Trivia (Oct 14)', self.item.get('keywords'))
self.assertEqual(637, self.item.get('word_count'))
class PAEmbargoTestCase(PANITFFileTestCase):
filename = 'pa3.xml'
def test_slugline(self):
self.assertEqual(self.item.get('pubstatus'), 'usable')
class PAEntertainmentTest(PANITFFileTestCase):
filename = 'pa4.xml'
def test_entertainment_category(self):
self.assertEqual(self.item.get('anpa_category'), [{'qcode': 'E'}])
class PACharsetConversionTest(PANITFFileTestCase):
filename = 'pa5.xml'
def test_charset(self):
self.assertTrue(self.item['body_html'].startswith('<p>Treasury coffers will take a £66 billion annual hit '
'if Britain goes for a so-called hard Brexit'))
self.assertEqual(self.item['headline'], 'HARD BREXIT TO COST UK UP TO £66BN A YEAR, SAYS TREASURY')
| nistormihai/superdesk-core | tests/io/feed_parsers/pa_nitf_test.py | Python | agpl-3.0 | 2,808 |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
from datetime import datetime
from app import db
__author__ = 'Hanks'
import unittest
from app.models import User, Role, Permission, AnonymousUser, Follow
class UserModelTestCase(unittest.TestCase):
def test_password_setter(self):
u = User(password='cat')
self.assertTrue(u.password_hash is not None)
def test_no_password(self):
u = User(password='cat')
with self.assertRaises(AttributeError):
u.password
def test_password_verification(self):
u = User(password='cat')
self.assertTrue(u.verify_password('cat'))
        self.assertFalse(u.verify_password('dog'))
def test_password_salts_are_radom(self):
u = User(password='cat')
u2 = User(password='cat')
self.assertTrue(u.password_hash != u2.password_hash)
def test_roles_add_permission(self):
Role.insert_roles()
u = User(email='[email protected]', password='cat')
self.assertTrue(u.can(Permission.WRITE_ARTICLES))
self.assertFalse(u.can(Permission.MODERATE_COMMENTS))
def test_anonoymous_user(self):
u = AnonymousUser()
self.assertFalse(u.can(Permission.FOLLOW))
def test_follows(self):
u1 = User(email='[email protected]', password='cat')
u2 = User(email='[email protected]', password='dog')
db.session.add(u1)
db.session.add(u2)
db.session.commit()
self.assertFalse(u1.is_following(u2))
self.assertFalse(u1.is_followed_by(u2))
timestamp_before = datetime.utcnow()
u1.follow(u2)
db.session.add(u1)
db.session.commit()
timestamp_after = datetime.utcnow()
self.assertTrue(u1.is_following(u2))
self.assertFalse(u1.is_followed_by(u2))
self.assertTrue(u2.is_followed_by(u1))
self.assertTrue(u1.followed.count() == 1)
self.assertTrue(u2.followers.count() == 1)
f = u1.followed.all()[-1]
self.assertTrue(f.followed == u2)
self.assertTrue(timestamp_before <= f.timestamp <= timestamp_after)
f = u2.followers.all()[-1]
self.assertTrue(f.follower == u1)
u1.unfollow(u2)
db.session.add(u1)
db.session.commit()
self.assertTrue(u1.followed.count() == 0)
self.assertTrue(u2.followers.count() == 0)
self.assertTrue(Follow.query.count() == 0)
u2.follow(u1)
db.session.add(u1)
db.session.add(u2)
db.session.commit()
db.session.delete(u2)
db.session.commit()
self.assertTrue(Follow.query.count() == 0)
| hanks-zyh/Flask-blog | tests/test_user_model.py | Python | apache-2.0 | 2,611 |
# -*- coding: utf-8 -*-
# Download large-scale compounds' fingerprints from the PubChem Database
# and calculate the similarities.
# Songpeng Zu
# 2015-11-10
# import package
import pubchempy as pcb
import chemfp.bitops as chembit
import argparse # Temp for args.
# Functions
def chunks_get_compounds(l,n):
""" Yield successive n-sized chunks from l. (From Stack Overflow)"""
for i in xrange(0,len(l),n):
try:
yield pcb.get_compounds(l[i:i+n])
except Exception, e:
print e
pass
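# Illustrative behavior (hypothetical sizes; real calls hit the PubChem API):
# chunks_get_compounds(range(250), 100) yields three pcb.get_compounds()
# batches, covering items 0-99, 100-199 and 200-249.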
def read_hexfile(infile="seed_drug_cid2fingerprint"):
""" Read all the lines into a list.
First column as the cids, Second column as the hex string.
"""
with open(infile,'r') as fr:
cids = [x.strip().split()[0] for x in fr]
with open(infile,'r') as fr:
fps = [x.strip().split()[1] for x in fr]
return cids, fps
def download_PubChemFinger(line_start_num = 0,line_end_num = 1000,block_num=100,
inputfile="zinc2cid_lead_uniq",
outfilename="zinc_cid2pubchemfp",colnum=2,cid_col_index=1):
"""Directly download the fingerprints from PubChem.
Note: if one column per line (colnum==1), we treat it as CID directly;
if multiple column per line (colnum >1), we use the cid_col_index for the column specific for CID(PubChem ID).
    Some CIDs may return more than one fingerprint; we do not handle that case.
"""
# Get compounds_list
compounds_list = []
with open(inputfile,'r') as fr:
if colnum < 2:
compounds_list = [int(line.strip()) for line in fr]
else:
compounds_list = [int(line.strip().split()[cid_col_index]) for line in fr]
# Write the results.
with open(outfilename+"_"+str(line_start_num)+"_"+str(line_end_num),'w') as fw:
compounds_list = compounds_list[line_start_num:line_end_num] # Resize compounds_list
for compounds in chunks_get_compounds(compounds_list,block_num):
try:
tmp_write_list = ['\t'.join([str(compound.cid),compound.fingerprint]) for compound in compounds]
fw.write('\n'.join(tmp_write_list))
except Exception,e: # Ignore the possible errors.
print e # print the error, then continue
continue
def tanimoto_by_MattSwain(compound1,compound2):
"""This function is provided from pubchempy by MattSwain.
It's not fast. Using the hex_tanimoto form chemfp.bitops instead.
"""
fp1 = int(compound1.fingerprint, 16) # as HEX
fp2 = int(compound2.fingerprint, 16)
fp1_count = bin(fp1).count('1') # binary
    fp2_count = bin(fp2).count('1')
both_count = bin(fp1 & fp2).count('1')
return float(both_count) / (fp1_count + fp2_count - both_count)
def cal_PubChem_Similarity_one2all(one_zinc_fp_hex,fp_hex_drug):
"""Calculate the similarites for one zinc compounds against all compounds.
"""
tmp_array = [round(chembit.hex_tanimoto(one_zinc_fp_hex,fp_drug),3)
for fp_drug in fp_hex_drug]
return '\t'.join([str(x) for x in tmp_array])
def calc_PubChem_Similarity(infilenm_zinc,infilenm_drug,outfilenm):
"""Calculate the PubChem Similarites between compounds from zinc and drugbank.
"""
cid_zinc, fp_hex_zinc = read_hexfile(infilenm_zinc)
cid_drug, fp_hex_drug = read_hexfile(infilenm_drug)
with open(outfilenm + '_cid','w') as fw_cid:
fw_cid.write('\n'.join([str(cid) for cid in cid_zinc]))
with open(outfilenm,'w') as fw:
fw.write('\n'.join(
[cal_PubChem_Similarity_one2all(fp_zinc,fp_hex_drug)
for fp_zinc in fp_hex_zinc]))
def download_fp():
parser = argparse.ArgumentParser()
parser.add_argument("line_start",type=int,help="The start line for reading")
parser.add_argument("line_end",type=int,help="The end line for reading")
args = parser.parse_args()
download_PubChemFinger(args.line_start,args.line_end) # Set the line start and line end.
def get_similarity_matrix():
parser = argparse.ArgumentParser()
parser.add_argument("zinc_file_name",type=str,help="The input file name for zinc")
parser.add_argument("drugbank_file_name",type=str,help="The input file name for drugbank")
parser.add_argument("output_file",type=str,help="The output file name for the similarity matrix")
args = parser.parse_args()
calc_PubChem_Similarity(args.zinc_file_name,args.drugbank_file_name,args.output_file)
if __name__ == "__main__":
# download_fp()
get_similarity_matrix()
| biotcm/PICheM | ChemSpace/pubchem_similarity_calc.py | Python | mit | 4,625 |
from os.path import dirname, basename, isfile
import glob
excepts = ['__init__', 'widget']
# Find all *.py files and add them to import
modules = [basename(f)[:-3] for f in glob.glob(dirname(__file__)+"/*.py") if
isfile(f)]
__all__ = [f for f in modules if f not in excepts]
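# Illustrative result (hypothetical file names): with clock.py, battery.py and
# widget.py in this directory, __all__ becomes ['clock', 'battery'].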
| alberand/lemonbar | widgets/__init__.py | Python | mit | 284 |
# core django imports
from django.contrib import admin
# imports from your apps
from .models import Lookout
@admin.register(Lookout)
class LookoutAdmin(admin.ModelAdmin):
list_display = ('id', 'created', 'modified', 'owner', 'isbn')
list_filter = ('created', 'modified', 'owner')
| srct/bookshare | bookshare/lookouts/admin.py | Python | gpl-3.0 | 290 |
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.utils.translation import ugettext
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from models import InvitationError, Invitation, InvitationStats
from forms import InvitationForm, RegistrationFormInvitation
from registration.signals import user_registered
def apply_extra_context(context, extra_context=None):
if extra_context is None:
extra_context = {}
for key, value in extra_context.items():
context[key] = callable(value) and value() or value
return context
@login_required
def invite(request, success_url=None,
form_class=InvitationForm,
template_name='invitation/invitation_form.html',
extra_context=None):
"""
Create an invitation and send invitation email.
Send invitation email and then redirect to success URL if the
invitation form is valid. Redirect named URL ``invitation_unavailable``
on InvitationError. Render invitation form template otherwise.
**Required arguments:**
None.
**Optional arguments:**
:success_url:
The URL to redirect to on successful registration. Default value is
``None``, ``invitation_complete`` will be resolved in this case.
:form_class:
A form class to use for invitation. Takes ``request.user`` as first
argument to its constructor. Must have an ``email`` field. Custom
validation can be implemented here.
:template_name:
A custom template to use. Default value is
``invitation/invitation_form.html``.
:extra_context:
A dictionary of variables to add to the template context. Any
callable object in this dictionary will be called to produce
the end result which appears in the context.
**Template:**
``invitation/invitation_form.html`` or ``template_name`` keyword
argument.
**Context:**
A ``RequestContext`` instance is used rendering the template. Context,
in addition to ``extra_context``, contains:
:form:
The invitation form.
"""
if request.method == 'POST':
form = form_class(request.POST, request.FILES)
if form.is_valid():
try:
invitation = Invitation.objects.invite(
request.user, form.cleaned_data["email"], form.cleaned_data["message"])
            except InvitationError, e:
                print e
                return HttpResponseRedirect(reverse('invitation_unavailable'))
invitation.send_email(request=request)
if 'next' in request.REQUEST:
return HttpResponseRedirect(request.REQUEST['next'])
return HttpResponseRedirect(success_url or reverse('invitation_complete'))
else:
form = form_class()
context = apply_extra_context(RequestContext(request), extra_context)
return render_to_response(template_name,
{'form': form},
context_instance=context)
def register(request,
invitation_key,
wrong_key_template='invitation/wrong_invitation_key.html',
redirect_to_if_authenticated='/',
success_url=None,
form_class=RegistrationFormInvitation,
template_name='registration/registration_form.html',
extra_context=None):
"""
Allow a new user to register via invitation.
Send invitation email and then redirect to success URL if the
invitation form is valid. Redirect named URL ``invitation_unavailable``
on InvitationError. Render invitation form template otherwise. Sends
registration.signals.user_registered after creating the user.
**Required arguments:**
:invitation_key:
        An invitation key in the form of ``[\da-f]{40}``
**Optional arguments:**
:wrong_key_template:
Template to be used when an invalid invitation key is supplied.
Default value is ``invitation/wrong_invitation_key.html``.
:redirect_to_if_authenticated:
URL to be redirected when an authenticated user calls this view.
Defaults value is ``/``
:success_url:
The URL to redirect to on successful registration. Default value is
``None``, ``invitation_registered`` will be resolved in this case.
:form_class:
A form class to use for registration. Takes the invited email as first
argument to its constructor.
:template_name:
A custom template to use. Default value is
``registration/registration_form.html``.
:extra_context:
A dictionary of variables to add to the template context. Any
callable object in this dictionary will be called to produce
the end result which appears in the context.
**Templates:**
``invitation/invitation_form.html`` or ``template_name`` keyword
argument as the *main template*.
``invitation/wrong_invitation_key.html`` or ``wrong_key_template`` keyword
argument as the *wrong key template*.
**Context:**
``RequestContext`` instances are used rendering both templates. Context,
in addition to ``extra_context``, contains:
For wrong key template
:invitation_key: supplied invitation key
For main template
:form:
The registration form.
"""
if request.user.is_authenticated():
return HttpResponseRedirect(redirect_to_if_authenticated)
try:
invitation = Invitation.objects.find(invitation_key)
except Invitation.DoesNotExist:
context = apply_extra_context(RequestContext(request), extra_context)
return render_to_response(wrong_key_template,
{'invitation_key': invitation_key},
context_instance=context)
if request.method == 'POST':
form = form_class(invitation.email, request.POST, request.FILES)
if form.is_valid():
new_user = form.save()
invitation.mark_accepted(new_user)
user_registered.send(sender="invitation",
user=new_user,
request=request)
# return HttpResponseRedirect(success_url or reverse('invitation_registered'))
# return HttpResponseRedirect(success_url or reverse('profiles-profile-detail', kwargs={'slug':new_user.username}))
return HttpResponseRedirect(success_url or reverse('auth_login'))
else:
form = form_class(invitation.email)
context = apply_extra_context(RequestContext(request), extra_context)
return render_to_response(template_name,
{'form': form},
context_instance=context)
@staff_member_required
def reward(request):
"""
Add invitations to users with high invitation performance and redirect
refferring page.
"""
rewarded_users, invitations_given = InvitationStats.objects.reward()
if rewarded_users:
message = ugettext(u'%(users)s users are given a total of ' \
u'%(invitations)s invitations.') % {
'users': rewarded_users,
'invitations': invitations_given}
else:
message = ugettext(u'No user has performance above ' \
u'threshold, no invitations awarded.')
request.user.message_set.create(message=message)
return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
| hzlf/openbroadcast | website/apps/invitation/views.py | Python | gpl-3.0 | 7,841 |
import os
import sys
import numba.cuda
from .. import submodules
from .localimport import localimport
def cuda_available():
available = numba.cuda.is_available()
if not available:
print('Warning! No GPU detected, so most models will fail... If you are in Colab, make sure you enable GPU in the menu: Runtime -> Change runtime type.')
return available
def get_submodules_root(submodule_name):
submodules_root = os.path.dirname(submodules.__file__)
return os.path.join(submodules_root, submodule_name)
class import_from(object):
def __init__(self, submodule_name):
self.submodule = get_submodules_root(submodule_name)
sys.path.append(self.submodule)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
sys.path = [path for path in sys.path if path != self.submodule]
return False
| ml4a/ml4a-guides | ml4a/models/submodules/__init__.py | Python | gpl-2.0 | 909 |
#!/usr/bin/env python3
'''Conway's Game of Life in a Curses Terminal Window
'''
import curses
import time
from GameOfLife import World, Cell, Patterns
from curses import ( COLOR_BLACK, COLOR_BLUE, COLOR_CYAN,
COLOR_GREEN, COLOR_MAGENTA, COLOR_RED,
COLOR_WHITE, COLOR_YELLOW )
class CursesWorld(World):
'''
Display a Game of Life in a terminal window using curses.
'''
colors = [COLOR_WHITE,COLOR_YELLOW,COLOR_MAGENTA,
COLOR_CYAN,COLOR_RED,COLOR_GREEN,COLOR_BLUE]
def __init__(self,window,cellClass=None):
'''
:param: window - curses window
:param: cellClass - optional Cell subclass
'''
h,w = window.getmaxyx()
super(CursesWorld,self).__init__(w,h-1,cellClass)
self.w = window
self.interval = 0
for n,fg in enumerate(self.colors):
curses.init_pair(n+1,fg,COLOR_BLACK)
@property
def gps(self):
'''
Generations per second.
'''
try:
return self._gps
except AttributeError:
pass
self._gps = 0
return self._gps
@gps.setter
def gps(self,newValue):
self._gps = int(newValue)
def colorForCell(self,cell):
'''
Returns a curses color_pair for a cell, chosen by the cell's age.
'''
n = min(cell.age // 100,len(self.colors)-1)
return curses.color_pair(n+1)
def handle_input(self):
'''
Accepts input from the user and acts on it.
Key Action
-----------------
q exit()
Q exit()
+ increase redraw interval by 10 milliseconds
- decrease redraw interval by 10 milliseconds
'''
c = self.w.getch()
if c == ord('q') or c == ord('Q'):
exit()
if c == ord('+'):
self.interval += 10
if c == ord('-'):
self.interval -= 10
if self.interval < 0:
self.interval = 0
@property
def status(self):
'''
Format string for the status line.
'''
try:
return self._status.format(self=self,
a=len(self.alive),
t=len(self.cells))
except AttributeError:
pass
s = ['Q to quit\t',
'{self.generation:>10} G',
'{self.gps:>4} G/s',
'Census: {a:>5}/{t:<5}',
'{self.interval:>4} ms +/-']
self._status = ' '.join(s)
return self._status.format(self=self,
a=len(self.alive),
t=len(self.cells))
def draw(self):
'''
:return: None
Updates each character in the curses window with
the appropriate colored marker for each cell in the world.
Moves the cursor to bottom-most line, left-most column
when finished.
'''
for y in range(self.height):
for x in range(self.width):
c = self[x,y]
self.w.addch(y,x,str(c)[0],self.colorForCell(c))
self.w.addstr(self.height,2,self.status)
self.w.move(self.height,1)
def run(self,stop=-1,interval=0):
'''
:param: stop - optional integer
:param: interval - optional integer
:return: None
        This method will run the simulation described by world until the
        given number of generations specified by ``stop`` has been met.
        The default value will cause the simulation to run until interrupted
        by the user.
        The interval is the number of milliseconds to pause between generations.
The default value of zero allows the simulation to run as fast as
possible.
The simulation is displayed via curses in a terminal window and
displays a status line at the bottom of the window.
The simulation can be stopped by the user pressing the keys 'q' or
'Q'. The interval between simulation steps can be increased with
the plus key '+' or decreased with the minus key '-' by increments
of 10 milliseconds.
'''
self.w.clear()
self.interval = interval
try:
while True:
if self.generation == stop:
break
self.handle_input()
t0 = time.time()
self.step()
self.draw()
self.w.refresh()
if self.interval:
curses.napms(self.interval)
t1 = time.time()
self.gps = 1/(t1-t0)
except KeyboardInterrupt:
pass
def main(stdscr,argv):
w = CursesWorld(stdscr)
if len(argv) == 1:
raise ValueError("no patterns specified.")
for thing in argv[1:]:
name,_,where = thing.partition(',')
try:
x,y = map(int,where.split(','))
except:
x,y = 0,0
w.addPattern(Patterns[name],x=x,y=y)
stdscr.nodelay(True)
w.run()
def usage(argv,msg=None,exit_value=-1):
usagefmt = 'usage: {name} [[pattern_name],[X,Y]] ...'
namefmt = '\t{n}'
print(usagefmt.format(name=os.path.basename(argv[0])))
if msg:
print(msg)
print('pattern names:')
[print(namefmt.format(n=name)) for name in Patterns.keys()]
exit(exit_value)
if __name__ == '__main__':
import sys
import os
from curses import wrapper
try:
wrapper(main,sys.argv)
except KeyError as e:
usage(sys.argv,'unknown pattern {p}'.format(p=str(e)))
except ValueError as e:
usage(sys.argv,str(e))
| JnyJny/GameOfLife | contrib/CGameOfLife.py | Python | mit | 5,943 |
import pytest
from flexmock import flexmock
from f8a_worker.enums import EcosystemBackend
from f8a_worker.models import Base, Ecosystem, create_db_scoped_session
from f8a_worker.setup_celery import get_dispatcher_config_files
from f8a_worker.storages import AmazonS3
from selinon import Config
# To use fixtures from this file, either name them as an input argument or use 'usefixtures' marker
# https://docs.pytest.org/en/latest/fixture.html#using-fixtures-from-classes-modules-or-projects
@pytest.fixture
def rdb():
session = create_db_scoped_session()
# TODO: we may need to run actual migrations here
# make sure all session objects from scoped_session get closed here
# otherwise drop_all() would hang indefinitely
session.close_all()
# NOTE: this also drops any data created by fixtures (e.g. builtin ecosystems),
# so if you want to use these, create them by hand before running your tests
# We can't use Base.metadata.drop_all(bind=session.bind), since they may be tables from
# e.g. bayesian server, that reference f8a_worker tables and will prevent dropping them
tables = session.bind.table_names()
for t in tables:
session.execute('drop table if exists "{t}" cascade'.format(t=t))
session.commit()
Base.metadata.create_all(bind=session.bind)
return session
@pytest.fixture
def maven(rdb):
maven = Ecosystem(name='maven', backend=EcosystemBackend.maven,
fetch_url='')
rdb.add(maven)
rdb.commit()
return maven
@pytest.fixture
def npm(rdb):
npm = Ecosystem(name='npm', backend=EcosystemBackend.npm,
fetch_url='https://registry.npmjs.org/')
rdb.add(npm)
rdb.commit()
return npm
@pytest.fixture
def pypi(rdb):
pypi = Ecosystem(name='pypi', backend=EcosystemBackend.pypi,
fetch_url='https://pypi.python.org/pypi')
rdb.add(pypi)
rdb.commit()
return pypi
@pytest.fixture
def rubygems(rdb):
rubygems = Ecosystem(name='rubygems', backend=EcosystemBackend.rubygems,
fetch_url='https://rubygems.org/api/v1')
rdb.add(rubygems)
rdb.commit()
return rubygems
@pytest.fixture
def nuget(rdb):
nuget = Ecosystem(name='nuget', backend=EcosystemBackend.nuget,
fetch_url='https://api.nuget.org/packages/')
rdb.add(nuget)
rdb.commit()
return nuget
@pytest.fixture
def go(rdb):
e = Ecosystem(name='go', backend=EcosystemBackend.scm, fetch_url='')
rdb.add(e)
rdb.commit()
return e
@pytest.fixture()
def dispatcher_setup():
""" Setup environment for Dispatcher if needed """
nodes_yaml, flows_yaml = get_dispatcher_config_files()
Config.set_config_yaml(nodes_yaml, flows_yaml)
@pytest.fixture()
def no_s3_connection():
flexmock(AmazonS3).should_receive('is_connected').and_return(True)
| fridex/fabric8-analytics-worker | tests/conftest.py | Python | gpl-3.0 | 2,886 |
"""Test."""
import numpy as np
from pybotics.geometry import matrix_2_vector
from pybotics.tool import Tool
def test_tool():
"""Test."""
tool = Tool()
cg = [1, 2, 3]
tool.cg = cg
np.testing.assert_allclose(tool.cg, cg)
p = [1, 2, 3]
tool.position = p
np.testing.assert_allclose(tool.position, p)
np.testing.assert_allclose(tool.vector, matrix_2_vector(tool.matrix))
| nnadeau/pybotics | tests/test_tool.py | Python | mit | 407 |
# -*- coding: utf-8 -*-
# © 2018 Comunitea - Javier Colmenero <[email protected]>
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from odoo import models, fields, _
from odoo.exceptions import UserError
import xlrd
import xlwt
import base64
from odoo import http
from odoo.http import request
from odoo.addons.web.controllers.main import serialize_exception,content_disposition
import StringIO
import logging
_logger = logging.getLogger(__name__)
# Global variable to store the new created templates
template_ids = []
class Binary(http.Controller):
@http.route('/web/binary/download_document', type='http', auth="public")
@serialize_exception
def download_document(self,filename,filecontent, **kw):
""" Download link for files stored as binary fields.
:param str model: name of the model to fetch the binary from
:param str field: binary field
:param str id: id of the record from which to fetch the binary
:param str filename: field holding the file's name, if any
:returns: :class:`werkzeug.wrappers.Response`
"""
#print base64.b64decode(filecontent)
return request.make_response(filecontent,
[('Content-Type', 'application/octet-stream'),
('Content-Disposition', content_disposition(filename))])
class ProductImportWzd(models.TransientModel):
_name = 'product.import.wzd'
name = fields.Char('Importation name')
file = fields.Binary(string='File')
brand_id = fields.Many2one('product.brand', 'Brand')
filename = fields.Char(string='Filename')
categ_id = fields.Many2one('product.category', 'Default product category')
create_attributes = fields.Boolean('Create attributes/values if neccesary')
def _parse_row_vals(self, row, idx):
res = {
'default_code': row[0],
'x_barcode': row[1],
'barcode': row[3],
'ext_id': row[4],
}
        # Check mandatory values are set
return res
def get_ref(self, ext_id):
ext_id_c = ext_id.split('.')
if len(ext_id_c) == 1:
domain = [('model', '=', 'product.product'), ('module', '=', ''), ('name', '=', ext_id)]
else:
domain = [('model', '=', 'product.product'), ('module', '=', ext_id_c[0]), ('name', '=', ext_id_c[1])]
res_id = self.env['ir.model.data'].search(domain, limit=1)
return res_id and res_id.res_id or False
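    # Illustrative lookups (hypothetical external IDs): get_ref('base.prod_42')
    # searches for module='base', name='prod_42'; a bare 'prod_42' searches
    # with module=''.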
def import_products(self):
self.ensure_one()
_logger.info(_('STARTING PRODUCT IMPORTATION'))
# get the first worksheet
file = base64.b64decode(self.file)
book = xlrd.open_workbook(file_contents=file)
sh = book.sheet_by_index(0)
created_product_ids = []
idx = 1
error_idx=0
p_ids = []
row_err = []
stream = StringIO.StringIO()
workbook = xlwt.Workbook(encoding = 'ascii')
worksheet = workbook.add_sheet('Lista de filas con error')
for nline in range(1, sh.nrows):
            if idx < 15000:
idx += 1
row = sh.row_values(nline)
row_vals = self._parse_row_vals(row, idx)
res_id = self.get_ref(row_vals['ext_id'])
if res_id:
sql = "update product_product set barcode='{}' where id={}".format(str(row_vals['barcode']), res_id)
print sql
self._cr.execute(sql)
p_ids.append(res_id)
_logger.info(_('Van {} de {}: Update {} a {}'.format(nline, sh.nrows, res_id, row_vals['barcode'])))
else:
colu = 0
for col in row_vals:
row_err.append(row_vals[col])
worksheet.write(error_idx, colu, label=row_vals[col])
colu += 1
error_idx+=1
_logger.info(_('Error en {} a {}'.format(row_vals['ext_id'], row_vals['barcode'])))
workbook.save('../../var/log/mecalux/Errores.xls')
workbook.save(stream)
stream.seek(0)
data = stream.read()
return {
'type': 'ir.actions.act_url',
'url': '/web/binary/download_document?filename=./Errores.xls&filecontent={}'.format(data),
'target': 'self',
}
def action_view_products(self, product_ids):
self.ensure_one()
action = self.env.ref(
'product.product_normal_action').read()[0]
action['domain'] = [('id', 'in', product_ids)]
return action
| Comunitea/CMNT_00098_2017_JIM_addons | jim_product_import/wizard/product_import_wzd.py | Python | agpl-3.0 | 5,306 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 David Arroyo Menéndez
# Author: David Arroyo Menéndez <[email protected]>
# Maintainer: David Arroyo Menéndez <[email protected]>
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GNU Emacs; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA,
class Calculator(object):
def add(self, x, y):
return x+y
def sub(self, x, y):
return x-y
def prod(self, x, y):
return x*y
def div(self, x, y):
return x/y
def prodUsingAdd(self, x, y):
r = 0
for i in range(0, x):
r = self.add(r, y)
return r
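# Minimal usage sketch (illustrative):
if __name__ == '__main__':
    calc = Calculator()
    assert calc.add(2, 3) == 5
    assert calc.sub(2, 3) == -1
    assert calc.prodUsingAdd(4, 5) == calc.prod(4, 5) == 20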
| davidam/python-examples | poo/calculator/app/calculator.py | Python | gpl-3.0 | 1,230 |
# file openpyxl/shared/date_time.py
# Copyright (c) 2010 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: Eric Gazoni
"""Manage Excel date weirdness."""
# Python stdlib imports
from __future__ import division
from math import floor
import calendar
import datetime
import time
import re
# constants
W3CDTF_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
RE_W3CDTF = '(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})(.(\d{2}))?Z'
EPOCH = datetime.datetime.utcfromtimestamp(0)
def datetime_to_W3CDTF(dt):
"""Convert from a datetime to a timestamp string."""
return datetime.datetime.strftime(dt, W3CDTF_FORMAT)
def W3CDTF_to_datetime(formatted_string):
"""Convert from a timestamp string to a datetime object."""
match = re.match(RE_W3CDTF,formatted_string)
digits = map(int, match.groups()[:6])
return datetime.datetime(*digits)
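# Illustrative round trip (values chosen for the example):
#   datetime_to_W3CDTF(datetime.datetime(2010, 1, 18, 14, 15, 20))
#       -> '2010-01-18T14:15:20Z'
#   W3CDTF_to_datetime('2010-01-18T14:15:20Z')
#       -> datetime.datetime(2010, 1, 18, 14, 15, 20)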
class SharedDate(object):
"""Date formatting utilities for Excel with shared state.
    Excel has two primary date tracking schemes:
Windows - Day 1 == 1900-01-01
Mac - Day 1 == 1904-01-01
SharedDate stores which system we are using and converts dates between
Python and Excel accordingly.
"""
CALENDAR_WINDOWS_1900 = 1900
CALENDAR_MAC_1904 = 1904
datetime_object_type = 'DateTime'
def __init__(self):
self.excel_base_date = self.CALENDAR_WINDOWS_1900
def datetime_to_julian(self, date):
"""Convert from python datetime to excel julian date representation."""
if isinstance(date, datetime.datetime):
return self.to_julian(date.year, date.month, date.day, \
hours=date.hour, minutes=date.minute, seconds=date.second)
elif isinstance(date, datetime.date):
return self.to_julian(date.year, date.month, date.day)
def to_julian(self, year, month, day, hours=0, minutes=0, seconds=0):
"""Convert from Python date to Excel JD."""
# explicitly disallow bad years
# Excel 2000 treats JD=0 as 1/0/1900 (buggy, disallow)
# Excel 2000 treats JD=2958466 as a bad date (Y10K bug!)
if year < 1900 or year > 10000:
msg = 'Year not supported by Excel: %s' % year
raise ValueError(msg)
if self.excel_base_date == self.CALENDAR_WINDOWS_1900:
# Fudge factor for the erroneous fact that the year 1900 is
# treated as a Leap Year in MS Excel. This affects every date
# following 28th February 1900
if year == 1900 and month <= 2:
excel_1900_leap_year = False
else:
excel_1900_leap_year = True
excel_base_date = 2415020
else:
raise NotImplementedError('Mac dates are not yet supported.')
#excel_base_date = 2416481
#excel_1900_leap_year = False
# Julian base date adjustment
if month > 2:
month = month - 3
else:
month = month + 9
year -= 1
# Calculate the Julian Date, then subtract the Excel base date
# JD 2415020 = 31 - Dec - 1899 -> Excel Date of 0
century, decade = int(str(year)[:2]), int(str(year)[2:])
excel_date = floor(146097 * century / 4) + \
floor((1461 * decade) / 4) + floor((153 * month + 2) / 5) + \
day + 1721119 - excel_base_date
if excel_1900_leap_year:
excel_date += 1
# check to ensure that we exclude 2/29/1900 as a possible value
if self.excel_base_date == self.CALENDAR_WINDOWS_1900 \
and excel_date == 60:
msg = 'Error: Excel believes 1900 was a leap year'
raise ValueError(msg)
excel_time = ((hours * 3600) + (minutes * 60) + seconds) / 86400
return excel_date + excel_time
def from_julian(self, value=0):
"""Convert from the Excel JD back to a date"""
if self.excel_base_date == self.CALENDAR_WINDOWS_1900:
excel_base_date = 25569
if value < 60:
excel_base_date -= 1
elif value == 60:
msg = 'Error: Excel believes 1900 was a leap year'
raise ValueError(msg)
else:
raise NotImplementedError('Mac dates are not yet supported.')
#excel_base_date = 24107
if value >= 1:
utc_days = value - excel_base_date
return EPOCH + datetime.timedelta(days=utc_days)
elif value >= 0:
hours = floor(value * 24)
mins = floor(value * 24 * 60) - floor(hours * 60)
secs = floor(value * 24 * 60 * 60) - floor(hours * 60 * 60) - \
floor(mins * 60)
return datetime.time(int(hours), int(mins), int(secs))
else:
msg = 'Negative dates (%s) are not supported' % value
raise ValueError(msg)
| ChristineLaMuse/mozillians | vendor-local/lib/python/tablib/packages/openpyxl/shared/date_time.py | Python | bsd-3-clause | 5,956 |
subs = {}
| leojohnthomas/ahkab | subs.py | Python | gpl-2.0 | 10 |
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2019, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
import matplotlib.pyplot as plt
import numpy as np
import torch
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.lines import Line2D
from sklearn.metrics import confusion_matrix
from torch.nn.functional import cosine_similarity
def square_size(n):
side = int(np.sqrt(n))
if side ** 2 < n:
side += 1
return side
def activity_square(vector):
n = len(vector)
side = square_size(n)
square = torch.zeros(side ** 2)
square[:n] = vector
return square.view(side, side)
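# e.g. a length-10 vector pads into a 4x4 grid, since square_size(10) == 4:
#   activity_square(torch.ones(10)).shape -> torch.Size([4, 4])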
def fig2img(fig):
canvas = FigureCanvas(fig)
canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
    img = np.frombuffer(canvas.tostring_rgb(), dtype="uint8").reshape(
        int(height), int(width), 3
    )
return img
def plot_confusion_matrix(
y_true, y_pred, classes, normalize=False, title=None, cmap=plt.cm.Blues
):
"""
This function plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
# Compute confusion matrix
cm = confusion_matrix(y_true.cpu(), y_pred.cpu())
if normalize:
cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
fig = Figure()
ax = fig.gca()
im = ax.imshow(cm, interpolation="nearest", cmap=cmap)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(
xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=classes,
yticklabels=classes,
title=title,
ylabel="True label",
xlabel="Predicted label",
)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = ".2f" if normalize else "d"
thresh = cm.max() / 2.0
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(
j,
i,
format(cm[i, j], fmt),
ha="center",
va="center",
color="white" if cm[i, j] > thresh else "black",
)
return ax, fig
def plot_activity_grid(distrs, n_labels=10):
"""
For flattened models, plot cell activations for each combination of
input and actual next input
"""
fig, axs = plt.subplots(
n_labels,
n_labels,
dpi=300,
gridspec_kw={"hspace": 0.7, "wspace": 0.7},
sharex=True,
sharey=True,
)
for i in range(n_labels):
for j in range(n_labels):
key = "%d-%d" % (i, j)
if key in distrs:
activity_arr = distrs[key]
dist = torch.stack(activity_arr)
ax = axs[i][j]
mean_act = activity_square(dist.mean(dim=0).cpu())
side = mean_act.size(0)
ax.imshow(mean_act, origin="bottom", extent=(0, side, 0, side))
else:
ax.set_visible(False)
ax.axis("off")
ax.set_title(key, fontsize=5)
return fig
def plot_activity(distrs, n_labels=10, level="column"):
"""
Plot column activations for each combination of input and actual next input
Should show mini-column union activity (subsets of column-level activity
which predict next input) in the RSM model.
"""
n_plots = len(distrs.keys())
fig, axs = plt.subplots(n_plots, 1, dpi=300, gridspec_kw={"hspace": 0.7})
pi = 0
for i in range(n_labels):
for j in range(n_labels):
key = "%d-%d" % (i, j)
if key in distrs:
activity_arr = distrs[key]
dist = torch.stack(activity_arr)
ax = axs[pi]
pi += 1
bsz, m, n = dist.size()
no_columns = n == 1
col_act = dist.max(dim=2).values
if level == "column" or no_columns:
act = col_act
elif level == "cell":
col = col_act.view(bsz, m, 1)
act = torch.cat((dist, col), 2).view(bsz, m, n + 1)
mean_act = act.mean(dim=0).cpu()
if no_columns:
mean_act = activity_square(mean_act)
side = mean_act.size(0)
ax.imshow(mean_act, origin="bottom", extent=(0, side, 0, side))
else:
ax.imshow(
mean_act.t(), origin="bottom", extent=(0, m - 1, 0, n + 1)
)
ax.plot([0, m - 1], [n, n], linewidth=0.4)
ax.axis("off")
ax.set_title(key, fontsize=5)
return fig
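# Illustrative usage (editor's sketch; the activations are random stand-ins):
# `distrs` maps "input-next_input" keys to lists of (m, n) activation
# tensors; level="cell" keeps a 2-D image per transition.
def _demo_plot_activity():
    distrs = {
        "0-1": [torch.rand(6, 4) for _ in range(3)],
        "1-0": [torch.rand(6, 4) for _ in range(3)],
    }
    return plot_activity(distrs, n_labels=2, level="cell")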
def _repr_similarity_grid(
ax,
activity_arr,
cmap=plt.cm.Blues,
normalize=False,
labels=None,
title=None,
tick_fontsize=2,
fontsize=1.2,
):
n_labels = len(labels)
grid = torch.zeros(n_labels, n_labels)
# Compute grid (cosine similarity)
for i, act1 in enumerate(activity_arr):
for j, act2 in enumerate(activity_arr):
if j > i:
break
if act1 is not None and act2 is not None:
sim = cosine_similarity(act1, act2, dim=0)
grid[i, j] = grid[j, i] = sim
ax.imshow(grid, interpolation="nearest", cmap=cmap, vmin=0, vmax=1)
# ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(
xticks=np.arange(grid.shape[1]),
yticks=np.arange(grid.shape[0]),
# ... and label them with the respective list entries
xticklabels=labels,
yticklabels=labels,
title=title,
)
ax.tick_params(labelsize=tick_fontsize)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
thresh = grid.max() / 2.0
for i in range(grid.shape[0]):
for j in range(grid.shape[1]):
ax.text(
j,
i,
format(grid[i, j], ".2f"),
ha="center",
va="center",
fontsize=fontsize,
color="white" if grid[i, j] > thresh else "black",
)
def plot_representation_similarity(
distrs, n_labels=10, title=None, save=None, fontsize=1.6
):
"""
Plot grid showing representation similarity between distributions passed
into distrs dict.
"""
fig, axs = plt.subplots(1, 2, dpi=300)
ax_id = 0
col_activities = []
cell_activities = []
labels = []
for i in range(n_labels):
for j in range(n_labels):
key = "%d-%d" % (i, j)
col_act = cell_act = None
if key in distrs:
activity_arr = distrs[key]
dist = torch.stack(activity_arr)
ax_id += 1
size = dist.size()
if len(size) == 3:
bsz, m, n = size
tc = m * n
else:
bsz, m = size
tc = m
if m != tc:
col_act = (
dist.max(dim=-1).values.view(bsz, m).mean(dim=0).flatten().cpu()
)
col_activities.append(col_act)
# TODO: Check reshaping here
cell_act = dist.view(bsz, tc).mean(dim=0).flatten().cpu()
labels.append(key)
cell_activities.append(cell_act)
if col_activities:
_repr_similarity_grid(
axs[0], col_activities, labels=labels, title="Column", fontsize=fontsize
)
_repr_similarity_grid(
axs[1], cell_activities, labels=labels, title="Cell", fontsize=fontsize
)
suptitle = "Repr Similarity (Cos)"
if title:
suptitle += " - " + title
fig.suptitle(suptitle)
if save:
fig.savefig(save)
return fig
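# Illustrative usage (editor's sketch; random stand-in activations): with
# (bsz, m, n) entries the function plots pairwise cosine similarity between
# the mean column-level and cell-level representations of each transition.
def _demo_repr_similarity():
    distrs = {
        "0-1": [torch.rand(6, 4) for _ in range(3)],
        "1-0": [torch.rand(6, 4) for _ in range(3)],
    }
    return plot_representation_similarity(distrs, n_labels=2, title="demo")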
def get_grad_printer(msg):
"""
This function returns a printer function, that prints information about a
tensor's gradient. Used by register_hook in the backward pass.
"""
def printer(grad):
if grad.nelement() == 1:
print(f"{msg} {grad}")
else:
print(
f"{msg} shape: {grad.shape}"
f" {len(grad.nonzero())}/{grad.numel()} nonzero"
f" max: {grad.max()} min: {grad.min()}"
f" mean: {grad.mean()}"
)
return printer
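# Illustrative usage (editor's sketch): attach the printer to a leaf tensor
# so its gradient statistics are reported during backward().
def _demo_grad_printer():
    x = torch.randn(5, requires_grad=True)
    x.register_hook(get_grad_printer("x.grad"))
    (x * 2).sum().backward()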
def count_parameters(model, exclude=None):
params = 0
for n, p in model.named_parameters():
if p.requires_grad and (exclude is None or exclude not in n):
params += p.numel()
return params
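# Illustrative usage (editor's sketch): `exclude` drops any parameter whose
# name contains the given substring.
def _demo_count_parameters():
    model = torch.nn.Linear(10, 2)
    total = count_parameters(model)                         # 22 = 20 weights + 2 biases
    weights_only = count_parameters(model, exclude="bias")  # 20
    return total, weights_only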
def print_epoch_values(ret):
"""
Print dictionary of epoch values with large arrays removed
"""
print_ret = {}
for key, _val in ret.items():
if not key.startswith("img_") and not key.startswith("hist_"):
print_ret[key] = ret[key]
return print_ret
def _plot_grad_flow(self):
"""
Plots the gradients flowing through different layers in the net during
training. Can be used for checking for possible gradient
vanishing / exploding problems.
Usage: Plug this function in Trainer class after loss.backwards() as
"plot_grad_flow(self.model.named_parameters())" to visualize the gradient flow
"""
ave_grads = []
max_grads = []
layers = []
for n, p in self.model.named_parameters():
if (p.requires_grad) and ("bias" not in n):
layers.append(n)
ave_grads.append(p.grad.abs().mean())
max_grads.append(p.grad.abs().max())
plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color="c")
plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color="b")
plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color="k")
plt.xticks(range(0, len(ave_grads), 1), layers, rotation="vertical")
plt.xlim(left=0, right=len(ave_grads))
plt.ylim(bottom=-0.001, top=0.02) # zoom in on the lower gradient regions
plt.xlabel("Layers")
plt.ylabel("average gradient")
plt.title("Gradient flow")
plt.grid(True)
plt.legend(
[
Line2D([0], [0], color="c", lw=4),
Line2D([0], [0], color="b", lw=4),
Line2D([0], [0], color="k", lw=4),
],
["max-gradient", "mean-gradient", "zero-gradient"],
)
| chetan51/nupic.research | projects/rsm/util.py | Python | gpl-3.0 | 11,475 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Airbus
# Copyright 2017 Fraunhofer Institute for Manufacturing Engineering and Automation (IPA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
########################################
# Module(s) declaration
########################################
import rospy
import os
from roslib.packages import get_pkg_dir
from python_qt_binding.QtGui import *
from python_qt_binding.QtCore import *
from python_qt_binding import loadUi
########################################
# Constante(s) and Variable(s) declaration
########################################
DIR_COBOTGUI_RESOURCES = os.path.join(get_pkg_dir('airbus_cobot_gui'),'resources')
DIR_COBOTGUI_ACCOUNTS = DIR_COBOTGUI_RESOURCES+'/accounts'
DIR_COBOTGUI_BACKUP = DIR_COBOTGUI_RESOURCES+'/accounts/backup'
DIR_COBOTGUI_IMAGES = DIR_COBOTGUI_RESOURCES+'/images'
DIR_COBOTGUI_LAYOUTS = DIR_COBOTGUI_RESOURCES+'/layouts'
DIR_COBOTGUI_VALUES = DIR_COBOTGUI_RESOURCES+'/values'
########################################
# Class(ies) declaration
########################################
class CobotGuiAccounts():
def __init__(self):
class CobotGuiBackup():
def __init__(self):
self.uuid = self.__class__.__name__
self.dir = DIR_COBOTGUI_BACKUP
self.accounts_back = DIR_COBOTGUI_BACKUP+'/accounts_back.db'
def findById(self, id=""):
try:
return getattr(self,id)
except:
return None
self.uuid = self.__class__.__name__
self.dir = DIR_COBOTGUI_ACCOUNTS
        self.accounts = DIR_COBOTGUI_ACCOUNTS+'/accounts.xml'
self.encoded_accounts = DIR_COBOTGUI_ACCOUNTS+'/encoded_accounts.db'
self.backup = CobotGuiBackup()
def findById(self, id=""):
try:
return getattr(self,id)
except:
return None
class CobotGuiImages():
def __init__(self):
self.uuid = self.__class__.__name__
self.dir = DIR_COBOTGUI_IMAGES
self.icon_pause = DIR_COBOTGUI_IMAGES+'/icon_pause.png'
self.wellcome_background = DIR_COBOTGUI_IMAGES+'/wellcome_background.png'
self.trad = DIR_COBOTGUI_IMAGES+'/trad.png'
self.logo_airbus_group_2 = DIR_COBOTGUI_IMAGES+'/logo_airbus_group_2.png'
self.open_xml = DIR_COBOTGUI_IMAGES+'/open_xml.png'
self.ico_user = DIR_COBOTGUI_IMAGES+'/ico_user.png'
self.es = DIR_COBOTGUI_IMAGES+'/es.png'
self.en = DIR_COBOTGUI_IMAGES+'/en.png'
self.fr = DIR_COBOTGUI_IMAGES+'/fr.png'
self.de = DIR_COBOTGUI_IMAGES+'/de.png'
self.logo_airbus = DIR_COBOTGUI_IMAGES+'/logo_airbus.png'
self.icon_play = DIR_COBOTGUI_IMAGES+'/icon_play.png'
self.icon_mission = DIR_COBOTGUI_IMAGES+'/icon_mission.png'
self.move = DIR_COBOTGUI_IMAGES+'/move.png'
self.icon_monitoring = DIR_COBOTGUI_IMAGES+'/icon_monitoring.png'
self.desktop_launch = DIR_COBOTGUI_IMAGES+'/desktop_launch.png'
self.airbus_cobot_gui = DIR_COBOTGUI_IMAGES+'/airbus_cobot_gui.png'
self.switch_footer = DIR_COBOTGUI_IMAGES+'/switch_footer.png'
self.ico_alarm = DIR_COBOTGUI_IMAGES+'/ico_alarm.png'
self.icon_emergency_stop_unlock = DIR_COBOTGUI_IMAGES+'/icon_emergency_stop_unlock.png'
self.icon_emergency_stop_locked = DIR_COBOTGUI_IMAGES+'/icon_emergency_stop_locked.png'
self.logo_airbus_group = DIR_COBOTGUI_IMAGES+'/logo_airbus_group.png'
self.wellcome_base = DIR_COBOTGUI_IMAGES+'/wellcome_base.png'
self.icon_emergency_stop = DIR_COBOTGUI_IMAGES+'/icon_emergency_stop.png'
self.status_error = DIR_COBOTGUI_IMAGES+'/status_error.png'
self.status_warning = DIR_COBOTGUI_IMAGES+'/status_warning.png'
self.status_stale = DIR_COBOTGUI_IMAGES+'/status_stale.png'
self.status_ok = DIR_COBOTGUI_IMAGES+'/status_ok.png'
def findById(self, id=""):
try:
return getattr(self,id)
except:
return None
class CobotGuiLayouts():
def __init__(self):
self.uuid = self.__class__.__name__
self.dir = DIR_COBOTGUI_LAYOUTS
self.languages_manager_popup = DIR_COBOTGUI_LAYOUTS+'/languages_manager_popup.ui'
self.add_user_widget = DIR_COBOTGUI_LAYOUTS+'/add_user_widget.ui'
self.account_popup = DIR_COBOTGUI_LAYOUTS+'/account_popup.ui'
self.menu_launcher2 = DIR_COBOTGUI_LAYOUTS+'/menu_launcher2.ui'
self.remove_account_widget = DIR_COBOTGUI_LAYOUTS+'/remove_account_widget.ui'
self.login_dialog = DIR_COBOTGUI_LAYOUTS+'/login_dialog.ui'
self.alarm_widget = DIR_COBOTGUI_LAYOUTS+'/alarm_widget.ui'
self.mainwindow = DIR_COBOTGUI_LAYOUTS+'/mainwindow.ui'
self.users_accounts_dialog = DIR_COBOTGUI_LAYOUTS+'/users_accounts_dialog.ui'
self.welcome = DIR_COBOTGUI_LAYOUTS+'/welcome.ui'
self.accounts_manager_dialog = DIR_COBOTGUI_LAYOUTS+'/accounts_manager_dialog.ui'
self.modif_account_widget = DIR_COBOTGUI_LAYOUTS+'/modif_account_widget.ui'
self.alarm_listview = DIR_COBOTGUI_LAYOUTS+'/alarm_listview.ui'
self.languages_popup = DIR_COBOTGUI_LAYOUTS+'/languages_popup.ui'
self.menu_launcher = DIR_COBOTGUI_LAYOUTS+'/menu_launcher.ui'
self.diagnostics_popup = DIR_COBOTGUI_LAYOUTS+'/diagnostics_popup.ui'
def findById(self, id=""):
try:
return getattr(self,id)
except:
return None
class CobotGuiValues():
def __init__(self):
class CobotGuiStrings():
def __init__(self):
self.uuid = self.__class__.__name__
def findById(self, id=""):
try:
return getattr(self,id)
except:
return None
def alarms_waiting(self, lng="en"):
if lng == "en":
return "alarms waiting ...".decode('utf-8')
elif lng == "fr":
return "alarmes en attente ...".decode('utf-8')
else:
return "alarms waiting ...".decode('utf-8')
def language_selection(self, lng="en"):
if lng == "en":
return "The language selected is".decode('utf-8')
elif lng == "fr":
return "La langue sélectionnée est".decode('utf-8')
elif lng == "de":
return "Die gewählte Sprache".decode('utf-8')
elif lng == "es":
return "El idioma seleccionado es".decode('utf-8')
else:
return "The language selected is".decode('utf-8')
def app_mode(self, lng="en"):
if lng == "en":
return "Switching AUTOMATIC to MANUAL mode".decode('utf-8')
elif lng == "fr":
return "Commutation du mode automatique à manuel".decode('utf-8')
else:
return "Switching AUTOMATIC to MANUAL mode".decode('utf-8')
def auto(self, lng="en"):
if lng == "en":
return "AUTO".decode('utf-8')
elif lng == "fr":
return "AUTO".decode('utf-8')
else:
return "AUTO".decode('utf-8')
def manu(self, lng="en"):
if lng == "en":
return "MANU".decode('utf-8')
elif lng == "fr":
return "MANU".decode('utf-8')
else:
return "MANU".decode('utf-8')
def aborted(self, lng="en"):
if lng == "en":
return "Aborted".decode('utf-8')
elif lng == "fr":
return "Avorté".decode('utf-8')
else:
return "Aborted".decode('utf-8')
def access_rights(self, lng="en"):
if lng == "en":
return "Access rights".decode('utf-8')
elif lng == "fr":
return "Droits d'accès".decode('utf-8')
else:
return "Access rights".decode('utf-8')
def account_manager(self, lng="en"):
if lng == "en":
return "Account manager".decode('utf-8')
elif lng == "fr":
return "Gestion des comptes".decode('utf-8')
else:
return "Account manager".decode('utf-8')
def actions(self, lng="en"):
if lng == "en":
return "Actions".decode('utf-8')
elif lng == "fr":
return "Actions".decode('utf-8')
else:
return "Actions".decode('utf-8')
def add(self, lng="en"):
if lng == "en":
return "Add".decode('utf-8')
elif lng == "fr":
return "Ajouter".decode('utf-8')
else:
return "Add".decode('utf-8')
def add_user(self, lng="en"):
if lng == "en":
return "Add user account".decode('utf-8')
elif lng == "fr":
return "Ajouter compte utilisateur".decode('utf-8')
else:
return "Add user account".decode('utf-8')
def confirm_password(self, lng="en"):
if lng == "en":
return "Confirm password".decode('utf-8')
elif lng == "fr":
return "Confirmer le mot de passe".decode('utf-8')
else:
return "Confirm password".decode('utf-8')
def confirm_your_password(self, lng="en"):
if lng == "en":
return "Confirm your password".decode('utf-8')
elif lng == "fr":
return "Confirmez votre mot de passe".decode('utf-8')
else:
return "Confirm your password".decode('utf-8')
def connection(self, lng="en"):
if lng == "en":
return "Connection".decode('utf-8')
elif lng == "fr":
return "Connection".decode('utf-8')
else:
return "Connection".decode('utf-8')
def current(self, lng="en"):
if lng == "en":
return "Current".decode('utf-8')
elif lng == "fr":
return "Courant".decode('utf-8')
else:
return "Current".decode('utf-8')
def current_password(self, lng="en"):
if lng == "en":
return "Current password".decode('utf-8')
elif lng == "fr":
return "Mot de passe actuel".decode('utf-8')
else:
return "Current password".decode('utf-8')
def emergency_stop(self, lng="en"):
if lng == "en":
return "The system was stopped, be careful before restarting the applications !".decode('utf-8')
elif lng == "fr":
return "Le système a été arrêt, faite attention avant de redémarrer les applications !".decode('utf-8')
else:
return "The system was stopped, be careful before restarting the applications !".decode('utf-8')
def release_emergency_stop(self, lng="en"):
if lng == "en":
return "Release emergency stop".decode('utf-8')
elif lng == "fr":
return "Déverrouillé l'arrêt d'urgence".decode('utf-8')
else:
return "Release emergency stop".decode('utf-8')
def disconnection(self, lng="en"):
if lng == "en":
return "Disconnection".decode('utf-8')
elif lng == "fr":
return "Déconnection".decode('utf-8')
else:
return "Disconnection".decode('utf-8')
def exit(self, lng="en"):
if lng == "en":
return "Exit".decode('utf-8')
elif lng == "fr":
return "Quitter".decode('utf-8')
else:
return "Exit".decode('utf-8')
def invalid_password(self, lng="en"):
if lng == "en":
return "Invalid password !".decode('utf-8')
elif lng == "fr":
return "Mot de passe incorrect !".decode('utf-8')
else:
return "Invalid password !".decode('utf-8')
def invalid_user_id(self, lng="en"):
if lng == "en":
return "Invalid user id !".decode('utf-8')
elif lng == "fr":
return "Identifiant utilisateur incorrect !".decode('utf-8')
else:
return "Invalid user id !".decode('utf-8')
def launch(self, lng="en"):
if lng == "en":
return "Launch".decode('utf-8')
elif lng == "fr":
return "Lanceur".decode('utf-8')
else:
return "Launch".decode('utf-8')
def login(self, lng="en"):
if lng == "en":
return "Login".decode('utf-8')
elif lng == "fr":
return "Connexion".decode('utf-8')
else:
return "Login".decode('utf-8')
def modif(self, lng="en"):
if lng == "en":
return "Modif".decode('utf-8')
elif lng == "fr":
return "Modifier".decode('utf-8')
else:
return "Modif".decode('utf-8')
def modif_user_account(self, lng="en"):
if lng == "en":
return "Modif user account".decode('utf-8')
elif lng == "fr":
return "Modifier compte utilisateur".decode('utf-8')
else:
return "Modif user account".decode('utf-8')
def name(self, lng="en"):
if lng == "en":
return "Name".decode('utf-8')
elif lng == "fr":
return "Nom".decode('utf-8')
else:
return "Name".decode('utf-8')
def new_password(self, lng="en"):
if lng == "en":
return "New password".decode('utf-8')
elif lng == "fr":
return "Nouveau mot de passe".decode('utf-8')
else:
return "New password".decode('utf-8')
def next_step(self, lng="en"):
if lng == "en":
return "Next step".decode('utf-8')
elif lng == "fr":
return "Etape suivante".decode('utf-8')
else:
return "Next step".decode('utf-8')
def off(self, lng="en"):
if lng == "en":
return "Off".decode('utf-8')
elif lng == "fr":
return "Off".decode('utf-8')
else:
return "Off".decode('utf-8')
def ok(self, lng="en"):
if lng == "en":
return "Ok".decode('utf-8')
elif lng == "fr":
return "Ok".decode('utf-8')
else:
return "Ok".decode('utf-8')
def on(self, lng="en"):
if lng == "en":
return "On".decode('utf-8')
elif lng == "fr":
return "On".decode('utf-8')
else:
return "On".decode('utf-8')
def open(self, lng="en"):
if lng == "en":
return "Open".decode('utf-8')
elif lng == "fr":
return "Ouvrir".decode('utf-8')
else:
return "Open".decode('utf-8')
def close(self, lng="en"):
if lng == "en":
return "Close".decode('utf-8')
elif lng == "fr":
return "Fermer".decode('utf-8')
else:
return "Close".decode('utf-8')
def open_mission(self, lng="en"):
if lng == "en":
return "Open mission".decode('utf-8')
elif lng == "fr":
return "Ouvrir une mission".decode('utf-8')
else:
return "Open mission".decode('utf-8')
def parameters(self, lng="en"):
if lng == "en":
return "Parameters".decode('utf-8')
elif lng == "fr":
return "Paramètres".decode('utf-8')
else:
return "Parameters".decode('utf-8')
def password(self, lng="en"):
if lng == "en":
return "Password".decode('utf-8')
elif lng == "fr":
return "Mot de passe".decode('utf-8')
else:
return "Password".decode('utf-8')
def preempt(self, lng="en"):
if lng == "en":
return "Preempt".decode('utf-8')
elif lng == "fr":
return "Préempter".decode('utf-8')
else:
return "Preempt".decode('utf-8')
def remove(self, lng="en"):
if lng == "en":
return "Remove".decode('utf-8')
elif lng == "fr":
return "Supprimer".decode('utf-8')
else:
return "Remove".decode('utf-8')
def remove_user_account(self, lng="en"):
if lng == "en":
return "Remove user account".decode('utf-8')
elif lng == "fr":
return "Supprimer compte utilisateur".decode('utf-8')
else:
return "Remove user account".decode('utf-8')
def rights(self, lng="en"):
if lng == "en":
return "Rights".decode('utf-8')
elif lng == "fr":
return "Droits".decode('utf-8')
else:
return "Rights".decode('utf-8')
def select_access_rights(self, lng="en"):
if lng == "en":
return "Select access rights".decode('utf-8')
elif lng == "fr":
return "Sélectionner un droits accès".decode('utf-8')
else:
return "Select access rights".decode('utf-8')
def select_user(self, lng="en"):
if lng == "en":
return "Select user".decode('utf-8')
elif lng == "fr":
return "Sélectionner un utilisateur".decode('utf-8')
else:
return "Select user".decode('utf-8')
def settings(self, lng="en"):
if lng == "en":
return "Settings".decode('utf-8')
elif lng == "fr":
return "Paramètres".decode('utf-8')
else:
return "Settings".decode('utf-8')
def fields_not_filled(self, lng="en"):
if lng == "en":
return "Some fields are not filled".decode('utf-8')
elif lng == "fr":
return "Certains champs ne sont pas remplis".decode('utf-8')
else:
return "Some fields are not filled".decode('utf-8')
def start(self, lng="en"):
if lng == "en":
return "Start".decode('utf-8')
elif lng == "fr":
return "Démarrer".decode('utf-8')
else:
return "Start".decode('utf-8')
def status(self, lng="en"):
if lng == "en":
return "Status".decode('utf-8')
elif lng == "fr":
return "Statut".decode('utf-8')
else:
return "Status".decode('utf-8')
def stop(self, lng="en"):
if lng == "en":
return "Stop".decode('utf-8')
elif lng == "fr":
return "Arrêter".decode('utf-8')
else:
return "Stop".decode('utf-8')
def passwords_different(self, lng="en"):
if lng == "en":
return "The passwords are different".decode('utf-8')
elif lng == "fr":
return "Les mots de passe sont différents".decode('utf-8')
else:
return "The passwords are different".decode('utf-8')
def add_user_success(self, lng="en"):
if lng == "en":
return "The user was added successfully".decode('utf-8')
elif lng == "fr":
return "L'utilisateur a été ajouté avec succès".decode('utf-8')
else:
return "The user was added successfully".decode('utf-8')
def user_mv_success(self, lng="en"):
if lng == "en":
return "The user was modified successfully".decode('utf-8')
elif lng == "fr":
return "L'utilisateur a été modifié avec succès".decode('utf-8')
else:
return "The user was modified successfully".decode('utf-8')
def user_rm_success(self, lng="en"):
if lng == "en":
return "The user was removed successfully".decode('utf-8')
elif lng == "fr":
return "L'utilisateur a été supprimé avec succès".decode('utf-8')
else:
return "The user was removed successfully".decode('utf-8')
def time(self, lng="en"):
if lng == "en":
return "Time".decode('utf-8')
elif lng == "fr":
return "Temps".decode('utf-8')
else:
return "Time".decode('utf-8')
def user(self, lng="en"):
if lng == "en":
return "User".decode('utf-8')
elif lng == "fr":
return "Utilisateur".decode('utf-8')
else:
return "User".decode('utf-8')
def user_account(self, lng="en"):
if lng == "en":
return "User account".decode('utf-8')
elif lng == "fr":
return "Compte utilisateur".decode('utf-8')
else:
return "User account".decode('utf-8')
def user_guide(self, lng="en"):
if lng == "en":
return "User guide".decode('utf-8')
elif lng == "fr":
return "Guide utilisateur".decode('utf-8')
else:
return "User guide".decode('utf-8')
def user_id(self, lng="en"):
if lng == "en":
return "User id".decode('utf-8')
elif lng == "fr":
return "Identifiant".decode('utf-8')
else:
return "User id".decode('utf-8')
def user_list(self, lng="en"):
if lng == "en":
return "User list".decode('utf-8')
elif lng == "fr":
return "Liste des utilisateurs".decode('utf-8')
else:
return "User list".decode('utf-8')
def traductor(self, lng="en"):
if lng == "en":
return "Traductor".decode('utf-8')
elif lng == "fr":
return "Traducteur".decode('utf-8')
else:
return "Traductor".decode('utf-8')
def language(self, lng="en"):
if lng == "en":
return "Language".decode('utf-8')
elif lng == "fr":
return "Langue".decode('utf-8')
else:
return "Language".decode('utf-8')
class CobotGuiStyles():
def __init__(self):
self.uuid = self.__class__.__name__
self.alarm = "QWidget{background-color: #ffff01;}"
self.background_estop_locked = "QWidget{background-color: #ff0000;}"
self.background_estop_unlocked = "QWidget{background-color: #d9d9d9;}"
self.login = "QDialog{background-color:qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #616763, stop: 1 #89928c);}"
self.default_launch = "QPushButton{background-color: rgba(255,0,0,80%);border-radius: 10px;font-size: 12pt;font-weight:60;color: #ffffff;}"
self.transparent_background = "background-color: transparent;font-size: 20pt;"
self.no_background = "background:none;"
self.bad_password = "background-color: #ffffff;border-radius: 5px;font-size: 16pt; font-weight:40; color: rgb(255,0,0);"
self.good_password = "background-color: #ffffff;border-radius: 5px;font-size: 16pt; font-weight:40; color: rgb(0,255,0);"
self.no_password = "background-color: #ffffff;border-radius: 5px;font-size: 16pt; font-weight:40; color: #494842;"
self.text = "QLabel {font-size: 22pt;}"
self.diagnostics = "QWidget{background-color: #ffff01;}"
def findById(self, id=""):
try:
return getattr(self,id)
except:
return None
self.uuid = self.__class__.__name__
self.dir = DIR_COBOTGUI_VALUES
self.launchers = DIR_COBOTGUI_VALUES+'/launchers.xml'
self.strings = CobotGuiStrings()
self.styles = CobotGuiStyles()
def findById(self, id=""):
try:
return getattr(self,id)
except:
return None
class R:
DIR = DIR_COBOTGUI_RESOURCES
accounts = CobotGuiAccounts()
images = CobotGuiImages()
layouts = CobotGuiLayouts()
values = CobotGuiValues()
@staticmethod
def getPixmapById(id=""):
return QPixmap(R.images.findById(id))
@staticmethod
def getIconById(id=""):
return QIcon(R.images.findById(id))
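# Illustrative usage (editor's sketch; `_demo_resources` is hypothetical and
# creates no Qt objects): resources are resolved through the static R class,
# in the spirit of Android's generated R.
def _demo_resources():
    login_ui = R.layouts.findById('login_dialog')  # absolute path to the .ui file
    label_fr = R.values.strings.login('fr')        # u'Connexion'
    return login_ui, label_fr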
# End of file
| ipa-led/airbus_coop | airbus_cobot_gui/src/airbus_cobot_gui/res.py | Python | apache-2.0 | 28,643 |
from galaxy.web.base.controller import *
import pkg_resources
pkg_resources.require( "simplejson" )
pkg_resources.require( "SVGFig" )
import simplejson
import base64, httplib, urllib2, sgmllib, svgfig
import math
import zipfile, time, os, tempfile, string
from operator import itemgetter
from galaxy.web.framework.helpers import time_ago, grids
from galaxy.tools.parameters import *
from galaxy.tools import DefaultToolState
from galaxy.tools.parameters.grouping import Repeat, Conditional
from galaxy.datatypes.data import Data
from galaxy.util.odict import odict
from galaxy.util.sanitize_html import sanitize_html
from galaxy.util.topsort import topsort, topsort_levels, CycleError
from galaxy.workflow.modules import *
from galaxy import model
from galaxy.model.mapping import desc
from galaxy.model.orm import *
from galaxy.model.item_attrs import *
from galaxy.web.framework.helpers import to_unicode
from galaxy.jobs.actions.post import ActionBox
class StoredWorkflowListGrid( grids.Grid ):
class StepsColumn( grids.GridColumn ):
def get_value(self, trans, grid, workflow):
return len( workflow.latest_workflow.steps )
# Grid definition
use_panels = True
title = "Saved Workflows"
model_class = model.StoredWorkflow
default_filter = { "name" : "All", "tags": "All" }
default_sort_key = "-update_time"
columns = [
grids.TextColumn( "Name", key="name", attach_popup=True, filterable="advanced" ),
grids.IndividualTagsColumn( "Tags", "tags", model_tag_association_class=model.StoredWorkflowTagAssociation, filterable="advanced", grid_name="StoredWorkflowListGrid" ),
StepsColumn( "Steps" ),
grids.GridColumn( "Created", key="create_time", format=time_ago ),
grids.GridColumn( "Last Updated", key="update_time", format=time_ago ),
]
columns.append(
grids.MulticolFilterColumn(
"Search",
cols_to_filter=[ columns[0], columns[1] ],
key="free-text-search", visible=False, filterable="standard" )
)
operations = [
grids.GridOperation( "Edit", allow_multiple=False, condition=( lambda item: not item.deleted ), async_compatible=False ),
grids.GridOperation( "Run", condition=( lambda item: not item.deleted ), async_compatible=False ),
grids.GridOperation( "Clone", condition=( lambda item: not item.deleted ), async_compatible=False ),
grids.GridOperation( "Rename", condition=( lambda item: not item.deleted ), async_compatible=False ),
grids.GridOperation( "Sharing", condition=( lambda item: not item.deleted ), async_compatible=False ),
grids.GridOperation( "Delete", condition=( lambda item: item.deleted ), async_compatible=True ),
]
def apply_query_filter( self, trans, query, **kwargs ):
return query.filter_by( user=trans.user, deleted=False )
class StoredWorkflowAllPublishedGrid( grids.Grid ):
title = "Published Workflows"
model_class = model.StoredWorkflow
default_sort_key = "update_time"
default_filter = dict( public_url="All", username="All", tags="All" )
use_async = True
columns = [
grids.PublicURLColumn( "Name", key="name", filterable="advanced" ),
grids.OwnerAnnotationColumn( "Annotation", key="annotation", model_annotation_association_class=model.StoredWorkflowAnnotationAssociation, filterable="advanced" ),
grids.OwnerColumn( "Owner", key="username", model_class=model.User, filterable="advanced" ),
grids.CommunityRatingColumn( "Community Rating", key="rating" ),
grids.CommunityTagsColumn( "Community Tags", key="tags", model_tag_association_class=model.StoredWorkflowTagAssociation, filterable="advanced", grid_name="PublicWorkflowListGrid" ),
grids.ReverseSortColumn( "Last Updated", key="update_time", format=time_ago )
]
columns.append(
grids.MulticolFilterColumn(
"Search name, annotation, owner, and tags",
cols_to_filter=[ columns[0], columns[1], columns[2], columns[4] ],
key="free-text-search", visible=False, filterable="standard" )
)
operations = []
def build_initial_query( self, trans, **kwargs ):
# Join so that searching stored_workflow.user makes sense.
return trans.sa_session.query( self.model_class ).join( model.User.table )
def apply_query_filter( self, trans, query, **kwargs ):
# A public workflow is published, has a slug, and is not deleted.
return query.filter( self.model_class.published==True ).filter( self.model_class.slug != None ).filter( self.model_class.deleted == False )
# Simple SGML parser to get all content in a single tag.
class SingleTagContentsParser( sgmllib.SGMLParser ):
def __init__( self, target_tag ):
sgmllib.SGMLParser.__init__( self )
self.target_tag = target_tag
self.cur_tag = None
self.tag_content = ""
def unknown_starttag( self, tag, attrs ):
""" Called for each start tag. """
self.cur_tag = tag
def handle_data( self, text ):
""" Called for each block of plain text. """
if self.cur_tag == self.target_tag:
self.tag_content += text
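# Illustrative usage (editor's sketch; the markup below is made up): feed the
# parser a document and read back the accumulated text of the target tag.
def _demo_single_tag_parser():
    parser = SingleTagContentsParser( "title" )
    parser.feed( "<html><title>My workflow</title><body>ignored</body></html>" )
    return parser.tag_content  # "My workflow"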
class WorkflowController( BaseController, Sharable, UsesStoredWorkflow, UsesAnnotations, UsesItemRatings ):
stored_list_grid = StoredWorkflowListGrid()
published_list_grid = StoredWorkflowAllPublishedGrid()
__myexp_url = "sandbox.myexperiment.org:80"
@web.expose
def index( self, trans ):
return self.list( trans )
@web.expose
@web.require_login( "use Galaxy workflows" )
def list_grid( self, trans, **kwargs ):
""" List user's stored workflows. """
status = message = None
if 'operation' in kwargs:
operation = kwargs['operation'].lower()
if operation == "rename":
return self.rename( trans, **kwargs )
history_ids = util.listify( kwargs.get( 'id', [] ) )
if operation == "sharing":
return self.sharing( trans, id=history_ids )
return self.stored_list_grid( trans, **kwargs )
@web.expose
@web.require_login( "use Galaxy workflows", use_panels=True )
def list( self, trans ):
"""
Render workflow main page (management of existing workflows)
"""
user = trans.get_user()
workflows = trans.sa_session.query( model.StoredWorkflow ) \
.filter_by( user=user, deleted=False ) \
.order_by( desc( model.StoredWorkflow.table.c.update_time ) ) \
.all()
shared_by_others = trans.sa_session \
.query( model.StoredWorkflowUserShareAssociation ) \
.filter_by( user=user ) \
.join( 'stored_workflow' ) \
.filter( model.StoredWorkflow.deleted == False ) \
.order_by( desc( model.StoredWorkflow.update_time ) ) \
.all()
# Legacy issue: all shared workflows must have slugs.
slug_set = False
for workflow_assoc in shared_by_others:
slug_set = self.create_item_slug( trans.sa_session, workflow_assoc.stored_workflow )
if slug_set:
trans.sa_session.flush()
return trans.fill_template( "workflow/list.mako",
workflows = workflows,
shared_by_others = shared_by_others )
@web.expose
@web.require_login( "use Galaxy workflows" )
def list_for_run( self, trans ):
"""
Render workflow list for analysis view (just allows running workflow
or switching to management view)
"""
user = trans.get_user()
workflows = trans.sa_session.query( model.StoredWorkflow ) \
.filter_by( user=user, deleted=False ) \
.order_by( desc( model.StoredWorkflow.table.c.update_time ) ) \
.all()
shared_by_others = trans.sa_session \
.query( model.StoredWorkflowUserShareAssociation ) \
.filter_by( user=user ) \
.filter( model.StoredWorkflow.deleted == False ) \
.order_by( desc( model.StoredWorkflow.table.c.update_time ) ) \
.all()
return trans.fill_template( "workflow/list_for_run.mako",
workflows = workflows,
shared_by_others = shared_by_others )
@web.expose
def list_published( self, trans, **kwargs ):
grid = self.published_list_grid( trans, **kwargs )
if 'async' in kwargs:
return grid
else:
# Render grid wrapped in panels
return trans.fill_template( "workflow/list_published.mako", grid=grid )
@web.expose
def display_by_username_and_slug( self, trans, username, slug ):
""" Display workflow based on a username and slug. """
# Get workflow.
session = trans.sa_session
user = session.query( model.User ).filter_by( username=username ).first()
stored_workflow = trans.sa_session.query( model.StoredWorkflow ).filter_by( user=user, slug=slug, deleted=False ).first()
if stored_workflow is None:
raise web.httpexceptions.HTTPNotFound()
# Security check raises error if user cannot access workflow.
self.security_check( trans.get_user(), stored_workflow, False, True)
# Get data for workflow's steps.
self.get_stored_workflow_steps( trans, stored_workflow )
# Get annotations.
stored_workflow.annotation = self.get_item_annotation_str( trans.sa_session, stored_workflow.user, stored_workflow )
for step in stored_workflow.latest_workflow.steps:
step.annotation = self.get_item_annotation_str( trans.sa_session, stored_workflow.user, step )
# Get rating data.
user_item_rating = 0
if trans.get_user():
user_item_rating = self.get_user_item_rating( trans.sa_session, trans.get_user(), stored_workflow )
if user_item_rating:
user_item_rating = user_item_rating.rating
else:
user_item_rating = 0
ave_item_rating, num_ratings = self.get_ave_item_rating_data( trans.sa_session, stored_workflow )
return trans.fill_template_mako( "workflow/display.mako", item=stored_workflow, item_data=stored_workflow.latest_workflow.steps,
user_item_rating = user_item_rating, ave_item_rating=ave_item_rating, num_ratings=num_ratings )
@web.expose
def get_item_content_async( self, trans, id ):
""" Returns item content in HTML format. """
stored = self.get_stored_workflow( trans, id, False, True )
if stored is None:
raise web.httpexceptions.HTTPNotFound()
# Get data for workflow's steps.
self.get_stored_workflow_steps( trans, stored )
# Get annotations.
stored.annotation = self.get_item_annotation_str( trans.sa_session, stored.user, stored )
for step in stored.latest_workflow.steps:
step.annotation = self.get_item_annotation_str( trans.sa_session, stored.user, step )
return trans.stream_template_mako( "/workflow/item_content.mako", item = stored, item_data = stored.latest_workflow.steps )
@web.expose
@web.require_login( "use Galaxy workflows" )
def share( self, trans, id, email="", use_panels=False ):
msg = mtype = None
# Load workflow from database
stored = self.get_stored_workflow( trans, id )
if email:
other = trans.sa_session.query( model.User ) \
.filter( and_( model.User.table.c.email==email,
model.User.table.c.deleted==False ) ) \
.first()
if not other:
mtype = "error"
msg = ( "User '%s' does not exist" % email )
elif other == trans.get_user():
mtype = "error"
msg = ( "You cannot share a workflow with yourself" )
elif trans.sa_session.query( model.StoredWorkflowUserShareAssociation ) \
.filter_by( user=other, stored_workflow=stored ).count() > 0:
mtype = "error"
msg = ( "Workflow already shared with '%s'" % email )
else:
share = model.StoredWorkflowUserShareAssociation()
share.stored_workflow = stored
share.user = other
session = trans.sa_session
session.add( share )
self.create_item_slug( session, stored )
session.flush()
trans.set_message( "Workflow '%s' shared with user '%s'" % ( stored.name, other.email ) )
return trans.response.send_redirect( url_for( controller='workflow', action='sharing', id=id ) )
return trans.fill_template( "/ind_share_base.mako",
message = msg,
messagetype = mtype,
item=stored,
email=email,
use_panels=use_panels )
@web.expose
@web.require_login( "use Galaxy workflows" )
def sharing( self, trans, id, **kwargs ):
""" Handle workflow sharing. """
# Get session and workflow.
session = trans.sa_session
stored = self.get_stored_workflow( trans, id )
session.add( stored )
# Do operation on workflow.
if 'make_accessible_via_link' in kwargs:
self._make_item_accessible( trans.sa_session, stored )
elif 'make_accessible_and_publish' in kwargs:
self._make_item_accessible( trans.sa_session, stored )
stored.published = True
elif 'publish' in kwargs:
stored.published = True
elif 'disable_link_access' in kwargs:
stored.importable = False
elif 'unpublish' in kwargs:
stored.published = False
elif 'disable_link_access_and_unpublish' in kwargs:
stored.importable = stored.published = False
elif 'unshare_user' in kwargs:
user = session.query( model.User ).get( trans.security.decode_id( kwargs['unshare_user' ] ) )
if not user:
error( "User not found for provided id" )
association = session.query( model.StoredWorkflowUserShareAssociation ) \
.filter_by( user=user, stored_workflow=stored ).one()
session.delete( association )
# Legacy issue: workflows made accessible before recent updates may not have a slug. Create slug for any workflows that need them.
if stored.importable and not stored.slug:
self._make_item_accessible( trans.sa_session, stored )
session.flush()
return trans.fill_template( "/workflow/sharing.mako", use_panels=True, item=stored )
@web.expose
@web.require_login( "to import a workflow", use_panels=True )
def imp( self, trans, id, **kwargs ):
# Set referer message.
referer = trans.request.referer
        if referer:
referer_message = "<a href='%s'>return to the previous page</a>" % referer
else:
referer_message = "<a href='%s'>go to Galaxy's start page</a>" % url_for( '/' )
# Do import.
session = trans.sa_session
stored = self.get_stored_workflow( trans, id, check_ownership=False )
if stored.importable == False:
return trans.show_error_message( "The owner of this workflow has disabled imports via this link.<br>You can %s" % referer_message, use_panels=True )
elif stored.deleted:
return trans.show_error_message( "You can't import this workflow because it has been deleted.<br>You can %s" % referer_message, use_panels=True )
else:
# Copy workflow.
imported_stored = model.StoredWorkflow()
imported_stored.name = "imported: " + stored.name
imported_stored.latest_workflow = stored.latest_workflow
imported_stored.user = trans.user
# Save new workflow.
session = trans.sa_session
session.add( imported_stored )
session.flush()
# Copy annotations.
self.copy_item_annotation( session, stored.user, stored, imported_stored.user, imported_stored )
for order_index, step in enumerate( stored.latest_workflow.steps ):
self.copy_item_annotation( session, stored.user, step, \
imported_stored.user, imported_stored.latest_workflow.steps[order_index] )
session.flush()
# Redirect to load galaxy frames.
return trans.show_ok_message(
message="""Workflow "%s" has been imported. <br>You can <a href="%s">start using this workflow</a> or %s."""
% ( stored.name, web.url_for( controller='workflow' ), referer_message ), use_panels=True )
@web.expose
@web.require_login( "use Galaxy workflows" )
def edit_attributes( self, trans, id, **kwargs ):
# Get workflow and do error checking.
stored = self.get_stored_workflow( trans, id )
if not stored:
error( "You do not own this workflow or workflow ID is invalid." )
# Update workflow attributes if new values submitted.
if 'name' in kwargs:
# Rename workflow.
stored.name = kwargs[ 'name' ]
if 'annotation' in kwargs:
# Set workflow annotation; sanitize annotation before adding it.
annotation = sanitize_html( kwargs[ 'annotation' ], 'utf-8', 'text/html' )
self.add_item_annotation( trans.sa_session, trans.get_user(), stored, annotation )
trans.sa_session.flush()
return trans.fill_template( 'workflow/edit_attributes.mako',
stored=stored,
annotation=self.get_item_annotation_str( trans.sa_session, trans.user, stored )
)
@web.expose
@web.require_login( "use Galaxy workflows" )
def rename( self, trans, id, new_name=None, **kwargs ):
stored = self.get_stored_workflow( trans, id )
if new_name is not None:
stored.name = new_name
trans.sa_session.flush()
# For current workflows grid:
trans.set_message ( "Workflow renamed to '%s'." % new_name )
return self.list( trans )
# For new workflows grid:
#message = "Workflow renamed to '%s'." % new_name
#return self.list_grid( trans, message=message, status='done' )
else:
return form( url_for( action='rename', id=trans.security.encode_id(stored.id) ),
"Rename workflow", submit_text="Rename", use_panels=True ) \
.add_text( "new_name", "Workflow Name", value=to_unicode( stored.name ) )
@web.expose
@web.require_login( "use Galaxy workflows" )
def rename_async( self, trans, id, new_name=None, **kwargs ):
stored = self.get_stored_workflow( trans, id )
if new_name:
stored.name = new_name
trans.sa_session.flush()
return stored.name
@web.expose
@web.require_login( "use Galaxy workflows" )
def annotate_async( self, trans, id, new_annotation=None, **kwargs ):
stored = self.get_stored_workflow( trans, id )
if new_annotation:
# Sanitize annotation before adding it.
new_annotation = sanitize_html( new_annotation, 'utf-8', 'text/html' )
self.add_item_annotation( trans.sa_session, trans.get_user(), stored, new_annotation )
trans.sa_session.flush()
return new_annotation
@web.expose
@web.require_login( "rate items" )
@web.json
def rate_async( self, trans, id, rating ):
""" Rate a workflow asynchronously and return updated community data. """
stored = self.get_stored_workflow( trans, id, check_ownership=False, check_accessible=True )
if not stored:
return trans.show_error_message( "The specified workflow does not exist." )
# Rate workflow.
stored_rating = self.rate_item( trans.sa_session, trans.get_user(), stored, rating )
return self.get_ave_item_rating_data( trans.sa_session, stored )
@web.expose
@web.require_login( "use Galaxy workflows" )
def set_accessible_async( self, trans, id=None, accessible=False ):
""" Set workflow's importable attribute and slug. """
stored = self.get_stored_workflow( trans, id )
# Only set if importable value would change; this prevents a change in the update_time unless attribute really changed.
        importable = accessible in ['True', 'true', 't', 'T']
if stored and stored.importable != importable:
if importable:
self._make_item_accessible( trans.sa_session, stored )
else:
stored.importable = importable
trans.sa_session.flush()
return
@web.expose
@web.require_login( "modify Galaxy items" )
def set_slug_async( self, trans, id, new_slug ):
stored = self.get_stored_workflow( trans, id )
if stored:
stored.slug = new_slug
trans.sa_session.flush()
return stored.slug
@web.expose
def get_embed_html_async( self, trans, id ):
""" Returns HTML for embedding a workflow in a page. """
# TODO: user should be able to embed any item he has access to. see display_by_username_and_slug for security code.
stored = self.get_stored_workflow( trans, id )
if stored:
return "Embedded Workflow '%s'" % stored.name
@web.expose
@web.json
@web.require_login( "use Galaxy workflows" )
def get_name_and_link_async( self, trans, id=None ):
""" Returns workflow's name and link. """
stored = self.get_stored_workflow( trans, id )
if self.create_item_slug( trans.sa_session, stored ):
trans.sa_session.flush()
return_dict = { "name" : stored.name, "link" : url_for( action="display_by_username_and_slug", username=stored.user.username, slug=stored.slug ) }
return return_dict
@web.expose
@web.require_login( "use Galaxy workflows" )
def gen_image( self, trans, id ):
stored = self.get_stored_workflow( trans, id, check_ownership=True )
session = trans.sa_session
workflow = stored.latest_workflow
data = []
canvas = svgfig.canvas(style="stroke:black; fill:none; stroke-width:1px; stroke-linejoin:round; text-anchor:left")
text = svgfig.SVG("g")
connectors = svgfig.SVG("g")
boxes = svgfig.SVG("g")
svgfig.Text.defaults["font-size"] = "10px"
in_pos = {}
out_pos = {}
margin = 5
line_px = 16 # how much spacing between input/outputs
widths = {} # store px width for boxes of each step
max_width, max_x, max_y = 0, 0, 0
for step in workflow.steps:
# Load from database representation
module = module_factory.from_workflow_step( trans, step )
# Pack attributes into plain dictionary
step_dict = {
'id': step.order_index,
'data_inputs': module.get_data_inputs(),
'data_outputs': module.get_data_outputs(),
'position': step.position
}
input_conn_dict = {}
for conn in step.input_connections:
input_conn_dict[ conn.input_name ] = \
dict( id=conn.output_step.order_index, output_name=conn.output_name )
step_dict['input_connections'] = input_conn_dict
data.append(step_dict)
x, y = step.position['left'], step.position['top']
count = 0
max_len = len(module.get_name()) * 1.5
text.append( svgfig.Text(x, y + 20, module.get_name(), **{"font-size": "14px"} ).SVG() )
y += 45
for di in module.get_data_inputs():
cur_y = y+count*line_px
if step.order_index not in in_pos:
in_pos[step.order_index] = {}
in_pos[step.order_index][di['name']] = (x, cur_y)
text.append( svgfig.Text(x, cur_y, di['label']).SVG() )
count += 1
max_len = max(max_len, len(di['label']))
if len(module.get_data_inputs()) > 0:
y += 15
for do in module.get_data_outputs():
cur_y = y+count*line_px
if step.order_index not in out_pos:
out_pos[step.order_index] = {}
out_pos[step.order_index][do['name']] = (x, cur_y)
text.append( svgfig.Text(x, cur_y, do['name']).SVG() )
count += 1
max_len = max(max_len, len(do['name']))
widths[step.order_index] = max_len*5.5
max_x = max(max_x, step.position['left'])
max_y = max(max_y, step.position['top'])
max_width = max(max_width, widths[step.order_index])
for step_dict in data:
width = widths[step_dict['id']]
x, y = step_dict['position']['left'], step_dict['position']['top']
boxes.append( svgfig.Rect(x-margin, y, x+width-margin, y+30, fill="#EBD9B2").SVG() )
box_height = (len(step_dict['data_inputs']) + len(step_dict['data_outputs'])) * line_px + margin
# Draw separator line
if len(step_dict['data_inputs']) > 0:
box_height += 15
sep_y = y + len(step_dict['data_inputs']) * line_px + 40
                text.append( svgfig.Line(x-margin, sep_y, x+width-margin, sep_y).SVG() )
# input/output box
boxes.append( svgfig.Rect(x-margin, y+30, x+width-margin, y+30+box_height, fill="#ffffff").SVG() )
for conn, output_dict in step_dict['input_connections'].iteritems():
in_coords = in_pos[step_dict['id']][conn]
out_conn_pos = out_pos[output_dict['id']][output_dict['output_name']]
adjusted = (out_conn_pos[0] + widths[output_dict['id']], out_conn_pos[1])
text.append( svgfig.SVG("circle", cx=out_conn_pos[0]+widths[output_dict['id']]-margin, cy=out_conn_pos[1]-margin, r=5, fill="#ffffff" ) )
connectors.append( svgfig.Line(adjusted[0], adjusted[1]-margin, in_coords[0]-10, in_coords[1], arrow_end="true" ).SVG() )
canvas.append(connectors)
canvas.append(boxes)
canvas.append(text)
width, height = (max_x + max_width + 50), max_y + 300
canvas['width'] = "%s px" % width
canvas['height'] = "%s px" % height
canvas['viewBox'] = "0 0 %s %s" % (width, height)
trans.response.set_content_type("image/svg+xml")
return canvas.standalone_xml()
@web.expose
@web.require_login( "use Galaxy workflows" )
def clone( self, trans, id ):
stored = self.get_stored_workflow( trans, id, check_ownership=False )
user = trans.get_user()
if stored.user == user:
owner = True
else:
if trans.sa_session.query( model.StoredWorkflowUserShareAssociation ) \
.filter_by( user=user, stored_workflow=stored ).count() == 0:
error( "Workflow is not owned by or shared with current user" )
owner = False
new_stored = model.StoredWorkflow()
new_stored.name = "Clone of '%s'" % stored.name
new_stored.latest_workflow = stored.latest_workflow
if not owner:
new_stored.name += " shared by '%s'" % stored.user.email
new_stored.user = user
# Persist
session = trans.sa_session
session.add( new_stored )
session.flush()
# Display the management page
trans.set_message( 'Clone created with name "%s"' % new_stored.name )
return self.list( trans )
@web.expose
@web.require_login( "create workflows" )
def create( self, trans, workflow_name=None, workflow_annotation="" ):
"""
Create a new stored workflow with name `workflow_name`.
"""
user = trans.get_user()
if workflow_name is not None:
# Create the new stored workflow
stored_workflow = model.StoredWorkflow()
stored_workflow.name = workflow_name
stored_workflow.user = user
# And the first (empty) workflow revision
workflow = model.Workflow()
workflow.name = workflow_name
workflow.stored_workflow = stored_workflow
stored_workflow.latest_workflow = workflow
# Add annotation.
workflow_annotation = sanitize_html( workflow_annotation, 'utf-8', 'text/html' )
self.add_item_annotation( trans.sa_session, trans.get_user(), stored_workflow, workflow_annotation )
# Persist
session = trans.sa_session
session.add( stored_workflow )
session.flush()
# Display the management page
trans.set_message( "Workflow '%s' created" % stored_workflow.name )
return self.list( trans )
else:
return form( url_for(), "Create New Workflow", submit_text="Create", use_panels=True ) \
.add_text( "workflow_name", "Workflow Name", value="Unnamed workflow" ) \
.add_text( "workflow_annotation", "Workflow Annotation", value="", help="A description of the workflow; annotation is shown alongside shared or published workflows." )
@web.expose
def delete( self, trans, id=None ):
"""
Mark a workflow as deleted
"""
# Load workflow from database
stored = self.get_stored_workflow( trans, id )
        # Mark as deleted and save
stored.deleted = True
trans.sa_session.add( stored )
trans.sa_session.flush()
# Display the management page
trans.set_message( "Workflow '%s' deleted" % stored.name )
return self.list( trans )
@web.expose
@web.require_login( "edit workflows" )
def editor( self, trans, id=None ):
"""
Render the main workflow editor interface. The canvas is embedded as
an iframe (necessary for scrolling to work properly), which is
rendered by `editor_canvas`.
"""
if not id:
error( "Invalid workflow id" )
stored = self.get_stored_workflow( trans, id )
return trans.fill_template( "workflow/editor.mako", stored=stored, annotation=self.get_item_annotation_str( trans.sa_session, trans.user, stored ) )
@web.json
def editor_form_post( self, trans, type='tool', tool_id=None, annotation=None, **incoming ):
"""
Accepts a tool state and incoming values, and generates a new tool
form and some additional information, packed into a json dictionary.
This is used for the form shown in the right pane when a node
is selected.
"""
trans.workflow_building_mode = True
module = module_factory.from_dict( trans, {
'type': type,
'tool_id': tool_id,
'tool_state': incoming.pop("tool_state")
} )
module.update_state( incoming )
if type=='tool':
return {
'tool_state': module.get_state(),
'data_inputs': module.get_data_inputs(),
'data_outputs': module.get_data_outputs(),
'tool_errors': module.get_errors(),
'form_html': module.get_config_form(),
'annotation': annotation,
'post_job_actions': module.get_post_job_actions()
}
else:
return {
'tool_state': module.get_state(),
'data_inputs': module.get_data_inputs(),
'data_outputs': module.get_data_outputs(),
'tool_errors': module.get_errors(),
'form_html': module.get_config_form(),
'annotation': annotation
}
@web.json
def get_new_module_info( self, trans, type, **kwargs ):
"""
Get the info for a new instance of a module initialized with default
parameters (any keyword arguments will be passed along to the module).
Result includes data inputs and outputs, html representation
of the initial form, and the initial tool state (with default values).
This is called asynchronously whenever a new node is added.
"""
trans.workflow_building_mode = True
module = module_factory.new( trans, type, **kwargs )
return {
'type': module.type,
'name': module.get_name(),
'tool_id': module.get_tool_id(),
'tool_state': module.get_state(),
'tooltip': module.get_tooltip(),
'data_inputs': module.get_data_inputs(),
'data_outputs': module.get_data_outputs(),
'form_html': module.get_config_form(),
'annotation': ""
}
@web.json
def load_workflow( self, trans, id ):
"""
Get the latest Workflow for the StoredWorkflow identified by `id` and
encode it as a json string that can be read by the workflow editor
web interface.
"""
user = trans.get_user()
id = trans.security.decode_id( id )
trans.workflow_building_mode = True
# Load encoded workflow from database
stored = trans.sa_session.query( model.StoredWorkflow ).get( id )
assert stored.user == user
workflow = stored.latest_workflow
# Pack workflow data into a dictionary and return
data = {}
data['name'] = workflow.name
data['steps'] = {}
data['upgrade_messages'] = {}
# For each step, rebuild the form and encode the state
for step in workflow.steps:
# Load from database representation
module = module_factory.from_workflow_step( trans, step )
if not module:
step_annotation = self.get_item_annotation_obj( trans.sa_session, trans.user, step )
annotation_str = ""
if step_annotation:
annotation_str = step_annotation.annotation
invalid_tool_form_html = """<div class="toolForm tool-node-error"><div class="toolFormTitle form-row-error">Unrecognized Tool: %s</div><div class="toolFormBody"><div class="form-row">
The tool id '%s' for this tool is unrecognized.<br/><br/>To save this workflow, you will need to delete this step or enable the tool.
</div></div></div>""" % (step.tool_id, step.tool_id)
step_dict = {
'id': step.order_index,
'type': 'invalid',
'tool_id': step.tool_id,
'name': 'Unrecognized Tool: %s' % step.tool_id,
'tool_state': None,
'tooltip': None,
'tool_errors': ["Unrecognized Tool Id: %s" % step.tool_id],
'data_inputs': [],
'data_outputs': [],
'form_html': invalid_tool_form_html,
'annotation' : annotation_str,
'post_job_actions' : {},
'workflow_outputs' : []
}
            # Encode input connections as dictionary
            input_conn_dict = {}
            for conn in step.input_connections:
                input_conn_dict[ conn.input_name ] = \
                    dict( id=conn.output_step.order_index, output_name=conn.output_name )
            step_dict['input_connections'] = input_conn_dict
# Position
step_dict['position'] = step.position
# Add to return value
data['steps'][step.order_index] = step_dict
continue
# Fix any missing parameters
upgrade_message = module.check_and_update_state()
if upgrade_message:
# FIXME: Frontend should be able to handle workflow messages
# as a dictionary not just the values
data['upgrade_messages'][step.order_index] = upgrade_message.values()
# Get user annotation.
step_annotation = self.get_item_annotation_obj( trans.sa_session, trans.user, step )
annotation_str = ""
if step_annotation:
annotation_str = step_annotation.annotation
# Pack attributes into plain dictionary
step_dict = {
'id': step.order_index,
'type': module.type,
'tool_id': module.get_tool_id(),
'name': module.get_name(),
'tool_state': module.get_state(),
'tooltip': module.get_tooltip(),
'tool_errors': module.get_errors(),
'data_inputs': module.get_data_inputs(),
'data_outputs': module.get_data_outputs(),
'form_html': module.get_config_form(),
'annotation' : annotation_str,
'post_job_actions' : {},
'workflow_outputs' : []
}
# Connections
input_connections = step.input_connections
if step.type is None or step.type == 'tool':
# Determine full (prefixed) names of valid input datasets
data_input_names = {}
def callback( input, value, prefixed_name, prefixed_label ):
if isinstance( input, DataToolParameter ):
data_input_names[ prefixed_name ] = True
visit_input_values( module.tool.inputs, module.state.inputs, callback )
# Filter
# FIXME: this removes connection without displaying a message currently!
input_connections = [ conn for conn in input_connections if conn.input_name in data_input_names ]
# post_job_actions
pja_dict = {}
for pja in step.post_job_actions:
pja_dict[pja.action_type+pja.output_name] = dict(action_type = pja.action_type,
output_name = pja.output_name,
action_arguments = pja.action_arguments)
step_dict['post_job_actions'] = pja_dict
#workflow outputs
outputs = []
for output in step.workflow_outputs:
outputs.append(output.output_name)
step_dict['workflow_outputs'] = outputs
# Encode input connections as dictionary
input_conn_dict = {}
for conn in input_connections:
input_conn_dict[ conn.input_name ] = \
dict( id=conn.output_step.order_index, output_name=conn.output_name )
step_dict['input_connections'] = input_conn_dict
# Position
step_dict['position'] = step.position
# Add to return value
data['steps'][step.order_index] = step_dict
return data
@web.json
def save_workflow( self, trans, id, workflow_data ):
"""
Save the workflow described by `workflow_data` with id `id`.
"""
# Get the stored workflow
stored = self.get_stored_workflow( trans, id )
# Put parameters in workflow mode
trans.workflow_building_mode = True
# Convert incoming workflow data from json
data = simplejson.loads( workflow_data )
# Create new workflow from incoming data
workflow = model.Workflow()
# Just keep the last name (user can rename later)
workflow.name = stored.name
# Assume no errors until we find a step that has some
workflow.has_errors = False
# Create each step
steps = []
# The editor will provide ids for each step that we don't need to save,
# but do need to use to make connections
steps_by_external_id = {}
errors = []
for key, step_dict in data['steps'].iteritems():
if step_dict['type'] != 'data_input' and step_dict['tool_id'] not in trans.app.toolbox.tools_by_id:
errors.append("Step %s requires tool '%s'." % (step_dict['id'], step_dict['tool_id']))
if errors:
return dict( name=workflow.name,
message="This workflow includes missing or invalid tools. It cannot be saved until the following steps are removed or the missing tools are enabled.",
errors=errors)
# First pass to build step objects and populate basic values
for key, step_dict in data['steps'].iteritems():
# Create the model class for the step
step = model.WorkflowStep()
steps.append( step )
steps_by_external_id[ step_dict['id' ] ] = step
# FIXME: Position should be handled inside module
step.position = step_dict['position']
module = module_factory.from_dict( trans, step_dict )
module.save_to_step( step )
if step_dict.has_key('workflow_outputs'):
for output_name in step_dict['workflow_outputs']:
m = model.WorkflowOutput(workflow_step = step, output_name = output_name)
trans.sa_session.add(m)
if step.tool_errors:
# DBTODO Check for conditional inputs here.
workflow.has_errors = True
# Stick this in the step temporarily
step.temp_input_connections = step_dict['input_connections']
# Save step annotation.
annotation = step_dict[ 'annotation' ]
if annotation:
annotation = sanitize_html( annotation, 'utf-8', 'text/html' )
self.add_item_annotation( trans.sa_session, trans.get_user(), step, annotation )
# Second pass to deal with connections between steps
for step in steps:
# Input connections
for input_name, conn_dict in step.temp_input_connections.iteritems():
if conn_dict:
conn = model.WorkflowStepConnection()
conn.input_step = step
conn.input_name = input_name
conn.output_name = conn_dict['output_name']
conn.output_step = steps_by_external_id[ conn_dict['id'] ]
del step.temp_input_connections
# Order the steps if possible
attach_ordered_steps( workflow, steps )
# Connect up
workflow.stored_workflow = stored
stored.latest_workflow = workflow
# Persist
trans.sa_session.flush()
# Return something informative
errors = []
if workflow.has_errors:
errors.append( "Some steps in this workflow have validation errors" )
if workflow.has_cycles:
errors.append( "This workflow contains cycles" )
if errors:
rval = dict( message="Workflow saved, but will not be runnable due to the following errors",
errors=errors )
else:
rval = dict( message="Workflow saved" )
rval['name'] = workflow.name
return rval
@web.expose
@web.require_login( "use workflows" )
def export( self, trans, id=None, **kwd ):
"""
Handles download/export workflow command.
"""
stored = self.get_stored_workflow( trans, id, check_ownership=False, check_accessible=True )
return trans.fill_template( "/workflow/export.mako", item=stored, use_panels=True )
@web.expose
@web.require_login( "use workflows" )
def download_to_wspgrade_file( self, trans, id=None ):
"""
Handles download as WS-PGRADE workflow
"""
# Load encoded workflow from database
user = trans.get_user()
id = trans.security.decode_id( id )
trans.workflow_building_mode = True
stored = trans.sa_session.query( model.StoredWorkflow ).get( id )
self.security_check( trans.get_user(), stored, False, True )
# Convert workflow to dict.
workflow_dict = self._workflow_to_dict( trans, stored )
valid_chars = '.0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
sname = stored.name
sname = ''.join(c in valid_chars and c or '_' for c in sname)[0:150]
        # Helper methods for the topological sort
        def add_node(graph, node):
            # Add a node to the graph if it does not already exist.
if not graph.has_key(node):
graph[node] = [0] # 0 = number of arcs coming into this node.
return graph
def add_arc(graph, fromnode, tonode):
""" Add an arc to a graph. Can create multiple arcs.
The end nodes must already exist.
"""
graph[fromnode].append(tonode)
# Update the count of incoming arcs in tonode.
graph[tonode][0] = graph[tonode][0] + 1
return graph
def topological_sort(items, partial_order):
""" Perform topological sort.
"""
            # Step 1 - create a directed graph with an arc a->b for each input
            # pair (a, b).
graph = {}
for v in items:
graph = add_node(graph, v)
for a,b in partial_order:
graph = add_arc(graph, a, b)
# Step 2 - find all roots (nodes with zero incoming arcs).
roots = [node for (node,nodeinfo) in graph.items() if nodeinfo[0] == 0]
            # Step 3 - repeatedly emit a root and remove it from the graph.
            # Removing a node may convert some of the node's direct children
            # into roots.
            sorted_nodes = []
            while len(roots) != 0:
                root = roots.pop()
                sorted_nodes.append(root)
                for child in graph[root][1:]:
                    graph[child][0] = graph[child][0] - 1
                    if graph[child][0] == 0:
                        roots.append(child)
                del graph[root]
            if len(graph.items()) != 0:
                # There is a loop in the input.
                return None
            return sorted_nodes
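        # Illustrative sketch (comments only, not executed): for items
        # ['a', 'b', 'c'] and partial_order [('a', 'b'), ('b', 'c')] the graph
        # becomes {'a': [0, 'b'], 'b': [1, 'c'], 'c': [1]}, the only root is
        # 'a', and topological_sort returns ['a', 'b', 'c']; any cycle in the
        # input makes it return None instead.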
# Create nodes_order, input_nodes and partial_order
nodes_order = []
input_nodes = []
partial_order = []
for step_num, step in workflow_dict['steps'].items():
if step['type'] == "tool" or step['type'] is None:
nodes_order.append(step['id'])
if step['type'] == "data_input":
input_nodes.append(step['id'])
for step_num, step in workflow_dict['steps'].items():
for input_name, input_connection in step['input_connections'].items():
if input_connection['id'] not in input_nodes:
partial_order.append( (input_connection['id'], step['id']) )
#execute topological sort
nodes_order = topological_sort(nodes_order, partial_order)
graph = {}
for v in nodes_order:
graph = add_node(graph, v)
for a,b in partial_order:
graph = add_arc(graph, a, b)
#sort parents directly before children
i = 0
while i < len(nodes_order):
if graph[nodes_order[i]][0] == 0 and len(graph[nodes_order[i]]) > 1:
col = nodes_order.index(graph[nodes_order[i]][1])
for j in range(2, len(graph[nodes_order[i]])):
if col > nodes_order.index(graph[nodes_order[i]][j]):
col = nodes_order.index(graph[nodes_order[i]][j])
if col-i > 1:
nodes_order.insert(col,nodes_order[i])
nodes_order.pop(i)
i = -1
i += 1
#create graph with nodes of parents
graph_parents = {}
for v in nodes_order:
graph_parents = add_node(graph_parents, v)
for a,b in partial_order:
graph_parents = add_arc(graph_parents, b, a)
#calculate coordinates for WSPGRADE workflow
x_max = -100
dist = 120
y_max = 20
x = []
y = []
id = []
for i in range(len(nodes_order)):
parents_list = graph_parents[nodes_order[i]]
parents_list.pop(0)
if len(parents_list) == 0:
x_max += dist
id.append(nodes_order[i])
x.append(x_max)
y.append(y_max)
else:
x_new = x[id.index(parents_list[0])]
y_new = y[id.index(parents_list[0])]
if len(parents_list) == 1:
y_new += dist
for j in range(1, len(parents_list)):
if x_new == x[id.index(parents_list[j])]:
x_new += dist
elif x_new < x[id.index(parents_list[j])]:
x_new = x[id.index(parents_list[j])]
if y_new == y[id.index(parents_list[j])]:
y_new += dist
elif y_new < y[id.index(parents_list[j])]:
y_new = y[id.index(parents_list[j])]
for j in range(len(id)):
if x[j] == x_new and y[j] == y_new:
x_new += dist
if x_max < x_new:
x_max = x_new
id.append(nodes_order[i])
x.append(x_new)
y.append(y_new)
#insert coordinates and port ids
for step_num, step in workflow_dict['steps'].items():
if step['type'] == "tool" or step['type'] is None:
step['position']['left'] = x[nodes_order.index(step['id'])]
step['position']['top'] = y[nodes_order.index(step['id'])]
step['param'] = ''
version = step['tool_version'].rstrip(' abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ()')
step['tool_version'] = version
for input in step['inputs']:
step['param'] += '-' + input['name'] + ' '
i = 0
for input_name, input_connection in step['input_connections'].items():
input_connection['idinput'] = i
i += 1
for output in step['outputs']:
output['visited'] = 0
output['id'] = i
i += 1
#insert prejob and preoutput
for step_num, step in workflow_dict['steps'].items():
if step['type'] == "tool" or step['type'] is None:
for input_name, input_connection in step['input_connections'].items():
if input_connection['id'] in input_nodes:
input_connection['prejob'] = ""
input_connection['preoutput'] = ""
else:
parent_step = workflow_dict['steps'][input_connection['id']]
input_connection['prejob'] = parent_step['name'] + parent_step['tool_version']
for output in parent_step['outputs']:
if output['name'] == input_connection['output_name']:
input_connection['preoutput'] = output['id']
# calculate port coordinates
            def ccw(portA, portB, portC):
                # True if the three points are in counter-clockwise order.
                return (portC[1]-portA[1])*(portB[0]-portA[0]) > (portB[1]-portA[1])*(portC[0]-portA[0])
            def intersect(portA, portB, portC, portD):
                # Segments A-B and C-D cross iff A and B lie on opposite sides
                # of C-D and C and D lie on opposite sides of A-B.
                return ccw(portA,portC,portD) != ccw(portB,portC,portD) and ccw(portA,portB,portC) != ccw(portA,portB,portD)
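            # Worked check of the test above: segments (0,0)-(2,2) and
            # (0,2)-(2,0) cross, so intersect((0,0), (2,2), (0,2), (2,0)) is
            # True; the parallel pair (0,0)-(1,0) and (0,1)-(1,1) gives False.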
            # Candidate port offsets around a node's 60x60 bounding box:
            # left edge, top edge, right edge, then bottom edge.
            x_port = [-15, -15, -15, -15, 0, 15, 30, 45, 60, 60, 60, 60, 0, 15, 30, 45]
            y_port = [45, 30, 15, 0, -15, -15, -15, -15, 0, 15, 30, 45, 60, 60, 60, 60]
coord = {}
for j in range(len(nodes_order)):
coord[j] = []
j = len(nodes_order) - 1
while j >= 0:
step = workflow_dict['steps'][nodes_order[j]]
for output in step['outputs']:
if output['visited'] == 0:
port = (step['position']['left']+45, step['position']['top']+60)
i = 14
while port in coord[j] and i >= 0:
port = (step['position']['left'] + x_port[i], step['position']['top'] + y_port[i])
i -= 1
output['x'] = port[0]
output['y'] = port[1]
output['visited'] = 1
coord[j].append(port)
ports_order_index = []
for input_name, input_connection in step['input_connections'].items():
if input_connection['id'] not in input_nodes:
parent_step = workflow_dict['steps'][input_connection['id']]
ports_order_index.append( (int(input_connection['id']), parent_step['position']['left'], parent_step['position']['top']) )
ports_order = []
if len(ports_order_index) > 0:
ports_order_y = sorted(ports_order_index, key=itemgetter(2), reverse=True)
ports_order = sorted(ports_order_y, key=itemgetter(1))
for i in range(len(ports_order)):
if ports_order[i][2] == step['position']['top']:
ports_order.insert(0, ports_order[i])
ports_order.pop(i+1)
for input_name, input_connection in step['input_connections'].items():
if input_connection['id'] in input_nodes:
port = (step['position']['left']-15, step['position']['top']+45)
i = 1
while port in coord[j] and i < len(x_port):
port = (step['position']['left'] + x_port[i], step['position']['top'] + y_port[i])
i += 1
input_connection['x'] = port[0]
input_connection['y'] = port[1]
coord[j].append(port)
while len(ports_order) > 0:
for input_name, input_connection in step['input_connections'].items():
if len(ports_order) > 0 and input_connection['id'] not in input_nodes:
parent_step = workflow_dict['steps'][input_connection['id']]
if ports_order[0][0] == parent_step['id']:
if step['position']['left'] == parent_step['position']['left']:
port = (step['position']['left']+45, step['position']['top']-15)
i = 5
outport = (parent_step['position']['left']+45, parent_step['position']['top']+60)
k = 14
else:
port = (step['position']['left']-15, step['position']['top']+45)
i = 1
outport = (parent_step['position']['left']+60, parent_step['position']['top']+45)
k = 10
for output in parent_step['outputs']:
if output['name'] == input_connection['output_name'] and output['visited'] == 0:
while outport in coord[parent_step['id']] and k >= 0:
outport = (parent_step['position']['left'] + x_port[k], parent_step['position']['top'] + y_port[k])
k -= 1
output['x'] = outport[0]
output['y'] = outport[1]
output['visited'] = 1
coord[parent_step['id']].append(outport)
while port in coord[j] and i < len(x_port):
port = (step['position']['left'] + x_port[i], step['position']['top'] + y_port[i])
i += 1
input_connection['x'] = port[0]
input_connection['y'] = port[1]
coord[j].append(port)
ports_order.pop(0)
j -= 1
# remove intersections on input_connections ports
for step_num, step in workflow_dict['steps'].items():
input_ports = []
output_ports = []
if len(step['input_connections']) > 1:
for input_name, input_connection in step['input_connections'].items():
parent_step = workflow_dict['steps'][input_connection['id']]
for output in parent_step['outputs']:
if output['name'] == input_connection['output_name']:
input_ports.append( (input_connection['x'], input_connection['y']) )
output_ports.append( (output['x'], output['y']) )
for i in range(len(input_ports)-1):
for j in range(i+1, len(input_ports)):
if intersect(input_ports[i], output_ports[i], input_ports[j], output_ports[j]):
buffer_port = input_ports[i]
input_ports[i] = input_ports[j]
input_ports[j] = buffer_port
i = 0
for input_name, input_connection in step['input_connections'].items():
parent_step = workflow_dict['steps'][input_connection['id']]
for output in parent_step['outputs']:
if output['name'] == input_connection['output_name']:
input_connection['x'] = input_ports[i][0]
input_connection['y'] = input_ports[i][1]
i += 1
# Create workflow content XML.
workflow_content = trans.fill_template( "workflow/wspgrade_download_content.mako", \
workflow_name=sname, \
workflow_description=workflow_dict['annotation'], \
workflow_steps=workflow_dict['steps'] )
workflow_xml = trans.fill_template( "workflow/wspgrade_download.mako", \
workflow_name=sname, \
workflow_description=workflow_dict['annotation'], \
workflow_content=workflow_content )
workflow_wspgrade = workflow_xml.strip()
# create zip file
tmpd = tempfile.mkdtemp()
tmpf = os.path.join( tmpd, 'workflow.zip' )
file = zipfile.ZipFile(tmpf, 'w', zipfile.ZIP_DEFLATED)
info = zipfile.ZipInfo('workflow.xml')
info.compress_type = zipfile.ZIP_DEFLATED
info.external_attr = 0644 << 16L
file.writestr(info, workflow_wspgrade)
info = zipfile.ZipInfo(sname+'/')
info.compress_type = zipfile.ZIP_DEFLATED
info.external_attr = 040755 << 16L
file.writestr(info, '')
file.close()
        tmpfh = open( tmpf, 'rb' )
trans.response.set_content_type( "application/x-zip-compressed" )
trans.response.headers[ "Content-Disposition" ] = "attachment; filename=gUSE%s.zip" % sname
return tmpfh
@web.expose
@web.require_login( "use workflows" )
def import_from_myexp( self, trans, myexp_id, myexp_username=None, myexp_password=None ):
"""
Imports a workflow from the myExperiment website.
"""
#
# Get workflow XML.
#
# Get workflow content.
conn = httplib.HTTPConnection( self.__myexp_url )
# NOTE: blocks web thread.
headers = {}
if myexp_username and myexp_password:
            # b64encode adds no trailing newline (unlike encodestring), so the
            # last character must not be stripped.
            auth_header = base64.b64encode( '%s:%s' % ( myexp_username, myexp_password ) )
headers = { "Authorization" : "Basic %s" % auth_header }
conn.request( "GET", "/workflow.xml?id=%s&elements=content" % myexp_id, headers=headers )
response = conn.getresponse()
workflow_xml = response.read()
conn.close()
parser = SingleTagContentsParser( "content" )
parser.feed( workflow_xml )
workflow_content = base64.b64decode( parser.tag_content )
#
# Process workflow XML and create workflow.
#
parser = SingleTagContentsParser( "galaxy_json" )
parser.feed( workflow_content )
workflow_dict = from_json_string( parser.tag_content )
# Create workflow.
workflow = self._workflow_from_dict( trans, workflow_dict, source="myExperiment" ).latest_workflow
# Provide user feedback.
if workflow.has_errors:
return trans.show_warn_message( "Imported, but some steps in this workflow have validation errors" )
if workflow.has_cycles:
return trans.show_warn_message( "Imported, but this workflow contains cycles" )
else:
return trans.show_message( "Workflow '%s' imported" % workflow.name )
@web.expose
@web.require_login( "use workflows" )
def export_to_myexp( self, trans, id, myexp_username, myexp_password ):
"""
Exports a workflow to myExperiment website.
"""
# Load encoded workflow from database
user = trans.get_user()
id = trans.security.decode_id( id )
trans.workflow_building_mode = True
stored = trans.sa_session.query( model.StoredWorkflow ).get( id )
self.security_check( trans.get_user(), stored, False, True )
# Convert workflow to dict.
workflow_dict = self._workflow_to_dict( trans, stored )
#
# Create and submit workflow myExperiment request.
#
# Create workflow content XML.
workflow_dict_packed = simplejson.dumps( workflow_dict, indent=4, sort_keys=True )
workflow_content = trans.fill_template( "workflow/myexp_export_content.mako", \
workflow_dict_packed=workflow_dict_packed, \
workflow_steps=workflow_dict['steps'] )
# Create myExperiment request.
request_raw = trans.fill_template( "workflow/myexp_export.mako", \
workflow_name=workflow_dict['name'], \
workflow_description=workflow_dict['annotation'], \
workflow_content=workflow_content
)
# strip() b/c myExperiment XML parser doesn't allow white space before XML; utf-8 handles unicode characters.
request = unicode( request_raw.strip(), 'utf-8' )
# Do request and get result.
        # b64encode adds no trailing newline (unlike encodestring), so the
        # last character must not be stripped.
        auth_header = base64.b64encode( '%s:%s' % ( myexp_username, myexp_password ) )
headers = { "Content-type": "text/xml", "Accept": "text/plain", "Authorization" : "Basic %s" % auth_header }
conn = httplib.HTTPConnection( self.__myexp_url )
# NOTE: blocks web thread.
conn.request("POST", "/workflow.xml", request, headers)
response = conn.getresponse()
response_data = response.read()
conn.close()
# Do simple parse of response to see if export successful and provide user feedback.
parser = SingleTagContentsParser( 'id' )
parser.feed( response_data )
myexp_workflow_id = parser.tag_content
        workflow_list_str = " <br>Return to <a href='%s'>workflow list</a>." % url_for( action='list' )
if myexp_workflow_id:
return trans.show_message( \
"Workflow '%s' successfully exported to myExperiment. %s" % \
( stored.name, workflow_list_str ),
use_panels=True )
else:
return trans.show_error_message( \
"Workflow '%s' could not be exported to myExperiment. Error: %s. %s" % \
( stored.name, response_data, workflow_list_str ), use_panels=True )
@web.json_pretty
def for_direct_import( self, trans, id ):
"""
Get the latest Workflow for the StoredWorkflow identified by `id` and
        encode it as a json string that can be imported back into Galaxy.
        This has slightly different information than the above. In particular,
it does not attempt to decode forms and build UIs, it just stores
the raw state.
"""
stored = self.get_stored_workflow( trans, id, check_ownership=False, check_accessible=True )
return self._workflow_to_dict( trans, stored )
@web.json_pretty
def export_to_file( self, trans, id ):
"""
Get the latest Workflow for the StoredWorkflow identified by `id` and
        encode it as a json string that can be imported back into Galaxy.
        This has slightly different information than the above. In particular,
it does not attempt to decode forms and build UIs, it just stores
the raw state.
"""
# Get workflow.
stored = self.get_stored_workflow( trans, id, check_ownership=False, check_accessible=True )
# Stream workflow to file.
stored_dict = self._workflow_to_dict( trans, stored )
valid_chars = '.,^_-()[]0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
sname = stored.name
sname = ''.join(c in valid_chars and c or '_' for c in sname)[0:150]
trans.response.headers["Content-Disposition"] = "attachment; filename=Galaxy-Workflow-%s.ga" % ( sname )
trans.response.set_content_type( 'application/galaxy-archive' )
return stored_dict
@web.expose
def import_workflow( self, trans, workflow_text=None, url=None ):
if workflow_text is None and url is None:
return form( url_for(), "Import Workflow", submit_text="Import", use_panels=True ) \
.add_text( "url", "Workflow URL", "" ) \
.add_input( "textarea", "Encoded workflow (as generated by export workflow)", "workflow_text", "" )
if url:
# Load workflow from external URL
# NOTE: blocks the web thread.
try:
workflow_data = urllib2.urlopen( url ).read()
except Exception, e:
return trans.show_error_message( "Failed to open URL %s<br><br>Message: %s" % ( url, str( e ) ) )
else:
workflow_data = workflow_text
# Convert incoming workflow data from json
try:
data = simplejson.loads( workflow_data )
except Exception, e:
return trans.show_error_message( "Data at '%s' does not appear to be a Galaxy workflow<br><br>Message: %s" % ( url, str( e ) ) )
# Create workflow.
workflow = self._workflow_from_dict( trans, data, source="uploaded file" ).latest_workflow
# Provide user feedback and show workflow list.
if workflow.has_errors:
trans.set_message( "Imported, but some steps in this workflow have validation errors",
type="warning" )
if workflow.has_cycles:
trans.set_message( "Imported, but this workflow contains cycles",
type="warning" )
else:
trans.set_message( "Workflow '%s' imported" % workflow.name )
return self.list( trans )
@web.json
def get_datatypes( self, trans ):
ext_to_class_name = dict()
classes = []
for k, v in trans.app.datatypes_registry.datatypes_by_extension.iteritems():
c = v.__class__
ext_to_class_name[k] = c.__module__ + "." + c.__name__
classes.append( c )
class_to_classes = dict()
def visit_bases( types, cls ):
for base in cls.__bases__:
if issubclass( base, Data ):
types.add( base.__module__ + "." + base.__name__ )
visit_bases( types, base )
for c in classes:
n = c.__module__ + "." + c.__name__
types = set( [ n ] )
visit_bases( types, c )
class_to_classes[ n ] = dict( ( t, True ) for t in types )
return dict( ext_to_class_name=ext_to_class_name, class_to_classes=class_to_classes )
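    # Sketch of the payload built above (the 'fasta' entry is hypothetical):
    # ext_to_class_name might map 'fasta' -> 'galaxy.datatypes.sequence.Fasta',
    # while class_to_classes maps each class name to a dict of itself plus all
    # of its Data-derived bases, so subclass checks become dictionary lookups.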
@web.expose
def build_from_current_history( self, trans, job_ids=None, dataset_ids=None, workflow_name=None ):
user = trans.get_user()
history = trans.get_history()
if not user:
return trans.show_error_message( "Must be logged in to create workflows" )
if ( job_ids is None and dataset_ids is None ) or workflow_name is None:
jobs, warnings = get_job_dict( trans )
# Render
return trans.fill_template(
"workflow/build_from_current_history.mako",
jobs=jobs,
warnings=warnings,
history=history )
else:
# Ensure job_ids and dataset_ids are lists (possibly empty)
if job_ids is None:
job_ids = []
elif type( job_ids ) is not list:
job_ids = [ job_ids ]
if dataset_ids is None:
dataset_ids = []
elif type( dataset_ids ) is not list:
dataset_ids = [ dataset_ids ]
# Convert both sets of ids to integers
job_ids = [ int( id ) for id in job_ids ]
dataset_ids = [ int( id ) for id in dataset_ids ]
            # Find each job; for security we (implicitly) check that they are
            # associated with a job in the current history.
jobs, warnings = get_job_dict( trans )
jobs_by_id = dict( ( job.id, job ) for job in jobs.keys() )
steps = []
steps_by_job_id = {}
hid_to_output_pair = {}
# Input dataset steps
for hid in dataset_ids:
step = model.WorkflowStep()
step.type = 'data_input'
hid_to_output_pair[ hid ] = ( step, 'output' )
steps.append( step )
# Tool steps
for job_id in job_ids:
assert job_id in jobs_by_id, "Attempt to create workflow with job not connected to current history"
job = jobs_by_id[ job_id ]
tool = trans.app.toolbox.tools_by_id[ job.tool_id ]
param_values = job.get_param_values( trans.app )
associations = cleanup_param_values( tool.inputs, param_values )
# Doing it this way breaks dynamic parameters, backed out temporarily.
# def extract_callback( input, value, prefixed_name, prefixed_label ):
# if isinstance( value, UnvalidatedValue ):
# return str( value )
# visit_input_values( tool.inputs, param_values, extract_callback )
step = model.WorkflowStep()
step.type = 'tool'
step.tool_id = job.tool_id
step.tool_inputs = tool.params_to_strings( param_values, trans.app )
# NOTE: We shouldn't need to do two passes here since only
# an earlier job can be used as an input to a later
# job.
for other_hid, input_name in associations:
if other_hid in hid_to_output_pair:
other_step, other_name = hid_to_output_pair[ other_hid ]
conn = model.WorkflowStepConnection()
conn.input_step = step
conn.input_name = input_name
# Should always be connected to an earlier step
conn.output_step = other_step
conn.output_name = other_name
steps.append( step )
steps_by_job_id[ job_id ] = step
# Store created dataset hids
for assoc in job.output_datasets:
hid_to_output_pair[ assoc.dataset.hid ] = ( step, assoc.name )
# Workflow to populate
workflow = model.Workflow()
workflow.name = workflow_name
# Order the steps if possible
attach_ordered_steps( workflow, steps )
# And let's try to set up some reasonable locations on the canvas
# (these are pretty arbitrary values)
levorder = order_workflow_steps_with_levels( steps )
base_pos = 10
for i, steps_at_level in enumerate( levorder ):
for j, index in enumerate( steps_at_level ):
step = steps[ index ]
step.position = dict( top = ( base_pos + 120 * j ),
left = ( base_pos + 220 * i ) )
# Store it
stored = model.StoredWorkflow()
stored.user = user
stored.name = workflow_name
workflow.stored_workflow = stored
stored.latest_workflow = workflow
trans.sa_session.add( stored )
trans.sa_session.flush()
# Index page with message
return trans.show_message( "Workflow '%s' created from current history." % workflow_name )
## return trans.show_ok_message( "<p>Workflow '%s' created.</p><p><a target='_top' href='%s'>Click to load in workflow editor</a></p>"
## % ( workflow_name, web.url_for( action='editor', id=trans.security.encode_id(stored.id) ) ) )
@web.expose
def run( self, trans, id, **kwargs ):
stored = self.get_stored_workflow( trans, id, check_ownership=False )
user = trans.get_user()
if stored.user != user:
if trans.sa_session.query( model.StoredWorkflowUserShareAssociation ) \
.filter_by( user=user, stored_workflow=stored ).count() == 0:
error( "Workflow is not owned by or shared with current user" )
# Get the latest revision
workflow = stored.latest_workflow
# It is possible for a workflow to have 0 steps
if len( workflow.steps ) == 0:
error( "Workflow cannot be run because it does not have any steps" )
#workflow = Workflow.from_simple( simplejson.loads( stored.encoded_value ), trans.app )
if workflow.has_cycles:
error( "Workflow cannot be run because it contains cycles" )
if workflow.has_errors:
error( "Workflow cannot be run because of validation errors in some steps" )
# Build the state for each step
errors = {}
has_upgrade_messages = False
has_errors = False
if kwargs:
# If kwargs were provided, the states for each step should have
# been POSTed
# Get the kwarg keys for data inputs
input_keys = filter(lambda a: a.endswith('|input'), kwargs)
# Example: prefixed='2|input'
# Check if one of them is a list
multiple_input_key = None
multiple_inputs = [None]
for input_key in input_keys:
if isinstance(kwargs[input_key], list):
multiple_input_key = input_key
multiple_inputs = kwargs[input_key]
# List to gather values for the template
            invocations = []
for input_number, single_input in enumerate(multiple_inputs):
# Example: single_input='1', single_input='2', etc...
# 'Fix' the kwargs, to have only the input for this iteration
if multiple_input_key:
kwargs[multiple_input_key] = single_input
for step in workflow.steps:
step.upgrade_messages = {}
# Connections by input name
step.input_connections_by_name = \
dict( ( conn.input_name, conn ) for conn in step.input_connections )
# Extract just the arguments for this step by prefix
p = "%s|" % step.id
l = len(p)
step_args = dict( ( k[l:], v ) for ( k, v ) in kwargs.iteritems() if k.startswith( p ) )
step_errors = None
if step.type == 'tool' or step.type is None:
module = module_factory.from_workflow_step( trans, step )
# Fix any missing parameters
step.upgrade_messages = module.check_and_update_state()
if step.upgrade_messages:
has_upgrade_messages = True
# Any connected input needs to have value DummyDataset (these
# are not persisted so we need to do it every time)
module.add_dummy_datasets( connections=step.input_connections )
# Get the tool
tool = module.tool
# Get the state
step.state = state = module.state
# Get old errors
old_errors = state.inputs.pop( "__errors__", {} )
# Update the state
step_errors = tool.update_state( trans, tool.inputs, step.state.inputs, step_args,
update_only=True, old_errors=old_errors )
else:
# Fix this for multiple inputs
module = step.module = module_factory.from_workflow_step( trans, step )
state = step.state = module.decode_runtime_state( trans, step_args.pop( "tool_state" ) )
step_errors = module.update_runtime_state( trans, state, step_args )
if step_errors:
errors[step.id] = state.inputs["__errors__"] = step_errors
if 'run_workflow' in kwargs and not errors:
new_history = None
if 'new_history' in kwargs:
if 'new_history_name' in kwargs and kwargs['new_history_name'] != '':
nh_name = kwargs['new_history_name']
else:
nh_name = "History from %s workflow" % workflow.name
if multiple_input_key:
nh_name = '%s %d' % (nh_name, input_number + 1)
new_history = trans.app.model.History( user=trans.user, name=nh_name )
trans.sa_session.add( new_history )
# Run each step, connecting outputs to inputs
workflow_invocation = model.WorkflowInvocation()
workflow_invocation.workflow = workflow
outputs = odict()
for i, step in enumerate( workflow.steps ):
# Execute module
job = None
if step.type == 'tool' or step.type is None:
tool = trans.app.toolbox.tools_by_id[ step.tool_id ]
input_values = step.state.inputs
# Connect up
def callback( input, value, prefixed_name, prefixed_label ):
if isinstance( input, DataToolParameter ):
if prefixed_name in step.input_connections_by_name:
conn = step.input_connections_by_name[ prefixed_name ]
return outputs[ conn.output_step.id ][ conn.output_name ]
visit_input_values( tool.inputs, step.state.inputs, callback )
# Execute it
job, out_data = tool.execute( trans, step.state.inputs, history=new_history)
outputs[ step.id ] = out_data
# Create new PJA associations with the created job, to be run on completion.
# PJA Parameter Replacement (only applies to immediate actions-- rename specifically, for now)
# Pass along replacement dict with the execution of the PJA so we don't have to modify the object.
replacement_dict = {}
for k, v in kwargs.iteritems():
if k.startswith('wf_parm|'):
replacement_dict[k[8:]] = v
for pja in step.post_job_actions:
if pja.action_type in ActionBox.immediate_actions:
ActionBox.execute(trans.app, trans.sa_session, pja, job, replacement_dict)
else:
job.add_post_job_action(pja)
else:
job, out_data = step.module.execute( trans, step.state )
outputs[ step.id ] = out_data
# Record invocation
workflow_invocation_step = model.WorkflowInvocationStep()
workflow_invocation_step.workflow_invocation = workflow_invocation
workflow_invocation_step.workflow_step = step
workflow_invocation_step.job = job
                    # All jobs ran successfully, so we can save now
trans.sa_session.add( workflow_invocation )
invocations.append({'outputs': outputs,
'new_history': new_history})
trans.sa_session.flush()
return trans.fill_template( "workflow/run_complete.mako",
workflow=stored,
invocations=invocations )
else:
# Prepare each step
missing_tools = []
for step in workflow.steps:
step.upgrade_messages = {}
                # Construct modules
if step.type == 'tool' or step.type is None:
# Restore the tool state for the step
step.module = module_factory.from_workflow_step( trans, step )
if not step.module:
if step.tool_id not in missing_tools:
missing_tools.append(step.tool_id)
continue
step.upgrade_messages = step.module.check_and_update_state()
if step.upgrade_messages:
has_upgrade_messages = True
# Any connected input needs to have value DummyDataset (these
# are not persisted so we need to do it every time)
step.module.add_dummy_datasets( connections=step.input_connections )
# Store state with the step
step.state = step.module.state
# Error dict
if step.tool_errors:
has_errors = True
errors[step.id] = step.tool_errors
else:
## Non-tool specific stuff?
step.module = module_factory.from_workflow_step( trans, step )
step.state = step.module.get_runtime_state()
# Connections by input name
step.input_connections_by_name = dict( ( conn.input_name, conn ) for conn in step.input_connections )
if missing_tools:
stored.annotation = self.get_item_annotation_str( trans.sa_session, trans.user, stored )
return trans.fill_template("workflow/run.mako", steps=[], workflow=stored, missing_tools = missing_tools)
# Render the form
stored.annotation = self.get_item_annotation_str( trans.sa_session, trans.user, stored )
return trans.fill_template(
"workflow/run.mako",
steps=workflow.steps,
workflow=stored,
has_upgrade_messages=has_upgrade_messages,
errors=errors,
incoming=kwargs )
@web.expose
def tag_outputs( self, trans, id, **kwargs ):
stored = self.get_stored_workflow( trans, id, check_ownership=False )
user = trans.get_user()
if stored.user != user:
if trans.sa_session.query( model.StoredWorkflowUserShareAssociation ) \
.filter_by( user=user, stored_workflow=stored ).count() == 0:
error( "Workflow is not owned by or shared with current user" )
# Get the latest revision
workflow = stored.latest_workflow
# It is possible for a workflow to have 0 steps
if len( workflow.steps ) == 0:
error( "Workflow cannot be tagged for outputs because it does not have any steps" )
if workflow.has_cycles:
error( "Workflow cannot be tagged for outputs because it contains cycles" )
if workflow.has_errors:
error( "Workflow cannot be tagged for outputs because of validation errors in some steps" )
# Build the state for each step
errors = {}
has_upgrade_messages = False
has_errors = False
if kwargs:
# If kwargs were provided, the states for each step should have
# been POSTed
for step in workflow.steps:
if step.type == 'tool':
# Extract just the output flags for this step.
p = "%s|otag|" % step.id
l = len(p)
outputs = [k[l:] for ( k, v ) in kwargs.iteritems() if k.startswith( p )]
if step.workflow_outputs:
for existing_output in step.workflow_outputs:
if existing_output.output_name not in outputs:
trans.sa_session.delete(existing_output)
else:
outputs.remove(existing_output.output_name)
for outputname in outputs:
m = model.WorkflowOutput(workflow_step_id = int(step.id), output_name = outputname)
trans.sa_session.add(m)
# Prepare each step
trans.sa_session.flush()
for step in workflow.steps:
step.upgrade_messages = {}
            # Construct modules
if step.type == 'tool' or step.type is None:
# Restore the tool state for the step
step.module = module_factory.from_workflow_step( trans, step )
# Fix any missing parameters
step.upgrade_messages = step.module.check_and_update_state()
if step.upgrade_messages:
has_upgrade_messages = True
# Any connected input needs to have value DummyDataset (these
# are not persisted so we need to do it every time)
step.module.add_dummy_datasets( connections=step.input_connections )
# Store state with the step
step.state = step.module.state
# Error dict
if step.tool_errors:
has_errors = True
errors[step.id] = step.tool_errors
else:
## Non-tool specific stuff?
step.module = module_factory.from_workflow_step( trans, step )
step.state = step.module.get_runtime_state()
# Connections by input name
step.input_connections_by_name = dict( ( conn.input_name, conn ) for conn in step.input_connections )
# Render the form
return trans.fill_template(
"workflow/tag_outputs.mako",
steps=workflow.steps,
workflow=stored,
has_upgrade_messages=has_upgrade_messages,
errors=errors,
incoming=kwargs )
@web.expose
def configure_menu( self, trans, workflow_ids=None ):
user = trans.get_user()
if trans.request.method == "POST":
if workflow_ids is None:
workflow_ids = []
elif type( workflow_ids ) != list:
workflow_ids = [ workflow_ids ]
sess = trans.sa_session
            # This explicit remove seems like a hack; need to figure out
            # how to make the association do it automatically.
for m in user.stored_workflow_menu_entries:
sess.delete( m )
user.stored_workflow_menu_entries = []
q = sess.query( model.StoredWorkflow )
# To ensure id list is unique
seen_workflow_ids = set()
for id in workflow_ids:
if id in seen_workflow_ids:
continue
else:
seen_workflow_ids.add( id )
m = model.StoredWorkflowMenuEntry()
m.stored_workflow = q.get( id )
user.stored_workflow_menu_entries.append( m )
sess.flush()
return trans.show_message( "Menu updated", refresh_frames=['tools'] )
else:
user = trans.get_user()
ids_in_menu = set( [ x.stored_workflow_id for x in user.stored_workflow_menu_entries ] )
workflows = trans.sa_session.query( model.StoredWorkflow ) \
.filter_by( user=user, deleted=False ) \
.order_by( desc( model.StoredWorkflow.table.c.update_time ) ) \
.all()
shared_by_others = trans.sa_session \
.query( model.StoredWorkflowUserShareAssociation ) \
.filter_by( user=user ) \
.filter( model.StoredWorkflow.deleted == False ) \
.all()
return trans.fill_template( "workflow/configure_menu.mako",
workflows=workflows,
shared_by_others=shared_by_others,
ids_in_menu=ids_in_menu )
def _workflow_to_dict( self, trans, stored ):
"""
Converts a workflow to a dict of attributes suitable for exporting.
"""
workflow = stored.latest_workflow
workflow_annotation = self.get_item_annotation_obj( trans.sa_session, trans.user, stored )
annotation_str = ""
if workflow_annotation:
annotation_str = workflow_annotation.annotation
# Pack workflow data into a dictionary and return
data = {}
data['a_galaxy_workflow'] = 'true' # Placeholder for identifying galaxy workflow
data['format-version'] = "0.1"
data['name'] = workflow.name
data['annotation'] = annotation_str
data['steps'] = {}
# For each step, rebuild the form and encode the state
for step in workflow.steps:
# Load from database representation
module = module_factory.from_workflow_step( trans, step )
# Get user annotation.
step_annotation = self.get_item_annotation_obj(trans.sa_session, trans.user, step )
annotation_str = ""
if step_annotation:
annotation_str = step_annotation.annotation
# Step info
step_dict = {
'id': step.order_index,
'type': module.type,
'tool_id': module.get_tool_id(),
'tool_version' : step.tool_version,
'name': module.get_name(),
'tool_state': module.get_state( secure=False ),
'tool_errors': module.get_errors(),
## 'data_inputs': module.get_data_inputs(),
## 'data_outputs': module.get_data_outputs(),
'annotation' : annotation_str
}
# Data inputs
step_dict['inputs'] = []
if module.type == "data_input":
# Get input dataset name; default to 'Input Dataset'
name = module.state.get( 'name', 'Input Dataset')
step_dict['inputs'].append( { "name" : name, "description" : annotation_str } )
else:
# Step is a tool and may have runtime inputs.
for name, val in module.state.inputs.items():
input_type = type( val )
if input_type == RuntimeValue:
step_dict['inputs'].append( { "name" : name, "description" : "runtime parameter for tool %s" % module.get_name() } )
elif input_type == dict:
# Input type is described by a dict, e.g. indexed parameters.
for partname, partval in val.items():
if type( partval ) == RuntimeValue:
step_dict['inputs'].append( { "name" : name, "description" : "runtime parameter for tool %s" % module.get_name() } )
# User outputs
step_dict['user_outputs'] = []
"""
module_outputs = module.get_data_outputs()
step_outputs = trans.sa_session.query( WorkflowOutput ).filter( step=step )
for output in step_outputs:
name = output.output_name
annotation = ""
for module_output in module_outputs:
if module_output.get( 'name', None ) == name:
output_type = module_output.get( 'extension', '' )
break
data['outputs'][name] = { 'name' : name, 'annotation' : annotation, 'type' : output_type }
"""
# All step outputs
step_dict['outputs'] = []
if type( module ) is ToolModule:
for output in module.get_data_outputs():
step_dict['outputs'].append( { 'name' : output['name'], 'type' : output['extensions'][0] } )
# Connections
input_connections = step.input_connections
if step.type is None or step.type == 'tool':
# Determine full (prefixed) names of valid input datasets
data_input_names = {}
def callback( input, value, prefixed_name, prefixed_label ):
if isinstance( input, DataToolParameter ):
data_input_names[ prefixed_name ] = True
visit_input_values( module.tool.inputs, module.state.inputs, callback )
# Filter
            # FIXME: this currently removes connections without displaying a message!
input_connections = [ conn for conn in input_connections if conn.input_name in data_input_names ]
# Encode input connections as dictionary
input_conn_dict = {}
for conn in input_connections:
input_conn_dict[ conn.input_name ] = \
dict( id=conn.output_step.order_index, output_name=conn.output_name )
step_dict['input_connections'] = input_conn_dict
# Position
step_dict['position'] = step.position
# Add to return value
data['steps'][step.order_index] = step_dict
return data
def _workflow_from_dict( self, trans, data, source=None ):
"""
Creates a workflow from a dict. Created workflow is stored in the database and returned.
"""
# Put parameters in workflow mode
trans.workflow_building_mode = True
# Create new workflow from incoming dict
workflow = model.Workflow()
# If there's a source, put it in the workflow name.
if source:
name = "%s (imported from %s)" % ( data['name'], source )
else:
name = data['name']
workflow.name = name
# Assume no errors until we find a step that has some
workflow.has_errors = False
# Create each step
steps = []
# The editor will provide ids for each step that we don't need to save,
# but do need to use to make connections
steps_by_external_id = {}
# First pass to build step objects and populate basic values
for key, step_dict in data['steps'].iteritems():
# Create the model class for the step
step = model.WorkflowStep()
steps.append( step )
steps_by_external_id[ step_dict['id' ] ] = step
# FIXME: Position should be handled inside module
step.position = step_dict['position']
module = module_factory.from_dict( trans, step_dict, secure=False )
module.save_to_step( step )
if step.tool_errors:
workflow.has_errors = True
# Stick this in the step temporarily
step.temp_input_connections = step_dict['input_connections']
# Save step annotation.
annotation = step_dict[ 'annotation' ]
if annotation:
annotation = sanitize_html( annotation, 'utf-8', 'text/html' )
self.add_item_annotation( trans.sa_session, trans.get_user(), step, annotation )
# Second pass to deal with connections between steps
for step in steps:
# Input connections
for input_name, conn_dict in step.temp_input_connections.iteritems():
if conn_dict:
conn = model.WorkflowStepConnection()
conn.input_step = step
conn.input_name = input_name
conn.output_name = conn_dict['output_name']
conn.output_step = steps_by_external_id[ conn_dict['id'] ]
del step.temp_input_connections
# Order the steps if possible
attach_ordered_steps( workflow, steps )
# Connect up
stored = model.StoredWorkflow()
stored.name = workflow.name
workflow.stored_workflow = stored
stored.latest_workflow = workflow
stored.user = trans.user
# Persist
trans.sa_session.add( stored )
trans.sa_session.flush()
return stored
## ---- Utility methods -------------------------------------------------------
def attach_ordered_steps( workflow, steps ):
ordered_steps = order_workflow_steps( steps )
if ordered_steps:
workflow.has_cycles = False
for i, step in enumerate( ordered_steps ):
step.order_index = i
workflow.steps.append( step )
else:
workflow.has_cycles = True
workflow.steps = steps
def edgelist_for_workflow_steps( steps ):
"""
Create a list of tuples representing edges between `WorkflowSteps` based
on associated `WorkflowStepConnection`s
"""
edges = []
steps_to_index = dict( ( step, i ) for i, step in enumerate( steps ) )
for step in steps:
        # Add a self-edge so every step appears in the sort, even if unconnected.
        edges.append( ( steps_to_index[step], steps_to_index[step] ) )
for conn in step.input_connections:
edges.append( ( steps_to_index[conn.output_step], steps_to_index[conn.input_step] ) )
return edges
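# Example (a sketch, not executed): three steps where step 0 feeds step 1 and
# step 1 feeds step 2 produce
# [(0, 0), (1, 1), (0, 1), (2, 2), (1, 2)],
# i.e. one self-edge per step plus one (output_index, input_index) edge per
# connection, which is the edge-list format the topsort call below consumes.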
def order_workflow_steps( steps ):
"""
Perform topological sort of the steps, return ordered or None
"""
position_data_available = True
for step in steps:
        if not step.position or 'left' not in step.position or 'top' not in step.position:
position_data_available = False
if position_data_available:
steps.sort(cmp=lambda s1,s2: cmp( math.sqrt(s1.position['left']**2 + s1.position['top']**2), math.sqrt(s2.position['left']**2 + s2.position['top']**2)))
try:
edges = edgelist_for_workflow_steps( steps )
node_order = topsort( edges )
return [ steps[i] for i in node_order ]
except CycleError:
return None
def order_workflow_steps_with_levels( steps ):
try:
return topsort_levels( edgelist_for_workflow_steps( steps ) )
except CycleError:
return None
class FakeJob( object ):
"""
    Fake job object for datasets that have no creating_job_associations;
    such datasets will be treated as "input" datasets.
"""
def __init__( self, dataset ):
self.is_fake = True
self.id = "fake_%s" % dataset.id
def get_job_dict( trans ):
"""
Return a dictionary of Job -> [ Dataset ] mappings, for all finished
active Datasets in the current history and the jobs that created them.
"""
history = trans.get_history()
# Get the jobs that created the datasets
warnings = set()
jobs = odict()
for dataset in history.active_datasets:
# FIXME: Create "Dataset.is_finished"
if dataset.state in ( 'new', 'running', 'queued' ):
warnings.add( "Some datasets still queued or running were ignored" )
continue
        # If this HDA was copied from another, we need to find the job that created the original HDA.
job_hda = dataset
while job_hda.copied_from_history_dataset_association:
job_hda = job_hda.copied_from_history_dataset_association
if not job_hda.creating_job_associations:
jobs[ FakeJob( dataset ) ] = [ ( None, dataset ) ]
for assoc in job_hda.creating_job_associations:
job = assoc.job
if job in jobs:
jobs[ job ].append( ( assoc.name, dataset ) )
else:
jobs[ job ] = [ ( assoc.name, dataset ) ]
return jobs, warnings
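# Shape of the result (hypothetical objects): jobs maps each creating Job
# (or a FakeJob for input datasets) to [(output_name, dataset), ...], e.g.
# { <Job 7>: [('output1', <HDA 12>)], <FakeJob fake_3>: [(None, <HDA 3>)] },
# and warnings is a set of human-readable strings.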
def cleanup_param_values( inputs, values ):
"""
Remove 'Data' values from `param_values`, along with metadata cruft,
but track the associations.
"""
associations = []
names_to_clean = []
# dbkey is pushed in by the framework
if 'dbkey' in values:
del values['dbkey']
root_values = values
# Recursively clean data inputs and dynamic selects
def cleanup( prefix, inputs, values ):
for key, input in inputs.items():
if isinstance( input, ( SelectToolParameter, DrillDownSelectToolParameter ) ):
if input.is_dynamic and not isinstance( values[key], UnvalidatedValue ):
values[key] = UnvalidatedValue( values[key] )
if isinstance( input, DataToolParameter ):
tmp = values[key]
values[key] = None
# HACK: Nested associations are not yet working, but we
# still need to clean them up so we can serialize
# if not( prefix ):
if tmp: #this is false for a non-set optional dataset
associations.append( ( tmp.hid, prefix + key ) )
# Cleanup the other deprecated crap associated with datasets
# as well. Worse, for nested datasets all the metadata is
# being pushed into the root. FIXME: MUST REMOVE SOON
key = prefix + key + "_"
for k in root_values.keys():
if k.startswith( key ):
del root_values[k]
elif isinstance( input, Repeat ):
group_values = values[key]
for i, rep_values in enumerate( group_values ):
rep_index = rep_values['__index__']
prefix = "%s_%d|" % ( key, rep_index )
cleanup( prefix, input.inputs, group_values[i] )
elif isinstance( input, Conditional ):
group_values = values[input.name]
current_case = group_values['__current_case__']
prefix = "%s|" % ( key )
cleanup( prefix, input.cases[current_case].inputs, group_values )
cleanup( "", inputs, values )
return associations
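# Sketch of the result (hypothetical hids): for a tool whose 'input1' was the
# dataset with hid 3 and whose repeat input 'queries_0|input2' was hid 5, this
# returns [(3, 'input1'), (5, 'queries_0|input2')], with the dataset values in
# `values` replaced by None.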
| WorkflowConversion/Galaxy2gUSE | lib/galaxy/web/controllers/workflow.py | Python | gpl-2.0 | 105,324 |
#!/usr/bin/env python3
import datetime
import json
class ProbabilityTable:
"""
Given a tree with values at the leaves in the form of nested lists,
    return a dict where each key is a leaf value and each value is the probability
    of choosing that leaf from a uniformly distributed random walk of the tree.
>>> ProbabilityTable.from_tree(['A', 'B']).to_packwars_dict() == \
{'A': 0.5, 'B': 0.5}
True
>>> ProbabilityTable.from_tree(['A', ['X', 'Y']]).to_packwars_dict() == \
{'Y': 0.25, 'X': 0.25, 'A': 0.5}
True
>>> ProbabilityTable.from_tree(['common']).to_packwars_dict()
{'common': 1.0}
>>> ProbabilityTable.from_tree([]).to_packwars_dict()
{}
>>> ProbabilityTable.from_tree("AB").to_packwars_dict()
Traceback (most recent call last):
...
AssertionError: Must be a nested-list tree
"""
def __init__(self, table=None):
self.table = table if table else {}
@classmethod
def from_tree(cls, tree):
assert not isinstance(tree, str), "Must be a nested-list tree"
table = {}
def _add_to_table(node, unity):
if not node:
return
elif isinstance(node, str):
# leaf node
leaf = node
table[leaf] = table.get(leaf, 0.0) + unity
else:
# sub-tree
sub_tree = node
sub_unity = unity / len(sub_tree)
for sub_node in sub_tree:
_add_to_table(sub_node, sub_unity)
_add_to_table(tree, 1.0)
return cls(table=table)
def keys(self):
return self.table.keys()
def pop(self, *args, **kwargs):
self.table.pop(*args, **kwargs)
def to_packwars_dict(self):
return self.table
class BoosterFormat:
JUNK = {"marketing", "checklist", "token"}
    def __init__(self, probability_tables=None):
        # Avoid the shared mutable default-argument pitfall.
        probability_tables = probability_tables if probability_tables is not None else []
        self.probability_tables = probability_tables
for table in probability_tables[::-1]:
for junk in self.JUNK:
table.pop(junk, None)
if not table:
probability_tables.remove(table)
@classmethod
def from_mtgjson_dict(cls, data):
probability_tables = []
for tree in data:
if isinstance(tree, str):
# mtgjson has either strings or arrays here,
# make them all arrays
tree = [tree]
probability_tables.append(ProbabilityTable.from_tree(tree))
return cls(probability_tables)
def required_card_types(self):
card_types = set()
for probability_table in self.probability_tables:
card_types |= probability_table.keys()
return card_types
def to_packwars_dict(self):
return [p.to_packwars_dict() for p in self.probability_tables]
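    # For a typical booster this yields something like (a sketch mirroring the
    # AllSets docstring below):
    # [{'common': 1.0}, {'common': 1.0}, ..., {'rare': 0.8, 'mythic': 0.2}]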
class Card:
def __init__(self, name, rarity, layout, mana_cost):
self.name = name
self.rarity = rarity
self.layout = layout
self.mana_cost = mana_cost
@classmethod
def from_mtgjson_dict(cls, set_code, data):
mana_cost = data.get('manaCost', None)
return cls(
name=data['name'],
rarity=data['rarity'],
layout=data['layout'],
mana_cost=mana_cost,
)
def is_card_type(self, set_code, card_type):
if card_type == 'Steamflogger Boss':
return self.name == 'Steamflogger Boss'
elif card_type == 'double faced common':
return self.layout == "double-faced" and not self.mana_cost and self.rarity == 'Common'
elif card_type == 'double faced uncommon':
return self.layout == "double-faced" and not self.mana_cost and self.rarity == 'Uncommon'
elif card_type == 'double faced rare':
return self.layout == "double-faced" and not self.mana_cost and self.rarity == 'Rare'
elif card_type == 'double faced mythic rare':
return self.layout == "double-faced" and not self.mana_cost and self.rarity == 'Mythic Rare'
elif card_type == 'double faced':
return self.layout == "double-faced" and not self.mana_cost
else:
return card_type == self.rarity.lower()
# if set_code in ('TSB', 'TSP') and rarity == 'Special':
# return True
# elif rarity == 'Basic Land':
# return "land"
# elif layout == "double-faced" and mana_cost:
# return "double faced"
# return rarity.lower()
@classmethod
def make_card_type(cls, set_code, rarity, layout, mana_cost):
"""
Given a set code, and a card json dump, try to figure out which value
in the booster listing it matches. E.g., Timeshift has, under booster,
"timeshifted purple", which means you need a card from timeshift with
the "special" rarity.
This method should return "timeshifted purple" for a card with those
properties.
>>> Card.make_card_type('TSP', 'special', 'normal', '{1}{W}')
'timeshifted purple'
>>> Card.make_card_type('ABC', 'common', 'normal', '{2}{U}')
'common'
"""
if set_code in ('TSB', 'TSP') and rarity == 'Special':
return "timeshifted purple"
elif rarity == 'Basic Land':
return "land"
elif layout == "double-faced" and mana_cost:
return "double faced"
return rarity.lower()
class Set:
SET_TYPE_EXPANSION = 'expansion'
SET_TYPE_CORE = 'core'
SET_TYPE_JOKE = 'un'
class UnknownSetTypeError(Exception):
pass
def __init__(self, name, code, release_date, set_type, block, booster_format, cards):
self.name = name
self.code = code
self.release_date = release_date
self.set_type = set_type
self.block = block
self.booster_format = booster_format
self.cards = cards
@classmethod
def from_mtgjson_dict(cls, data):
        # Parse the release date (note: a trailing comma here would turn it into a 1-tuple).
        release_date = datetime.datetime.strptime(data['releaseDate'], "%Y-%m-%d")
set_type = cls.set_type_from_string(data['type'])
booster_format = BoosterFormat.from_mtgjson_dict(data['booster'])
cards = [Card.from_mtgjson_dict(data['code'], card_data) for card_data in data['cards']]
return cls(
name=data['name'],
code=data['code'],
release_date=release_date,
set_type=set_type,
block=data.get('block', None),
booster_format=booster_format,
cards=cards,
)
@classmethod
def set_type_from_string(cls, set_type_string):
if set_type_string == 'expansion':
return cls.SET_TYPE_EXPANSION
elif set_type_string == 'core':
return cls.SET_TYPE_CORE
elif set_type_string == 'un':
return cls.SET_TYPE_JOKE
else:
raise cls.UnknownSetTypeError(set_type_string)
def to_packwars_dict(self):
cards_by_type = {}
for card_type in self.booster_format.required_card_types():
cards_by_type[card_type] = list(set([card.name for card in self.cards if card.is_card_type(self.code, card_type)]))
if not cards_by_type[card_type] and card_type == 'land':
cards_by_type[card_type] = ["Plains", "Island", "Swamp", "Mountain", "Forest"]
assert cards_by_type[card_type], "Set {} requires {} for its booster but none are found {!r}".format(self.code, card_type, self.booster_format.required_card_types())
return {
"name": self.make_name(self.block, self.name),
"boosterFormat": self.booster_format.to_packwars_dict(),
"cardsByType": cards_by_type,
}
@classmethod
def make_name(cls, block, name):
"""
>>> Set.make_name("Shadowmoor", "Eventide")
'Shadowmoor - Eventide'
>>> Set.make_name("Shards of Alara", "Shards of Alara")
'Shards of Alara'
"""
if not block or block == name:
return name
return "{block} - {name}".format(block=block, name=name)
class AllSets:
"""
expected output I think:
[
{
"name": "Lorwyn",
"booster": {
"common": 10,
"uncommon: 5,
"rare": 1
},
"rare": ["A", "B"],
"uncommon": ["X", "Y"],
}
]
new output:
[
{
"name": "Lorwyn",
"boosterFormat": [
{"common": 1.0},
{"common": 1.0},
{"common": 1.0},
{"uncommon": 1.0},
{"uncommon": 1.0},
{"rare": 0.8, "mythic": 0.2}
],
"cardsByType": {
"rare": ["A", "B"],
"uncommon": ["X", "Y"],
}
}
]
"""
def __init__(self, sets):
self.sets = sets
self.sets.sort(key=lambda s: s.release_date)
@classmethod
def from_mtgjson_file(cls, mtgjson_file):
sets = []
# parse json file from mtgjson to dict
set_data_by_code = hack_mtgjson_dict(json.load(mtgjson_file))
# for each set in the dict, turn it into a Set object
for code, set_data in set_data_by_code.items():
try:
sets.append(Set.from_mtgjson_dict(set_data))
except Set.UnknownSetTypeError as e:
pass
#sys.stderr.write("skipping set: {}\n".format(code))
return cls(sets=sets)
def to_packwars_dict(self):
return [s.to_packwars_dict() for s in self.sets]
def dump_packwars_jsonp(self, outfile, json_only):
if not json_only:
outfile.write("mtgJSON(")
json.dump(self.to_packwars_dict(), outfile)
if not json_only:
outfile.write(")")
def hack_mtgjson_dict(data):
"""
Fix broken crap in mtgjson file
"""
# move all cards from TSB into TSP and delete TSB
# data['TSP']['cards'].extend(data['TSB']['cards'])
# del data['TSB']
    # No Time Spiral block sets
del data['TSB']
del data['TSP']
del data['PLC']
del data['FUT']
return data
if __name__ == "__main__":
import argparse
import sys
import json
parser = argparse.ArgumentParser(description="Convert mtgjson file to packwars jsonp file")
parser.add_argument("-t", "--test", action="store_true", help="run tests")
parser.add_argument("-j", "--json", action="store_true", help="generate as json instead of jsonp")
parser.add_argument('infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
args = parser.parse_args()
if args.test:
import doctest
doctest.testmod()
sys.exit()
all_sets = AllSets.from_mtgjson_file(args.infile)
all_sets.dump_packwars_jsonp(args.outfile, args.json)
| qtip/packwars-simulator | convert.py | Python | mit | 11,046 |
# -*- coding: utf-8 -*-
from setuptools import setup
import saml2idp
with open('README.md') as readme:
description = readme.read()
with open('HISTORY.md') as history:
changelog = history.read()
setup(
name='dj-saml-idp',
version=saml2idp.__version__,
author='Sebastian Vetter',
author_email='[email protected]',
description='SAML 2.0 IdP for Django',
long_description='\n\n'.join([description, changelog]),
install_requires=[
'Django<2',
'M2Crypto>=0.35.2',
'beautifulsoup4>=4.8.1',
'structlog==16.1.0',
'lxml==4.4.1'
],
license='MIT',
packages=['saml2idp'],
url='http://github.com/mobify/dj-saml-idp',
zip_safe=False,
include_package_data=True,
)
| mobify/dj-saml-idp | setup.py | Python | mit | 757 |
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
lrs = []
for _ in range(num_steps):
lrs.append(scheduler.get_lr()[0])
scheduler.step()
return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
lrs = []
for step in range(num_steps):
lrs.append(scheduler.get_lr()[0])
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
file_name = os.path.join(tmpdirname, "schedule.bin")
torch.save(scheduler.state_dict(), file_name)
state_dict = torch.load(file_name)
scheduler.load_state_dict(state_dict)
return lrs
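# Usage sketch (mirrors ScheduleInitTest below): with an AdamW optimizer at
# lr=10.0, unwrap_schedule(get_constant_schedule(optimizer), 10) collects
# [10.0] * 10, and unwrap_and_save_reload_schedule should produce the same
# list even though the scheduler state is round-tripped through torch.save
# and torch.load halfway through.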
@require_torch
class OptimizationTest(unittest.TestCase):
def assertListAlmostEqual(self, list1, list2, tol):
self.assertEqual(len(list1), len(list2))
for a, b in zip(list1, list2):
self.assertAlmostEqual(a, b, delta=tol)
def test_adam_w(self):
w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
target = torch.tensor([0.4, 0.2, -0.5])
criterion = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
for _ in range(100):
loss = criterion(w, target)
loss.backward()
optimizer.step()
            w.grad.detach_()  # No zero_grad() function on plain tensors; we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
def test_adafactor(self):
w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
target = torch.tensor([0.4, 0.2, -0.5])
criterion = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
optimizer = Adafactor(
params=[w],
lr=1e-2,
eps=(1e-30, 1e-3),
clip_threshold=1.0,
decay_rate=-0.8,
beta1=None,
weight_decay=0.0,
relative_step=False,
scale_parameter=False,
warmup_init=False,
)
for _ in range(1000):
loss = criterion(w, target)
loss.backward()
optimizer.step()
            w.grad.detach_()  # No zero_grad() function on plain tensors; we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
m = nn.Linear(50, 50) if is_torch_available() else None
optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
num_steps = 10
def assertListAlmostEqual(self, list1, list2, tol, msg=None):
self.assertEqual(len(list1), len(list2))
for a, b in zip(list1, list2):
self.assertAlmostEqual(a, b, delta=tol, msg=msg)
def test_schedulers(self):
common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # scheduler dict format:
        # function: (sched_args_dict, expected_learning_rates)
scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
}
for scheduler_func, data in scheds.items():
kwargs, expected_learning_rates = data
scheduler = scheduler_func(self.optimizer, **kwargs)
self.assertEqual(len([scheduler.get_lr()[0]]), 1)
lrs_1 = unwrap_schedule(scheduler, self.num_steps)
self.assertListAlmostEqual(
lrs_1,
expected_learning_rates,
tol=1e-2,
msg=f"failed for {scheduler_func} in normal scheduler",
)
scheduler = scheduler_func(self.optimizer, **kwargs)
lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
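# Added hedged sketch: the linear-schedule expectations above can be
# derived by hand -- linear warmup over 2 steps up to the base lr of
# 10.0, then linear decay over the remaining 8 training steps.
def _demo_linear_schedule_values():
    warmup, total, lr = 2, 10, 10.0
    def expected(step):
        if step < warmup:
            return lr * step / warmup
        return lr * (total - step) / (total - warmup)
    assert [round(expected(s), 2) for s in range(10)] == [
        0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]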
| huggingface/transformers | tests/optimization/test_optimization.py | Python | apache-2.0 | 6,080 |
#! /usr/bin/env python3
# ****************************************************************************
# Copyright 2020 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# ==============================================================================
# -*- coding: utf-8 -*-
"""Script to get build parameters interactively from user.
Adapted significantly from tensorflow.git/configure.py .
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import platform
import re
import subprocess
import sys
# pylint: disable=g-import-not-at-top
try:
from shutil import which
except ImportError:
from distutils.spawn import find_executable as which
# pylint: enable=g-import-not-at-top
_DEFAULT_CUDA_VERSION = '10'
_DEFAULT_CUDNN_VERSION = '7'
_DEFAULT_TENSORRT_VERSION = '7'
_DEFAULT_CUDA_COMPUTE_CAPABILITIES = '3.7,5.2,6.0,6.1,7.0,7.2,7.5'
_DEFAULT_PYTHON_LIB_PATH = '/usr/lib/python3/dist-packages'
_DEFAULT_PROMPT_ASK_ATTEMPTS = 3
_APOLLO_ROOT_DIR = ''
_APOLLO_BAZELRC = '.apollo.bazelrc'
_APOLLO_CURRENT_BAZEL_VERSION = None
_APOLLO_MIN_BAZEL_VERSION = '2.0.0'
_APOLLO_INSIDE_DOCKER = True
_APOLLO_DOCKER_STAGE = "dev"
_INTERACTIVE_MODE = True
class UserInputError(Exception):
pass
def is_linux():
return platform.system() == 'Linux'
def inside_docker():
return os.path.isfile('/.dockerenv')
def docker_stage():
default_apollo_stage = "dev"
if not inside_docker():
return default_apollo_stage
stage_conf = "/etc/apollo.conf"
if not os.path.exists(stage_conf) or not os.path.isfile(stage_conf):
return default_apollo_stage
with open("/etc/apollo.conf") as f:
for line in f:
line = line.strip()
if line.startswith("stage="):
return line.split("=")[-1]
return default_apollo_stage
def default_root_dir():
current_dir = os.path.dirname(__file__)
if len(current_dir) == 0:
current_dir = '.'
return os.path.abspath(os.path.join(current_dir, '..'))
def get_input(question):
try:
try:
answer = raw_input(question)
except NameError:
answer = input(question) # pylint: disable=bad-builtin
except EOFError:
answer = ''
return answer
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def write_to_bazelrc(line):
with open(_APOLLO_BAZELRC, 'a') as f:
f.write(line + '\n')
def write_blank_line_to_bazelrc():
with open(_APOLLO_BAZELRC, 'a') as f:
f.write('\n')
def write_action_env_to_bazelrc(var_name, var):
write_to_bazelrc('build --action_env {}="{}"'.format(var_name, str(var)))
def write_build_var_to_bazelrc(bazel_config_name, option_name):
# TODO(build): Migrate all users of configure.py to use --config Bazel
# options and not to set build configs through environment variables.
write_to_bazelrc('build:%s --define %s=true' % (bazel_config_name,
option_name))
def run_shell(cmd, allow_non_zero=False, stderr=None):
if stderr is None:
stderr = sys.stdout
if allow_non_zero:
try:
output = subprocess.check_output(cmd, stderr=stderr)
except subprocess.CalledProcessError as e:
output = e.output
else:
output = subprocess.check_output(cmd, stderr=stderr)
return output.decode("UTF-8").strip()
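# Added illustrative sketch: run_shell returns decoded, stripped stdout.
def _demo_run_shell():
    assert run_shell(['echo', 'hello']) == 'hello'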
def get_python_path(environ_cp, python_bin_path):
"""Get the python site package paths."""
python_paths = []
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
try:
stderr = open(os.devnull, 'wb')
library_paths = run_shell(
[
python_bin_path, '-c',
'import site; print("\\n".join(site.getsitepackages()))'
],
stderr=stderr).split('\n')
except subprocess.CalledProcessError:
library_paths = [
run_shell([
python_bin_path, '-c',
'from distutils.sysconfig import get_python_lib;'
'print(get_python_lib())'
])
]
all_paths = set(python_paths + library_paths)
paths = []
for path in all_paths:
if os.path.isdir(path) and not path.startswith(
os.path.join(_APOLLO_ROOT_DIR, "bazel-bin")):
paths.append(path)
return paths
def get_python_major_version(python_bin_path):
"""Get the python major version."""
return run_shell(
[python_bin_path, '-c', 'import sys; print(sys.version[0])'])
def setup_common_dirs(environ_cp):
"""Setup --distdir and --output_user_root directories"""
cache_dir = os.path.join(_APOLLO_ROOT_DIR, '.cache')
dist_dir = os.path.join(_APOLLO_ROOT_DIR, '.cache/distdir')
# if cyber/setup.bash not sourced
if 'APOLLO_CACHE_DIR' in environ_cp:
cache_dir = environ_cp['APOLLO_CACHE_DIR']
if 'APOLLO_BAZEL_DIST_DIR' in environ_cp:
dist_dir = environ_cp['APOLLO_BAZEL_DIST_DIR']
write_to_bazelrc('startup --output_user_root="{}/bazel"'.format(cache_dir))
write_to_bazelrc('common --distdir="{}"'.format(dist_dir))
write_to_bazelrc('common --repository_cache="{}/repos"'.format(cache_dir))
write_to_bazelrc('build --disk_cache="{}/build"'.format(cache_dir))
write_to_bazelrc('')
def setup_python(environ_cp):
"""Setup python related env variables."""
if not _INTERACTIVE_MODE:
setup_python_non_interactively(environ_cp)
else:
setup_python_interactively(environ_cp)
def setup_python_non_interactively(environ_cp):
"""Setup python related env variables non-interactively."""
# Get PYTHON_BIN_PATH, default is the current running python.
python_bin_path = sys.executable
if not os.path.exists(python_bin_path):
print('Invalid python path: {} cannot be found.'.format(
python_bin_path))
sys.exit(1)
if not os.path.isfile(python_bin_path) or not os.access(
python_bin_path, os.X_OK):
print('{} is not executable.'.format(python_bin_path))
sys.exit(1)
python_major_version = get_python_major_version(python_bin_path)
if python_major_version != '3':
        print('Python 2 was retired in April 2020. Use Python 3 instead.')
sys.exit(1)
environ_cp['PYTHON_BIN_PATH'] = python_bin_path
# Get PYTHON_LIB_PATH
python_lib_path = environ_cp.get('PYTHON_LIB_PATH')
if not python_lib_path:
python_lib_paths = get_python_path(environ_cp, python_bin_path)
print('Found possible Python library paths:\n %s' %
'\n '.join(python_lib_paths))
if _DEFAULT_PYTHON_LIB_PATH in python_lib_paths:
default_python_lib_path = _DEFAULT_PYTHON_LIB_PATH
else:
default_python_lib_path = python_lib_paths[0]
python_lib_path = default_python_lib_path
# Set-up env variables used by python_configure.bzl
write_action_env_to_bazelrc('PYTHON_BIN_PATH', python_bin_path)
write_action_env_to_bazelrc('PYTHON_LIB_PATH', python_lib_path)
write_to_bazelrc('build --python_path=\"{}\"'.format(python_bin_path))
    # If the chosen python_lib_path is from a path specified in the PYTHONPATH
    # variable, we need to tell bazel to include PYTHONPATH
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
if python_lib_path in python_paths:
write_action_env_to_bazelrc('PYTHONPATH',
environ_cp.get('PYTHONPATH'))
def setup_python_interactively(environ_cp):
"""Setup python related env variables interactively."""
# Get PYTHON_BIN_PATH, default is the current running python.
default_python_bin_path = sys.executable
ask_python_bin_path = (
'Please specify the location of python. [Default is '
'{}]: ').format(default_python_bin_path)
while True:
python_bin_path = get_from_env_or_user_or_default(
environ_cp, 'PYTHON_BIN_PATH', ask_python_bin_path,
default_python_bin_path)
# Check if the path is valid
if os.path.isfile(python_bin_path) and os.access(
python_bin_path, os.X_OK):
break
elif not os.path.exists(python_bin_path):
print('Invalid python path: {} cannot be found.'.format(
python_bin_path))
else:
print('{} is not executable. Is it the python binary?'.format(
python_bin_path))
environ_cp['PYTHON_BIN_PATH'] = ''
python_major_version = get_python_major_version(python_bin_path)
if python_major_version != '3':
        print('Python 2 was retired in April 2020. Use Python 3 instead.')
sys.exit(1)
# Get PYTHON_LIB_PATH
python_lib_path = environ_cp.get('PYTHON_LIB_PATH')
if not python_lib_path:
python_lib_paths = get_python_path(environ_cp, python_bin_path)
print('Found possible Python library paths:\n %s' %
'\n '.join(python_lib_paths))
default_python_lib_path = python_lib_paths[0]
python_lib_path = get_input(
'Please input the desired Python library path to use. '
'Default is [{}]\n'.format(python_lib_paths[0]))
if not python_lib_path:
python_lib_path = default_python_lib_path
environ_cp['PYTHON_LIB_PATH'] = python_lib_path
# Set-up env variables used by python_configure.bzl
write_action_env_to_bazelrc('PYTHON_BIN_PATH', python_bin_path)
write_action_env_to_bazelrc('PYTHON_LIB_PATH', python_lib_path)
write_to_bazelrc('build --python_path=\"{}\"'.format(python_bin_path))
environ_cp['PYTHON_BIN_PATH'] = python_bin_path
    # If the chosen python_lib_path is from a path specified in the PYTHONPATH
    # variable, we need to tell bazel to include PYTHONPATH
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
if python_lib_path in python_paths:
write_action_env_to_bazelrc('PYTHONPATH',
environ_cp.get('PYTHONPATH'))
def reset_apollo_bazelrc():
"""Reset file that contains customized config settings."""
open(_APOLLO_BAZELRC, 'w').close()
def get_var(environ_cp,
var_name,
query_item,
enabled_by_default,
question=None,
yes_reply=None,
no_reply=None):
"""Get boolean input from user.
If var_name is not set in env, ask user to enable query_item or not. If the
response is empty, use the default.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
query_item: string for feature related to the variable, e.g. "CUDA for
Nvidia GPUs".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
Returns:
boolean value of the variable.
Raises:
UserInputError: if an environment variable is set, but it cannot be
interpreted as a boolean indicator, assume that the user has made a
scripting error, and will continue to provide invalid input.
Raise the error to avoid infinitely looping.
"""
if not question:
question = 'Do you wish to build your project with {} support?'.format(
query_item)
if not yes_reply:
yes_reply = '{} support will be enabled for your project.'.format(
query_item)
if not no_reply:
no_reply = 'No {}'.format(yes_reply)
yes_reply += '\n'
no_reply += '\n'
if enabled_by_default:
question += ' [Y/n]: '
else:
question += ' [y/N]: '
var = environ_cp.get(var_name)
if var is not None:
var_content = var.strip().lower()
true_strings = ('1', 't', 'true', 'y', 'yes')
false_strings = ('0', 'f', 'false', 'n', 'no')
if var_content in true_strings:
var = True
elif var_content in false_strings:
var = False
else:
raise UserInputError(
'Environment variable %s must be set as a boolean indicator.\n'
'The following are accepted as TRUE : %s.\n'
'The following are accepted as FALSE: %s.\n'
'Current value is %s.' % (var_name, ', '.join(true_strings),
', '.join(false_strings), var))
while var is None:
user_input_origin = get_input(question)
user_input = user_input_origin.strip().lower()
if user_input == 'y':
print(yes_reply)
var = True
elif user_input == 'n':
print(no_reply)
var = False
elif not user_input:
if enabled_by_default:
print(yes_reply)
var = True
else:
print(no_reply)
var = False
else:
print('Invalid selection: {}'.format(user_input_origin))
return var
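# Added hedged sketch of get_var's environment parsing: any accepted
# true/false string short-circuits the interactive prompt entirely.
def _demo_get_var_env():
    assert get_var({'TF_NEED_CUDA': 'Yes'}, 'TF_NEED_CUDA', 'CUDA', False) is True
    assert get_var({'TF_NEED_CUDA': '0'}, 'TF_NEED_CUDA', 'CUDA', True) is False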
def convert_version_to_int(version):
"""Convert a version number to a integer that can be used to compare.
Version strings of the form X.YZ and X.Y.Z-xxxxx are supported. The
'xxxxx' part, for instance 'homebrew' on OS/X, is ignored.
Args:
version: a version to be converted
Returns:
An integer if converted successfully, otherwise return None.
"""
version = version.split('-')[0]
version_segments = version.split('.')
# Treat "0.24" as "0.24.0"
if len(version_segments) == 2:
version_segments.append('0')
for seg in version_segments:
if not seg.isdigit():
return None
version_str = ''.join(['%03d' % int(seg) for seg in version_segments])
return int(version_str)
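# Added illustrative sketch: convert_version_to_int packs each version
# segment into three digits so integer comparison matches version order.
def _demo_convert_version_to_int():
    assert convert_version_to_int('0.24') == 24000       # '000024000'
    assert convert_version_to_int('2.0.0') == 2000000    # '002000000'
    assert convert_version_to_int('3.1.0-homebrew') == 3001000
    assert convert_version_to_int('not-a-version') is None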
def check_bazel_version(min_version):
"""Check installed bazel version.
Args:
min_version: string for minimum bazel version (must exist!).
Returns:
The bazel version detected.
"""
    if which('bazel') is None:
        print('Cannot find bazel. Please install bazel first.')
        sys.exit(1)
stderr = open(os.devnull, 'wb')
curr_version = run_shell(
['bazel', '--version'], allow_non_zero=True, stderr=stderr)
if curr_version.startswith('bazel '):
curr_version = curr_version.split('bazel ')[1]
min_version_int = convert_version_to_int(min_version)
curr_version_int = convert_version_to_int(curr_version)
# Check if current bazel version can be detected properly.
if not curr_version_int:
print('WARNING: current bazel installation is not a release version.')
print('Make sure you are running at least bazel %s' % min_version)
return curr_version
print('You have bazel %s installed.' % curr_version)
if curr_version_int < min_version_int:
print(
'Please upgrade your bazel installation to version %s or higher' % min_version)
sys.exit(1)
return curr_version
def get_from_env_or_user_or_default(environ_cp, var_name, ask_for_var,
var_default):
"""Get var_name either from env, or user or default.
If var_name has been set as environment variable, use the preset value, else
ask for user input. If no input is provided, the default is used.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
ask_for_var: string for how to ask for user input.
var_default: default value string.
Returns:
string value for var_name
"""
var = environ_cp.get(var_name)
if not var:
var = get_input(ask_for_var)
print('\n')
if not var:
var = var_default
return var
def prompt_loop_or_load_from_env(environ_cp,
var_name,
var_default,
ask_for_var,
check_success,
error_msg,
suppress_default_error=False,
resolve_symlinks=False,
n_ask_attempts=_DEFAULT_PROMPT_ASK_ATTEMPTS):
"""Loop over user prompts for an ENV param until receiving a valid response.
For the env param var_name, read from the environment or verify user input
until receiving valid input. When done, set var_name in the environ_cp to its
new value.
Args:
environ_cp: (Dict) copy of the os.environ.
var_name: (String) string for name of environment variable, e.g. "TF_MYVAR".
var_default: (String) default value string.
ask_for_var: (String) string for how to ask for user input.
check_success: (Function) function that takes one argument and returns a
boolean. Should return True if the value provided is considered valid. May
contain a complex error message if error_msg does not provide enough
information. In that case, set suppress_default_error to True.
error_msg: (String) String with one and only one '%s'. Formatted with each
invalid response upon check_success(input) failure.
suppress_default_error: (Bool) Suppress the above error message in favor of
one from the check_success function.
resolve_symlinks: (Bool) Translate symbolic links into the real filepath.
n_ask_attempts: (Integer) Number of times to query for valid input before
raising an error and quitting.
Returns:
[String] The value of var_name after querying for input.
Raises:
UserInputError: if a query has been attempted n_ask_attempts times without
success, assume that the user has made a scripting error, and will
continue to provide invalid input. Raise the error to avoid infinitely
looping.
"""
default = environ_cp.get(var_name) or var_default
full_query = '%s [Default is %s]: ' % (
ask_for_var,
default,
)
for _ in range(n_ask_attempts):
val = get_from_env_or_user_or_default(environ_cp, var_name, full_query,
default)
if check_success(val):
break
if not suppress_default_error:
print(error_msg % val)
environ_cp[var_name] = ''
else:
raise UserInputError(
'Invalid %s setting was provided %d times in a row. '
'Assuming to be a scripting mistake.' % (var_name, n_ask_attempts))
if resolve_symlinks and os.path.islink(val):
val = os.path.realpath(val)
environ_cp[var_name] = val
return val
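# Added hedged sketch: when the env var is already set and passes
# check_success, prompt_loop_or_load_from_env returns it without ever
# prompting ('/usr/bin' is assumed to exist; the script is Linux-only).
def _demo_prompt_loop_from_env():
    env = {'TF_MYVAR': '/usr/bin'}
    val = prompt_loop_or_load_from_env(
        env, 'TF_MYVAR', '/tmp', 'Pick a directory.',
        check_success=os.path.isdir,
        error_msg='Invalid directory. %s cannot be found.')
    assert val == '/usr/bin' and env['TF_MYVAR'] == '/usr/bin'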
def set_gcc_host_compiler_path(environ_cp):
"""Set GCC_HOST_COMPILER_PATH."""
default_gcc_host_compiler_path = which('gcc') or ''
cuda_bin_symlink = '%s/bin/gcc' % environ_cp.get('CUDA_TOOLKIT_PATH')
if os.path.islink(cuda_bin_symlink):
# os.readlink is only available in linux
default_gcc_host_compiler_path = os.path.realpath(cuda_bin_symlink)
if not _INTERACTIVE_MODE:
gcc_host_compiler_path = default_gcc_host_compiler_path
if os.path.islink(gcc_host_compiler_path):
gcc_host_compiler_path = os.path.realpath(gcc_host_compiler_path)
else:
gcc_host_compiler_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='GCC_HOST_COMPILER_PATH',
var_default=default_gcc_host_compiler_path,
ask_for_var='Please specify which gcc should be used by nvcc as the host compiler.',
check_success=os.path.exists,
resolve_symlinks=True,
error_msg='Invalid gcc path. %s cannot be found.',
)
write_action_env_to_bazelrc('GCC_HOST_COMPILER_PATH',
gcc_host_compiler_path)
def reformat_version_sequence(version_str, sequence_count):
"""Reformat the version string to have the given number of sequences.
For example:
Given (7, 2) -> 7.0
(7.0.1, 2) -> 7.0
(5, 1) -> 5
(5.0.3.2, 1) -> 5
Args:
version_str: String, the version string.
sequence_count: int, an integer.
Returns:
string, reformatted version string.
"""
v = version_str.split('.')
if len(v) < sequence_count:
v = v + (['0'] * (sequence_count - len(v)))
return '.'.join(v[:sequence_count])
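# Added illustrative sketch mirroring the docstring examples above.
def _demo_reformat_version_sequence():
    assert reformat_version_sequence('7', 2) == '7.0'
    assert reformat_version_sequence('7.0.1', 2) == '7.0'
    assert reformat_version_sequence('5.0.3.2', 1) == '5'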
def set_cuda_paths(environ_cp):
"""Set TF_CUDA_PATHS."""
ask_cuda_paths = (
'Please specify the comma-separated list of base paths to look for CUDA '
'libraries and headers. [Leave empty to use the default]: ')
tf_cuda_paths = get_from_env_or_user_or_default(
environ_cp, 'TF_CUDA_PATHS', ask_cuda_paths, '')
if tf_cuda_paths:
environ_cp['TF_CUDA_PATHS'] = tf_cuda_paths
def set_cuda_version(environ_cp):
"""Set TF_CUDA_VERSION."""
ask_cuda_version = (
'Please specify the CUDA SDK version you want to use. '
'[Leave empty to default to CUDA %s]: ') % _DEFAULT_CUDA_VERSION
tf_cuda_version = get_from_env_or_user_or_default(
environ_cp, 'TF_CUDA_VERSION', ask_cuda_version, _DEFAULT_CUDA_VERSION)
environ_cp['TF_CUDA_VERSION'] = tf_cuda_version
def set_cudnn_version(environ_cp):
"""Set TF_CUDNN_VERSION."""
ask_cudnn_version = (
'Please specify the cuDNN version you want to use. '
'[Leave empty to default to cuDNN %s]: ') % _DEFAULT_CUDNN_VERSION
tf_cudnn_version = get_from_env_or_user_or_default(
environ_cp, 'TF_CUDNN_VERSION', ask_cudnn_version,
_DEFAULT_CUDNN_VERSION)
environ_cp['TF_CUDNN_VERSION'] = tf_cudnn_version
def is_cuda_compatible(lib, cuda_ver, cudnn_ver):
"""Check compatibility between given library and cudnn/cudart libraries."""
ldd_bin = which('ldd') or '/usr/bin/ldd'
ldd_out = run_shell([ldd_bin, lib], True)
ldd_out = ldd_out.split(os.linesep)
cudnn_pattern = re.compile('.*libcudnn.so\\.?(.*) =>.*$')
cuda_pattern = re.compile('.*libcudart.so\\.?(.*) =>.*$')
cudnn = None
cudart = None
cudnn_ok = True # assume no cudnn dependency by default
cuda_ok = True # assume no cuda dependency by default
for line in ldd_out:
if 'libcudnn.so' in line:
cudnn = cudnn_pattern.search(line)
cudnn_ok = False
elif 'libcudart.so' in line:
cudart = cuda_pattern.search(line)
cuda_ok = False
if cudnn and len(cudnn.group(1)):
cudnn = convert_version_to_int(cudnn.group(1))
if cudart and len(cudart.group(1)):
cudart = convert_version_to_int(cudart.group(1))
if cudnn is not None:
cudnn_ok = (cudnn == cudnn_ver)
if cudart is not None:
cuda_ok = (cudart == cuda_ver)
return cudnn_ok and cuda_ok
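# Added hedged sketch of the version pattern used above; the sample line
# mimics typical ldd output and is fabricated for illustration only.
def _demo_cuda_version_pattern():
    line = '\tlibcudart.so.10.0 => /usr/local/cuda/lib64/libcudart.so.10.0'
    match = re.compile('.*libcudart.so\\.?(.*) =>.*$').search(line)
    assert match is not None and match.group(1) == '10.0'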
def set_tensorrt_version(environ_cp):
"""Set TF_TENSORRT_VERSION."""
if not int(environ_cp.get('TF_NEED_TENSORRT', False)):
return
ask_tensorrt_version = (
'Please specify the TensorRT version you want to use. '
'[Leave empty to default to TensorRT %s]: '
) % _DEFAULT_TENSORRT_VERSION
tf_tensorrt_version = get_from_env_or_user_or_default(
environ_cp, 'TF_TENSORRT_VERSION', ask_tensorrt_version,
_DEFAULT_TENSORRT_VERSION)
environ_cp['TF_TENSORRT_VERSION'] = tf_tensorrt_version
def set_nccl_version(environ_cp):
"""Set TF_NCCL_VERSION."""
if 'TF_NCCL_VERSION' in environ_cp:
return
ask_nccl_version = (
'Please specify the locally installed NCCL version you want to use. '
)
tf_nccl_version = get_from_env_or_user_or_default(
environ_cp, 'TF_NCCL_VERSION', ask_nccl_version, '')
environ_cp['TF_NCCL_VERSION'] = tf_nccl_version
def get_native_cuda_compute_capabilities(environ_cp):
"""Get native cuda compute capabilities.
Args:
environ_cp: copy of the os.environ.
Returns:
string of native cuda compute capabilities, separated by comma.
"""
device_query_bin = os.path.join(
environ_cp.get('CUDA_TOOLKIT_PATH'), 'extras/demo_suite/deviceQuery')
if os.path.isfile(device_query_bin) and os.access(device_query_bin,
os.X_OK):
try:
output = run_shell(device_query_bin).split('\n')
pattern = re.compile('[0-9]*\\.[0-9]*')
output = [pattern.search(x) for x in output if 'Capability' in x]
output = ','.join(x.group() for x in output if x is not None)
except subprocess.CalledProcessError:
output = ''
else:
output = ''
return output
def set_cuda_compute_capabilities(environ_cp):
"""Set TF_CUDA_COMPUTE_CAPABILITIES."""
if not _INTERACTIVE_MODE:
native_cuda_compute_capabilities = get_native_cuda_compute_capabilities(
environ_cp)
if native_cuda_compute_capabilities:
tf_cuda_compute_capabilities = native_cuda_compute_capabilities
else:
tf_cuda_compute_capabilities = _DEFAULT_CUDA_COMPUTE_CAPABILITIES
# Set TF_CUDA_COMPUTE_CAPABILITIES
environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = tf_cuda_compute_capabilities
write_action_env_to_bazelrc('TF_CUDA_COMPUTE_CAPABILITIES',
tf_cuda_compute_capabilities)
return
while True:
native_cuda_compute_capabilities = get_native_cuda_compute_capabilities(
environ_cp)
if not native_cuda_compute_capabilities:
default_cuda_compute_capabilities = _DEFAULT_CUDA_COMPUTE_CAPABILITIES
else:
default_cuda_compute_capabilities = native_cuda_compute_capabilities
ask_cuda_compute_capabilities = (
'Please specify a list of comma-separated '
'CUDA compute capabilities you want to '
'build with.\nYou can find the compute '
'capability of your device at: '
'https://developer.nvidia.com/cuda-gpus.\nPlease'
' note that each additional compute '
'capability significantly increases your '
'build time and binary size, and that '
            'we only support compute '
'capabilities >= 3.7 [Default is: %s]: ' %
default_cuda_compute_capabilities)
tf_cuda_compute_capabilities = get_from_env_or_user_or_default(
environ_cp, 'TF_CUDA_COMPUTE_CAPABILITIES',
ask_cuda_compute_capabilities, default_cuda_compute_capabilities)
# Check whether all capabilities from the input is valid
all_valid = True
# Remove all whitespace characters before splitting the string
# that users may insert by accident, as this will result in error
tf_cuda_compute_capabilities = ''.join(
tf_cuda_compute_capabilities.split())
for compute_capability in tf_cuda_compute_capabilities.split(','):
            m = re.match(r'[0-9]+\.[0-9]+', compute_capability)
if not m:
print('Invalid compute capability: %s' % compute_capability)
all_valid = False
else:
ver = float(m.group(0))
                if ver < 3.7:
                    print(
                        'ERROR: We only support CUDA compute capabilities 3.7 '
'and higher. Please re-specify the list of compute '
'capabilities excluding version %s.' % ver)
all_valid = False
if all_valid:
break
# Reset and Retry
environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = ''
# Set TF_CUDA_COMPUTE_CAPABILITIES
environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = tf_cuda_compute_capabilities
write_action_env_to_bazelrc('TF_CUDA_COMPUTE_CAPABILITIES',
tf_cuda_compute_capabilities)
def set_other_cuda_vars(environ_cp):
"""Set other CUDA related variables."""
# If CUDA is enabled, always use GPU during build and test.
# write_to_bazelrc('build --config=cuda')
pass
def validate_cuda_config(environ_cp):
"""Run find_cuda_config.py and return cuda_toolkit_path, or None."""
cuda_libraries = ['cuda', 'cudnn']
if int(environ_cp.get('TF_NEED_TENSORRT', False)):
cuda_libraries.append('tensorrt')
if environ_cp.get('TF_NCCL_VERSION', None):
cuda_libraries.append('nccl')
find_cuda_script = os.path.join(
_APOLLO_ROOT_DIR,
'third_party/gpus/find_cuda_config.py')
proc = subprocess.Popen(
[environ_cp['PYTHON_BIN_PATH'], find_cuda_script]
+ cuda_libraries,
stdout=subprocess.PIPE,
env=environ_cp)
if proc.wait():
# Errors from find_cuda_config.py were sent to stderr.
print('Asking for detailed CUDA configuration...\n')
return False
config = dict(
tuple(line.decode('ascii').rstrip().split(': '))
for line in proc.stdout)
print('Found CUDA %s in:' % config['cuda_version'])
print(' %s' % config['cuda_library_dir'])
print(' %s' % config['cuda_include_dir'])
print('Found cuDNN %s in:' % config['cudnn_version'])
print(' %s' % config['cudnn_library_dir'])
print(' %s' % config['cudnn_include_dir'])
if 'tensorrt_version' in config:
print('Found TensorRT %s in:' % config['tensorrt_version'])
print(' %s' % config['tensorrt_library_dir'])
print(' %s' % config['tensorrt_include_dir'])
if config.get('nccl_version', None):
print('Found NCCL %s in:' % config['nccl_version'])
print(' %s' % config['nccl_library_dir'])
print(' %s' % config['nccl_include_dir'])
print('\n')
environ_cp['CUDA_TOOLKIT_PATH'] = config['cuda_toolkit_path']
return True
def setup_cuda_family_config_interactively(environ_cp):
environ_save = dict(environ_cp)
for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
if validate_cuda_config(environ_cp):
cuda_env_names = [
'TF_CUDA_VERSION',
'TF_CUBLAS_VERSION',
'TF_CUDNN_VERSION',
'TF_TENSORRT_VERSION',
'TF_NCCL_VERSION',
'TF_CUDA_PATHS',
# Items below are for backwards compatibility
'CUDA_TOOLKIT_PATH',
'CUDNN_INSTALL_PATH',
'NCCL_INSTALL_PATH',
'NCCL_HDR_PATH',
'TENSORRT_INSTALL_PATH'
]
# Note: set_action_env_var above already writes to bazelrc.
for name in cuda_env_names:
if name in environ_cp:
write_action_env_to_bazelrc(name, environ_cp[name])
break
# Restore settings changed below if CUDA config could not be
# validated.
environ_cp = dict(environ_save)
# TODO(build): revisit these settings
set_cuda_version(environ_cp)
set_cudnn_version(environ_cp)
set_tensorrt_version(environ_cp)
set_nccl_version(environ_cp)
set_cuda_paths(environ_cp)
else:
raise UserInputError(
'Invalid CUDA setting were provided %d '
'times in a row. Assuming to be a scripting mistake.' %
_DEFAULT_PROMPT_ASK_ATTEMPTS)
def setup_cuda_family_config_non_interactively(environ_cp):
if not validate_cuda_config(environ_cp):
print("Cannot validate_cuda_config non-interactively. Aborting ...")
sys.exit(1)
cuda_env_names = [
'TF_CUDA_VERSION',
'TF_CUBLAS_VERSION',
'TF_CUDNN_VERSION',
'TF_TENSORRT_VERSION',
'TF_NCCL_VERSION',
'TF_CUDA_PATHS',
# Items below are for backwards compatibility
'CUDA_TOOLKIT_PATH',
'CUDNN_INSTALL_PATH',
'NCCL_INSTALL_PATH',
'NCCL_HDR_PATH',
'TENSORRT_INSTALL_PATH'
]
for name in cuda_env_names:
if name in environ_cp:
write_action_env_to_bazelrc(name, environ_cp[name])
def setup_cuda_family_config(environ_cp):
"""Setup CUDA/cuDNN/TensorRT/NCCL action env."""
if not _INTERACTIVE_MODE:
setup_cuda_family_config_non_interactively(environ_cp)
else:
setup_cuda_family_config_interactively(environ_cp)
def set_other_build_config():
build_text = """
# This config refers to building with CUDA available.
build:using_cuda --define=using_cuda=true
build:using_cuda --action_env TF_NEED_CUDA=1
build:using_cuda --crosstool_top=@local_config_cuda//crosstool:toolchain
# This config refers to building CUDA with nvcc.
build:cuda --config=using_cuda
build:cuda --define=using_cuda_nvcc=true
build:tensorrt --action_env TF_NEED_TENSORRT=1
"""
with open(_APOLLO_BAZELRC, 'a') as f:
f.write(build_text)
write_build_var_to_bazelrc('teleop', 'WITH_TELEOP')
def main():
if not is_linux():
        raise ValueError('Currently, only Linux is supported.')
global _APOLLO_ROOT_DIR
global _APOLLO_BAZELRC
global _APOLLO_CURRENT_BAZEL_VERSION
global _APOLLO_INSIDE_DOCKER
global _INTERACTIVE_MODE
global _APOLLO_DOCKER_STAGE
parser = argparse.ArgumentParser()
parser.add_argument(
'--output_file',
type=str,
default='.apollo.bazelrc',
help='Path of the bazelrc file to write to (relative to APOLLO_ROOT_DIR)')
parser.add_argument('--interactive', type=str2bool, nargs='?',
const=True, default=True,
help='Run this script interactively')
args = parser.parse_args()
_APOLLO_ROOT_DIR = default_root_dir()
_APOLLO_BAZELRC = os.path.join(_APOLLO_ROOT_DIR, args.output_file)
_APOLLO_INSIDE_DOCKER = inside_docker()
_APOLLO_DOCKER_STAGE = docker_stage()
_INTERACTIVE_MODE = args.interactive
    # Make a copy of os.environ so functions can get and set environment
    # variables without mutating the real environment.
environ_cp = dict(os.environ)
try:
current_bazel_version = check_bazel_version(_APOLLO_MIN_BAZEL_VERSION)
except subprocess.CalledProcessError as e:
print('Error checking bazel version: ',
e.output.decode('UTF-8').strip())
raise e
_APOLLO_CURRENT_BAZEL_VERSION = convert_version_to_int(
current_bazel_version)
reset_apollo_bazelrc()
setup_common_dirs(environ_cp)
setup_python(environ_cp)
environ_cp['TF_NEED_CUDA'] = '1'
# build:gpu --config=using_cuda
write_to_bazelrc('build:gpu --config=cuda')
if _APOLLO_DOCKER_STAGE == "dev":
environ_cp['TF_NEED_TENSORRT'] = '1'
write_to_bazelrc('build:gpu --config=tensorrt')
write_blank_line_to_bazelrc()
setup_cuda_family_config(environ_cp)
set_cuda_compute_capabilities(environ_cp)
if not _APOLLO_INSIDE_DOCKER and 'LD_LIBRARY_PATH' in environ_cp:
write_action_env_to_bazelrc('LD_LIBRARY_PATH',
environ_cp.get('LD_LIBRARY_PATH'))
# Set up which gcc nvcc should use as the host compiler
set_gcc_host_compiler_path(environ_cp)
set_other_cuda_vars(environ_cp)
set_other_build_config()
if __name__ == '__main__':
main()
| ApolloAuto/apollo | tools/bootstrap.py | Python | apache-2.0 | 36,519 |
# -*- coding: utf-8 -*-
'''
Tests payment
:copyright: (c) 2015 by Openlabs Technologies & Consulting (P) Ltd.
:license: GPLv3, see LICENSE for more details
'''
import unittest
import random
from ast import literal_eval
from decimal import Decimal
import json
from datetime import date
import trytond.tests.test_tryton
from trytond.tests.test_tryton import POOL, USER, DB_NAME, CONTEXT
from trytond.config import config
from trytond.transaction import Transaction
from nereid import current_user
from test_checkout import BaseTestCheckout
config.set('email', 'from', '[email protected]')
class TestCheckoutPayment(BaseTestCheckout):
"Test the payment Step"
def setUp(self):
super(TestCheckoutPayment, self).setUp()
trytond.tests.test_tryton.install_module(
'payment_gateway_authorize_net'
)
def _process_sale_by_completing_payments(self, sales):
"""Process sale and complete payments.
"""
self.Sale.process(sales)
self.Sale.process_all_pending_payments()
def create_payment_profile(self, party, gateway):
"""
Create a payment profile for the party
"""
AddPaymentProfileWizard = POOL.get(
'party.party.payment_profile.add', type='wizard'
)
# create a profile
profile_wiz = AddPaymentProfileWizard(
AddPaymentProfileWizard.create()[0]
)
profile_wiz.card_info.party = party.id
profile_wiz.card_info.address = party.addresses[0].id
profile_wiz.card_info.provider = gateway.provider
profile_wiz.card_info.gateway = gateway
profile_wiz.card_info.owner = party.name
profile_wiz.card_info.number = '4111111111111111'
profile_wiz.card_info.expiry_month = '11'
profile_wiz.card_info.expiry_year = '2018'
profile_wiz.card_info.csc = '353'
with Transaction().set_context(return_profile=True):
return profile_wiz.transition_add()
def _create_regd_user_order(self, client, quantity=None):
"""
A helper function that creates an order for a regd user.
This is to avoid clutter within the tests below
"""
if not quantity:
quantity = random.randrange(10, 100)
client.post(
'/cart/add', data={
'product': self.product1.id,
'quantity': quantity,
}
)
# Sign-in
rv = client.post(
'/checkout/sign-in', data={
'email': '[email protected]',
'password': 'password',
'checkout_mode': 'account',
}
)
country = self.Country(self.available_countries[0])
subdivision = country.subdivisions[0]
rv = client.post(
'/checkout/shipping-address',
data={
'name': 'Sharoon Thomas',
'street': 'Biscayne Boulevard',
'streetbis': 'Apt. 1906, Biscayne Park',
'zip': 'FL33137',
'city': 'Miami',
'country': country.id,
'subdivision': subdivision.id,
}
)
# Post to payment delivery-address with same flag
rv = client.post(
'/checkout/payment',
data={'use_shipment_address': 'True'}
)
self.assertEqual(rv.status_code, 200)
def _create_guest_order(self, client, quantity=None):
"""
A helper function that creates an order for a guest user.
This is to avoid clutter within the tests below
"""
if not quantity:
quantity = random.randrange(10, 100)
client.post(
'/cart/add', data={
'product': self.product1.id,
'quantity': quantity
}
)
# Sign-in
rv = client.post(
'/checkout/sign-in', data={
'email': '[email protected]',
'checkout_mode': 'guest',
}
)
country = self.Country(self.available_countries[0])
subdivision = country.subdivisions[0]
rv = client.post(
'/checkout/shipping-address',
data={
'name': 'Sharoon Thomas',
'street': 'Biscayne Boulevard',
'streetbis': 'Apt. 1906, Biscayne Park',
'zip': 'FL33137',
'city': 'Miami',
'country': country.id,
'subdivision': subdivision.id,
}
)
# Post to payment delivery-address with same flag
rv = client.post(
'/checkout/payment',
data={'use_shipment_address': 'True'}
)
self.assertEqual(rv.status_code, 200)
def _create_cheque_payment_method(self):
"""
A helper function that creates the cheque gateway and assigns
it to the websites.
"""
PaymentGateway = POOL.get('payment_gateway.gateway')
NereidWebsite = POOL.get('nereid.website')
PaymentMethod = POOL.get('nereid.website.payment_method')
Journal = POOL.get('account.journal')
cash_journal, = Journal.search([
('name', '=', 'Cash')
])
gateway = PaymentGateway(
name='Offline Payment Methods',
journal=cash_journal,
provider='self',
method='manual',
)
gateway.save()
website, = NereidWebsite.search([])
payment_method = PaymentMethod(
name='Cheque',
gateway=gateway,
website=website
)
payment_method.save()
return payment_method
def _create_auth_net_gateway_for_site(self):
"""
A helper function that creates the authorize.net gateway and assigns
it to the websites.
"""
PaymentGateway = POOL.get('payment_gateway.gateway')
NereidWebsite = POOL.get('nereid.website')
Journal = POOL.get('account.journal')
cash_journal, = Journal.search([
('name', '=', 'Cash')
])
gateway = PaymentGateway(
name='Authorize.net',
journal=cash_journal,
provider='authorize_net',
method='credit_card',
authorize_net_login='327deWY74422',
authorize_net_transaction_key='32jF65cTxja88ZA2',
test=True
)
gateway.save()
websites = NereidWebsite.search([])
NereidWebsite.write(websites, {
'accept_credit_card': True,
'save_payment_profile': True,
'credit_card_gateway': gateway.id,
})
return gateway
def test_0005_no_skip_signin(self):
"Ensure that guest orders cant directly skip to enter shipping address"
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
with app.test_client() as c:
c.post(
'/cart/add', data={
'product': self.product1.id, 'quantity': 5
}
)
rv = c.get('/checkout/payment')
self.assertEqual(rv.status_code, 302)
self.assertTrue(
rv.location.endswith('/checkout/sign-in')
)
def test_0010_no_skip_shipping_address(self):
"""
        Ensure that guest orders can't directly skip to payment without a
        valid shipment_address.
        Once a shipment address exists, it should be possible to get the
        page even without an invoice_address.
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
country = self.Country(self.available_countries[0])
subdivision = country.subdivisions[0]
with app.test_client() as c:
c.post(
'/cart/add', data={
'product': self.product1.id, 'quantity': 5
}
)
# Sign-in
rv = c.post(
'/checkout/sign-in', data={
'email': '[email protected]',
'checkout_mode': 'guest',
}
)
# redirect to shipment address page
self.assertEqual(rv.status_code, 302)
self.assertTrue(
rv.location.endswith('/checkout/shipping-address')
)
rv = c.post(
'/checkout/shipping-address',
data={
'name': 'Sharoon Thomas',
'street': 'Biscayne Boulevard',
'streetbis': 'Apt. 1906, Biscayne Park',
'zip': 'FL33137',
'city': 'Miami',
'country': country.id,
'subdivision': subdivision.id,
}
)
self.assertEqual(rv.status_code, 302)
rv = c.get('/checkout/payment')
self.assertEqual(rv.status_code, 200)
def test_0020_no_skip_invoice_address(self):
"""
While possible to view the payment_method page without a
billing_address, it should not be possible to complete payment without
it.
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
country = self.Country(self.available_countries[0])
subdivision = country.subdivisions[0]
with app.test_client() as c:
c.post(
'/cart/add', data={
'product': self.product1.id, 'quantity': 5
}
)
# Sign-in
rv = c.post(
'/checkout/sign-in', data={
'email': '[email protected]',
'checkout_mode': 'guest',
}
)
rv = c.post(
'/checkout/shipping-address',
data={
'name': 'Sharoon Thomas',
'street': 'Biscayne Boulevard',
'streetbis': 'Apt. 1906, Biscayne Park',
'zip': 'FL33137',
'city': 'Miami',
'country': country.id,
'subdivision': subdivision.id,
}
)
                # GET requests get served
rv = c.get('/checkout/payment')
self.assertEqual(rv.status_code, 200)
# POST redirects to billing address
rv = c.post('/checkout/payment', data={})
# redirect to shipment address page
self.assertEqual(rv.status_code, 302)
self.assertTrue(
rv.location.endswith('/checkout/billing-address')
)
def test_0030_address_with_payment(self):
"Send address along with payment"
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
Sale = POOL.get('sale.sale')
country = self.Country(self.available_countries[0])
subdivision = country.subdivisions[0]
with app.test_client() as c:
c.post(
'/cart/add', data={
'product': self.product1.id, 'quantity': 5
}
)
# Sign-in
rv = c.post(
'/checkout/sign-in', data={
'email': '[email protected]',
'checkout_mode': 'guest',
}
)
rv = c.post(
'/checkout/shipping-address',
data={
'name': 'Sharoon Thomas',
'street': 'Biscayne Boulevard',
'streetbis': 'Apt. 1906, Biscayne Park',
'zip': 'FL33137',
'city': 'Miami',
'country': country.id,
'subdivision': subdivision.id,
}
)
# Post to payment delivery-address with same flag
rv = c.post(
'/checkout/payment',
data={'use_shipment_address': 'True'}
)
self.assertEqual(rv.status_code, 200)
# Assert that just one address was created
party, = self.Party.search([
('contact_mechanisms.value', '=', '[email protected]'),
('contact_mechanisms.type', '=', 'email'),
])
self.assertTrue(party)
self.assertEqual(len(party.addresses), 1)
address, = party.addresses
self.assertEqual(address.street, 'Biscayne Boulevard')
sales = Sale.search([
('shipment_address', '=', address.id),
('invoice_address', '=', address.id),
])
self.assertEqual(len(sales), 1)
def test_0100_guest_credit_card(self):
"Guest - Credit Card"
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
Sale = POOL.get('sale.sale')
with app.test_client() as c:
self._create_guest_order(c)
# Try to pay using credit card
rv = c.post(
'/checkout/payment',
data={
'owner': 'Joe Blow',
'number': '4111111111111111',
'expiry_year': '2018',
'expiry_month': '01',
'cvv': '911',
}
)
                # though the card is there, the website is not configured
                # to accept credit cards as there is no gateway defined.
self.assertEqual(rv.status_code, 200)
# Define a new payment gateway
self._create_auth_net_gateway_for_site()
with app.test_client() as c:
self._create_guest_order(c)
# Try to pay using credit card
rv = c.post(
'/checkout/payment',
data={
'owner': 'Joe Blow',
'number': '4111111111111111',
'expiry_year': '2018',
'expiry_month': '01',
'cvv': '911',
}
)
self.assertEqual(rv.status_code, 302)
self.assertTrue('/order/' in rv.location)
self.assertTrue('access_code' in rv.location)
sale, = Sale.search([('state', '=', 'confirmed')])
# Process sale with payments
self._process_sale_by_completing_payments([sale])
payment_transaction, = sale.gateway_transactions
self.assertEqual(payment_transaction.amount, sale.total_amount)
self.assertFalse(sale.payment_available)
self.assertTrue(sale.email_sent)
def test_0105_update_guest_name_with_address_name(self):
"Check if guest user name is updated as per billing address"
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
Sale = POOL.get('sale.sale')
with app.test_client() as c:
self._create_guest_order(c)
# Define a new payment gateway
self._create_auth_net_gateway_for_site()
# Check party name on checkout
party, = self.Party.search([
('contact_mechanisms.value', '=', '[email protected]'),
('contact_mechanisms.type', '=', 'email')
])
self.assertEqual(
party.name, 'Guest with email: [email protected]'
)
# Try to pay using credit card
rv = c.post(
'/checkout/payment',
data={
'owner': 'Joe Blow',
'number': '4111111111111111',
'expiry_year': '2018',
'expiry_month': '01',
'cvv': '911',
}
)
self.assertEqual(rv.status_code, 302)
self.assertTrue('/order/' in rv.location)
self.assertTrue('access_code' in rv.location)
sale, = Sale.search([('state', '=', 'confirmed')])
# Party name is updated with the name on shipping address
party, = self.Party.search([
('contact_mechanisms.value', '=', '[email protected]'),
('contact_mechanisms.type', '=', 'email')
])
self.assertEqual(party.name, 'Sharoon Thomas')
def test_0110_guest_alternate_payment(self):
"Guest - Alternate Payment Method"
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
Sale = POOL.get('sale.sale')
# Define a new payment gateway
cheque_method = self._create_cheque_payment_method()
with app.test_client() as c:
self._create_guest_order(c)
                # Pay using the cheque (alternate) payment method
rv = c.post(
'/checkout/payment',
data={'alternate_payment_method': cheque_method.id}
)
self.assertEqual(rv.status_code, 302)
self.assertTrue('/order/' in rv.location)
self.assertTrue('access_code' in rv.location)
sale, = Sale.search([('state', '=', 'confirmed')])
# Process sale with payments
self._process_sale_by_completing_payments([sale])
payment_transaction, = sale.gateway_transactions
self.assertEqual(payment_transaction.amount, sale.total_amount)
self.assertEqual(payment_transaction.state, 'completed')
def test_0120_guest_profile_fail(self):
"Guest - Fucks with profile"
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
# Define a new payment gateway
self._create_auth_net_gateway_for_site()
with app.test_client() as c:
self._create_guest_order(c)
                # Try to pay with a bogus payment profile id
rv = c.post(
'/checkout/payment', data={
'payment_profile': 1
}
)
self.assertEqual(rv.status_code, 200)
payment_form_errors, _ = literal_eval(rv.data)
self.assertTrue('payment_profile' in payment_form_errors)
def test_0200_regd_new_credit_card_wo_save(self):
"Regd User - Credit Card"
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
Sale = POOL.get('sale.sale')
with app.test_client() as c:
self._create_regd_user_order(c)
# Try to pay using credit card
rv = c.post(
'/checkout/payment',
data={
'owner': 'Joe Blow',
'number': '4111111111111111',
'expiry_year': '2018',
'expiry_month': '01',
'cvv': '911',
'add_card_to_profiles': '',
}
)
                # though the card is there, the website is not configured
                # to accept credit cards as there is no gateway defined.
self.assertEqual(rv.status_code, 200)
# Define a new payment gateway
self._create_auth_net_gateway_for_site()
with app.test_client() as c:
self._create_regd_user_order(c)
# Try to pay using credit card
rv = c.post(
'/checkout/payment',
data={
'owner': 'Joe Blow',
'number': '4111111111111111',
'expiry_year': '2018',
'expiry_month': '01',
'cvv': '911',
'add_card_to_profiles': '',
}
)
self.assertEqual(rv.status_code, 302)
self.assertTrue('/order/' in rv.location)
sale, = Sale.search([('state', '=', 'confirmed')])
# Process sale with payments
self._process_sale_by_completing_payments([sale])
payment_transaction, = sale.gateway_transactions
self.assertEqual(payment_transaction.amount, sale.total_amount)
self.assertFalse(sale.payment_available)
                # The payment profile gets saved regardless of the flag.
self.assertEqual(len(sale.party.payment_profiles), 1)
def test_0205_regd_new_credit_card(self):
"Regd User - Credit Card and save it"
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
Sale = POOL.get('sale.sale')
with app.test_client() as c:
self._create_regd_user_order(c)
# Try to pay using credit card
rv = c.post(
'/checkout/payment',
data={
'owner': 'Joe Blow',
'number': '4111111111111111',
'expiry_year': '2018',
'expiry_month': '01',
'cvv': '911',
'add_card_to_profiles': 'y',
}
)
                # though the card is there, the website is not configured
                # to accept credit cards as there is no gateway defined.
self.assertEqual(rv.status_code, 200)
# Define a new payment gateway
self._create_auth_net_gateway_for_site()
with app.test_client() as c:
self._create_regd_user_order(c)
# Try to pay using credit card
rv = c.post(
'/checkout/payment',
data={
'owner': 'Joe Blow',
'number': '4111111111111111',
'expiry_year': '2018',
'expiry_month': '01',
'cvv': '911',
'add_card_to_profiles': 'y',
}
)
self.assertEqual(rv.status_code, 302)
self.assertTrue('/order/' in rv.location)
sale, = Sale.search([('state', '=', 'confirmed')])
# Process sale with payments
self._process_sale_by_completing_payments([sale])
payment_transaction, = sale.gateway_transactions
self.assertEqual(payment_transaction.amount, sale.total_amount)
self.assertFalse(sale.payment_available)
                # Ensure that the card is saved
self.assertEqual(len(sale.party.payment_profiles), 1)
def test_0210_regd_alternate_payment(self):
"Regd User - Alternate Payment Method"
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
Sale = POOL.get('sale.sale')
# Define a new payment gateway
cheque_method = self._create_cheque_payment_method()
with app.test_client() as c:
self._create_regd_user_order(c, 10)
                # Pay using the cheque (alternate) payment method
rv = c.post(
'/checkout/payment',
data={'alternate_payment_method': cheque_method.id}
)
self.assertEqual(rv.status_code, 302)
self.assertTrue('/order/' in rv.location)
sale, = Sale.search([('state', '=', 'confirmed')])
# Process sale with payments
self._process_sale_by_completing_payments([sale])
payment_transaction, = sale.gateway_transactions
self.assertEqual(payment_transaction.amount, sale.total_amount)
self.assertEqual(payment_transaction.state, 'completed')
def test_0220_regd_profile_fail(self):
"Regd User - Fucks with profile"
NereidUser = POOL.get('nereid.user')
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
user, = NereidUser.search([
('email', '=', '[email protected]')
])
self._create_auth_net_gateway_for_site()
with app.test_client() as c:
self._create_regd_user_order(c)
                # Try to pay with a bogus payment profile id
rv = c.post(
'/checkout/payment', data={
'payment_profile': 1
}
)
self.assertEqual(rv.status_code, 200)
payment_form_errors, _ = literal_eval(rv.data)
self.assertTrue('payment_profile' in payment_form_errors)
def test_0225_regd_profile_success(self):
"Regd User - Correct with profile"
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
Sale = POOL.get('sale.sale')
# Define a new payment gateway
self._create_auth_net_gateway_for_site()
with app.test_client() as c:
self._create_regd_user_order(c)
# Try to pay using credit card
rv = c.post(
'/checkout/payment',
data={
'owner': 'Joe Blow',
'number': '4111111111111111',
'expiry_year': '2018',
'expiry_month': '01',
'cvv': '911',
'add_card_to_profiles': 'y',
}
)
self.assertEqual(rv.status_code, 302)
self.assertTrue('/order/' in rv.location)
sale, = Sale.search([('state', '=', 'confirmed')])
# Process sale with payments
self._process_sale_by_completing_payments([sale])
payment_transaction, = sale.gateway_transactions
self.assertEqual(payment_transaction.amount, sale.total_amount)
self.assertFalse(sale.payment_available)
# Ensure that the card is saved
self.assertEqual(len(sale.party.payment_profiles), 1)
payment_profile, = sale.party.payment_profiles
with app.test_client() as c:
self._create_regd_user_order(c)
                # Pay again using the saved payment profile
rv = c.post(
'/checkout/payment',
data={'payment_profile': payment_profile.id}
)
self.assertEqual(rv.status_code, 302)
self.assertTrue('/order/' in rv.location)
sale, = Sale.search([
('id', '!=', sale.id), # Not previous sale
('state', '=', 'confirmed'),
])
# Process sale with payments
self._process_sale_by_completing_payments([sale])
payment_transaction, = sale.gateway_transactions
self.assertEqual(payment_transaction.amount, sale.total_amount)
self.assertFalse(sale.payment_available)
# Ensure that the card is saved (the original one)
self.assertEqual(len(sale.party.payment_profiles), 1)
def test_0230_validate_payment_profile(self):
"""
Selecting billing address as saved address in payment profile
"""
Address = POOL.get('party.address')
Profile = POOL.get('party.payment_profile')
Gateway = POOL.get('payment_gateway.gateway')
Journal = POOL.get('account.journal')
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
self.party2, = self.Party.create([{
'name': 'Registered User',
}])
with app.test_client() as c:
self.login(c, '[email protected]', 'password')
address, = Address.create([{
'party': current_user.party.id,
'name': 'Name',
'street': 'Street',
'streetbis': 'StreetBis',
'zip': 'zip',
'city': 'City',
'country': self.available_countries[0].id,
'subdivision':
self.available_countries[0].subdivisions[0].id,
}])
self._create_auth_net_gateway_for_site()
self.assertEqual(
len(current_user.party.payment_profiles), 0
)
gateway, = Gateway.search(['name', '=', 'Authorize.net'])
cash_journal, = Journal.search([
('name', '=', 'Cash')
])
profile, = Profile.create([{
'last_4_digits': '1111',
'sequence': '10',
'expiry_month': '01',
'expiry_year': '2018',
'address': address.id,
'party': current_user.party.id,
'provider_reference': '26037832',
'gateway': gateway.id,
'authorize_profile_id': '28545177',
}])
self.assertEqual(
len(current_user.party.payment_profiles), 1
)
self._create_regd_user_order(c)
                # Try to pay using a non-existent payment profile
rv = c.post(
'/checkout/payment',
data={
'payment_profile': '23'
}
)
self.assertTrue(
"Not a valid choice" in rv.data
)
self._create_regd_user_order(c)
                # Pay using the saved payment profile
rv = c.post(
'/checkout/payment',
data={
'payment_profile':
current_user.party.payment_profiles[0].id
}
)
self.assertEqual(rv.status_code, 302)
self.assertTrue('/order/' in rv.location)
sale, = self.Sale.search([('state', '=', 'confirmed')])
self.assertEqual(sale.invoice_address.id, address.id)
def test_0240_add_comment_to_sale(self):
"""
Add comment to sale for logged in user.
"""
Address = POOL.get('party.address')
Profile = POOL.get('party.payment_profile')
Gateway = POOL.get('payment_gateway.gateway')
Journal = POOL.get('account.journal')
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
self.party2, = self.Party.create([{
'name': 'Registered User',
}])
with app.test_client() as c:
self.login(c, '[email protected]', 'password')
address, = Address.create([{
'party': current_user.party.id,
'name': 'Name',
'street': 'Street',
'streetbis': 'StreetBis',
'zip': 'zip',
'city': 'City',
'country': self.available_countries[0].id,
'subdivision':
self.available_countries[0].subdivisions[0].id,
}])
self._create_auth_net_gateway_for_site()
self.assertEqual(
len(current_user.party.payment_profiles), 0
)
gateway, = Gateway.search(['name', '=', 'Authorize.net'])
cash_journal, = Journal.search([
('name', '=', 'Cash')
])
profile, = Profile.create([{
'last_4_digits': '1111',
'sequence': '10',
'expiry_month': '01',
'expiry_year': '2018',
'address': address.id,
'party': current_user.party.id,
'provider_reference': '26037832',
'gateway': gateway.id,
'authorize_profile_id': '28545177',
}])
self.assertEqual(
len(current_user.party.payment_profiles), 1
)
self._create_regd_user_order(c)
                # Pay using the saved payment profile
rv = c.post(
'/checkout/payment',
data={
'payment_profile':
current_user.party.payment_profiles[0].id
}
)
self.assertEqual(rv.status_code, 302)
self.assertTrue('/order/' in rv.location)
sale, = self.Sale.search([('state', '=', 'confirmed')])
rv = c.post(
'/order/%s/add-comment' % (sale.id,), data={
'comment': 'This is comment on sale!'
}, headers=[('X-Requested-With', 'XMLHttpRequest')]
)
json_data = json.loads(rv.data)['message']
self.assertEqual('Comment Added', json_data)
self.assertEqual('This is comment on sale!', sale.comment)
rv = c.post(
'/order/%s/add-comment' % (sale.id,), data={
'comment': 'This is comment!'
}
)
                self.assertEqual(rv.status_code, 302)
def test_0245_no_comment_on_cancelled_sale(self):
"""
Trying to comment on a cancelled sale should return 403.
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
with app.test_client() as c:
self.login(c, '[email protected]', 'password')
# Create sale.
sale, = self.Sale.create([{
'party': self.registered_user.party.id,
'company': self.company.id,
'currency': self.usd.id,
}])
# Cancel the sale order now.
self.Sale.cancel([sale])
# Try commenting.
rv = c.post(
'/order/%s/add-comment' % (sale.id,), data={
'comment': 'This is comment!'
}
)
self.assertEqual(rv.status_code, 403)
def test_0250_add_comment_to_guest_sale(self):
"""
Add comment to sale for guest user
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
Sale = POOL.get('sale.sale')
with app.test_client() as c:
self._create_guest_order(c)
# Define a new payment gateway
self._create_auth_net_gateway_for_site()
with app.test_client() as c:
self._create_guest_order(c)
# Try to pay using credit card
rv = c.post(
'/checkout/payment',
data={
'owner': 'Joe Blow',
'number': '4111111111111111',
'expiry_year': '2018',
'expiry_month': '01',
'cvv': '911',
}
)
self.assertEqual(rv.status_code, 302)
self.assertTrue('/order/' in rv.location)
self.assertTrue('access_code' in rv.location)
sale, = Sale.search([('state', '=', 'confirmed')])
rv = c.post(
'/order/%s/add-comment' % (sale.id, ), data={
'comment': 'This is comment on sale!'
}, headers=[('X-Requested-With', 'XMLHttpRequest')]
)
self.assertEqual(rv.status_code, 403)
rv = c.post(
'/order/%s/add-comment?access_code=%s' % (
sale.id, sale.guest_access_code,
), data={
'comment': 'This is comment on sale!'
}, headers=[('X-Requested-With', 'XMLHttpRequest')]
)
json_data = json.loads(rv.data)['message']
self.assertEqual('Comment Added', json_data)
self.assertEqual('This is comment on sale!', sale.comment)
def test_0300_access_order_page(self):
"""
Test access order page
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
Sale = POOL.get('sale.sale')
# Define a new payment gateway
self._create_auth_net_gateway_for_site()
with app.test_client() as c:
self._create_guest_order(c)
# pay using credit card
rv = c.post(
'/checkout/payment',
data={
'owner': 'Joe Blow',
'number': '4111111111111111',
'expiry_year': '2018',
'expiry_month': '01',
'cvv': '911',
}
)
self.assertEqual(rv.status_code, 302)
self.assertTrue('/order/' in rv.location)
self.assertTrue('access_code' in rv.location)
sale, = Sale.search([('state', '=', 'confirmed')])
rv = c.get('/order/%s' % (sale.id, ))
self.assertEqual(rv.status_code, 302) # Redirect to login
rv = c.get(
'/order/%s?access_code=%s' % (sale.id, "wrong-access-code")
)
self.assertEqual(rv.status_code, 403)
rv = c.get(
'/order/%s?access_code=%s' % (
sale.id, sale.guest_access_code
)
)
self.assertEqual(rv.status_code, 200)
def test_0305_orders_page_regd(self):
"""
Accesses orders page for a registered user.
"""
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
Sale = POOL.get('sale.sale')
party = self.registered_user.party
with app.test_client() as c:
self.login(c, '[email protected]', 'password')
# Create sales.
with Transaction().set_context(company=self.company.id):
sale1, = Sale.create([{
'reference': 'Sale1',
'sale_date': date.today(),
'invoice_address': party.addresses[0].id,
'shipment_address': party.addresses[0].id,
'party': party.id,
'lines': [
('create', [{
'type': 'line',
'quantity': 2,
'unit': self.uom,
'unit_price': 200,
'description': 'Test description1',
'product': self.product.id,
}])
]}])
sale2, = Sale.create([{
'reference': 'Sale2',
'sale_date': date.today(),
'invoice_address': party.addresses[0].id,
'shipment_address': party.addresses[0].id,
'state': 'done', # For testing purpose.
'party': party.id,
'lines': [
('create', [{
'type': 'line',
'quantity': 2,
'unit': self.uom,
'unit_price': 200,
'description': 'Test description1',
'product': self.product.id,
}])
]}])
sale3, = Sale.create([{
'reference': 'Sale3',
'invoice_address': party.addresses[0].id,
'shipment_address': party.addresses[0].id,
'party': party.id,
'sale_date': '2014-06-06', # For testing purpose.
'lines': [
('create', [{
'type': 'line',
'quantity': 2,
'unit': self.uom,
'unit_price': 200,
'description': 'Test description1',
'product': self.product.id,
}])
]}])
Sale.quote([sale1])
Sale.confirm([sale1])
rv = c.get('/orders?filter_by=recent')
self.assertIn('recent', rv.data)
self.assertIn('#{0}'.format(sale1.id), rv.data)
self.assertIn('#{0}'.format(sale2.id), rv.data)
self.assertNotIn('#{0}'.format(sale3.id), rv.data)
rv = c.get('/orders?filter_by=done')
self.assertIn('done', rv.data)
self.assertIn('#{0}'.format(sale2.id), rv.data)
self.assertNotIn('#{0}'.format(sale1.id), rv.data)
self.assertNotIn('#{0}'.format(sale3.id), rv.data)
Sale.cancel([sale3])
rv = c.get('/orders?filter_by=canceled')
self.assertIn('cancel', rv.data)
self.assertIn('#{0}'.format(sale3.id), rv.data)
self.assertNotIn('#{0}'.format(sale1.id), rv.data)
self.assertNotIn('#{0}'.format(sale2.id), rv.data)
rv = c.get('/orders?filter_by=archived')
self.assertIn('archived', rv.data)
self.assertIn('#{0}'.format(sale3.id), rv.data)
self.assertNotIn('#{0}'.format(sale1.id), rv.data)
self.assertNotIn('#{0}'.format(sale2.id), rv.data)
def test_0310_guest_user_payment_using_credit_card(self):
"""
===================================
Total Sale Amount | $100
Payment Authorize On: | 'manual'
Payment Capture On: | 'sale_process'
===================================
Total Payment Lines | 1
Payment 1 | $100
===================================
"""
Sale = POOL.get('sale.sale')
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
auth_gateway = self._create_auth_net_gateway_for_site()
with app.test_client() as c:
self._create_guest_order(c, 10)
sale, = Sale.search([], limit=1)
self.assertEqual(sale.total_amount, Decimal('100'))
self.assertEqual(sale.payment_total, Decimal('0'))
self.assertEqual(sale.payment_collected, Decimal('0'))
self.assertEqual(sale.payment_captured, Decimal('0'))
self.assertEqual(sale.payment_available, Decimal('0'))
self.assertEqual(sale.payment_authorized, Decimal('0'))
# pay using credit card
rv = c.post(
'/checkout/payment',
data={
'owner': 'Joe Blow',
'number': '4111111111111111',
'expiry_year': '2018',
'expiry_month': '01',
'cvv': '911',
'add_card_to_profiles': True
}
)
self.assertEqual(rv.status_code, 302)
self.assertEqual(sale.state, 'confirmed')
self.assertEqual(len(sale.payments), 1)
sale_payment, = sale.payments
self.assertEqual(sale_payment.method, auth_gateway.method)
self.assertEqual(sale.payment_total, Decimal('100'))
self.assertEqual(sale.payment_available, Decimal('100'))
self.assertEqual(sale.payment_collected, Decimal('0'))
self.assertEqual(sale.payment_captured, Decimal('0'))
self.assertEqual(sale.payment_authorized, Decimal('0'))
with Transaction().set_context(company=self.company.id):
self.Sale.process([sale])
self.Sale.process_all_pending_payments()
self.assertEqual(sale.payment_total, Decimal('100'))
self.assertEqual(sale.payment_available, Decimal('0'))
self.assertEqual(sale.payment_collected, Decimal('100'))
self.assertEqual(sale.payment_captured, Decimal('100'))
self.assertEqual(sale.payment_authorized, Decimal('0'))
def test_0330_registered_user_payment_using_payment_profile(self):
"""
===================================
Total Sale Amount | $100
Payment Authorize On: | 'manual'
Payment Capture On: | 'sale_process'
===================================
Total Payment Lines | 1
Payment 1 | $100
===================================
"""
Sale = POOL.get('sale.sale')
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
auth_gateway = self._create_auth_net_gateway_for_site()
with app.test_client() as c:
self._create_regd_user_order(c, 10)
sale, = Sale.search([], limit=1)
self.assertEqual(sale.total_amount, Decimal('100'))
self.assertEqual(sale.payment_total, Decimal('0'))
self.assertEqual(sale.payment_collected, Decimal('0'))
self.assertEqual(sale.payment_captured, Decimal('0'))
self.assertEqual(sale.payment_available, Decimal('0'))
self.assertEqual(sale.payment_authorized, Decimal('0'))
gateway = self._create_auth_net_gateway_for_site()
payment_profile = self.create_payment_profile(
sale.party, gateway
)
rv = c.post(
'/checkout/payment',
data={'payment_profile': payment_profile.id}
)
self.assertEqual(rv.status_code, 302)
self.assertEqual(sale.state, 'confirmed')
self.assertEqual(len(sale.payments), 1)
sale_payment, = sale.payments
self.assertEqual(sale_payment.method, auth_gateway.method)
self.assertEqual(sale.payment_total, Decimal('100'))
self.assertEqual(sale.payment_available, Decimal('100'))
self.assertEqual(sale.payment_collected, Decimal('0'))
self.assertEqual(sale.payment_captured, Decimal('0'))
self.assertEqual(sale.payment_authorized, Decimal('0'))
with Transaction().set_context(company=self.company.id):
self.Sale.process([sale])
self.Sale.process_all_pending_payments()
self.assertEqual(sale.payment_total, Decimal('100'))
self.assertEqual(sale.payment_available, Decimal('0'))
self.assertEqual(sale.payment_collected, Decimal('100'))
self.assertEqual(sale.payment_captured, Decimal('100'))
self.assertEqual(sale.payment_authorized, Decimal('0'))
def test_0320_registered_user_payment_using_alternate_method(self):
"""
===================================
Total Sale Amount | $100
Payment Authorize On: | 'manual'
Payment Capture On: | 'sale_process'
===================================
Total Payment Lines | 1
Payment 1 | $100
===================================
"""
Sale = POOL.get('sale.sale')
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
app = self.get_app()
with app.test_client() as c:
self._create_regd_user_order(c, 10)
sale, = Sale.search([], limit=1)
self.assertEqual(sale.total_amount, Decimal('100'))
self.assertEqual(sale.payment_total, Decimal('0'))
self.assertEqual(sale.payment_collected, Decimal('0'))
self.assertEqual(sale.payment_captured, Decimal('0'))
self.assertEqual(sale.payment_available, Decimal('0'))
self.assertEqual(sale.payment_authorized, Decimal('0'))
payment_method = self._create_cheque_payment_method()
rv = c.post(
'/checkout/payment',
data={'alternate_payment_method': payment_method.id}
)
self.assertEqual(rv.status_code, 302)
self.assertEqual(sale.state, 'confirmed')
self.assertEqual(len(sale.payments), 1)
sale_payment, = sale.payments
self.assertEqual(
sale_payment.method, payment_method.gateway.method
)
self.assertEqual(sale.payment_total, Decimal('100'))
self.assertEqual(sale.payment_available, Decimal('100'))
self.assertEqual(sale.payment_collected, Decimal('0'))
self.assertEqual(sale.payment_captured, Decimal('0'))
self.assertEqual(sale.payment_authorized, Decimal('0'))
with Transaction().set_context(company=self.company.id):
self.Sale.process([sale])
self.Sale.process_all_pending_payments()
self.assertEqual(sale.payment_total, Decimal('100'))
self.assertEqual(sale.payment_available, Decimal('0'))
self.assertEqual(sale.payment_collected, Decimal('100'))
self.assertEqual(sale.payment_captured, Decimal('100'))
self.assertEqual(sale.payment_authorized, Decimal('0'))
def suite():
"Checkout test suite"
"Define suite"
test_suite = trytond.tests.test_tryton.suite()
loader = unittest.TestLoader()
test_suite.addTests(
loader.loadTestsFromTestCase(TestCheckoutPayment),
)
return test_suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| aroraumang/nereid-checkout | tests/test_payment.py | Python | gpl-3.0 | 53,036 |
# Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from __future__ import with_statement
from bisect import bisect_right
from collections import defaultdict
try:
import sqlite3 #@UnusedImport
has_sqlite = True
except ImportError:
has_sqlite = False
from whoosh.compat import integer_types, iteritems, text_type
from whoosh.fields import UnknownFieldError
from whoosh.filedb.fileindex import Segment
from whoosh.filedb.filepostings import FilePostingWriter
from whoosh.filedb.filetables import (TermIndexWriter, StoredFieldWriter,
TermVectorWriter)
from whoosh.filedb.pools import TempfilePool, DiskSet
from whoosh.store import LockError
from whoosh.support.dawg import DawgBuilder, flatten
from whoosh.support.filelock import try_for
from whoosh.util import fib
from whoosh.writing import IndexWriter, IndexingError
# Merge policies
# A merge policy is a callable that takes the Index object, the SegmentWriter
# object, and the current segment list (not including the segment being
# written), and returns an updated segment list (not including the segment
# being written).
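#
# A custom policy is just a function with this signature. A minimal sketch
# (illustrative only, not part of the whoosh API): merge segments holding
# fewer than 100 documents and keep everything else untouched.
#
#     def MERGE_TINY(writer, segments):
#         from whoosh.filedb.filereading import SegmentReader
#         keep = []
#         for seg in segments:
#             if seg.doc_count_all() < 100:
#                 reader = SegmentReader(writer.storage, writer.schema, seg)
#                 writer.add_reader(reader)
#                 reader.close()
#             else:
#                 keep.append(seg)
#         return keep
#
# and then: myindex.writer().commit(mergetype=MERGE_TINY)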
def NO_MERGE(writer, segments):
"""This policy does not merge any existing segments.
"""
return segments
def MERGE_SMALL(writer, segments):
"""This policy merges small segments, where "small" is defined using a
    heuristic based on the Fibonacci sequence.
"""
from whoosh.filedb.filereading import SegmentReader
newsegments = []
    # Sort by size using a key function, so Segment objects (which define
    # no ordering) are never compared directly when two counts are equal.
    sorted_segment_list = sorted(segments, key=lambda s: s.doc_count_all())
    total_docs = 0
    for i, seg in enumerate(sorted_segment_list):
        count = seg.doc_count_all()
        if count > 0:
total_docs += count
if total_docs < fib(i + 5):
reader = SegmentReader(writer.storage, writer.schema, seg)
writer.add_reader(reader)
reader.close()
else:
newsegments.append(seg)
return newsegments
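# Reading of the heuristic above: walking the segments from smallest to
# largest, the i-th segment is absorbed into the current writer only while
# the running document total stays below fib(i + 5); once the total
# outgrows that bound, the remaining (larger) segments are kept as they
# are. Note that empty segments are dropped entirely.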
def OPTIMIZE(writer, segments):
"""This policy merges all existing segments.
"""
from whoosh.filedb.filereading import SegmentReader
for seg in segments:
reader = SegmentReader(writer.storage, writer.schema, seg)
writer.add_reader(reader)
reader.close()
return []
def MERGE_SQUARES(writer, segments):
"""This is an alternative merge policy similar to Lucene's. It is less
optimal than the default MERGE_SMALL.
"""
from whoosh.filedb.filereading import SegmentReader
sizedsegs = [(s.doc_count_all(), s) for s in segments]
tomerge = []
for size in (10, 100, 1000, 10000, 100000):
smaller = [seg for segsize, seg in sizedsegs
if segsize < size - 1 and segsize >= size // 10]
if len(smaller) >= 10:
tomerge.extend(smaller)
for seg in smaller:
segments.remove(seg)
for seg in tomerge:
reader = SegmentReader(writer.storage, writer.schema, seg)
writer.add_reader(reader)
reader.close()
return segments
# Writer object
class SegmentWriter(IndexWriter):
def __init__(self, ix, poolclass=None, procs=0, blocklimit=128,
timeout=0.0, delay=0.1, name=None, _lk=True, **poolargs):
self.writelock = None
if _lk:
self.writelock = ix.lock("WRITELOCK")
if not try_for(self.writelock.acquire, timeout=timeout,
delay=delay):
raise LockError
info = ix._read_toc()
self.schema = info.schema
self.segments = info.segments
self.storage = storage = ix.storage
self.indexname = ix.indexname
self.is_closed = False
self.blocklimit = blocklimit
self.segment_number = info.segment_counter + 1
self.generation = info.generation + 1
self._doc_offsets = []
base = 0
for s in self.segments:
self._doc_offsets.append(base)
base += s.doc_count_all()
self.name = name or Segment.basename(self.indexname,
self.segment_number)
self.docnum = 0
self.fieldlength_totals = defaultdict(int)
self._added = False
self._unique_cache = {}
# Create a temporary segment to use its .*_filename attributes
segment = Segment(self.name, self.generation, 0, None, None, None)
# Spelling
self.wordsets = {}
self.dawg = None
if any(field.spelling for field in self.schema):
self.dawgfile = storage.create_file(segment.dawg_filename)
self.dawg = DawgBuilder(field_root=True)
# Terms index
tf = storage.create_file(segment.termsindex_filename)
ti = TermIndexWriter(tf)
# Term postings file
pf = storage.create_file(segment.termposts_filename)
pw = FilePostingWriter(pf, blocklimit=blocklimit)
# Terms writer
self.termswriter = TermsWriter(self.schema, ti, pw, self.dawg)
if self.schema.has_vectored_fields():
# Vector index
vf = storage.create_file(segment.vectorindex_filename)
self.vectorindex = TermVectorWriter(vf)
# Vector posting file
vpf = storage.create_file(segment.vectorposts_filename)
self.vpostwriter = FilePostingWriter(vpf, stringids=True)
else:
self.vectorindex = None
self.vpostwriter = None
# Stored fields file
sf = storage.create_file(segment.storedfields_filename)
self.storedfields = StoredFieldWriter(sf, self.schema.stored_names())
# Field lengths file
self.lengthfile = storage.create_file(segment.fieldlengths_filename)
# Create the pool
if poolclass is None:
if procs > 1:
from whoosh.filedb.multiproc import MultiPool
poolclass = MultiPool
else:
poolclass = TempfilePool
self.pool = poolclass(self.schema, procs=procs, **poolargs)
def _check_state(self):
if self.is_closed:
raise IndexingError("This writer is closed")
def add_field(self, fieldname, fieldspec, **kwargs):
self._check_state()
if self._added:
raise Exception("Can't modify schema after adding data to writer")
super(SegmentWriter, self).add_field(fieldname, fieldspec, **kwargs)
def remove_field(self, fieldname):
self._check_state()
if self._added:
raise Exception("Can't modify schema after adding data to writer")
super(SegmentWriter, self).remove_field(fieldname)
def _document_segment(self, docnum):
#Returns the index.Segment object containing the given document
#number.
offsets = self._doc_offsets
if len(offsets) == 1:
return 0
return bisect_right(offsets, docnum) - 1
def _segment_and_docnum(self, docnum):
#Returns an (index.Segment, segment_docnum) pair for the segment
#containing the given document number.
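        # Worked example (hypothetical sizes): with segments holding 10, 15
        # and 5 documents, self._doc_offsets == [0, 10, 25]; docnum 12 gives
        # bisect_right([0, 10, 25], 12) - 1 == 1, so the result is
        # (segments[1], 12 - 10) == (segments[1], 2).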
segmentnum = self._document_segment(docnum)
offset = self._doc_offsets[segmentnum]
segment = self.segments[segmentnum]
return segment, docnum - offset
def has_deletions(self):
"""
:returns: True if this index has documents that are marked deleted but
haven't been optimized out of the index yet.
"""
return any(s.has_deletions() for s in self.segments)
def delete_document(self, docnum, delete=True):
self._check_state()
if docnum >= sum(seg.doccount for seg in self.segments):
raise IndexingError("No document ID %r in this index" % docnum)
segment, segdocnum = self._segment_and_docnum(docnum)
segment.delete_document(segdocnum, delete=delete)
def deleted_count(self):
"""
:returns: the total number of deleted documents in the index.
"""
return sum(s.deleted_count() for s in self.segments)
def is_deleted(self, docnum):
segment, segdocnum = self._segment_and_docnum(docnum)
return segment.is_deleted(segdocnum)
def reader(self, reuse=None):
self._check_state()
from whoosh.filedb.fileindex import FileIndex
return FileIndex._reader(self.storage, self.schema, self.segments,
self.generation, reuse=reuse)
def add_reader(self, reader):
self._check_state()
startdoc = self.docnum
has_deletions = reader.has_deletions()
if has_deletions:
docmap = {}
fieldnames = set(self.schema.names())
# Add stored documents, vectors, and field lengths
for docnum in reader.all_doc_ids():
if (not has_deletions) or (not reader.is_deleted(docnum)):
d = dict(item for item
in iteritems(reader.stored_fields(docnum))
if item[0] in fieldnames)
# We have to append a dictionary for every document, even if
# it's empty.
self.storedfields.append(d)
if has_deletions:
docmap[docnum] = self.docnum
for fieldname in reader.schema.scorable_names():
length = reader.doc_field_length(docnum, fieldname)
if length and fieldname in fieldnames:
self.pool.add_field_length(self.docnum, fieldname,
length)
for fieldname in reader.schema.vector_names():
if (fieldname in fieldnames
and reader.has_vector(docnum, fieldname)):
vpostreader = reader.vector(docnum, fieldname)
self._add_vector_reader(self.docnum, fieldname,
vpostreader)
self.docnum += 1
# Add dawg contents to word sets for fields that require separate
# handling
for fieldname in self.schema.separate_spelling_names():
if reader.has_word_graph(fieldname):
graph = reader.word_graph(fieldname)
self.add_spell_words(fieldname, flatten(graph))
# Add postings
for fieldname, text in reader.all_terms():
if fieldname in fieldnames:
postreader = reader.postings(fieldname, text)
while postreader.is_active():
docnum = postreader.id()
valuestring = postreader.value()
if has_deletions:
newdoc = docmap[docnum]
else:
newdoc = startdoc + docnum
self.pool.add_posting(fieldname, text, newdoc,
postreader.weight(), valuestring)
postreader.next()
self._added = True
def add_document(self, **fields):
self._check_state()
schema = self.schema
docboost = self._doc_boost(fields)
# Sort the keys
fieldnames = sorted([name for name in fields.keys()
if not name.startswith("_")])
# Check if the caller gave us a bogus field
for name in fieldnames:
if name not in schema:
raise UnknownFieldError("No field named %r in %s"
% (name, schema))
storedvalues = {}
docnum = self.docnum
for fieldname in fieldnames:
value = fields.get(fieldname)
if value is None:
continue
field = schema[fieldname]
if field.indexed:
fieldboost = self._field_boost(fields, fieldname, docboost)
self.pool.add_content(docnum, fieldname, field, value,
fieldboost)
if field.separate_spelling():
# This field requires spelling words to be added in a separate
# step, instead of as part of indexing
self.add_spell_words(fieldname, field.spellable_words(value))
vformat = field.vector
if vformat:
wvs = vformat.word_values(value, field.analyzer, mode="index")
vlist = sorted((w, weight, valuestring)
for w, _, weight, valuestring in wvs)
self._add_vector(docnum, fieldname, vlist)
if field.stored:
# Caller can override the stored value by including a key
# _stored_<fieldname>
storedvalue = value
storedname = "_stored_" + fieldname
if storedname in fields:
storedvalue = fields[storedname]
storedvalues[fieldname] = storedvalue
self._added = True
self.storedfields.append(storedvalues)
self.docnum += 1
def add_spell_words(self, fieldname, words):
# Get or make a set for the words in this field
if fieldname not in self.wordsets:
self.wordsets[fieldname] = set()
wordset = self.wordsets[fieldname]
# If the in-memory set is getting big, replace it with an
# on-disk set
if has_sqlite and isinstance(wordset, set) and len(wordset) > 4096:
diskset = DiskSet(wordset)
self.wordsets[fieldname] = wordset = diskset
for word in words:
wordset.add(word)
self._added = True
def _add_wordsets(self):
dawg = self.dawg
for fieldname in self.wordsets:
ws = self.wordsets[fieldname]
ft = (fieldname,)
words = sorted(ws) if isinstance(ws, set) else iter(ws)
for text in words:
dawg.insert(ft + tuple(text))
if isinstance(ws, DiskSet):
ws.destroy()
def _add_vector(self, docnum, fieldname, vlist):
vpostwriter = self.vpostwriter
offset = vpostwriter.start(self.schema[fieldname].vector)
for text, weight, valuestring in vlist:
#assert isinstance(text, text_type), "%r is not unicode" % text
vpostwriter.write(text, weight, valuestring, 0)
vpostwriter.finish(inlinelimit=0)
self.vectorindex.add((docnum, fieldname), offset)
def _add_vector_reader(self, docnum, fieldname, vreader):
vpostwriter = self.vpostwriter
offset = vpostwriter.start(self.schema[fieldname].vector)
while vreader.is_active():
# text, weight, valuestring, fieldlen
vpostwriter.write(vreader.id(), vreader.weight(), vreader.value(),
0)
vreader.next()
vpostwriter.finish(inlinelimit=0)
self.vectorindex.add((docnum, fieldname), offset)
def _close_all(self):
self.is_closed = True
self.termswriter.close()
self.storedfields.close()
if not self.lengthfile.is_closed:
self.lengthfile.close()
if self.vectorindex:
self.vectorindex.close()
if self.vpostwriter:
self.vpostwriter.close()
def _getsegment(self):
return Segment(self.name, self.generation, self.docnum,
self.pool.fieldlength_totals(),
self.pool.fieldlength_mins(),
self.pool.fieldlength_maxes())
def commit(self, mergetype=None, optimize=False, merge=True):
"""Finishes writing and saves all additions and changes to disk.
There are four possible ways to use this method::
# Merge small segments but leave large segments, trying to
# balance fast commits with fast searching:
writer.commit()
# Merge all segments into a single segment:
writer.commit(optimize=True)
# Don't merge any existing segments:
writer.commit(merge=False)
# Use a custom merge function
writer.commit(mergetype=my_merge_function)
:param mergetype: a custom merge function taking a Writer object and
segment list as arguments, and returning a new segment list. If you
supply a ``mergetype`` function, the values of the ``optimize`` and
``merge`` arguments are ignored.
:param optimize: if True, all existing segments are merged with the
documents you've added to this writer (and the value of the
``merge`` argument is ignored).
:param merge: if False, do not merge small segments.
"""
self._check_state()
try:
if mergetype:
pass
elif optimize:
mergetype = OPTIMIZE
elif not merge:
mergetype = NO_MERGE
else:
mergetype = MERGE_SMALL
# Call the merge policy function. The policy may choose to merge
# other segments into this writer's pool
new_segments = mergetype(self, self.segments)
if self._added:
# Create a Segment object for the segment created by this
# writer
thissegment = self._getsegment()
# Tell the pool we're finished adding information, it should
# add its accumulated data to the lengths, terms index, and
# posting files.
self.pool.finish(self.termswriter, self.docnum,
self.lengthfile)
# Write out spelling files
if self.dawg:
# Insert any wordsets we've accumulated into the word graph
self._add_wordsets()
# Write out the word graph
self.dawg.write(self.dawgfile)
# Add new segment to the list of remaining segments returned by
# the merge policy function
new_segments.append(thissegment)
else:
self.pool.cleanup()
# Close all files, write a new TOC with the new segment list, and
# release the lock.
self._close_all()
from whoosh.filedb.fileindex import _write_toc, _clean_files
_write_toc(self.storage, self.schema, self.indexname,
self.generation, self.segment_number, new_segments)
# Delete leftover files
_clean_files(self.storage, self.indexname, self.generation,
new_segments)
finally:
if self.writelock:
self.writelock.release()
def cancel(self):
self._check_state()
try:
self.pool.cancel()
self._close_all()
finally:
if self.writelock:
self.writelock.release()
class TermsWriter(object):
def __init__(self, schema, termsindex, postwriter, dawg, inlinelimit=1):
self.schema = schema
# This file maps terms to TermInfo structures
self.termsindex = termsindex
# This object writes postings to the posting file and keeps track of
# blocks
self.postwriter = postwriter
# Spelling
self.dawg = dawg
# Posting lists with <= this number of postings will be inlined into
# the terms index instead of being written to the posting file
assert isinstance(inlinelimit, integer_types)
self.inlinelimit = inlinelimit
self.spelling = False
self.lastfn = None
self.lasttext = None
self.format = None
self.offset = None
def _new_term(self, fieldname, text):
# This method tests whether a new field/term has started in the stream
# of incoming postings, and if so performs appropriate work
lastfn = self.lastfn or ''
lasttext = self.lasttext or ''
if fieldname < lastfn or (fieldname == lastfn and text < lasttext):
raise Exception("Postings are out of order: %r:%s .. %r:%s" %
(lastfn, lasttext, fieldname, text))
# Is the fieldname of this posting different from the last one?
if fieldname != lastfn:
# Store information we need about the new field
field = self.schema[fieldname]
self.format = field.format
self.spelling = field.spelling and not field.separate_spelling()
# Is the term of this posting different from the last one?
if fieldname != lastfn or text != lasttext:
# Finish up the last term before starting a new one
self._finish_term()
# If this field has spelling, add the term to the word graph
if self.spelling:
self.dawg.insert((fieldname,) + tuple(text))
# Set up internal state for the new term
self.offset = self.postwriter.start(self.format)
self.lasttext = text
self.lastfn = fieldname
def _finish_term(self):
postwriter = self.postwriter
if self.lasttext is not None:
terminfo = postwriter.finish(self.inlinelimit)
self.termsindex.add((self.lastfn, self.lasttext), terminfo)
def add_postings(self, fieldname, text, matcher, getlen, offset=0,
docmap=None):
self._new_term(fieldname, text)
postwrite = self.postwriter.write
while matcher.is_active():
docnum = matcher.id()
weight = matcher.weight()
valuestring = matcher.value()
if docmap:
newdoc = docmap[docnum]
else:
newdoc = offset + docnum
postwrite(newdoc, weight, valuestring, getlen(docnum, fieldname))
matcher.next()
def add_iter(self, postiter, getlen, offset=0, docmap=None):
_new_term = self._new_term
postwrite = self.postwriter.write
for fieldname, text, docnum, weight, valuestring in postiter:
_new_term(fieldname, text)
if docmap:
newdoc = docmap[docnum]
else:
newdoc = offset + docnum
postwrite(newdoc, weight, valuestring, getlen(docnum, fieldname))
def add(self, fieldname, text, docnum, weight, valuestring, fieldlen):
self._new_term(fieldname, text)
self.postwriter.write(docnum, weight, valuestring, fieldlen)
def close(self):
self._finish_term()
self.termsindex.close()
self.postwriter.close()
# Retroactively add spelling files to an existing index
def add_spelling(ix, fieldnames, commit=True):
"""Adds spelling files to an existing index that was created without
them, and modifies the schema so the given fields have the ``spelling``
attribute. Only works on filedb indexes.
>>> ix = index.open_dir("testindex")
>>> add_spelling(ix, ["content", "tags"])
:param ix: a :class:`whoosh.filedb.fileindex.FileIndex` object.
:param fieldnames: a list of field names to create word graphs for.
    :param commit: if True (the default), the writer is committed after the
        word graphs are written; pass False to commit it yourself.
"""
from whoosh.filedb.filereading import SegmentReader
writer = ix.writer()
storage = writer.storage
schema = writer.schema
segments = writer.segments
for segment in segments:
filename = segment.dawg_filename
r = SegmentReader(storage, schema, segment)
f = storage.create_file(filename)
dawg = DawgBuilder(field_root=True)
for fieldname in fieldnames:
ft = (fieldname,)
for word in r.lexicon(fieldname):
dawg.insert(ft + tuple(word))
dawg.write(f)
for fieldname in fieldnames:
schema[fieldname].spelling = True
if commit:
writer.commit(merge=False)
| cscott/wikiserver | whoosh/filedb/filewriting.py | Python | gpl-2.0 | 25,621 |
# ***************************************************************************
# * Copyright (c) 2015 Przemo Firszt <[email protected]> *
# * Copyright (c) 2015 Bernd Hahnebach <[email protected]> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "FreeCAD FEM solver CalculiX writer"
__author__ = "Przemo Firszt, Bernd Hahnebach"
__url__ = "https://www.freecadweb.org"
## \addtogroup FEM
# @{
# import io
import codecs
import os
import six
import sys
import time
from os.path import join
import FreeCAD
from .. import writerbase
from femmesh import meshtools
from femtools import geomtools
# Interesting forum topic: https://forum.freecadweb.org/viewtopic.php?&t=48451
# TODO somehow set units at beginning and every time a value is retrieved use this identifier
# this would lead to support of unit system, force might be retrieved in base writer!
# the following text will be at the end of the main calculix input file
units_information = """***********************************************************
** About units:
** See ccx manual, ccx does not know about any unit.
** Golden rule: The user must make sure that the numbers he provides have consistent units.
** The user is the FreeCAD calculix writer module ;-)
**
** The unit system which is used at Guido Dhondt's company: mm, N, s, K
** Since Length and Mass are connected by Force, if Length is mm the Mass is in t to get N
** The following units are used to write to inp file:
**
** Length: mm (this includes the mesh geometry)
** Mass: t
** TimeSpan: s
** Temperature: K
**
** This leads to:
** Force: N
** Pressure: N/mm^2
** Density: t/mm^3
** Gravity: mm/s^2
** Thermal conductivity: t*mm/K/s^3 (same as W/m/K)
** Specific Heat: mm^2/s^2/K (same as J/kg/K)
"""
class FemInputWriterCcx(writerbase.FemInputWriter):
def __init__(
self,
analysis_obj,
solver_obj,
mesh_obj,
member,
dir_name=None
):
writerbase.FemInputWriter.__init__(
self,
analysis_obj,
solver_obj,
mesh_obj,
member,
dir_name
)
self.mesh_name = self.mesh_object.Name
self.include = join(self.dir_name, self.mesh_name)
self.file_name = self.include + ".inp"
self.FluidInletoutlet_ele = []
self.fluid_inout_nodes_file = join(
self.dir_name,
"{}_inout_nodes.txt".format(self.mesh_name)
)
from femtools import constants
from FreeCAD import Units
self.gravity = int(Units.Quantity(constants.gravity()).getValueAs("mm/s^2")) # 9820 mm/s2
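        # constants.gravity() is expected to return a quantity such as
        # "9820 mm/s^2"; getValueAs("mm/s^2") converts it to the bare
        # mm/s^2 value, consistent with the mm/t/s/K system used here.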
# ********************************************************************************************
# write calculix input
def write_calculix_input_file(self):
timestart = time.process_time()
FreeCAD.Console.PrintMessage("Start writing CalculiX input file\n")
FreeCAD.Console.PrintMessage("Write ccx input file to: {}\n".format(self.file_name))
FreeCAD.Console.PrintLog(
"writerbaseCcx --> self.mesh_name --> " + self.mesh_name + "\n"
)
FreeCAD.Console.PrintLog(
"writerbaseCcx --> self.dir_name --> " + self.dir_name + "\n"
)
FreeCAD.Console.PrintLog(
"writerbaseCcx --> self.include --> " + self.mesh_name + "\n"
)
FreeCAD.Console.PrintLog(
"writerbaseCcx --> self.file_name --> " + self.file_name + "\n"
)
self.write_calculix_input()
writing_time_string = (
"Writing time CalculiX input file: {} seconds"
.format(round((time.process_time() - timestart), 2))
)
if self.femelement_count_test is True:
FreeCAD.Console.PrintMessage(writing_time_string + " \n\n")
return self.file_name
else:
FreeCAD.Console.PrintMessage(writing_time_string + " \n")
FreeCAD.Console.PrintError(
"Problems on writing input file, check report prints.\n\n"
)
return ""
def write_calculix_input(self):
if self.solver_obj.SplitInputWriter is True:
self.split_inpfile = True
else:
self.split_inpfile = False
# mesh
inpfileMain = self.write_mesh(self.split_inpfile)
# element and material sets
self.write_element_sets_material_and_femelement_type(inpfileMain)
# node sets and surface sets
self.write_node_sets_constraints_fixed(inpfileMain, self.split_inpfile)
self.write_node_sets_constraints_displacement(inpfileMain, self.split_inpfile)
self.write_node_sets_constraints_planerotation(inpfileMain, self.split_inpfile)
self.write_surfaces_constraints_contact(inpfileMain, self.split_inpfile)
self.write_surfaces_constraints_tie(inpfileMain, self.split_inpfile)
self.write_surfaces_constraints_sectionprint(inpfileMain, self.split_inpfile)
self.write_node_sets_constraints_transform(inpfileMain, self.split_inpfile)
self.write_node_sets_constraints_temperature(inpfileMain, self.split_inpfile)
# materials and fem element types
self.write_materials(inpfileMain)
self.write_constraints_initialtemperature(inpfileMain)
self.write_femelementsets(inpfileMain)
        # Fluid sections:
        # Inlet and Outlet require a special element definition.
        # Some data from the elsets are needed, thus this can not be moved
        # to mesh writing. TODO: it would be much better if this happened
        # at mesh writing, as the mesh will be changed.
if self.fluidsection_objects:
if is_fluid_section_inlet_outlet(self.ccx_elsets) is True:
if self.split_inpfile is True:
meshtools.use_correct_fluidinout_ele_def(
self.FluidInletoutlet_ele,
# use mesh file split, see write_mesh method split_mesh_file_path
join(self.dir_name, self.mesh_name + "_femesh.inp"),
self.fluid_inout_nodes_file
)
else:
inpfileMain.close()
meshtools.use_correct_fluidinout_ele_def(
self.FluidInletoutlet_ele,
self.file_name,
self.fluid_inout_nodes_file
)
# inpfileMain = io.open(self.file_name, "a", encoding="utf-8")
inpfileMain = codecs.open(self.file_name, "a", encoding="utf-8")
# constraints independent from steps
self.write_constraints_planerotation(inpfileMain)
self.write_constraints_contact(inpfileMain)
self.write_constraints_tie(inpfileMain)
self.write_constraints_transform(inpfileMain)
# step begin
self.write_step_begin(inpfileMain)
# constraints dependent from steps
self.write_constraints_fixed(inpfileMain)
self.write_constraints_displacement(inpfileMain)
self.write_constraints_sectionprint(inpfileMain)
self.write_constraints_selfweight(inpfileMain)
self.write_constraints_force(inpfileMain, self.split_inpfile)
self.write_constraints_pressure(inpfileMain, self.split_inpfile)
self.write_constraints_temperature(inpfileMain)
self.write_constraints_heatflux(inpfileMain, self.split_inpfile)
self.write_constraints_fluidsection(inpfileMain)
# output and step end
self.write_outputs_types(inpfileMain)
self.write_step_end(inpfileMain)
# footer
self.write_footer(inpfileMain)
inpfileMain.close()
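    # Typical driving code, as a sketch (normally the solver task machinery
    # does this; "analysis", "solver", "mesh" and "member" stand for the
    # objects of an existing FEM analysis):
    #
    #     writer = FemInputWriterCcx(analysis, solver, mesh, member, dir_name)
    #     inp_file = writer.write_calculix_input_file()
    #     # returns "" on failure, else the path of the written .inp file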
# ********************************************************************************************
# mesh
def write_mesh(self, inpfile_split=None):
# write mesh to file
element_param = 1 # highest element order only
group_param = False # do not write mesh group data
if inpfile_split is True:
write_name = "femesh"
file_name_splitt = self.mesh_name + "_" + write_name + ".inp"
split_mesh_file_path = join(self.dir_name, file_name_splitt)
self.femmesh.writeABAQUS(
split_mesh_file_path,
element_param,
group_param
)
# Check to see if fluid sections are in analysis and use D network element type
if self.fluidsection_objects:
meshtools.write_D_network_element_to_inputfile(split_mesh_file_path)
# inpfile = io.open(self.file_name, "w", encoding="utf-8")
inpfile = codecs.open(self.file_name, "w", encoding="utf-8")
inpfile.write("***********************************************************\n")
inpfile.write("** {}\n".format(write_name))
inpfile.write("*INCLUDE,INPUT={}\n".format(file_name_splitt))
else:
self.femmesh.writeABAQUS(
self.file_name,
element_param,
group_param
)
# Check to see if fluid sections are in analysis and use D network element type
if self.fluidsection_objects:
# inpfile is closed
meshtools.write_D_network_element_to_inputfile(self.file_name)
# reopen file with "append" to add all the rest
# inpfile = io.open(self.file_name, "a", encoding="utf-8")
inpfile = codecs.open(self.file_name, "a", encoding="utf-8")
inpfile.write("\n\n")
return inpfile
# ********************************************************************************************
# constraints fixed
def write_node_sets_constraints_fixed(self, f, inpfile_split=None):
if not self.fixed_objects:
return
# write for all analysis types
# get nodes
self.get_constraints_fixed_nodes()
write_name = "constraints_fixed_node_sets"
f.write("\n***********************************************************\n")
f.write("** {}\n".format(write_name.replace("_", " ")))
f.write("** written by {} function\n".format(sys._getframe().f_code.co_name))
if inpfile_split is True:
file_name_splitt = self.mesh_name + "_" + write_name + ".inp"
f.write("** {}\n".format(write_name.replace("_", " ")))
f.write("*INCLUDE,INPUT={}\n".format(file_name_splitt))
inpfile_splitt = open(join(self.dir_name, file_name_splitt), "w")
self.write_node_sets_nodes_constraints_fixed(inpfile_splitt)
inpfile_splitt.close()
else:
self.write_node_sets_nodes_constraints_fixed(f)
def write_node_sets_nodes_constraints_fixed(self, f):
# write nodes to file
for femobj in self.fixed_objects:
# femobj --> dict, FreeCAD document object is femobj["Object"]
fix_obj = femobj["Object"]
f.write("** " + fix_obj.Label + "\n")
if self.femmesh.Volumes \
and (len(self.shellthickness_objects) > 0 or len(self.beamsection_objects) > 0):
if len(femobj["NodesSolid"]) > 0:
f.write("*NSET,NSET=" + fix_obj.Name + "Solid\n")
for n in femobj["NodesSolid"]:
f.write(str(n) + ",\n")
if len(femobj["NodesFaceEdge"]) > 0:
f.write("*NSET,NSET=" + fix_obj.Name + "FaceEdge\n")
for n in femobj["NodesFaceEdge"]:
f.write(str(n) + ",\n")
else:
f.write("*NSET,NSET=" + fix_obj.Name + "\n")
for n in femobj["Nodes"]:
f.write(str(n) + ",\n")
def write_constraints_fixed(self, f):
if not self.fixed_objects:
return
# write for all analysis types
# write constraint to file
f.write("\n***********************************************************\n")
f.write("** Fixed Constraints\n")
f.write("** written by {} function\n".format(sys._getframe().f_code.co_name))
for femobj in self.fixed_objects:
# femobj --> dict, FreeCAD document object is femobj["Object"]
f.write("** " + femobj["Object"].Label + "\n")
fix_obj_name = femobj["Object"].Name
if self.femmesh.Volumes \
and (len(self.shellthickness_objects) > 0 or len(self.beamsection_objects) > 0):
if len(femobj["NodesSolid"]) > 0:
f.write("*BOUNDARY\n")
f.write(fix_obj_name + "Solid" + ",1\n")
f.write(fix_obj_name + "Solid" + ",2\n")
f.write(fix_obj_name + "Solid" + ",3\n")
f.write("\n")
if len(femobj["NodesFaceEdge"]) > 0:
f.write("*BOUNDARY\n")
f.write(fix_obj_name + "FaceEdge" + ",1\n")
f.write(fix_obj_name + "FaceEdge" + ",2\n")
f.write(fix_obj_name + "FaceEdge" + ",3\n")
f.write(fix_obj_name + "FaceEdge" + ",4\n")
f.write(fix_obj_name + "FaceEdge" + ",5\n")
f.write(fix_obj_name + "FaceEdge" + ",6\n")
f.write("\n")
else:
f.write("*BOUNDARY\n")
f.write(fix_obj_name + ",1\n")
f.write(fix_obj_name + ",2\n")
f.write(fix_obj_name + ",3\n")
if self.beamsection_objects or self.shellthickness_objects:
f.write(fix_obj_name + ",4\n")
f.write(fix_obj_name + ",5\n")
f.write(fix_obj_name + ",6\n")
f.write("\n")
# ********************************************************************************************
# constraints displacement
def write_node_sets_constraints_displacement(self, f, inpfile_split=None):
if not self.displacement_objects:
return
# write for all analysis types
# get nodes
self.get_constraints_displacement_nodes()
write_name = "constraints_displacement_node_sets"
f.write("\n***********************************************************\n")
f.write("** {}\n".format(write_name.replace("_", " ")))
f.write("** written by {} function\n".format(sys._getframe().f_code.co_name))
if inpfile_split is True:
file_name_splitt = self.mesh_name + "_" + write_name + ".inp"
f.write("** {}\n".format(write_name.replace("_", " ")))
f.write("*INCLUDE,INPUT={}\n".format(file_name_splitt))
inpfile_splitt = open(join(self.dir_name, file_name_splitt), "w")
self.write_node_sets_nodes_constraints_displacement(inpfile_splitt)
inpfile_splitt.close()
else:
self.write_node_sets_nodes_constraints_displacement(f)
def write_node_sets_nodes_constraints_displacement(self, f):
# write nodes to file
for femobj in self.displacement_objects:
# femobj --> dict, FreeCAD document object is femobj["Object"]
disp_obj = femobj["Object"]
f.write("** " + disp_obj.Label + "\n")
f.write("*NSET,NSET=" + disp_obj.Name + "\n")
for n in femobj["Nodes"]:
f.write(str(n) + ",\n")
def write_constraints_displacement(self, f):
if not self.displacement_objects:
return
# write for all analysis types
# write constraint to file
f.write("\n***********************************************************\n")
f.write("** Displacement constraint applied\n")
f.write("** written by {} function\n".format(sys._getframe().f_code.co_name))
for femobj in self.displacement_objects:
# femobj --> dict, FreeCAD document object is femobj["Object"]
f.write("** " + femobj["Object"].Label + "\n")
disp_obj = femobj["Object"]
disp_obj_name = disp_obj.Name
f.write("*BOUNDARY\n")
if disp_obj.xFix:
f.write(disp_obj_name + ",1\n")
elif not disp_obj.xFree:
f.write(disp_obj_name + ",1,1," + str(disp_obj.xDisplacement) + "\n")
if disp_obj.yFix:
f.write(disp_obj_name + ",2\n")
elif not disp_obj.yFree:
f.write(disp_obj_name + ",2,2," + str(disp_obj.yDisplacement) + "\n")
if disp_obj.zFix:
f.write(disp_obj_name + ",3\n")
elif not disp_obj.zFree:
f.write(disp_obj_name + ",3,3," + str(disp_obj.zDisplacement) + "\n")
if self.beamsection_objects or self.shellthickness_objects:
if disp_obj.rotxFix:
f.write(disp_obj_name + ",4\n")
elif not disp_obj.rotxFree:
f.write(disp_obj_name + ",4,4," + str(disp_obj.xRotation) + "\n")
if disp_obj.rotyFix:
f.write(disp_obj_name + ",5\n")
elif not disp_obj.rotyFree:
f.write(disp_obj_name + ",5,5," + str(disp_obj.yRotation) + "\n")
if disp_obj.rotzFix:
f.write(disp_obj_name + ",6\n")
elif not disp_obj.rotzFree:
f.write(disp_obj_name + ",6,6," + str(disp_obj.zRotation) + "\n")
f.write("\n")
# ********************************************************************************************
# constraints planerotation
def write_node_sets_constraints_planerotation(self, f, inpfile_split=None):
if not self.planerotation_objects:
return
# write for all analysis types
# get nodes
self.get_constraints_planerotation_nodes()
write_name = "constraints_planerotation_node_sets"
f.write("\n***********************************************************\n")
f.write("** {}\n".format(write_name.replace("_", " ")))
f.write("** written by {} function\n".format(sys._getframe().f_code.co_name))
if inpfile_split is True:
file_name_splitt = self.mesh_name + "_" + write_name + ".inp"
f.write("** {}\n".format(write_name.replace("_", " ")))
f.write("*INCLUDE,INPUT={}\n".format(file_name_splitt))
inpfile_splitt = open(join(self.dir_name, file_name_splitt), "w")
self.write_node_sets_nodes_constraints_planerotation(inpfile_splitt)
inpfile_splitt.close()
else:
self.write_node_sets_nodes_constraints_planerotation(f)
def write_node_sets_nodes_constraints_planerotation(self, f):
# write nodes to file
if not self.femnodes_mesh:
self.femnodes_mesh = self.femmesh.Nodes
        # info about self.constraint_conflict_nodes:
        # it is used to check whether an MPC shares nodes with a fixed or
        # displacement constraint, because MPCs and fixed or displacement
        # constraints must not share the same nodes. Thus the call to
        # write_node_sets_constraints_planerotation has to come after
        # constraint fixed and constraint displacement.
for femobj in self.planerotation_objects:
# femobj --> dict, FreeCAD document object is femobj["Object"]
l_nodes = femobj["Nodes"]
fric_obj = femobj["Object"]
f.write("** " + fric_obj.Label + "\n")
f.write("*NSET,NSET=" + fric_obj.Name + "\n")
# Code to extract nodes and coordinates on the PlaneRotation support face
nodes_coords = []
for node in l_nodes:
nodes_coords.append((
node,
self.femnodes_mesh[node].x,
self.femnodes_mesh[node].y,
self.femnodes_mesh[node].z
))
node_planerotation = meshtools.get_three_non_colinear_nodes(nodes_coords)
for i in range(len(l_nodes)):
if l_nodes[i] not in node_planerotation:
node_planerotation.append(l_nodes[i])
MPC_nodes = []
for i in range(len(node_planerotation)):
cnt = 0
for j in range(len(self.constraint_conflict_nodes)):
if node_planerotation[i] == self.constraint_conflict_nodes[j]:
cnt = cnt + 1
if cnt == 0:
MPC = node_planerotation[i]
MPC_nodes.append(MPC)
for i in range(len(MPC_nodes)):
f.write(str(MPC_nodes[i]) + ",\n")
def write_constraints_planerotation(self, f):
if not self.planerotation_objects:
return
# write for all analysis types
# write constraint to file
f.write("\n***********************************************************\n")
f.write("** PlaneRotation Constraints\n")
f.write("** written by {} function\n".format(sys._getframe().f_code.co_name))
for femobj in self.planerotation_objects:
# femobj --> dict, FreeCAD document object is femobj["Object"]
f.write("** " + femobj["Object"].Label + "\n")
fric_obj_name = femobj["Object"].Name
f.write("*MPC\n")
f.write("PLANE," + fric_obj_name + "\n")
# ********************************************************************************************
# constraints contact
def write_surfaces_constraints_contact(self, f, inpfile_split=None):
if not self.contact_objects:
return
# write for all analysis types
# get faces
self.get_constraints_contact_faces()
write_name = "constraints_contact_surface_sets"
f.write("\n***********************************************************\n")
f.write("** {}\n".format(write_name.replace("_", " ")))
f.write("** written by {} function\n".format(sys._getframe().f_code.co_name))
if inpfile_split is True:
file_name_splitt = self.mesh_name + "_" + write_name + ".inp"
f.write("** {}\n".format(write_name.replace("_", " ")))
f.write("*INCLUDE,INPUT={}\n".format(file_name_splitt))
inpfile_splitt = open(join(self.dir_name, file_name_splitt), "w")
self.write_surfacefaces_constraints_contact(inpfile_splitt)
inpfile_splitt.close()
else:
self.write_surfacefaces_constraints_contact(f)
def write_surfacefaces_constraints_contact(self, f):
# write faces to file
for femobj in self.contact_objects:
# femobj --> dict, FreeCAD document object is femobj["Object"]
contact_obj = femobj["Object"]
f.write("** " + contact_obj.Label + "\n")
# slave DEP
f.write("*SURFACE, NAME=DEP{}\n".format(contact_obj.Name))
for i in femobj["ContactSlaveFaces"]:
f.write("{},S{}\n".format(i[0], i[1]))
# master IND
f.write("*SURFACE, NAME=IND{}\n".format(contact_obj.Name))
for i in femobj["ContactMasterFaces"]:
f.write("{},S{}\n".format(i[0], i[1]))
def write_constraints_contact(self, f):
if not self.contact_objects:
return
# write for all analysis types
# write constraint to file
f.write("\n***********************************************************\n")
f.write("** Contact Constraints\n")
f.write("** written by {} function\n".format(sys._getframe().f_code.co_name))
for femobj in self.contact_objects:
# femobj --> dict, FreeCAD document object is femobj["Object"]
contact_obj = femobj["Object"]
f.write("** " + contact_obj.Label + "\n")
f.write(
"*CONTACT PAIR, INTERACTION=INT{},TYPE=SURFACE TO SURFACE\n"
.format(contact_obj.Name)
)
ind_surf = "IND" + contact_obj.Name
dep_surf = "DEP" + contact_obj.Name
f.write(dep_surf + "," + ind_surf + "\n")
f.write("*SURFACE INTERACTION, NAME=INT{}\n".format(contact_obj.Name))
f.write("*SURFACE BEHAVIOR,PRESSURE-OVERCLOSURE=LINEAR\n")
slope = contact_obj.Slope
f.write(str(slope) + " \n")
friction = contact_obj.Friction
if friction > 0:
f.write("*FRICTION \n")
stick = (slope / 10.0)
f.write(str(friction) + ", " + str(stick) + " \n")
# ********************************************************************************************
# constraints tie
def write_surfaces_constraints_tie(self, f, inpfile_split=None):
if not self.tie_objects:
return
# write for all analysis types
# get faces
self.get_constraints_tie_faces()
write_name = "constraints_tie_surface_sets"
f.write("\n***********************************************************\n")
f.write("** {}\n".format(write_name.replace("_", " ")))
f.write("** written by {} function\n".format(sys._getframe().f_code.co_name))
if inpfile_split is True:
file_name_splitt = self.mesh_name + "_" + write_name + ".inp"
f.write("** {}\n".format(write_name.replace("_", " ")))
f.write("*INCLUDE,INPUT={}\n".format(file_name_splitt))
inpfile_splitt = open(join(self.dir_name, file_name_splitt), "w")
self.write_surfacefaces_constraints_tie(inpfile_splitt)
inpfile_splitt.close()
else:
self.write_surfacefaces_constraints_tie(f)
def write_surfacefaces_constraints_tie(self, f):
# write faces to file
for femobj in self.tie_objects:
# femobj --> dict, FreeCAD document object is femobj["Object"]
tie_obj = femobj["Object"]
f.write("** " + tie_obj.Label + "\n")
# slave DEP
f.write("*SURFACE, NAME=TIE_DEP{}\n".format(tie_obj.Name))
for i in femobj["TieSlaveFaces"]:
f.write("{},S{}\n".format(i[0], i[1]))
# master IND
f.write("*SURFACE, NAME=TIE_IND{}\n".format(tie_obj.Name))
for i in femobj["TieMasterFaces"]:
f.write("{},S{}\n".format(i[0], i[1]))
def write_constraints_tie(self, f):
if not self.tie_objects:
return
# write for all analysis types
# write constraint to file
f.write("\n***********************************************************\n")
f.write("** Tie Constraints\n")
f.write("** written by {} function\n".format(sys._getframe().f_code.co_name))
for femobj in self.tie_objects:
# femobj --> dict, FreeCAD document object is femobj["Object"]
tie_obj = femobj["Object"]
f.write("** {}\n".format(tie_obj.Label))
tolerance = str(tie_obj.Tolerance.getValueAs("mm")).rstrip()
f.write(
"*TIE, POSITION TOLERANCE={}, ADJUST=NO, NAME=TIE{}\n"
.format(tolerance, tie_obj.Name)
)
ind_surf = "TIE_IND" + tie_obj.Name
dep_surf = "TIE_DEP" + tie_obj.Name
f.write("{},{}\n".format(dep_surf, ind_surf))
# ********************************************************************************************
# constraints sectionprint
def write_surfaces_constraints_sectionprint(self, f, inpfile_split=None):
if not self.sectionprint_objects:
return
# write for all analysis types
write_name = "constraints_sectionprint_surface_sets"
f.write("\n***********************************************************\n")
f.write("** {}\n".format(write_name.replace("_", " ")))
f.write("** written by {} function\n".format(sys._getframe().f_code.co_name))
if inpfile_split is True:
file_name_splitt = self.mesh_name + "_" + write_name + ".inp"
f.write("** {}\n".format(write_name.replace("_", " ")))
f.write("*INCLUDE,INPUT={}\n".format(file_name_splitt))
inpfile_splitt = open(join(self.dir_name, file_name_splitt), "w")
self.write_surfacefaces_constraints_sectionprint(inpfile_splitt)
inpfile_splitt.close()
else:
self.write_surfacefaces_constraints_sectionprint(f)
# TODO move code parts from this method to base writer module
def write_surfacefaces_constraints_sectionprint(self, f):
# get surface nodes and write them to file
obj = 0
for femobj in self.sectionprint_objects:
# femobj --> dict, FreeCAD document object is femobj["Object"]
sectionprint_obj = femobj["Object"]
f.write("** " + sectionprint_obj.Label + "\n")
obj = obj + 1
for o, elem_tup in sectionprint_obj.References:
for elem in elem_tup:
ref_shape = o.Shape.getElement(elem)
if ref_shape.ShapeType == "Face":
name = "SECTIONFACE" + str(obj)
f.write("*SURFACE, NAME=" + name + "\n")
v = self.mesh_object.FemMesh.getccxVolumesByFace(ref_shape)
if len(v) > 0:
# volume elements found
FreeCAD.Console.PrintLog(
"{}, surface {}, {} touching volume elements found\n"
.format(sectionprint_obj.Label, name, len(v))
)
for i in v:
f.write("{},S{}\n".format(i[0], i[1]))
else:
# no volume elements found, shell elements not allowed
FreeCAD.Console.PrintError(
"{}, surface {}, Error: "
"No volume elements found!\n"
.format(sectionprint_obj.Label, name)
)
f.write("** Error: empty list\n")
def write_constraints_sectionprint(self, f):
if not self.sectionprint_objects:
return
# write for all analysis types
# write constraint to file
f.write("\n***********************************************************\n")
f.write("** SectionPrint Constraints\n")
f.write("** written by {} function\n".format(sys._getframe().f_code.co_name))
obj = 0
for femobj in self.sectionprint_objects:
# femobj --> dict, FreeCAD document object is femobj["Object"]
obj = obj + 1
sectionprint_obj = femobj["Object"]
f.write("** {}\n".format(sectionprint_obj.Label))
f.write(
"*SECTION PRINT, SURFACE=SECTIONFACE{}, NAME=SECTIONPRINT{}\n"
.format(obj, obj)
)
f.write("SOF, SOM, SOAREA\n")
# ********************************************************************************************
# constraints transform
def write_node_sets_constraints_transform(self, f, inpfile_split=None):
if not self.transform_objects:
return
# write for all analysis types
# get nodes
self.get_constraints_transform_nodes()
write_name = "constraints_transform_node_sets"
f.write("\n***********************************************************\n")
f.write("** {}\n".format(write_name.replace("_", " ")))
f.write("** written by {} function\n".format(sys._getframe().f_code.co_name))
if inpfile_split is True:
file_name_splitt = self.mesh_name + "_" + write_name + ".inp"
f.write("** {}\n".format(write_name.replace("_", " ")))
f.write("*INCLUDE,INPUT={}\n".format(file_name_splitt))
inpfile_splitt = open(join(self.dir_name, file_name_splitt), "w")
self.write_node_sets_nodes_constraints_transform(inpfile_splitt)
inpfile_splitt.close()
else:
self.write_node_sets_nodes_constraints_transform(f)
def write_node_sets_nodes_constraints_transform(self, f):
# write nodes to file
for femobj in self.transform_objects:
# femobj --> dict, FreeCAD document object is femobj["Object"]
trans_obj = femobj["Object"]
f.write("** " + trans_obj.Label + "\n")
if trans_obj.TransformType == "Rectangular":
f.write("*NSET,NSET=Rect" + trans_obj.Name + "\n")
elif trans_obj.TransformType == "Cylindrical":
f.write("*NSET,NSET=Cylin" + trans_obj.Name + "\n")
for n in femobj["Nodes"]:
f.write(str(n) + ",\n")
def write_constraints_transform(self, f):
if not self.transform_objects:
return
# write for all analysis types
# write constraint to file
f.write("\n***********************************************************\n")
f.write("** Transform Constraints\n")
f.write("** written by {} function\n".format(sys._getframe().f_code.co_name))
for trans_object in self.transform_objects:
trans_obj = trans_object["Object"]
trans_name = ""
trans_type = ""
if trans_obj.TransformType == "Rectangular":
trans_name = "Rect"
trans_type = "R"
coords = geomtools.get_rectangular_coords(trans_obj)
elif trans_obj.TransformType == "Cylindrical":
trans_name = "Cylin"
trans_type = "C"
coords = geomtools.get_cylindrical_coords(trans_obj)
f.write("** {}\n".format(trans_obj.Label))
f.write("*TRANSFORM, NSET={}{}, TYPE={}\n".format(
trans_name,
trans_obj.Name,
trans_type,
))
f.write("{:f},{:f},{:f},{:f},{:f},{:f}\n".format(
coords[0],
coords[1],
coords[2],
coords[3],
coords[4],
coords[5],
))
# ********************************************************************************************
# constraints temperature
def write_node_sets_constraints_temperature(self, f, inpfile_split=None):
if not self.temperature_objects:
return
if not self.analysis_type == "thermomech":
return
# get nodes
self.get_constraints_temperature_nodes()
write_name = "constraints_temperature_node_sets"
f.write("\n***********************************************************\n")
f.write("** {}\n".format(write_name.replace("_", " ")))
f.write("** written by {} function\n".format(sys._getframe().f_code.co_name))
if inpfile_split is True:
file_name_splitt = self.mesh_name + "_" + write_name + ".inp"
f.write("** {}\n".format(write_name.replace("_", " ")))
f.write("*INCLUDE,INPUT={}\n".format(file_name_splitt))
inpfile_splitt = open(join(self.dir_name, file_name_splitt), "w")
self.write_node_sets_nodes_constraints_temperature(inpfile_splitt)
inpfile_splitt.close()
else:
self.write_node_sets_nodes_constraints_temperature(f)
def write_node_sets_nodes_constraints_temperature(self, f):
# write nodes to file
for femobj in self.temperature_objects:
# femobj --> dict, FreeCAD document object is femobj["Object"]
temp_obj = femobj["Object"]
f.write("** " + temp_obj.Label + "\n")
f.write("*NSET,NSET=" + temp_obj.Name + "\n")
for n in femobj["Nodes"]:
f.write(str(n) + ",\n")
def write_constraints_temperature(self, f):
if not self.temperature_objects:
return
if not self.analysis_type == "thermomech":
return
# write constraint to file
f.write("\n***********************************************************\n")
f.write("** Fixed temperature constraint applied\n")
f.write("** written by {} function\n".format(sys._getframe().f_code.co_name))
for ftobj in self.temperature_objects:
fixedtemp_obj = ftobj["Object"]
f.write("** " + fixedtemp_obj.Label + "\n")
NumberOfNodes = len(ftobj["Nodes"])
if fixedtemp_obj.ConstraintType == "Temperature":
f.write("*BOUNDARY\n")
f.write("{},11,11,{}\n".format(fixedtemp_obj.Name, fixedtemp_obj.Temperature))
f.write("\n")
elif fixedtemp_obj.ConstraintType == "CFlux":
f.write("*CFLUX\n")
f.write("{},11,{}\n".format(
fixedtemp_obj.Name,
fixedtemp_obj.CFlux * 0.001 / NumberOfNodes
))
f.write("\n")
# ********************************************************************************************
# constraints initialtemperature
def write_constraints_initialtemperature(self, f):
if not self.initialtemperature_objects:
return
if not self.analysis_type == "thermomech":
return
# write constraint to file
f.write("\n***********************************************************\n")
f.write("** Initial temperature constraint\n")
f.write("** written by {} function\n".format(sys._getframe().f_code.co_name))
f.write("*INITIAL CONDITIONS,TYPE=TEMPERATURE\n")
for itobj in self.initialtemperature_objects: # Should only be one
inittemp_obj = itobj["Object"]
# OvG: Initial temperature
f.write("{0},{1}\n".format(self.ccx_nall, inittemp_obj.initialTemperature))
# ********************************************************************************************
# constraints selfweight
def write_constraints_selfweight(self, f):
if not self.selfweight_objects:
return
if not (self.analysis_type == "static" or self.analysis_type == "thermomech"):
return
# write constraint to file
f.write("\n***********************************************************\n")
f.write("** Self weight Constraint\n")
f.write("** written by {} function\n".format(sys._getframe().f_code.co_name))
for femobj in self.selfweight_objects:
# femobj --> dict, FreeCAD document object is femobj["Object"]
selwei_obj = femobj["Object"]
f.write("** " + selwei_obj.Label + "\n")
f.write("*DLOAD\n")
f.write(
# elset, GRAV, magnitude, direction x, dir y ,dir z
"{},GRAV,{},{},{},{}\n"
.format(
self.ccx_eall,
self.gravity, # actual magnitude of gravity vector
selwei_obj.Gravity_x, # coordinate x of normalized gravity vector
selwei_obj.Gravity_y, # y
selwei_obj.Gravity_z # z
)
)
f.write("\n")
        # grav (gravitational acceleration) is equal for all elements
# should be only one constraint
# different element sets for different density
# are written in the material element sets already
# ********************************************************************************************
# constraints force
def write_constraints_force(self, f, inpfile_split=None):
if not self.force_objects:
return
if not (self.analysis_type == "static" or self.analysis_type == "thermomech"):
return
# check shape type of reference shape and get node loads
self.get_constraints_force_nodeloads()
write_name = "constraints_force_node_loads"
f.write("\n***********************************************************\n")
f.write("** {}\n".format(write_name.replace("_", " ")))
f.write("** written by {} function\n".format(sys._getframe().f_code.co_name))
if inpfile_split is True:
file_name_splitt = self.mesh_name + "_" + write_name + ".inp"
f.write("** {}\n".format(write_name.replace("_", " ")))
f.write("*INCLUDE,INPUT={}\n".format(file_name_splitt))
inpfile_splitt = open(join(self.dir_name, file_name_splitt), "w")
self.write_nodeloads_constraints_force(inpfile_splitt)
inpfile_splitt.close()
else:
self.write_nodeloads_constraints_force(f)
def write_nodeloads_constraints_force(self, f):
# write node loads to file
f.write("*CLOAD\n")
for femobj in self.force_objects:
# femobj --> dict, FreeCAD document object is femobj["Object"]
f.write("** " + femobj["Object"].Label + "\n")
direction_vec = femobj["Object"].DirectionVector
for ref_shape in femobj["NodeLoadTable"]:
f.write("** " + ref_shape[0] + "\n")
for n in sorted(ref_shape[1]):
node_load = ref_shape[1][n]
if (direction_vec.x != 0.0):
v1 = "{:.13E}".format(direction_vec.x * node_load)
f.write(str(n) + ",1," + v1 + "\n")
if (direction_vec.y != 0.0):
v2 = "{:.13E}".format(direction_vec.y * node_load)
f.write(str(n) + ",2," + v2 + "\n")
if (direction_vec.z != 0.0):
v3 = "{:.13E}".format(direction_vec.z * node_load)
f.write(str(n) + ",3," + v3 + "\n")
f.write("\n")
f.write("\n")
# ********************************************************************************************
# constraints pressure
def write_constraints_pressure(self, f, inpfile_split=None):
if not self.pressure_objects:
return
if not (self.analysis_type == "static" or self.analysis_type == "thermomech"):
return
# get the faces and face numbers
self.get_constraints_pressure_faces()
write_name = "constraints_pressure_element_face_loads"
f.write("\n***********************************************************\n")
f.write("** {}\n".format(write_name.replace("_", " ")))
f.write("** written by {} function\n".format(sys._getframe().f_code.co_name))
if inpfile_split is True:
file_name_splitt = self.mesh_name + "_" + write_name + ".inp"
f.write("** {}\n".format(write_name.replace("_", " ")))
f.write("*INCLUDE,INPUT={}\n".format(file_name_splitt))
inpfile_splitt = open(join(self.dir_name, file_name_splitt), "w")
self.write_faceloads_constraints_pressure(inpfile_splitt)
inpfile_splitt.close()
else:
self.write_faceloads_constraints_pressure(f)
def write_faceloads_constraints_pressure(self, f):
# write face loads to file
for femobj in self.pressure_objects:
# femobj --> dict, FreeCAD document object is femobj["Object"]
prs_obj = femobj["Object"]
f.write("** " + prs_obj.Label + "\n")
rev = -1 if prs_obj.Reversed else 1
f.write("*DLOAD\n")
for ref_shape in femobj["PressureFaces"]:
                # the loop is needed for compatibility reasons:
                # in the deprecated method get_pressure_obj_faces_depreciated
                # the face ids were stored per ref_shape
f.write("** " + ref_shape[0] + "\n")
for face, fno in ref_shape[1]:
if fno > 0: # solid mesh face
f.write("{},P{},{}\n".format(face, fno, rev * prs_obj.Pressure))
# on shell mesh face: fno == 0
# normal of element face == face normal
elif fno == 0:
f.write("{},P,{}\n".format(face, rev * prs_obj.Pressure))
# on shell mesh face: fno == -1
# normal of element face opposite direction face normal
elif fno == -1:
f.write("{},P,{}\n".format(face, -1 * rev * prs_obj.Pressure))
# ********************************************************************************************
# constraints heatflux
def write_constraints_heatflux(self, f, inpfile_split=None):
if not self.heatflux_objects:
return
if not self.analysis_type == "thermomech":
return
write_name = "constraints_heatflux_element_face_heatflux"
f.write("\n***********************************************************\n")
f.write("** {}\n".format(write_name.replace("_", " ")))
f.write("** written by {} function\n".format(sys._getframe().f_code.co_name))
if inpfile_split is True:
file_name_splitt = self.mesh_name + "_" + write_name + ".inp"
f.write("** {}\n".format(write_name.replace("_", " ")))
f.write("*INCLUDE,INPUT={}\n".format(file_name_splitt))
inpfile_splitt = open(join(self.dir_name, file_name_splitt), "w")
self.write_faceheatflux_constraints_heatflux(inpfile_splitt)
inpfile_splitt.close()
else:
self.write_faceheatflux_constraints_heatflux(f)
def write_faceheatflux_constraints_heatflux(self, f):
# write heat flux faces to file
for hfobj in self.heatflux_objects:
heatflux_obj = hfobj["Object"]
f.write("** " + heatflux_obj.Label + "\n")
if heatflux_obj.ConstraintType == "Convection":
f.write("*FILM\n")
for o, elem_tup in heatflux_obj.References:
for elem in elem_tup:
ho = o.Shape.getElement(elem)
if ho.ShapeType == "Face":
v = self.mesh_object.FemMesh.getccxVolumesByFace(ho)
f.write("** Heat flux on face {}\n".format(elem))
for i in v:
# SvdW: add factor to force heatflux to units system of t/mm/s/K
# OvG: Only write out the VolumeIDs linked to a particular face
f.write("{},F{},{},{}\n".format(
i[0],
i[1],
heatflux_obj.AmbientTemp,
heatflux_obj.FilmCoef * 0.001
))
elif heatflux_obj.ConstraintType == "DFlux":
f.write("*DFLUX\n")
for o, elem_tup in heatflux_obj.References:
for elem in elem_tup:
ho = o.Shape.getElement(elem)
if ho.ShapeType == "Face":
v = self.mesh_object.FemMesh.getccxVolumesByFace(ho)
f.write("** Heat flux on face {}\n".format(elem))
for i in v:
f.write("{},S{},{}\n".format(
i[0],
i[1],
heatflux_obj.DFlux * 0.001
))
# ********************************************************************************************
# constraints fluidsection
def write_constraints_fluidsection(self, f):
if not self.fluidsection_objects:
return
if not self.analysis_type == "thermomech":
return
# write constraint to file
f.write("\n***********************************************************\n")
f.write("** FluidSection constraints\n")
f.write("** written by {} function\n".format(sys._getframe().f_code.co_name))
if os.path.exists(self.fluid_inout_nodes_file):
inout_nodes_file = open(self.fluid_inout_nodes_file, "r")
lines = inout_nodes_file.readlines()
inout_nodes_file.close()
else:
FreeCAD.Console.PrintError(
"1DFlow inout nodes file not found: {}\n"
.format(self.fluid_inout_nodes_file)
)
# get nodes
self.get_constraints_fluidsection_nodes()
for femobj in self.fluidsection_objects:
# femobj --> dict, FreeCAD document object is femobj["Object"]
fluidsection_obj = femobj["Object"]
f.write("** " + fluidsection_obj.Label + "\n")
if fluidsection_obj.SectionType == "Liquid":
if fluidsection_obj.LiquidSectionType == "PIPE INLET":
f.write("**Fluid Section Inlet \n")
if fluidsection_obj.InletPressureActive is True:
f.write("*BOUNDARY \n")
for n in femobj["Nodes"]:
for line in lines:
b = line.split(",")
if int(b[0]) == n and b[3] == "PIPE INLET\n":
# degree of freedom 2 is for defining pressure
f.write("{},{},{},{}\n".format(
b[0],
"2",
"2",
fluidsection_obj.InletPressure
))
if fluidsection_obj.InletFlowRateActive is True:
f.write("*BOUNDARY,MASS FLOW \n")
for n in femobj["Nodes"]:
for line in lines:
b = line.split(",")
if int(b[0]) == n and b[3] == "PIPE INLET\n":
# degree of freedom 1 is for defining flow rate
# factor applied to convert unit from kg/s to t/s
f.write("{},{},{},{}\n".format(
b[1],
"1",
"1",
fluidsection_obj.InletFlowRate * 0.001
))
elif fluidsection_obj.LiquidSectionType == "PIPE OUTLET":
f.write("**Fluid Section Outlet \n")
if fluidsection_obj.OutletPressureActive is True:
f.write("*BOUNDARY \n")
for n in femobj["Nodes"]:
for line in lines:
b = line.split(",")
if int(b[0]) == n and b[3] == "PIPE OUTLET\n":
# degree of freedom 2 is for defining pressure
f.write("{},{},{},{}\n".format(
b[0],
"2",
"2",
fluidsection_obj.OutletPressure
))
if fluidsection_obj.OutletFlowRateActive is True:
f.write("*BOUNDARY,MASS FLOW \n")
for n in femobj["Nodes"]:
for line in lines:
b = line.split(",")
if int(b[0]) == n and b[3] == "PIPE OUTLET\n":
# degree of freedom 1 is for defining flow rate
# factor applied to convert unit from kg/s to t/s
f.write("{},{},{},{}\n".format(
b[1],
"1",
"1",
fluidsection_obj.OutletFlowRate * 0.001
))
# ********************************************************************************************
# step begin and end
def write_step_begin(self, f):
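        """Write the *STEP keyword line, the analysis type line and the
        analysis parameter line. The content depends on the analysis type
        (static, frequency, thermomech, check) and the solver settings.
        """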
f.write("\n***********************************************************\n")
f.write("** At least one step is needed to run an CalculiX analysis of FreeCAD\n")
f.write("** written by {} function\n".format(sys._getframe().f_code.co_name))
# STEP line
step = "*STEP"
if self.solver_obj.GeometricalNonlinearity == "nonlinear":
if self.analysis_type == "static" or self.analysis_type == "thermomech":
# https://www.comsol.com/blogs/what-is-geometric-nonlinearity
step += ", NLGEOM"
elif self.analysis_type == "frequency":
FreeCAD.Console.PrintMessage(
"Analysis type frequency and geometrical nonlinear "
"analysis are not allowed together, linear is used instead!\n"
)
if self.solver_obj.IterationsThermoMechMaximum:
if self.analysis_type == "thermomech":
step += ", INC=" + str(self.solver_obj.IterationsThermoMechMaximum)
elif self.analysis_type == "static" or self.analysis_type == "frequency":
# parameter is for thermomechanical analysis only, see ccx manual *STEP
pass
# write step line
f.write(step + "\n")
# CONTROLS line
# all analysis types, ... really in frequency too?!?
if self.solver_obj.IterationsControlParameterTimeUse:
f.write("*CONTROLS, PARAMETERS=TIME INCREMENTATION\n")
f.write(self.solver_obj.IterationsControlParameterIter + "\n")
f.write(self.solver_obj.IterationsControlParameterCutb + "\n")
# ANALYSIS type line
# analysis line --> analysis type
if self.analysis_type == "static":
analysis_type = "*STATIC"
elif self.analysis_type == "frequency":
analysis_type = "*FREQUENCY"
elif self.analysis_type == "thermomech":
analysis_type = "*COUPLED TEMPERATURE-DISPLACEMENT"
elif self.analysis_type == "check":
analysis_type = "*NO ANALYSIS"
# analysis line --> solver type
# https://forum.freecadweb.org/viewtopic.php?f=18&t=43178
if self.solver_obj.MatrixSolverType == "default":
pass
elif self.solver_obj.MatrixSolverType == "spooles":
analysis_type += ", SOLVER=SPOOLES"
elif self.solver_obj.MatrixSolverType == "iterativescaling":
analysis_type += ", SOLVER=ITERATIVE SCALING"
elif self.solver_obj.MatrixSolverType == "iterativecholesky":
analysis_type += ", SOLVER=ITERATIVE CHOLESKY"
# analysis line --> user defined incrementations --> parameter DIRECT
# --> completely switch off ccx automatic incrementation
if self.solver_obj.IterationsUserDefinedIncrementations:
if self.analysis_type == "static":
analysis_type += ", DIRECT"
elif self.analysis_type == "thermomech":
analysis_type += ", DIRECT"
elif self.analysis_type == "frequency":
FreeCAD.Console.PrintMessage(
"Analysis type frequency and IterationsUserDefinedIncrementations "
"are not allowed together, it is ignored\n"
)
# analysis line --> steadystate --> thermomech only
if self.solver_obj.ThermoMechSteadyState:
# bernd: I do not know if STEADY STATE is allowed with DIRECT
# but since time steps are 1.0 it makes no sense IMHO
if self.analysis_type == "thermomech":
analysis_type += ", STEADY STATE"
# Set time to 1 and ignore user inputs for steady state
self.solver_obj.TimeInitialStep = 1.0
self.solver_obj.TimeEnd = 1.0
elif self.analysis_type == "static" or self.analysis_type == "frequency":
pass # not supported for static and frequency!
# ANALYSIS parameter line
analysis_parameter = ""
if self.analysis_type == "static" or self.analysis_type == "check":
if self.solver_obj.IterationsUserDefinedIncrementations is True \
or self.solver_obj.IterationsUserDefinedTimeStepLength is True:
analysis_parameter = "{},{}".format(
self.solver_obj.TimeInitialStep,
self.solver_obj.TimeEnd
)
elif self.analysis_type == "frequency":
if self.solver_obj.EigenmodeLowLimit == 0.0 \
and self.solver_obj.EigenmodeHighLimit == 0.0:
analysis_parameter = "{}\n".format(self.solver_obj.EigenmodesCount)
else:
analysis_parameter = "{},{},{}\n".format(
self.solver_obj.EigenmodesCount,
self.solver_obj.EigenmodeLowLimit,
self.solver_obj.EigenmodeHighLimit
)
elif self.analysis_type == "thermomech":
# OvG: 1.0 increment, total time 1 for steady state will cut back automatically
analysis_parameter = "{},{}".format(
self.solver_obj.TimeInitialStep,
self.solver_obj.TimeEnd
)
# write analysis type line, analysis parameter line
f.write(analysis_type + "\n")
f.write(analysis_parameter + "\n")
def write_step_end(self, f):
f.write("\n***********************************************************\n")
f.write("** written by {} function\n".format(sys._getframe().f_code.co_name))
f.write("*END STEP \n")
# ********************************************************************************************
# output types
def write_outputs_types(self, f):
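        """Write the result output requests: *NODE FILE and *EL FILE for
        the frd file and, for fixed constraints, *NODE PRINT reaction
        forces for the dat file."""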
f.write("\n***********************************************************\n")
f.write("** Outputs --> frd file\n")
f.write("** written by {} function\n".format(sys._getframe().f_code.co_name))
if self.beamsection_objects or self.shellthickness_objects or self.fluidsection_objects:
if self.solver_obj.BeamShellResultOutput3D is False:
f.write("*NODE FILE, OUTPUT=2d\n")
else:
f.write("*NODE FILE, OUTPUT=3d\n")
else:
f.write("*NODE FILE\n")
# MPH write out nodal temperatures if thermomechanical
if self.analysis_type == "thermomech":
if not self.fluidsection_objects:
f.write("U, NT\n")
else:
f.write("MF, PS\n")
else:
f.write("U\n")
if not self.fluidsection_objects:
f.write("*EL FILE\n")
if self.solver_obj.MaterialNonlinearity == "nonlinear":
f.write("S, E, PEEQ\n")
else:
f.write("S, E\n")
# dat file
# reaction forces: freecadweb.org/tracker/view.php?id=2934
if self.fixed_objects:
f.write("** outputs --> dat file\n")
# reaction forces for all Constraint fixed
f.write("** reaction forces for Constraint fixed\n")
for femobj in self.fixed_objects:
# femobj --> dict, FreeCAD document object is femobj["Object"]
fix_obj_name = femobj["Object"].Name
f.write("*NODE PRINT, NSET={}, TOTALS=ONLY\n".format(fix_obj_name))
f.write("RF\n")
# TODO: add Constraint Displacement if nodes are restrained
f.write("\n")
# there is no need to write all integration point results
# as long as there is no reader for them
# see https://forum.freecadweb.org/viewtopic.php?f=18&t=29060
# f.write("*NODE PRINT , NSET=" + self.ccx_nall + "\n")
# f.write("U \n")
# f.write("*EL PRINT , ELSET=" + self.ccx_eall + "\n")
# f.write("S \n")
# ********************************************************************************************
# footer
def write_footer(self, f):
f.write("\n***********************************************************\n")
f.write("** CalculiX Input file\n")
f.write("** written by {} function\n".format(
sys._getframe().f_code.co_name
))
f.write("** written by --> FreeCAD {}.{}.{}\n".format(
self.fc_ver[0],
self.fc_ver[1],
self.fc_ver[2]
))
f.write("** written on --> {}\n".format(
time.ctime()
))
f.write("** file name --> {}\n".format(
os.path.basename(self.document.FileName)
))
f.write("** analysis name --> {}\n".format(
self.analysis.Name
))
f.write("**\n")
f.write("**\n")
f.write(units_information)
f.write("**\n")
# ********************************************************************************************
# material and fem element type
def write_element_sets_material_and_femelement_type(self, f):
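        """Collect the element ids per material and element geometry
        (solid, shell, beam, fluid) and write them as *ELSET cards."""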
f.write("\n***********************************************************\n")
f.write("** Element sets for materials and FEM element type (solid, shell, beam, fluid)\n")
f.write("** written by {} function\n".format(sys._getframe().f_code.co_name))
# in any case if we have beams, we're going to need the element ids for the rotation elsets
if self.beamsection_objects:
# we will need to split the beam even for one beamobj
# because no beam in z-direction can be used in ccx without a special adjustment
# thus they need an own ccx_elset
self.get_element_rotation1D_elements()
# get the element ids for face and edge elements and write them into the objects
if len(self.shellthickness_objects) > 1:
self.get_element_geometry2D_elements()
if len(self.beamsection_objects) > 1:
self.get_element_geometry1D_elements()
if len(self.fluidsection_objects) > 1:
self.get_element_fluid1D_elements()
# get the element ids for material objects and write them into the material object
if len(self.material_objects) > 1:
self.get_material_elements()
# create the ccx_elsets
if len(self.material_objects) == 1:
if self.femmesh.Volumes:
# we only could do this for volumes, if a mesh contains volumes
# we're going to use them in the analysis
# but a mesh could contain the element faces of the volumes as faces
# and the edges of the faces as edges
# there we have to check for some geometric objects
self.get_ccx_elsets_single_mat_solid()
if len(self.shellthickness_objects) == 1:
self.get_ccx_elsets_single_mat_single_shell()
elif len(self.shellthickness_objects) > 1:
self.get_ccx_elsets_single_mat_multiple_shell()
if len(self.beamsection_objects) == 1:
self.get_ccx_elsets_single_mat_single_beam()
elif len(self.beamsection_objects) > 1:
self.get_ccx_elsets_single_mat_multiple_beam()
if len(self.fluidsection_objects) == 1:
self.get_ccx_elsets_single_mat_single_fluid()
elif len(self.fluidsection_objects) > 1:
self.get_ccx_elsets_single_mat_multiple_fluid()
elif len(self.material_objects) > 1:
if self.femmesh.Volumes:
                # we only could do this for volumes, if a mesh contains volumes
# we're going to use them in the analysis
# but a mesh could contain the element faces of the volumes as faces
# and the edges of the faces as edges
# there we have to check for some geometric objects
# volume is a bit special
# because retrieving ids from group mesh data is implemented
self.get_ccx_elsets_multiple_mat_solid()
if len(self.shellthickness_objects) == 1:
self.get_ccx_elsets_multiple_mat_single_shell()
elif len(self.shellthickness_objects) > 1:
self.get_ccx_elsets_multiple_mat_multiple_shell()
if len(self.beamsection_objects) == 1:
self.get_ccx_elsets_multiple_mat_single_beam()
elif len(self.beamsection_objects) > 1:
self.get_ccx_elsets_multiple_mat_multiple_beam()
if len(self.fluidsection_objects) == 1:
self.get_ccx_elsets_multiple_mat_single_fluid()
elif len(self.fluidsection_objects) > 1:
self.get_ccx_elsets_multiple_mat_multiple_fluid()
# TODO: some elementIDs are collected for 1D-Flow calculation,
# this should be a def somewhere else, preferable inside the get_ccx_elsets_... methods
for ccx_elset in self.ccx_elsets:
            # use six to stay compatible with both Python 2.7 and 3.x
if ccx_elset["ccx_elset"] \
and not isinstance(ccx_elset["ccx_elset"], six.string_types):
if "fluidsection_obj"in ccx_elset:
fluidsec_obj = ccx_elset["fluidsection_obj"]
if fluidsec_obj.SectionType == "Liquid":
if (fluidsec_obj.LiquidSectionType == "PIPE INLET") \
or (fluidsec_obj.LiquidSectionType == "PIPE OUTLET"):
elsetchanged = False
counter = 0
for elid in ccx_elset["ccx_elset"]:
counter = counter + 1
if (elsetchanged is False) \
and (fluidsec_obj.LiquidSectionType == "PIPE INLET"):
                                    # 3rd index tracks the line number on which the element is defined
self.FluidInletoutlet_ele.append(
[str(elid), fluidsec_obj.LiquidSectionType, 0]
)
elsetchanged = True
elif (fluidsec_obj.LiquidSectionType == "PIPE OUTLET") \
and (counter == len(ccx_elset["ccx_elset"])):
                                    # 3rd index tracks the line number on which the element is defined
self.FluidInletoutlet_ele.append(
[str(elid), fluidsec_obj.LiquidSectionType, 0]
)
# write ccx_elsets to file
for ccx_elset in self.ccx_elsets:
f.write("*ELSET,ELSET=" + ccx_elset["ccx_elset_name"] + "\n")
            # use six to stay compatible with both Python 2.7 and 3.x
if isinstance(ccx_elset["ccx_elset"], six.string_types):
f.write(ccx_elset["ccx_elset"] + "\n")
else:
for elid in ccx_elset["ccx_elset"]:
f.write(str(elid) + ",\n")
# self.ccx_elsets = [ {
# "ccx_elset" : [e1, e2, e3, ... , en] or elements set name strings
# "ccx_elset_name" : "ccx_identifier_elset"
# "mat_obj_name" : "mat_obj.Name"
# "ccx_mat_name" : "mat_obj.Material["Name"]" !!! not unique !!!
# "beamsection_obj" : "beamsection_obj" if exists
# "fluidsection_obj" : "fluidsection_obj" if exists
# "shellthickness_obj" : shellthickness_obj" if exists
# "beam_normal" : normal vector for beams only
# },
# {}, ... , {} ]
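    # example entry (hypothetical object names):
    # {"ccx_elset": [1, 2, 3], "ccx_elset_name": "M0B0R0D0",
    #  "mat_obj_name": "MechanicalMaterial", "ccx_mat_name": "Steel",
    #  "beamsection_obj": beamsection_obj, "beam_normal": (0, 0, 1)}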
# beam
# TODO support multiple beamrotations
    # we no longer need any data from the rotation document object,
# thus we do not need to save the rotation document object name in the else
def get_ccx_elsets_single_mat_single_beam(self):
mat_obj = self.material_objects[0]["Object"]
beamsec_obj = self.beamsection_objects[0]["Object"]
beamrot_data = self.beamrotation_objects[0]
for i, beamdirection in enumerate(beamrot_data["FEMRotations1D"]):
# ID's for this direction
elset_data = beamdirection["ids"]
names = [
{"short": "M0"},
{"short": "B0"},
{"short": beamrot_data["ShortName"]},
{"short": "D" + str(i)}
]
ccx_elset = {}
ccx_elset["ccx_elset"] = elset_data
ccx_elset["ccx_elset_name"] = get_ccx_elset_name_short(names)
ccx_elset["mat_obj_name"] = mat_obj.Name
ccx_elset["ccx_mat_name"] = mat_obj.Material["Name"]
ccx_elset["beamsection_obj"] = beamsec_obj
# normal for this direction
ccx_elset["beam_normal"] = beamdirection["normal"]
self.ccx_elsets.append(ccx_elset)
def get_ccx_elsets_single_mat_multiple_beam(self):
mat_obj = self.material_objects[0]["Object"]
beamrot_data = self.beamrotation_objects[0]
for beamsec_data in self.beamsection_objects:
beamsec_obj = beamsec_data["Object"]
beamsec_ids = set(beamsec_data["FEMElements"])
for i, beamdirection in enumerate(beamrot_data["FEMRotations1D"]):
beamdir_ids = set(beamdirection["ids"])
# empty intersection sets possible
elset_data = list(sorted(beamsec_ids.intersection(beamdir_ids)))
if elset_data:
names = [
{"short": "M0"},
{"short": beamsec_data["ShortName"]},
{"short": beamrot_data["ShortName"]},
{"short": "D" + str(i)}
]
ccx_elset = {}
ccx_elset["ccx_elset"] = elset_data
ccx_elset["ccx_elset_name"] = get_ccx_elset_name_short(names)
ccx_elset["mat_obj_name"] = mat_obj.Name
ccx_elset["ccx_mat_name"] = mat_obj.Material["Name"]
ccx_elset["beamsection_obj"] = beamsec_obj
# normal for this direction
ccx_elset["beam_normal"] = beamdirection["normal"]
self.ccx_elsets.append(ccx_elset)
def get_ccx_elsets_multiple_mat_single_beam(self):
beamsec_obj = self.beamsection_objects[0]["Object"]
beamrot_data = self.beamrotation_objects[0]
for mat_data in self.material_objects:
mat_obj = mat_data["Object"]
mat_ids = set(mat_data["FEMElements"])
for i, beamdirection in enumerate(beamrot_data["FEMRotations1D"]):
beamdir_ids = set(beamdirection["ids"])
elset_data = list(sorted(mat_ids.intersection(beamdir_ids)))
if elset_data:
names = [
{"short": mat_data["ShortName"]},
{"short": "B0"},
{"short": beamrot_data["ShortName"]},
{"short": "D" + str(i)}
]
ccx_elset = {}
ccx_elset["ccx_elset"] = elset_data
ccx_elset["ccx_elset_name"] = get_ccx_elset_name_short(names)
ccx_elset["mat_obj_name"] = mat_obj.Name
ccx_elset["ccx_mat_name"] = mat_obj.Material["Name"]
ccx_elset["beamsection_obj"] = beamsec_obj
# normal for this direction
ccx_elset["beam_normal"] = beamdirection["normal"]
self.ccx_elsets.append(ccx_elset)
def get_ccx_elsets_multiple_mat_multiple_beam(self):
beamrot_data = self.beamrotation_objects[0]
for beamsec_data in self.beamsection_objects:
beamsec_obj = beamsec_data["Object"]
beamsec_ids = set(beamsec_data["FEMElements"])
for mat_data in self.material_objects:
mat_obj = mat_data["Object"]
mat_ids = set(mat_data["FEMElements"])
for i, beamdirection in enumerate(beamrot_data["FEMRotations1D"]):
beamdir_ids = set(beamdirection["ids"])
# empty intersection sets possible
elset_data = list(sorted(
beamsec_ids.intersection(mat_ids).intersection(beamdir_ids)
))
if elset_data:
names = [
{"short": mat_data["ShortName"]},
{"short": beamsec_data["ShortName"]},
{"short": beamrot_data["ShortName"]},
{"short": "D" + str(i)}
]
ccx_elset = {}
ccx_elset["ccx_elset"] = elset_data
ccx_elset["ccx_elset_name"] = get_ccx_elset_name_short(names)
ccx_elset["mat_obj_name"] = mat_obj.Name
ccx_elset["ccx_mat_name"] = mat_obj.Material["Name"]
ccx_elset["beamsection_obj"] = beamsec_obj
# normal for this direction
ccx_elset["beam_normal"] = beamdirection["normal"]
self.ccx_elsets.append(ccx_elset)
# fluid
def get_ccx_elsets_single_mat_single_fluid(self):
mat_obj = self.material_objects[0]["Object"]
fluidsec_obj = self.fluidsection_objects[0]["Object"]
elset_data = self.ccx_eedges
names = [{"short": "M0"}, {"short": "F0"}]
ccx_elset = {}
ccx_elset["ccx_elset"] = elset_data
ccx_elset["ccx_elset_name"] = get_ccx_elset_name_short(names)
ccx_elset["mat_obj_name"] = mat_obj.Name
ccx_elset["ccx_mat_name"] = mat_obj.Material["Name"]
ccx_elset["fluidsection_obj"] = fluidsec_obj
self.ccx_elsets.append(ccx_elset)
def get_ccx_elsets_single_mat_multiple_fluid(self):
mat_obj = self.material_objects[0]["Object"]
for fluidsec_data in self.fluidsection_objects:
fluidsec_obj = fluidsec_data["Object"]
elset_data = fluidsec_data["FEMElements"]
names = [{"short": "M0"}, {"short": fluidsec_data["ShortName"]}]
ccx_elset = {}
ccx_elset["ccx_elset"] = elset_data
ccx_elset["ccx_elset_name"] = get_ccx_elset_name_short(names)
ccx_elset["mat_obj_name"] = mat_obj.Name
ccx_elset["ccx_mat_name"] = mat_obj.Material["Name"]
ccx_elset["fluidsection_obj"] = fluidsec_obj
self.ccx_elsets.append(ccx_elset)
def get_ccx_elsets_multiple_mat_single_fluid(self):
fluidsec_obj = self.fluidsection_objects[0]["Object"]
for mat_data in self.material_objects:
mat_obj = mat_data["Object"]
elset_data = mat_data["FEMElements"]
names = [{"short": mat_data["ShortName"]}, {"short": "F0"}]
ccx_elset = {}
ccx_elset["ccx_elset"] = elset_data
ccx_elset["ccx_elset_name"] = get_ccx_elset_name_short(names)
ccx_elset["mat_obj_name"] = mat_obj.Name
ccx_elset["ccx_mat_name"] = mat_obj.Material["Name"]
ccx_elset["fluidsection_obj"] = fluidsec_obj
self.ccx_elsets.append(ccx_elset)
def get_ccx_elsets_multiple_mat_multiple_fluid(self):
for fluidsec_data in self.fluidsection_objects:
fluidsec_obj = fluidsec_data["Object"]
for mat_data in self.material_objects:
mat_obj = mat_data["Object"]
fluidsec_ids = set(fluidsec_data["FEMElements"])
mat_ids = set(mat_data["FEMElements"])
# empty intersection sets possible
elset_data = list(sorted(fluidsec_ids.intersection(mat_ids)))
if elset_data:
names = [
{"short": mat_data["ShortName"]},
{"short": fluidsec_data["ShortName"]}
]
ccx_elset = {}
ccx_elset["ccx_elset"] = elset_data
ccx_elset["ccx_elset_name"] = get_ccx_elset_name_short(names)
ccx_elset["mat_obj_name"] = mat_obj.Name
ccx_elset["ccx_mat_name"] = mat_obj.Material["Name"]
ccx_elset["fluidsection_obj"] = fluidsec_obj
self.ccx_elsets.append(ccx_elset)
# shell
def get_ccx_elsets_single_mat_single_shell(self):
mat_obj = self.material_objects[0]["Object"]
shellth_obj = self.shellthickness_objects[0]["Object"]
elset_data = self.ccx_efaces
names = [
{"long": mat_obj.Name, "short": "M0"},
{"long": shellth_obj.Name, "short": "S0"}
]
ccx_elset = {}
ccx_elset["ccx_elset"] = elset_data
ccx_elset["ccx_elset_name"] = get_ccx_elset_name_standard(names)
ccx_elset["mat_obj_name"] = mat_obj.Name
ccx_elset["ccx_mat_name"] = mat_obj.Material["Name"]
ccx_elset["shellthickness_obj"] = shellth_obj
self.ccx_elsets.append(ccx_elset)
def get_ccx_elsets_single_mat_multiple_shell(self):
mat_obj = self.material_objects[0]["Object"]
for shellth_data in self.shellthickness_objects:
shellth_obj = shellth_data["Object"]
elset_data = shellth_data["FEMElements"]
names = [
{"long": mat_obj.Name, "short": "M0"},
{"long": shellth_obj.Name, "short": shellth_data["ShortName"]}
]
ccx_elset = {}
ccx_elset["ccx_elset"] = elset_data
ccx_elset["ccx_elset_name"] = get_ccx_elset_name_standard(names)
ccx_elset["mat_obj_name"] = mat_obj.Name
ccx_elset["ccx_mat_name"] = mat_obj.Material["Name"]
ccx_elset["shellthickness_obj"] = shellth_obj
self.ccx_elsets.append(ccx_elset)
def get_ccx_elsets_multiple_mat_single_shell(self):
shellth_obj = self.shellthickness_objects[0]["Object"]
for mat_data in self.material_objects:
mat_obj = mat_data["Object"]
elset_data = mat_data["FEMElements"]
names = [
{"long": mat_obj.Name, "short": mat_data["ShortName"]},
{"long": shellth_obj.Name, "short": "S0"}
]
ccx_elset = {}
ccx_elset["ccx_elset"] = elset_data
ccx_elset["ccx_elset_name"] = get_ccx_elset_name_standard(names)
ccx_elset["mat_obj_name"] = mat_obj.Name
ccx_elset["ccx_mat_name"] = mat_obj.Material["Name"]
ccx_elset["shellthickness_obj"] = shellth_obj
self.ccx_elsets.append(ccx_elset)
def get_ccx_elsets_multiple_mat_multiple_shell(self):
for shellth_data in self.shellthickness_objects:
shellth_obj = shellth_data["Object"]
for mat_data in self.material_objects:
mat_obj = mat_data["Object"]
shellth_ids = set(shellth_data["FEMElements"])
mat_ids = set(mat_data["FEMElements"])
# empty intersection sets possible
elset_data = list(sorted(shellth_ids.intersection(mat_ids)))
if elset_data:
names = [
{"long": mat_obj.Name, "short": mat_data["ShortName"]},
{"long": shellth_obj.Name, "short": shellth_data["ShortName"]}
]
ccx_elset = {}
ccx_elset["ccx_elset"] = elset_data
ccx_elset["ccx_elset_name"] = get_ccx_elset_name_standard(names)
ccx_elset["mat_obj_name"] = mat_obj.Name
ccx_elset["ccx_mat_name"] = mat_obj.Material["Name"]
ccx_elset["shellthickness_obj"] = shellth_obj
self.ccx_elsets.append(ccx_elset)
# solid
def get_ccx_elsets_single_mat_solid(self):
mat_obj = self.material_objects[0]["Object"]
elset_data = self.ccx_evolumes
names = [
{"long": mat_obj.Name, "short": "M0"},
{"long": "Solid", "short": "Solid"}
]
ccx_elset = {}
ccx_elset["ccx_elset"] = elset_data
ccx_elset["ccx_elset_name"] = get_ccx_elset_name_standard(names)
ccx_elset["mat_obj_name"] = mat_obj.Name
ccx_elset["ccx_mat_name"] = mat_obj.Material["Name"]
self.ccx_elsets.append(ccx_elset)
def get_ccx_elsets_multiple_mat_solid(self):
for mat_data in self.material_objects:
mat_obj = mat_data["Object"]
elset_data = mat_data["FEMElements"]
names = [
{"long": mat_obj.Name, "short": mat_data["ShortName"]},
{"long": "Solid", "short": "Solid"}
]
ccx_elset = {}
ccx_elset["ccx_elset"] = elset_data
ccx_elset["ccx_elset_name"] = get_ccx_elset_name_standard(names)
ccx_elset["mat_obj_name"] = mat_obj.Name
ccx_elset["ccx_mat_name"] = mat_obj.Material["Name"]
self.ccx_elsets.append(ccx_elset)
def write_materials(self, f):
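        """Write the *MATERIAL cards with elastic, density, thermal and,
        if configured, nonlinear (plastic) properties."""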
f.write("\n***********************************************************\n")
f.write("** Materials\n")
f.write("** written by {} function\n".format(sys._getframe().f_code.co_name))
f.write("** Young\'s modulus unit is MPa = N/mm2\n")
if self.analysis_type == "frequency" \
or self.selfweight_objects \
or (
self.analysis_type == "thermomech"
and not self.solver_obj.ThermoMechSteadyState
):
f.write("** Density\'s unit is t/mm^3\n")
if self.analysis_type == "thermomech":
f.write("** Thermal conductivity unit is kW/mm/K = t*mm/K*s^3\n")
f.write("** Specific Heat unit is kJ/t/K = mm^2/s^2/K\n")
for femobj in self.material_objects:
# femobj --> dict, FreeCAD document object is femobj["Object"]
mat_obj = femobj["Object"]
mat_info_name = mat_obj.Material["Name"]
mat_name = mat_obj.Name
mat_label = mat_obj.Label
            # get material properties of solid material, currently in SI units: m/kg/s/Kelvin
if mat_obj.Category == "Solid":
YM = FreeCAD.Units.Quantity(mat_obj.Material["YoungsModulus"])
YM_in_MPa = float(YM.getValueAs("MPa"))
PR = float(mat_obj.Material["PoissonRatio"])
if self.analysis_type == "frequency" \
or self.selfweight_objects \
or (
self.analysis_type == "thermomech"
and not self.solver_obj.ThermoMechSteadyState
):
density = FreeCAD.Units.Quantity(mat_obj.Material["Density"])
density_in_tonne_per_mm3 = float(density.getValueAs("t/mm^3"))
if self.analysis_type == "thermomech":
TC = FreeCAD.Units.Quantity(mat_obj.Material["ThermalConductivity"])
# SvdW: Add factor to force units to results base units
# of t/mm/s/K - W/m/K results in no factor needed
TC_in_WmK = float(TC.getValueAs("W/m/K"))
SH = FreeCAD.Units.Quantity(mat_obj.Material["SpecificHeat"])
# SvdW: Add factor to force units to results base units of t/mm/s/K
SH_in_JkgK = float(SH.getValueAs("J/kg/K")) * 1e+06
if mat_obj.Category == "Solid":
TEC = FreeCAD.Units.Quantity(mat_obj.Material["ThermalExpansionCoefficient"])
TEC_in_mmK = float(TEC.getValueAs("mm/mm/K"))
elif mat_obj.Category == "Fluid":
DV = FreeCAD.Units.Quantity(mat_obj.Material["DynamicViscosity"])
DV_in_tmms = float(DV.getValueAs("t/mm/s"))
# write material properties
f.write("** FreeCAD material name: " + mat_info_name + "\n")
f.write("** " + mat_label + "\n")
f.write("*MATERIAL, NAME=" + mat_name + "\n")
if mat_obj.Category == "Solid":
f.write("*ELASTIC\n")
f.write("{0:.0f}, {1:.3f}\n".format(YM_in_MPa, PR))
if self.analysis_type == "frequency" \
or self.selfweight_objects \
or (
self.analysis_type == "thermomech"
and not self.solver_obj.ThermoMechSteadyState
):
f.write("*DENSITY\n")
f.write("{0:.3e}\n".format(density_in_tonne_per_mm3))
if self.analysis_type == "thermomech":
if mat_obj.Category == "Solid":
f.write("*CONDUCTIVITY\n")
f.write("{0:.3f}\n".format(TC_in_WmK))
f.write("*EXPANSION\n")
f.write("{0:.3e}\n".format(TEC_in_mmK))
f.write("*SPECIFIC HEAT\n")
f.write("{0:.3e}\n".format(SH_in_JkgK))
elif mat_obj.Category == "Fluid":
f.write("*FLUID CONSTANTS\n")
f.write("{0:.3e}, {1:.3e}\n".format(SH_in_JkgK, DV_in_tmms))
# nonlinear material properties
if self.solver_obj.MaterialNonlinearity == "nonlinear":
for nlfemobj in self.material_nonlinear_objects:
# femobj --> dict, FreeCAD document object is nlfemobj["Object"]
nl_mat_obj = nlfemobj["Object"]
if nl_mat_obj.LinearBaseMaterial == mat_obj:
if nl_mat_obj.MaterialModelNonlinearity == "simple hardening":
f.write("*PLASTIC\n")
if nl_mat_obj.YieldPoint1:
f.write(nl_mat_obj.YieldPoint1 + "\n")
if nl_mat_obj.YieldPoint2:
f.write(nl_mat_obj.YieldPoint2 + "\n")
if nl_mat_obj.YieldPoint3:
f.write(nl_mat_obj.YieldPoint3 + "\n")
f.write("\n")
def write_femelementsets(self, f):
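        """Write one section card (*BEAM [GENERAL] SECTION, *FLUID SECTION,
        *SHELL SECTION or *SOLID SECTION) per non-empty element set."""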
f.write("\n***********************************************************\n")
f.write("** Sections\n")
f.write("** written by {} function\n".format(sys._getframe().f_code.co_name))
for ccx_elset in self.ccx_elsets:
if ccx_elset["ccx_elset"]:
if "beamsection_obj"in ccx_elset: # beam mesh
beamsec_obj = ccx_elset["beamsection_obj"]
elsetdef = "ELSET=" + ccx_elset["ccx_elset_name"] + ", "
material = "MATERIAL=" + ccx_elset["mat_obj_name"]
normal = ccx_elset["beam_normal"]
if beamsec_obj.SectionType == "Rectangular":
height = beamsec_obj.RectHeight.getValueAs("mm")
width = beamsec_obj.RectWidth.getValueAs("mm")
section_type = ", SECTION=RECT"
section_geo = str(height) + ", " + str(width) + "\n"
section_def = "*BEAM SECTION, {}{}{}\n".format(
elsetdef,
material,
section_type
)
elif beamsec_obj.SectionType == "Circular":
radius = 0.5 * beamsec_obj.CircDiameter.getValueAs("mm")
section_type = ", SECTION=CIRC"
section_geo = str(radius) + "\n"
section_def = "*BEAM SECTION, {}{}{}\n".format(
elsetdef,
material,
section_type
)
elif beamsec_obj.SectionType == "Pipe":
radius = 0.5 * beamsec_obj.PipeDiameter.getValueAs("mm")
thickness = beamsec_obj.PipeThickness.getValueAs("mm")
section_type = ", SECTION=PIPE"
section_geo = str(radius) + ", " + str(thickness) + "\n"
section_def = "*BEAM GENERAL SECTION, {}{}{}\n".format(
elsetdef,
material,
section_type
)
# see forum topic for output formatting of rotation
# https://forum.freecadweb.org/viewtopic.php?f=18&t=46133&p=405142#p405142
section_nor = "{:f}, {:f}, {:f}\n".format(
normal[0],
normal[1],
normal[2]
)
f.write(section_def)
f.write(section_geo)
f.write(section_nor)
elif "fluidsection_obj"in ccx_elset: # fluid mesh
fluidsec_obj = ccx_elset["fluidsection_obj"]
elsetdef = "ELSET=" + ccx_elset["ccx_elset_name"] + ", "
material = "MATERIAL=" + ccx_elset["mat_obj_name"]
if fluidsec_obj.SectionType == "Liquid":
section_type = fluidsec_obj.LiquidSectionType
if (section_type == "PIPE INLET") or (section_type == "PIPE OUTLET"):
section_type = "PIPE INOUT"
section_def = "*FLUID SECTION, {}TYPE={}, {}\n".format(
elsetdef,
section_type,
material
)
section_geo = liquid_section_def(fluidsec_obj, section_type)
"""
                    # deactivated because it would leave section_def and section_geo undefined
# deactivated in the App and Gui object and thus in the task panel as well
elif fluidsec_obj.SectionType == "Gas":
section_type = fluidsec_obj.GasSectionType
elif fluidsec_obj.SectionType == "Open Channel":
section_type = fluidsec_obj.ChannelSectionType
"""
f.write(section_def)
f.write(section_geo)
elif "shellthickness_obj"in ccx_elset: # shell mesh
shellth_obj = ccx_elset["shellthickness_obj"]
elsetdef = "ELSET=" + ccx_elset["ccx_elset_name"] + ", "
material = "MATERIAL=" + ccx_elset["mat_obj_name"]
section_def = "*SHELL SECTION, " + elsetdef + material + "\n"
section_geo = str(shellth_obj.Thickness.getValueAs("mm")) + "\n"
f.write(section_def)
f.write(section_geo)
else: # solid mesh
elsetdef = "ELSET=" + ccx_elset["ccx_elset_name"] + ", "
material = "MATERIAL=" + ccx_elset["mat_obj_name"]
section_def = "*SOLID SECTION, " + elsetdef + material + "\n"
f.write(section_def)
# ************************************************************************************************
# Helpers
# ccx elset names:
# M .. Material
# B .. Beam
# R .. BeamRotation
# D .. Direction
# F .. Fluid
# S .. Shell
# TODO write comment into input file to elset ids and elset attributes
def get_ccx_elset_name_standard(names):
# standard max length = 80
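    # e.g. (hypothetical object names)
    # names = [{"long": "MechanicalMaterial", "short": "M0"},
    #          {"long": "Solid", "short": "Solid"}]
    # --> "MechanicalMaterialSolid", or "M0Solid" if the long form
    # exceeds 80 characters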
ccx_elset_name = ""
for name in names:
ccx_elset_name += name["long"]
if len(ccx_elset_name) < 81:
return ccx_elset_name
else:
ccx_elset_name = ""
for name in names:
ccx_elset_name += name["short"]
if len(ccx_elset_name) < 81:
return ccx_elset_name
else:
error = (
"FEM: Trouble in ccx input file, because an "
"elset name is longer than 80 character! {}\n"
.format(ccx_elset_name)
)
raise Exception(error)
def get_ccx_elset_name_short(names):
# restricted max length = 20 (beam elsets)
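    # e.g. names = [{"short": "M0"}, {"short": "B0"},
    #               {"short": "R0"}, {"short": "D0"}] --> "M0B0R0D0"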
ccx_elset_name = ""
for name in names:
ccx_elset_name += name["short"]
if len(ccx_elset_name) < 21:
return ccx_elset_name
else:
error = (
"FEM: Trouble in ccx input file, because an"
"beam elset name is longer than 20 character! {}\n"
.format(ccx_elset_name)
)
raise Exception(error)
def is_fluid_section_inlet_outlet(ccx_elsets):
""" Fluid section: Inlet and Outlet requires special element definition
"""
for ccx_elset in ccx_elsets:
if ccx_elset["ccx_elset"]:
if "fluidsection_obj" in ccx_elset: # fluid mesh
fluidsec_obj = ccx_elset["fluidsection_obj"]
if fluidsec_obj.SectionType == "Liquid":
if (fluidsec_obj.LiquidSectionType == "PIPE INLET") \
or (fluidsec_obj.LiquidSectionType == "PIPE OUTLET"):
return True
return False
def liquid_section_def(obj, section_type):
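    """Return the data line(s) for a *FLUID SECTION card of the given
    liquid section type, with the geometry values taken from the FreeCAD
    fluid section object."""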
if section_type == "PIPE MANNING":
manning_area = str(obj.ManningArea.getValueAs("mm^2").Value)
manning_radius = str(obj.ManningRadius.getValueAs("mm"))
manning_coefficient = str(obj.ManningCoefficient)
section_geo = manning_area + "," + manning_radius + "," + manning_coefficient + "\n"
return section_geo
elif section_type == "PIPE ENLARGEMENT":
enlarge_area1 = str(obj.EnlargeArea1.getValueAs("mm^2").Value)
enlarge_area2 = str(obj.EnlargeArea2.getValueAs("mm^2").Value)
section_geo = enlarge_area1 + "," + enlarge_area2 + "\n"
return section_geo
elif section_type == "PIPE CONTRACTION":
contract_area1 = str(obj.ContractArea1.getValueAs("mm^2").Value)
contract_area2 = str(obj.ContractArea2.getValueAs("mm^2").Value)
section_geo = contract_area1 + "," + contract_area2 + "\n"
return section_geo
elif section_type == "PIPE ENTRANCE":
entrance_pipe_area = str(obj.EntrancePipeArea.getValueAs("mm^2").Value)
entrance_area = str(obj.EntranceArea.getValueAs("mm^2").Value)
section_geo = entrance_pipe_area + "," + entrance_area + "\n"
return section_geo
elif section_type == "PIPE DIAPHRAGM":
diaphragm_pipe_area = str(obj.DiaphragmPipeArea.getValueAs("mm^2").Value)
diaphragm_area = str(obj.DiaphragmArea.getValueAs("mm^2").Value)
section_geo = diaphragm_pipe_area + "," + diaphragm_area + "\n"
return section_geo
elif section_type == "PIPE BEND":
bend_pipe_area = str(obj.BendPipeArea.getValueAs("mm^2").Value)
bend_radius_diameter = str(obj.BendRadiusDiameter)
bend_angle = str(obj.BendAngle)
bend_loss_coefficient = str(obj.BendLossCoefficient)
section_geo = ("{},{},{},{}\n".format(
bend_pipe_area,
bend_radius_diameter,
bend_angle,
bend_loss_coefficient
))
return section_geo
elif section_type == "PIPE GATE VALVE":
gatevalve_pipe_area = str(obj.GateValvePipeArea.getValueAs("mm^2").Value)
gatevalve_closing_coeff = str(obj.GateValveClosingCoeff)
section_geo = gatevalve_pipe_area + "," + gatevalve_closing_coeff + "\n"
return section_geo
elif section_type == "PIPE WHITE-COLEBROOK":
colebrooke_area = str(obj.ColebrookeArea.getValueAs("mm^2").Value)
colebrooke_diameter = str(2 * obj.ColebrookeRadius.getValueAs("mm"))
colebrooke_grain_diameter = str(obj.ColebrookeGrainDiameter.getValueAs("mm"))
colebrooke_form_factor = str(obj.ColebrookeFormFactor)
section_geo = ("{},{},{},{},{}\n".format(
colebrooke_area,
colebrooke_diameter,
"-1",
colebrooke_grain_diameter,
colebrooke_form_factor
))
return section_geo
elif section_type == "LIQUID PUMP":
section_geo = ""
for i in range(len(obj.PumpFlowRate)):
flow_rate = str(obj.PumpFlowRate[i])
head = str(obj.PumpHeadLoss[i])
section_geo = section_geo + flow_rate + "," + head + ","
section_geo = section_geo + "\n"
return section_geo
else:
return ""
## @}
| Fat-Zer/FreeCAD_sf_master | src/Mod/Fem/femsolver/calculix/writer.py | Python | lgpl-2.1 | 99,378 |
#! /usr/bin/env python3
# Print the product of age and size of each file, in suitable units.
#
# Usage: byteyears [ -a | -m | -c ] file ...
#
# Options -[amc] select atime, mtime (default) or ctime as age.
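# Example: a 1 MiB file last modified two years ago prints 2097152,
# i.e. its size in bytes multiplied by its age in years.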
import sys, os, time
from stat import *
def main():
# Use lstat() to stat files if it exists, else stat()
try:
statfunc = os.lstat
except AttributeError:
statfunc = os.stat
# Parse options
if sys.argv[1] == '-m':
itime = ST_MTIME
del sys.argv[1]
elif sys.argv[1] == '-c':
itime = ST_CTIME
del sys.argv[1]
elif sys.argv[1] == '-a':
        itime = ST_ATIME
del sys.argv[1]
else:
itime = ST_MTIME
secs_per_year = 365.0 * 24.0 * 3600.0 # Scale factor
now = time.time() # Current time, for age computations
status = 0 # Exit status, set to 1 on errors
# Compute max file name length
maxlen = 1
for filename in sys.argv[1:]:
maxlen = max(maxlen, len(filename))
# Process each argument in turn
for filename in sys.argv[1:]:
try:
st = statfunc(filename)
except os.error as msg:
sys.stderr.write("can't stat %r: %r\n" % (filename, msg))
status = 1
st = ()
if st:
anytime = st[itime]
size = st[ST_SIZE]
age = now - anytime
byteyears = float(size) * float(age) / secs_per_year
print(filename.ljust(maxlen), end=' ')
print(repr(int(byteyears)).rjust(8))
sys.exit(status)
if __name__ == '__main__':
main()
| Immortalin/python-for-android | python3-alpha/python3-src/Tools/scripts/byteyears.py | Python | apache-2.0 | 1,651 |
from intermol.orderedset import OrderedSet
class MoleculeType(object):
"""An abstract container for molecules of one type. """
def __init__(self, name=None):
"""Initialize the MoleculeType container.
Args:
name (str): the name of the moleculetype to add
"""
if not name:
name = 'MOL'
self.name = name
self.molecules = OrderedSet()
self.bond_forces = set()
self.pair_forces = set()
self.angle_forces = set()
self.dihedral_forces = set()
self.virtual_forces = set()
self.torsiontorsion_forces = set()
self.constraints = set()
self.exclusions = set()
self.rigidwaters = set()
self.nrexcl = None
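        # GROMACS-style exclusion setting: the number of bonded
        # neighbours within which nonbonded interactions are excluded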
def add_molecule(self, molecule):
"""Add a molecule into the moleculetype. """
self.molecules.add(molecule)
# These three functions could probably be made faster through some sort of list comprehension
# rather than iterating explicitly over the lists
def _match_two_atoms(self, newforce, oldforces):
""" find and return any force with the same three atoms. For now, respect ordering in matches. """
newatoms = [newforce.atom1, newforce.atom2]
for force in oldforces:
oldatoms = [force.atom1, force.atom2]
if newatoms == oldatoms:
return force
return False
def _match_three_atoms(self, newforce, oldforces):
""" find and return any force with the same three atoms. For now, respect ordering in matches. """
newatoms = [newforce.atom1, newforce.atom2, newforce.atom3]
for force in oldforces:
oldatoms = [force.atom1, force.atom2, force.atom3]
if newatoms == oldatoms:
return force
return False
def _match_four_atoms(self, newforce, oldforces):
""" find and return any force with the same three atoms. For now, respect ordering in matches. """
newatoms = [newforce.atom1, newforce.atom2, newforce.atom3, newforce.atom4]
for force in oldforces:
oldatoms = [force.atom1, force.atom2, force.atom3, force.atom4]
if newatoms == oldatoms:
return force
return False
def match_bonds(self, bond):
return self._match_two_atoms(bond, self.bond_forces)
def match_pairs(self, pair):
return self._match_two_atoms(pair, self.pair_forces)
def match_angles(self, angle):
return self._match_three_atoms(angle, self.angle_forces)
def match_dihedrals(self, dihedral):
return self._match_four_atoms(dihedral, self.dihedral_forces)
def __repr__(self):
return "MoleculeType '{}' with {} molecules".format(
self.name, len(self.molecules))
def __str__(self):
return "MoleculeType{} '{}' with {} molecules".format(
id(self), self.name, len(self.molecules))
| shirtsgroup/InterMol | intermol/moleculetype.py | Python | mit | 2,948 |
# Copyright (c) 2015-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import hashlib
import hmac
import json
import os
import unittest
import mock
from six.moves.urllib import parse as urlparse
from swift.common.middleware.crypto import encrypter
from swift.common.middleware.crypto.crypto_utils import (
CRYPTO_KEY_CALLBACK, Crypto)
from swift.common.swob import (
Request, HTTPException, HTTPCreated, HTTPAccepted, HTTPOk, HTTPBadRequest)
from swift.common.utils import FileLikeIter
from test.unit import FakeLogger, EMPTY_ETAG
from test.unit.common.middleware.crypto.crypto_helpers import (
fetch_crypto_keys, md5hex, FAKE_IV, encrypt)
from test.unit.common.middleware.helpers import FakeSwift, FakeAppThatExcepts
@mock.patch('swift.common.middleware.crypto.crypto_utils.Crypto.create_iv',
lambda *args: FAKE_IV)
class TestEncrypter(unittest.TestCase):
def setUp(self):
self.app = FakeSwift()
self.encrypter = encrypter.Encrypter(self.app, {})
self.encrypter.logger = FakeLogger()
def _verify_user_metadata(self, req_hdrs, name, value, key):
# verify encrypted version of user metadata
self.assertNotIn('X-Object-Meta-' + name, req_hdrs)
expected_hdr = 'X-Object-Transient-Sysmeta-Crypto-Meta-' + name
self.assertIn(expected_hdr, req_hdrs)
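        # the header value has the form
        # '<base64 ciphertext>; swift_meta=<url-quoted json crypto meta>'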
enc_val, param = req_hdrs[expected_hdr].split(';')
param = param.strip()
self.assertTrue(param.startswith('swift_meta='))
actual_meta = json.loads(
urlparse.unquote_plus(param[len('swift_meta='):]))
self.assertEqual(Crypto.cipher, actual_meta['cipher'])
meta_iv = base64.b64decode(actual_meta['iv'])
self.assertEqual(FAKE_IV, meta_iv)
self.assertEqual(
base64.b64encode(encrypt(value, key, meta_iv)),
enc_val)
# if there is any encrypted user metadata then this header should exist
self.assertIn('X-Object-Transient-Sysmeta-Crypto-Meta', req_hdrs)
common_meta = json.loads(urlparse.unquote_plus(
req_hdrs['X-Object-Transient-Sysmeta-Crypto-Meta']))
self.assertDictEqual({'cipher': Crypto.cipher,
'key_id': {'v': 'fake', 'path': '/a/c/fake'}},
common_meta)
def test_PUT_req(self):
body_key = os.urandom(32)
object_key = fetch_crypto_keys()['object']
plaintext = 'FAKE APP'
plaintext_etag = md5hex(plaintext)
ciphertext = encrypt(plaintext, body_key, FAKE_IV)
ciphertext_etag = md5hex(ciphertext)
env = {'REQUEST_METHOD': 'PUT',
CRYPTO_KEY_CALLBACK: fetch_crypto_keys}
hdrs = {'etag': plaintext_etag,
'content-type': 'text/plain',
'content-length': str(len(plaintext)),
'x-object-meta-etag': 'not to be confused with the Etag!',
'x-object-meta-test': 'encrypt me',
'x-object-sysmeta-test': 'do not encrypt me'}
req = Request.blank(
'/v1/a/c/o', environ=env, body=plaintext, headers=hdrs)
self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {})
with mock.patch(
'swift.common.middleware.crypto.crypto_utils.'
'Crypto.create_random_key',
return_value=body_key):
resp = req.get_response(self.encrypter)
self.assertEqual('201 Created', resp.status)
self.assertEqual(plaintext_etag, resp.headers['Etag'])
# verify metadata items
self.assertEqual(1, len(self.app.calls), self.app.calls)
self.assertEqual('PUT', self.app.calls[0][0])
req_hdrs = self.app.headers[0]
# verify body crypto meta
actual = req_hdrs['X-Object-Sysmeta-Crypto-Body-Meta']
actual = json.loads(urlparse.unquote_plus(actual))
self.assertEqual(Crypto().cipher, actual['cipher'])
self.assertEqual(FAKE_IV, base64.b64decode(actual['iv']))
# verify wrapped body key
expected_wrapped_key = encrypt(body_key, object_key, FAKE_IV)
self.assertEqual(expected_wrapped_key,
base64.b64decode(actual['body_key']['key']))
self.assertEqual(FAKE_IV,
base64.b64decode(actual['body_key']['iv']))
self.assertEqual(fetch_crypto_keys()['id'], actual['key_id'])
# verify etag
self.assertEqual(ciphertext_etag, req_hdrs['Etag'])
encrypted_etag, _junk, etag_meta = \
req_hdrs['X-Object-Sysmeta-Crypto-Etag'].partition('; swift_meta=')
# verify crypto_meta was appended to this etag
self.assertTrue(etag_meta)
actual_meta = json.loads(urlparse.unquote_plus(etag_meta))
self.assertEqual(Crypto().cipher, actual_meta['cipher'])
# verify encrypted version of plaintext etag
actual = base64.b64decode(encrypted_etag)
etag_iv = base64.b64decode(actual_meta['iv'])
enc_etag = encrypt(plaintext_etag, object_key, etag_iv)
self.assertEqual(enc_etag, actual)
# verify etag MAC for conditional requests
actual_hmac = base64.b64decode(
req_hdrs['X-Object-Sysmeta-Crypto-Etag-Mac'])
self.assertEqual(actual_hmac, hmac.new(
object_key, plaintext_etag, hashlib.sha256).digest())
# verify encrypted etag for container update
self.assertIn(
'X-Object-Sysmeta-Container-Update-Override-Etag', req_hdrs)
parts = req_hdrs[
'X-Object-Sysmeta-Container-Update-Override-Etag'].rsplit(';', 1)
self.assertEqual(2, len(parts))
# extract crypto_meta from end of etag for container update
param = parts[1].strip()
crypto_meta_tag = 'swift_meta='
self.assertTrue(param.startswith(crypto_meta_tag), param)
actual_meta = json.loads(
urlparse.unquote_plus(param[len(crypto_meta_tag):]))
self.assertEqual(Crypto().cipher, actual_meta['cipher'])
self.assertEqual(fetch_crypto_keys()['id'], actual_meta['key_id'])
cont_key = fetch_crypto_keys()['container']
cont_etag_iv = base64.b64decode(actual_meta['iv'])
self.assertEqual(FAKE_IV, cont_etag_iv)
self.assertEqual(encrypt(plaintext_etag, cont_key, cont_etag_iv),
base64.b64decode(parts[0]))
# content-type is not encrypted
self.assertEqual('text/plain', req_hdrs['Content-Type'])
# user meta is encrypted
self._verify_user_metadata(req_hdrs, 'Test', 'encrypt me', object_key)
self._verify_user_metadata(
req_hdrs, 'Etag', 'not to be confused with the Etag!', object_key)
# sysmeta is not encrypted
self.assertEqual('do not encrypt me',
req_hdrs['X-Object-Sysmeta-Test'])
# verify object is encrypted by getting direct from the app
get_req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = get_req.get_response(self.app)
self.assertEqual(ciphertext, resp.body)
self.assertEqual(ciphertext_etag, resp.headers['Etag'])
def test_PUT_zero_size_object(self):
# object body encryption should be skipped for zero sized object body
object_key = fetch_crypto_keys()['object']
plaintext_etag = EMPTY_ETAG
env = {'REQUEST_METHOD': 'PUT',
CRYPTO_KEY_CALLBACK: fetch_crypto_keys}
hdrs = {'etag': EMPTY_ETAG,
'content-type': 'text/plain',
'content-length': '0',
'x-object-meta-etag': 'not to be confused with the Etag!',
'x-object-meta-test': 'encrypt me',
'x-object-sysmeta-test': 'do not encrypt me'}
req = Request.blank(
'/v1/a/c/o', environ=env, body='', headers=hdrs)
self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {})
resp = req.get_response(self.encrypter)
self.assertEqual('201 Created', resp.status)
self.assertEqual(plaintext_etag, resp.headers['Etag'])
self.assertEqual(1, len(self.app.calls), self.app.calls)
self.assertEqual('PUT', self.app.calls[0][0])
req_hdrs = self.app.headers[0]
# verify that there is no body crypto meta
self.assertNotIn('X-Object-Sysmeta-Crypto-Meta', req_hdrs)
# verify etag is md5 of plaintext
self.assertEqual(EMPTY_ETAG, req_hdrs['Etag'])
# verify there is no etag crypto meta
self.assertNotIn('X-Object-Sysmeta-Crypto-Etag', req_hdrs)
# verify there is no container update override for etag
self.assertNotIn(
'X-Object-Sysmeta-Container-Update-Override-Etag', req_hdrs)
# user meta is still encrypted
self._verify_user_metadata(req_hdrs, 'Test', 'encrypt me', object_key)
self._verify_user_metadata(
req_hdrs, 'Etag', 'not to be confused with the Etag!', object_key)
# sysmeta is not encrypted
self.assertEqual('do not encrypt me',
req_hdrs['X-Object-Sysmeta-Test'])
# verify object is empty by getting direct from the app
get_req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
resp = get_req.get_response(self.app)
self.assertEqual('', resp.body)
self.assertEqual(EMPTY_ETAG, resp.headers['Etag'])
def _test_PUT_with_other_footers(self, override_etag):
# verify handling of another middleware's footer callback
cont_key = fetch_crypto_keys()['container']
body_key = os.urandom(32)
object_key = fetch_crypto_keys()['object']
plaintext = 'FAKE APP'
plaintext_etag = md5hex(plaintext)
ciphertext = encrypt(plaintext, body_key, FAKE_IV)
ciphertext_etag = md5hex(ciphertext)
other_footers = {
'Etag': plaintext_etag,
'X-Object-Sysmeta-Other': 'other sysmeta',
'X-Object-Sysmeta-Container-Update-Override-Size':
'other override',
'X-Object-Sysmeta-Container-Update-Override-Etag':
override_etag}
env = {'REQUEST_METHOD': 'PUT',
CRYPTO_KEY_CALLBACK: fetch_crypto_keys,
'swift.callback.update_footers':
lambda footers: footers.update(other_footers)}
hdrs = {'content-type': 'text/plain',
'content-length': str(len(plaintext)),
'Etag': 'correct etag is in footers'}
req = Request.blank(
'/v1/a/c/o', environ=env, body=plaintext, headers=hdrs)
self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {})
with mock.patch(
'swift.common.middleware.crypto.crypto_utils.'
'Crypto.create_random_key',
lambda *args: body_key):
resp = req.get_response(self.encrypter)
self.assertEqual('201 Created', resp.status)
self.assertEqual(plaintext_etag, resp.headers['Etag'])
# verify metadata items
self.assertEqual(1, len(self.app.calls), self.app.calls)
self.assertEqual('PUT', self.app.calls[0][0])
req_hdrs = self.app.headers[0]
# verify that other middleware's footers made it to app, including any
# container update overrides but nothing Etag-related
other_footers.pop('Etag')
other_footers.pop('X-Object-Sysmeta-Container-Update-Override-Etag')
for k, v in other_footers.items():
self.assertEqual(v, req_hdrs[k])
# verify encryption footers are ok
encrypted_etag, _junk, etag_meta = \
req_hdrs['X-Object-Sysmeta-Crypto-Etag'].partition('; swift_meta=')
self.assertTrue(etag_meta)
actual_meta = json.loads(urlparse.unquote_plus(etag_meta))
self.assertEqual(Crypto().cipher, actual_meta['cipher'])
self.assertEqual(ciphertext_etag, req_hdrs['Etag'])
actual = base64.b64decode(encrypted_etag)
etag_iv = base64.b64decode(actual_meta['iv'])
self.assertEqual(encrypt(plaintext_etag, object_key, etag_iv), actual)
# verify encrypted etag for container update
self.assertIn(
'X-Object-Sysmeta-Container-Update-Override-Etag', req_hdrs)
parts = req_hdrs[
'X-Object-Sysmeta-Container-Update-Override-Etag'].rsplit(';', 1)
self.assertEqual(2, len(parts))
# extract crypto_meta from end of etag for container update
param = parts[1].strip()
crypto_meta_tag = 'swift_meta='
self.assertTrue(param.startswith(crypto_meta_tag), param)
actual_meta = json.loads(
urlparse.unquote_plus(param[len(crypto_meta_tag):]))
self.assertEqual(Crypto().cipher, actual_meta['cipher'])
cont_key = fetch_crypto_keys()['container']
cont_etag_iv = base64.b64decode(actual_meta['iv'])
self.assertEqual(FAKE_IV, cont_etag_iv)
self.assertEqual(encrypt(override_etag, cont_key, cont_etag_iv),
base64.b64decode(parts[0]))
# verify body crypto meta
actual = req_hdrs['X-Object-Sysmeta-Crypto-Body-Meta']
actual = json.loads(urlparse.unquote_plus(actual))
self.assertEqual(Crypto().cipher, actual['cipher'])
self.assertEqual(FAKE_IV, base64.b64decode(actual['iv']))
# verify wrapped body key
expected_wrapped_key = encrypt(body_key, object_key, FAKE_IV)
self.assertEqual(expected_wrapped_key,
base64.b64decode(actual['body_key']['key']))
self.assertEqual(FAKE_IV,
base64.b64decode(actual['body_key']['iv']))
self.assertEqual(fetch_crypto_keys()['id'], actual['key_id'])
def test_PUT_with_other_footers(self):
self._test_PUT_with_other_footers('override etag')
def test_PUT_with_other_footers_and_empty_etag(self):
# verify that an override etag value of EMPTY_ETAG will be encrypted
        # when there is a non-zero body length
self._test_PUT_with_other_footers(EMPTY_ETAG)
def _test_PUT_with_etag_override_in_headers(self, override_etag):
# verify handling of another middleware's
# container-update-override-etag in headers
plaintext = 'FAKE APP'
plaintext_etag = md5hex(plaintext)
env = {'REQUEST_METHOD': 'PUT',
CRYPTO_KEY_CALLBACK: fetch_crypto_keys}
hdrs = {'content-type': 'text/plain',
'content-length': str(len(plaintext)),
'Etag': plaintext_etag,
'X-Object-Sysmeta-Container-Update-Override-Etag':
override_etag}
req = Request.blank(
'/v1/a/c/o', environ=env, body=plaintext, headers=hdrs)
self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {})
resp = req.get_response(self.encrypter)
self.assertEqual('201 Created', resp.status)
self.assertEqual(plaintext_etag, resp.headers['Etag'])
# verify metadata items
self.assertEqual(1, len(self.app.calls), self.app.calls)
self.assertEqual(('PUT', '/v1/a/c/o'), self.app.calls[0])
req_hdrs = self.app.headers[0]
# verify encrypted etag for container update
self.assertIn(
'X-Object-Sysmeta-Container-Update-Override-Etag', req_hdrs)
parts = req_hdrs[
'X-Object-Sysmeta-Container-Update-Override-Etag'].rsplit(';', 1)
self.assertEqual(2, len(parts))
cont_key = fetch_crypto_keys()['container']
# extract crypto_meta from end of etag for container update
param = parts[1].strip()
crypto_meta_tag = 'swift_meta='
self.assertTrue(param.startswith(crypto_meta_tag), param)
actual_meta = json.loads(
urlparse.unquote_plus(param[len(crypto_meta_tag):]))
self.assertEqual(Crypto().cipher, actual_meta['cipher'])
self.assertEqual(fetch_crypto_keys()['id'], actual_meta['key_id'])
cont_etag_iv = base64.b64decode(actual_meta['iv'])
self.assertEqual(FAKE_IV, cont_etag_iv)
self.assertEqual(encrypt(override_etag, cont_key, cont_etag_iv),
base64.b64decode(parts[0]))
def test_PUT_with_etag_override_in_headers(self):
self._test_PUT_with_etag_override_in_headers('override_etag')
def test_PUT_with_etag_override_in_headers_and_empty_etag(self):
# verify that an override etag value of EMPTY_ETAG will be encrypted
        # when there is a non-zero body length
self._test_PUT_with_etag_override_in_headers(EMPTY_ETAG)
def test_PUT_with_bad_etag_in_other_footers(self):
# verify that etag supplied in footers from other middleware overrides
# header etag when validating inbound plaintext etags
plaintext = 'FAKE APP'
plaintext_etag = md5hex(plaintext)
other_footers = {
'Etag': 'bad etag',
'X-Object-Sysmeta-Other': 'other sysmeta',
'X-Object-Sysmeta-Container-Update-Override-Etag':
'other override'}
env = {'REQUEST_METHOD': 'PUT',
CRYPTO_KEY_CALLBACK: fetch_crypto_keys,
'swift.callback.update_footers':
lambda footers: footers.update(other_footers)}
hdrs = {'content-type': 'text/plain',
'content-length': str(len(plaintext)),
'Etag': plaintext_etag}
req = Request.blank(
'/v1/a/c/o', environ=env, body=plaintext, headers=hdrs)
self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {})
resp = req.get_response(self.encrypter)
self.assertEqual('422 Unprocessable Entity', resp.status)
self.assertNotIn('Etag', resp.headers)
def test_PUT_with_bad_etag_in_headers_and_other_footers(self):
# verify that etag supplied in headers from other middleware is used if
# none is supplied in footers when validating inbound plaintext etags
plaintext = 'FAKE APP'
other_footers = {
'X-Object-Sysmeta-Other': 'other sysmeta',
'X-Object-Sysmeta-Container-Update-Override-Etag':
'other override'}
env = {'REQUEST_METHOD': 'PUT',
CRYPTO_KEY_CALLBACK: fetch_crypto_keys,
'swift.callback.update_footers':
lambda footers: footers.update(other_footers)}
hdrs = {'content-type': 'text/plain',
'content-length': str(len(plaintext)),
'Etag': 'bad etag'}
req = Request.blank(
'/v1/a/c/o', environ=env, body=plaintext, headers=hdrs)
self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {})
resp = req.get_response(self.encrypter)
self.assertEqual('422 Unprocessable Entity', resp.status)
self.assertNotIn('Etag', resp.headers)
def test_PUT_nothing_read(self):
# simulate an artificial scenario of a downstream filter/app not
# actually reading the input stream from encrypter.
class NonReadingApp(object):
def __call__(self, env, start_response):
# note: no read from wsgi.input
req = Request(env)
env['swift.callback.update_footers'](req.headers)
call_headers.append(req.headers)
resp = HTTPCreated(req=req, headers={'Etag': 'response etag'})
return resp(env, start_response)
env = {'REQUEST_METHOD': 'PUT',
CRYPTO_KEY_CALLBACK: fetch_crypto_keys}
hdrs = {'content-type': 'text/plain',
'content-length': 0,
'etag': 'etag from client'}
req = Request.blank('/v1/a/c/o', environ=env, body='', headers=hdrs)
call_headers = []
resp = req.get_response(encrypter.Encrypter(NonReadingApp(), {}))
self.assertEqual('201 Created', resp.status)
self.assertEqual('response etag', resp.headers['Etag'])
self.assertEqual(1, len(call_headers))
self.assertEqual('etag from client', call_headers[0]['etag'])
# verify no encryption footers
for k in call_headers[0]:
self.assertFalse(k.lower().startswith('x-object-sysmeta-crypto-'))
# check that an upstream footer callback gets called
other_footers = {
'Etag': EMPTY_ETAG,
'X-Object-Sysmeta-Other': 'other sysmeta',
'X-Object-Sysmeta-Container-Update-Override-Etag':
'other override'}
env.update({'swift.callback.update_footers':
lambda footers: footers.update(other_footers)})
req = Request.blank('/v1/a/c/o', environ=env, body='', headers=hdrs)
call_headers = []
resp = req.get_response(encrypter.Encrypter(NonReadingApp(), {}))
self.assertEqual('201 Created', resp.status)
self.assertEqual('response etag', resp.headers['Etag'])
self.assertEqual(1, len(call_headers))
# verify encrypted override etag for container update.
self.assertIn(
'X-Object-Sysmeta-Container-Update-Override-Etag', call_headers[0])
parts = call_headers[0][
'X-Object-Sysmeta-Container-Update-Override-Etag'].rsplit(';', 1)
self.assertEqual(2, len(parts))
cont_key = fetch_crypto_keys()['container']
param = parts[1].strip()
crypto_meta_tag = 'swift_meta='
self.assertTrue(param.startswith(crypto_meta_tag), param)
actual_meta = json.loads(
urlparse.unquote_plus(param[len(crypto_meta_tag):]))
self.assertEqual(Crypto().cipher, actual_meta['cipher'])
self.assertEqual(fetch_crypto_keys()['id'], actual_meta['key_id'])
cont_etag_iv = base64.b64decode(actual_meta['iv'])
self.assertEqual(FAKE_IV, cont_etag_iv)
self.assertEqual(encrypt('other override', cont_key, cont_etag_iv),
base64.b64decode(parts[0]))
# verify that other middleware's footers made it to app
other_footers.pop('X-Object-Sysmeta-Container-Update-Override-Etag')
for k, v in other_footers.items():
self.assertEqual(v, call_headers[0][k])
# verify no encryption footers
for k in call_headers[0]:
self.assertFalse(k.lower().startswith('x-object-sysmeta-crypto-'))
        # if the upstream footer override etag is for an empty body then check
        # that it is not encrypted
other_footers = {
'Etag': EMPTY_ETAG,
'X-Object-Sysmeta-Container-Update-Override-Etag': EMPTY_ETAG}
env.update({'swift.callback.update_footers':
lambda footers: footers.update(other_footers)})
req = Request.blank('/v1/a/c/o', environ=env, body='', headers=hdrs)
call_headers = []
resp = req.get_response(encrypter.Encrypter(NonReadingApp(), {}))
self.assertEqual('201 Created', resp.status)
self.assertEqual('response etag', resp.headers['Etag'])
self.assertEqual(1, len(call_headers))
# verify that other middleware's footers made it to app
for k, v in other_footers.items():
self.assertEqual(v, call_headers[0][k])
# verify no encryption footers
for k in call_headers[0]:
self.assertFalse(k.lower().startswith('x-object-sysmeta-crypto-'))
def test_POST_req(self):
body = 'FAKE APP'
env = {'REQUEST_METHOD': 'POST',
CRYPTO_KEY_CALLBACK: fetch_crypto_keys}
hdrs = {'x-object-meta-test': 'encrypt me',
'x-object-meta-test2': '',
'x-object-sysmeta-test': 'do not encrypt me'}
req = Request.blank('/v1/a/c/o', environ=env, body=body, headers=hdrs)
key = fetch_crypto_keys()['object']
self.app.register('POST', '/v1/a/c/o', HTTPAccepted, {})
resp = req.get_response(self.encrypter)
self.assertEqual('202 Accepted', resp.status)
self.assertNotIn('Etag', resp.headers)
# verify metadata items
self.assertEqual(1, len(self.app.calls), self.app.calls)
self.assertEqual('POST', self.app.calls[0][0])
req_hdrs = self.app.headers[0]
# user meta is encrypted
self._verify_user_metadata(req_hdrs, 'Test', 'encrypt me', key)
# unless it had no value
self.assertEqual('', req_hdrs['X-Object-Meta-Test2'])
# sysmeta is not encrypted
self.assertEqual('do not encrypt me',
req_hdrs['X-Object-Sysmeta-Test'])
def _test_no_user_metadata(self, method):
# verify that x-object-transient-sysmeta-crypto-meta is not set when
# there is no user metadata
env = {'REQUEST_METHOD': method,
CRYPTO_KEY_CALLBACK: fetch_crypto_keys}
req = Request.blank('/v1/a/c/o', environ=env, body='body')
self.app.register(method, '/v1/a/c/o', HTTPAccepted, {})
resp = req.get_response(self.encrypter)
self.assertEqual('202 Accepted', resp.status)
self.assertEqual(1, len(self.app.calls), self.app.calls)
self.assertEqual(method, self.app.calls[0][0])
self.assertNotIn('x-object-transient-sysmeta-crypto-meta',
self.app.headers[0])
def test_PUT_no_user_metadata(self):
self._test_no_user_metadata('PUT')
def test_POST_no_user_metadata(self):
self._test_no_user_metadata('POST')
def _test_if_match(self, method, match_header_name):
def do_test(method, plain_etags, expected_plain_etags=None):
env = {CRYPTO_KEY_CALLBACK: fetch_crypto_keys}
match_header_value = ', '.join(plain_etags)
req = Request.blank(
'/v1/a/c/o', environ=env, method=method,
headers={match_header_name: match_header_value})
app = FakeSwift()
app.register(method, '/v1/a/c/o', HTTPOk, {})
resp = req.get_response(encrypter.Encrypter(app, {}))
self.assertEqual('200 OK', resp.status)
self.assertEqual(1, len(app.calls), app.calls)
self.assertEqual(method, app.calls[0][0])
actual_headers = app.headers[0]
# verify the alternate etag location has been specified
if match_header_value and match_header_value != '*':
self.assertIn('X-Backend-Etag-Is-At', actual_headers)
self.assertEqual('X-Object-Sysmeta-Crypto-Etag-Mac',
actual_headers['X-Backend-Etag-Is-At'])
# verify etags have been supplemented with masked values
self.assertIn(match_header_name, actual_headers)
actual_etags = set(actual_headers[match_header_name].split(', '))
key = fetch_crypto_keys()['object']
masked_etags = [
'"%s"' % base64.b64encode(hmac.new(
key, etag.strip('"'), hashlib.sha256).digest())
for etag in plain_etags if etag not in ('*', '')]
expected_etags = set((expected_plain_etags or plain_etags) +
masked_etags)
self.assertEqual(expected_etags, actual_etags)
# check that the request environ was returned to original state
self.assertEqual(set(plain_etags),
set(req.headers[match_header_name].split(', ')))
do_test(method, [''])
do_test(method, ['"an etag"'])
do_test(method, ['"an etag"', '"another_etag"'])
do_test(method, ['*'])
        # RFC 2616 does not allow wildcard *and* etag but test it anyway
do_test(method, ['*', '"an etag"'])
# etags should be quoted but check we can cope if they are not
do_test(
method, ['*', 'an etag', 'another_etag'],
expected_plain_etags=['*', '"an etag"', '"another_etag"'])
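        # In short: plaintext etags in conditional requests are supplemented
        # with HMAC-SHA256(object_key, etag) values so that the backend can
        # match them against X-Object-Sysmeta-Crypto-Etag-Mac, the alternate
        # etag location named via X-Backend-Etag-Is-At above.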
def test_GET_if_match(self):
self._test_if_match('GET', 'If-Match')
def test_HEAD_if_match(self):
self._test_if_match('HEAD', 'If-Match')
def test_GET_if_none_match(self):
self._test_if_match('GET', 'If-None-Match')
def test_HEAD_if_none_match(self):
self._test_if_match('HEAD', 'If-None-Match')
def _test_existing_etag_is_at_header(self, method, match_header_name):
# if another middleware has already set X-Backend-Etag-Is-At then
# encrypter should not override that value
env = {CRYPTO_KEY_CALLBACK: fetch_crypto_keys}
req = Request.blank(
'/v1/a/c/o', environ=env, method=method,
headers={match_header_name: "an etag",
'X-Backend-Etag-Is-At': 'X-Object-Sysmeta-Other-Etag'})
self.app.register(method, '/v1/a/c/o', HTTPOk, {})
resp = req.get_response(self.encrypter)
self.assertEqual('200 OK', resp.status)
self.assertEqual(1, len(self.app.calls), self.app.calls)
self.assertEqual(method, self.app.calls[0][0])
actual_headers = self.app.headers[0]
self.assertIn('X-Backend-Etag-Is-At', actual_headers)
self.assertEqual(
'X-Object-Sysmeta-Other-Etag,X-Object-Sysmeta-Crypto-Etag-Mac',
actual_headers['X-Backend-Etag-Is-At'])
actual_etags = set(actual_headers[match_header_name].split(', '))
self.assertIn('"an etag"', actual_etags)
def test_GET_if_match_with_existing_etag_is_at_header(self):
self._test_existing_etag_is_at_header('GET', 'If-Match')
def test_HEAD_if_match_with_existing_etag_is_at_header(self):
self._test_existing_etag_is_at_header('HEAD', 'If-Match')
def test_GET_if_none_match_with_existing_etag_is_at_header(self):
self._test_existing_etag_is_at_header('GET', 'If-None-Match')
def test_HEAD_if_none_match_with_existing_etag_is_at_header(self):
self._test_existing_etag_is_at_header('HEAD', 'If-None-Match')
def _test_etag_is_at_not_duplicated(self, method):
# verify only one occurrence of X-Object-Sysmeta-Crypto-Etag-Mac in
# X-Backend-Etag-Is-At
key = fetch_crypto_keys()['object']
env = {CRYPTO_KEY_CALLBACK: fetch_crypto_keys}
req = Request.blank(
'/v1/a/c/o', environ=env, method=method,
headers={'If-Match': '"an etag"',
'If-None-Match': '"another etag"'})
self.app.register(method, '/v1/a/c/o', HTTPOk, {})
resp = req.get_response(self.encrypter)
self.assertEqual('200 OK', resp.status)
self.assertEqual(1, len(self.app.calls), self.app.calls)
self.assertEqual(method, self.app.calls[0][0])
actual_headers = self.app.headers[0]
self.assertIn('X-Backend-Etag-Is-At', actual_headers)
self.assertEqual('X-Object-Sysmeta-Crypto-Etag-Mac',
actual_headers['X-Backend-Etag-Is-At'])
self.assertIn('"%s"' % base64.b64encode(
hmac.new(key, 'an etag', hashlib.sha256).digest()),
actual_headers['If-Match'])
self.assertIn('"another etag"', actual_headers['If-None-Match'])
self.assertIn('"%s"' % base64.b64encode(
hmac.new(key, 'another etag', hashlib.sha256).digest()),
actual_headers['If-None-Match'])
def test_GET_etag_is_at_not_duplicated(self):
self._test_etag_is_at_not_duplicated('GET')
def test_HEAD_etag_is_at_not_duplicated(self):
self._test_etag_is_at_not_duplicated('HEAD')
def test_PUT_response_inconsistent_etag_is_not_replaced(self):
        # if the response is a success but the etag does not match the
        # ciphertext md5 then verify that we do *not* replace it with the
        # plaintext etag
body = 'FAKE APP'
env = {'REQUEST_METHOD': 'PUT',
CRYPTO_KEY_CALLBACK: fetch_crypto_keys}
hdrs = {'content-type': 'text/plain',
'content-length': str(len(body))}
req = Request.blank('/v1/a/c/o', environ=env, body=body, headers=hdrs)
self.app.register('PUT', '/v1/a/c/o', HTTPCreated,
{'Etag': 'not the ciphertext etag'})
resp = req.get_response(self.encrypter)
self.assertEqual('201 Created', resp.status)
self.assertEqual('not the ciphertext etag', resp.headers['Etag'])
def test_PUT_multiseg_no_client_etag(self):
body_key = os.urandom(32)
chunks = ['some', 'chunks', 'of data']
body = ''.join(chunks)
env = {'REQUEST_METHOD': 'PUT',
CRYPTO_KEY_CALLBACK: fetch_crypto_keys,
'wsgi.input': FileLikeIter(chunks)}
hdrs = {'content-type': 'text/plain',
'content-length': str(len(body))}
req = Request.blank('/v1/a/c/o', environ=env, headers=hdrs)
self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {})
with mock.patch(
'swift.common.middleware.crypto.crypto_utils.'
'Crypto.create_random_key',
lambda *args: body_key):
resp = req.get_response(self.encrypter)
self.assertEqual('201 Created', resp.status)
# verify object is encrypted by getting direct from the app
get_req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
self.assertEqual(encrypt(body, body_key, FAKE_IV),
get_req.get_response(self.app).body)
def test_PUT_multiseg_good_client_etag(self):
body_key = os.urandom(32)
chunks = ['some', 'chunks', 'of data']
body = ''.join(chunks)
env = {'REQUEST_METHOD': 'PUT',
CRYPTO_KEY_CALLBACK: fetch_crypto_keys,
'wsgi.input': FileLikeIter(chunks)}
hdrs = {'content-type': 'text/plain',
'content-length': str(len(body)),
'Etag': md5hex(body)}
req = Request.blank('/v1/a/c/o', environ=env, headers=hdrs)
self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {})
with mock.patch(
'swift.common.middleware.crypto.crypto_utils.'
'Crypto.create_random_key',
lambda *args: body_key):
resp = req.get_response(self.encrypter)
self.assertEqual('201 Created', resp.status)
# verify object is encrypted by getting direct from the app
get_req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
self.assertEqual(encrypt(body, body_key, FAKE_IV),
get_req.get_response(self.app).body)
def test_PUT_multiseg_bad_client_etag(self):
chunks = ['some', 'chunks', 'of data']
body = ''.join(chunks)
env = {'REQUEST_METHOD': 'PUT',
CRYPTO_KEY_CALLBACK: fetch_crypto_keys,
'wsgi.input': FileLikeIter(chunks)}
hdrs = {'content-type': 'text/plain',
'content-length': str(len(body)),
'Etag': 'badclientetag'}
req = Request.blank('/v1/a/c/o', environ=env, headers=hdrs)
self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {})
resp = req.get_response(self.encrypter)
self.assertEqual('422 Unprocessable Entity', resp.status)
def test_PUT_missing_key_callback(self):
body = 'FAKE APP'
env = {'REQUEST_METHOD': 'PUT'}
hdrs = {'content-type': 'text/plain',
'content-length': str(len(body))}
req = Request.blank('/v1/a/c/o', environ=env, body=body, headers=hdrs)
resp = req.get_response(self.encrypter)
self.assertEqual('500 Internal Error', resp.status)
self.assertIn('missing callback',
self.encrypter.logger.get_lines_for_level('error')[0])
self.assertEqual('Unable to retrieve encryption keys.', resp.body)
def test_PUT_error_in_key_callback(self):
def raise_exc():
raise Exception('Testing')
body = 'FAKE APP'
env = {'REQUEST_METHOD': 'PUT',
CRYPTO_KEY_CALLBACK: raise_exc}
hdrs = {'content-type': 'text/plain',
'content-length': str(len(body))}
req = Request.blank('/v1/a/c/o', environ=env, body=body, headers=hdrs)
resp = req.get_response(self.encrypter)
self.assertEqual('500 Internal Error', resp.status)
self.assertIn('from callback: Testing',
self.encrypter.logger.get_lines_for_level('error')[0])
self.assertEqual('Unable to retrieve encryption keys.', resp.body)
def test_PUT_encryption_override(self):
# set crypto override to disable encryption.
# simulate another middleware wanting to set footers
other_footers = {
'Etag': 'other etag',
'X-Object-Sysmeta-Other': 'other sysmeta',
'X-Object-Sysmeta-Container-Update-Override-Etag':
'other override'}
body = 'FAKE APP'
env = {'REQUEST_METHOD': 'PUT',
'swift.crypto.override': True,
'swift.callback.update_footers':
lambda footers: footers.update(other_footers)}
hdrs = {'content-type': 'text/plain',
'content-length': str(len(body))}
req = Request.blank('/v1/a/c/o', environ=env, body=body, headers=hdrs)
self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {})
resp = req.get_response(self.encrypter)
self.assertEqual('201 Created', resp.status)
# verify that other middleware's footers made it to app
req_hdrs = self.app.headers[0]
for k, v in other_footers.items():
self.assertEqual(v, req_hdrs[k])
# verify object is NOT encrypted by getting direct from the app
get_req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'})
self.assertEqual(body, get_req.get_response(self.app).body)
def _test_constraints_checking(self, method):
# verify that the check_metadata function is called on PUT and POST
body = 'FAKE APP'
env = {'REQUEST_METHOD': method,
CRYPTO_KEY_CALLBACK: fetch_crypto_keys}
hdrs = {'content-type': 'text/plain',
'content-length': str(len(body))}
req = Request.blank('/v1/a/c/o', environ=env, body=body, headers=hdrs)
mocked_func = 'swift.common.middleware.crypto.encrypter.check_metadata'
with mock.patch(mocked_func) as mocked:
mocked.side_effect = [HTTPBadRequest('testing')]
resp = req.get_response(self.encrypter)
self.assertEqual('400 Bad Request', resp.status)
self.assertEqual(1, mocked.call_count)
mocked.assert_called_once_with(mock.ANY, 'object')
self.assertEqual(req.headers,
mocked.call_args_list[0][0][0].headers)
def test_PUT_constraints_checking(self):
self._test_constraints_checking('PUT')
def test_POST_constraints_checking(self):
self._test_constraints_checking('POST')
def test_config_true_value_on_disable_encryption(self):
app = FakeSwift()
self.assertFalse(encrypter.Encrypter(app, {}).disable_encryption)
for val in ('true', '1', 'yes', 'on', 't', 'y'):
app = encrypter.Encrypter(app,
{'disable_encryption': val})
self.assertTrue(app.disable_encryption)
def test_PUT_app_exception(self):
app = encrypter.Encrypter(FakeAppThatExcepts(HTTPException), {})
req = Request.blank('/', environ={'REQUEST_METHOD': 'PUT'})
with self.assertRaises(HTTPException) as catcher:
req.get_response(app)
self.assertEqual(FakeAppThatExcepts.MESSAGE, catcher.exception.body)
def test_encrypt_header_val(self):
# Prepare key and Crypto instance
object_key = fetch_crypto_keys()['object']
        # - A normal string can be encrypted
encrypted = encrypter.encrypt_header_val(Crypto(), 'aaa', object_key)
# sanity: return value is 2 item tuple
self.assertEqual(2, len(encrypted))
crypted_val, crypt_info = encrypted
expected_crypt_val = base64.b64encode(
encrypt('aaa', object_key, FAKE_IV))
expected_crypt_info = {
'cipher': 'AES_CTR_256', 'iv': 'This is an IV123'}
self.assertEqual(expected_crypt_val, crypted_val)
self.assertEqual(expected_crypt_info, crypt_info)
# - Empty string raises a ValueError for safety
with self.assertRaises(ValueError) as cm:
encrypter.encrypt_header_val(Crypto(), '', object_key)
self.assertEqual('empty value is not acceptable',
cm.exception.message)
# - None also raises a ValueError for safety
with self.assertRaises(ValueError) as cm:
encrypter.encrypt_header_val(Crypto(), None, object_key)
self.assertEqual('empty value is not acceptable',
cm.exception.message)
if __name__ == '__main__':
unittest.main()
| psachin/swift | test/unit/common/middleware/crypto/test_encrypter.py | Python | apache-2.0 | 41,463 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the tracing_controller_backend.
These are written to test the public API of the TracingControllerBackend,
using a mock platform and mock tracing agents.
Integration tests using a real running browser and tracing agents are included
among the tests for the public-facing telemetry.core.tracing_controller.
"""
import unittest
from telemetry import decorators
from telemetry.internal.platform import platform_backend
from telemetry.internal.platform import tracing_agent
from telemetry.internal.platform.tracing_agent import telemetry_tracing_agent
from telemetry.internal.platform import tracing_controller_backend
from telemetry.timeline import tracing_config
import mock
class FakeTraceDataBuilder(object):
"""Discards trace data but used to keep track of clock syncs."""
def __init__(self):
self.clock_syncs = []
def AddTraceFor(self, trace_part, value):
del trace_part # Unused.
del value # Unused.
def Freeze(self):
return self
class TracingControllerBackendTest(unittest.TestCase):
def setUp(self):
# Create a real TracingControllerBackend with a mock platform backend.
mock_platform = mock.Mock(spec=platform_backend.PlatformBackend)
self.controller = (
tracing_controller_backend.TracingControllerBackend(mock_platform))
self.config = tracing_config.TracingConfig()
# Replace the telemetry_tracing_agent module with a mock.
self._clock_syncs = []
def record_issuer_clock_sync(sync_id, issue_ts):
del issue_ts # Unused.
self._clock_syncs.append(sync_id)
self.telemetry_tracing_agent = mock.patch(
'telemetry.internal.platform.tracing_controller_backend'
'.telemetry_tracing_agent').start()
self.telemetry_tracing_agent.RecordIssuerClockSyncMarker.side_effect = (
record_issuer_clock_sync)
# Replace the list of real tracing agent classes with one containing:
# - a mock TelemetryTracingAgent to work as clock sync recorder, and
# - a simple mock TracingAgent.
# Tests can also override this list using _SetTracingAgentClasses.
self._TRACING_AGENT_CLASSES = [
self.MockAgentClass(clock_sync_recorder=True),
self.MockAgentClass()]
mock.patch(
'telemetry.internal.platform.tracing_controller_backend'
'._TRACING_AGENT_CLASSES', new=self._TRACING_AGENT_CLASSES).start()
# Replace the real TraceDataBuilder with a fake one to collect clock_syncs.
mock.patch('tracing.trace_data.trace_data.TraceDataBuilder',
new=FakeTraceDataBuilder).start()
def tearDown(self):
if self.controller.is_tracing_running:
self.controller.StopTracing()
mock.patch.stopall()
def MockAgentClass(self, can_start=True, supports_clock_sync=True,
clock_sync_recorder=False):
"""Factory to create mock tracing agent classes."""
if clock_sync_recorder:
supports_clock_sync = False # Can't be both issuer and recorder.
spec = telemetry_tracing_agent.TelemetryTracingAgent
else:
spec = tracing_agent.TracingAgent
agent = mock.Mock(spec=spec)
agent.StartAgentTracing.return_value = can_start
agent.SupportsExplicitClockSync.return_value = supports_clock_sync
if clock_sync_recorder:
def collect_clock_syncs(trace_data, timeout=None):
del timeout # Unused.
# Copy the clock_syncs to the trace data, then clear our own list.
trace_data.clock_syncs = list(self._clock_syncs)
self._clock_syncs[:] = []
agent.CollectAgentTraceData.side_effect = collect_clock_syncs
elif supports_clock_sync:
def issue_clock_sync(sync_id, callback):
callback(sync_id, 1)
agent.RecordClockSyncMarker.side_effect = issue_clock_sync
AgentClass = mock.Mock(return_value=agent)
AgentClass.IsSupported.return_value = True
return AgentClass
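  # For example, MockAgentClass(can_start=False) produces an agent class whose
  # instances report IsSupported() but fail to start tracing, which the tests
  # below use to exercise the partial-failure paths.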
def _SetTracingAgentClasses(self, *agent_classes):
# Replace contents of the list with the agent classes given as args.
self._TRACING_AGENT_CLASSES[:] = agent_classes
@decorators.Isolated
def testStartTracing(self):
self.assertFalse(self.controller.is_tracing_running)
self.assertTrue(self.controller.StartTracing(self.config, 30))
self.assertTrue(self.controller.is_tracing_running)
@decorators.Isolated
def testDoubleStartTracing(self):
self.assertFalse(self.controller.is_tracing_running)
self.assertTrue(self.controller.StartTracing(self.config, 30))
self.assertTrue(self.controller.is_tracing_running)
self.assertFalse(self.controller.StartTracing(self.config, 30))
@decorators.Isolated
def testStopTracingNotStarted(self):
with self.assertRaises(AssertionError):
self.controller.StopTracing()
@decorators.Isolated
def testStopTracing(self):
self.assertFalse(self.controller.is_tracing_running)
self.assertTrue(self.controller.StartTracing(self.config, 30))
self.assertTrue(self.controller.is_tracing_running)
data = self.controller.StopTracing()
self.assertEqual(len(data.clock_syncs), 1)
self.assertFalse(self.controller.is_tracing_running)
@decorators.Isolated
def testDoubleStopTracing(self):
self.assertFalse(self.controller.is_tracing_running)
self.assertTrue(self.controller.StartTracing(self.config, 30))
self.assertTrue(self.controller.is_tracing_running)
data = self.controller.StopTracing()
self.assertEqual(len(data.clock_syncs), 1)
self.assertFalse(self.controller.is_tracing_running)
with self.assertRaises(AssertionError):
self.controller.StopTracing()
@decorators.Isolated
def testMultipleStartStop(self):
self.assertFalse(self.controller.is_tracing_running)
self.assertTrue(self.controller.StartTracing(self.config, 30))
self.assertTrue(self.controller.is_tracing_running)
data = self.controller.StopTracing()
self.assertEqual(len(data.clock_syncs), 1)
sync_event_one = data.clock_syncs[0]
self.assertFalse(self.controller.is_tracing_running)
# Run 2
self.assertTrue(self.controller.StartTracing(self.config, 30))
self.assertTrue(self.controller.is_tracing_running)
data = self.controller.StopTracing()
self.assertEqual(len(data.clock_syncs), 1)
sync_event_two = data.clock_syncs[0]
self.assertFalse(self.controller.is_tracing_running)
# Test difference between events
self.assertNotEqual(sync_event_one, sync_event_two)
@decorators.Isolated
def testFlush(self):
self.assertFalse(self.controller.is_tracing_running)
self.assertIsNone(self.controller._current_state)
# Start tracing.
self.assertTrue(self.controller.StartTracing(self.config, 30))
self.assertTrue(self.controller.is_tracing_running)
self.assertIs(self.controller._current_state.config, self.config)
self.assertEqual(self.controller._current_state.timeout, 30)
self.assertIsNotNone(self.controller._current_state.builder)
# Flush tracing several times.
for _ in xrange(5):
self.controller.FlushTracing()
self.assertTrue(self.controller.is_tracing_running)
self.assertIs(self.controller._current_state.config, self.config)
self.assertEqual(self.controller._current_state.timeout, 30)
self.assertIsNotNone(self.controller._current_state.builder)
# Stop tracing.
data = self.controller.StopTracing()
self.assertFalse(self.controller.is_tracing_running)
self.assertIsNone(self.controller._current_state)
self.assertEqual(len(data.clock_syncs), 6)
@decorators.Isolated
def testNoWorkingAgents(self):
self._SetTracingAgentClasses(self.MockAgentClass(can_start=False))
self.assertFalse(self.controller.is_tracing_running)
self.assertTrue(self.controller.StartTracing(self.config, 30))
self.assertTrue(self.controller.is_tracing_running)
self.assertEquals(self.controller._active_agents_instances, [])
data = self.controller.StopTracing()
self.assertEqual(len(data.clock_syncs), 0)
self.assertFalse(self.controller.is_tracing_running)
@decorators.Isolated
def testNoClockSyncSupport(self):
self._SetTracingAgentClasses(
self.MockAgentClass(clock_sync_recorder=True),
self.MockAgentClass(supports_clock_sync=False))
self.assertFalse(self.controller.is_tracing_running)
self.assertTrue(self.controller.StartTracing(self.config, 30))
self.assertTrue(self.controller.is_tracing_running)
self.assertEquals(len(self.controller._active_agents_instances), 2)
data = self.controller.StopTracing()
self.assertFalse(self.controller.is_tracing_running)
self.assertEqual(len(data.clock_syncs), 0)
@decorators.Isolated
def testMultipleAgents(self):
# Only 5 agents can start and, from those, only 2 support clock sync.
self._SetTracingAgentClasses(
self.MockAgentClass(clock_sync_recorder=True),
self.MockAgentClass(),
self.MockAgentClass(),
self.MockAgentClass(can_start=False),
self.MockAgentClass(can_start=False),
self.MockAgentClass(supports_clock_sync=False),
self.MockAgentClass(supports_clock_sync=False)
)
self.assertFalse(self.controller.is_tracing_running)
self.assertTrue(self.controller.StartTracing(self.config, 30))
self.assertTrue(self.controller.is_tracing_running)
self.assertEquals(len(self.controller._active_agents_instances), 5)
data = self.controller.StopTracing()
self.assertFalse(self.controller.is_tracing_running)
self.assertEqual(len(data.clock_syncs), 2)
@decorators.Isolated
def testIssueClockSyncMarker_noClockSyncRecorder(self):
    # Only 4 agents can start, but the clock sync recorder can't.
self._SetTracingAgentClasses(
self.MockAgentClass(clock_sync_recorder=True, can_start=False),
self.MockAgentClass(),
self.MockAgentClass(),
self.MockAgentClass(can_start=False),
self.MockAgentClass(can_start=False),
self.MockAgentClass(supports_clock_sync=False),
self.MockAgentClass(supports_clock_sync=False)
)
self.assertFalse(self.controller.is_tracing_running)
self.assertTrue(self.controller.StartTracing(self.config, 30))
self.assertTrue(self.controller.is_tracing_running)
self.assertEquals(len(self.controller._active_agents_instances), 4)
data = self.controller.StopTracing()
self.assertFalse(self.controller.is_tracing_running)
self.assertEqual(len(data.clock_syncs), 0) # No clock syncs found.
| endlessm/chromium-browser | third_party/catapult/telemetry/telemetry/internal/platform/tracing_controller_backend_unittest.py | Python | bsd-3-clause | 10,619 |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
from pulp.client.extensions.extensions import PulpCliSection
def initialize(context):
section = PulpCliSection('section-z', 'Section Z')
context.cli.add_section(section)
| beav/pulp | client_lib/test/data/extensions_loader_tests/partial_fail_set/z_ext/pulp_cli.py | Python | gpl-2.0 | 754 |
# coding: utf8
from __future__ import unicode_literals
from .._messages import Messages
from ...compat import json_dumps, path2str
from ...util import prints
def conllu2json(input_path, output_path, n_sents=10, use_morphology=False):
"""
    Convert CoNLL-U files into JSON format for use with the train CLI.
    The use_morphology parameter enables appending morphology to tags, which
    is useful for languages such as Spanish, where UD tags are not so rich.
"""
# by @dvsrepo, via #11 explosion/spacy-dev-resources
docs = []
sentences = []
conll_tuples = read_conllx(input_path, use_morphology=use_morphology)
for i, (raw_text, tokens) in enumerate(conll_tuples):
sentence, brackets = tokens[0]
sentences.append(generate_sentence(sentence))
        # Real-sized documents could be extracted using the comments in the
        # conllu document
        if len(sentences) % n_sents == 0:
doc = create_doc(sentences, i)
docs.append(doc)
sentences = []
    output_filename = input_path.parts[-1].replace(
        ".conllu", ".json").replace(".conll", ".json")
output_file = output_path / output_filename
with output_file.open('w', encoding='utf-8') as f:
f.write(json_dumps(docs))
prints(Messages.M033.format(n_docs=len(docs)),
title=Messages.M032.format(name=path2str(output_file)))
def read_conllx(input_path, use_morphology=False, n=0):
text = input_path.open('r', encoding='utf-8').read()
i = 0
for sent in text.strip().split('\n\n'):
lines = sent.strip().split('\n')
if lines:
while lines[0].startswith('#'):
lines.pop(0)
tokens = []
for line in lines:
parts = line.split('\t')
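                # CoNLL-U columns: ID, FORM, LEMMA, UPOS, XPOS, FEATS,
                # HEAD, DEPREL, DEPS, MISC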
id_, word, lemma, pos, tag, morph, head, dep, _1, _2 = parts
if '-' in id_ or '.' in id_:
continue
try:
id_ = int(id_) - 1
head = (int(head) - 1) if head != '0' else id_
dep = 'ROOT' if dep == 'root' else dep
tag = pos if tag == '_' else tag
tag = tag+'__'+morph if use_morphology else tag
tokens.append((id_, word, tag, head, dep, 'O'))
                except Exception:
print(line)
raise
tuples = [list(t) for t in zip(*tokens)]
yield (None, [[tuples, []]])
i += 1
if n >= 1 and i >= n:
break
def generate_sentence(sent):
(id_, word, tag, head, dep, _) = sent
sentence = {}
tokens = []
for i, id in enumerate(id_):
token = {}
token["orth"] = word[i]
token["tag"] = tag[i]
token["head"] = head[i] - id
token["dep"] = dep[i]
tokens.append(token)
sentence["tokens"] = tokens
return sentence
def create_doc(sentences, id):
doc = {}
paragraph = {}
doc["id"] = id
doc["paragraphs"] = []
paragraph["sentences"] = sentences
doc["paragraphs"].append(paragraph)
return doc
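# Resulting JSON shape (sketch of the structure assembled above):
# [{"id": ..., "paragraphs": [{"sentences": [{"tokens": [
#     {"orth": ..., "tag": ..., "head": <relative offset>, "dep": ...},
#     ...]}]}]}]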
| ryfeus/lambda-packs | Spacy/source2.7/spacy/cli/converters/conllu2json.py | Python | mit | 3,175 |
import random
from pokemongo_bot import logger
from pokemongo_bot.base_task import BaseTask
from pokemongo_bot.worker_result import WorkerResult
from pokemongo_bot.human_behaviour import sleep
class CompleteTutorial(BaseTask):
SUPPORTED_TASK_API_VERSION = 1
def initialize(self):
self.api = self.bot.api
self.nickname = self.config.get('nickname','')
self.team = self.config.get('team',0)
def should_run(self):
return True
def work(self):
if not self.should_run():
return WorkerResult.SUCCESS
if self._check_tutorial_state():
return WorkerResult.SUCCESS
else:
return WorkerResult.ERROR
def _check_tutorial_state(self):
self._player=self.bot.player_data
tutorial_state = self._player.get('tutorial_state', [])
# LEGAL_SCREEN = 0
if not 0 in tutorial_state:
sleep(2)
if self._set_tutorial_state(0):
self.logger.info('Completed legal screen')
tutorial_state = self._player.get('tutorial_state', [])
else:
return False
# AVATAR_SELECTION = 1
if not 1 in tutorial_state:
            # TODO: choose avatar?
sleep(3)
if self._set_tutorial_state(1):
self.logger.info('Completed avatar selection')
tutorial_state = self._player.get('tutorial_state', [])
else:
return False
# POKEMON_CAPTURE = 3
if not 3 in tutorial_state:
sleep(10)
if self._encounter_tutorial():
self.logger.info('Completed first capture')
else:
self.logger.error('Error during first capture')
return False
# NAME_SELECTION = 4
if not 4 in tutorial_state:
if not self.nickname:
self.logger.info("No nickname defined in config")
return False
self.logger.info(u'Trying to set {} as nickname'.format(self.nickname))
sleep(5)
if self._set_nickname(self.nickname):
self._set_tutorial_state(4)
tutorial_state = self._player.get('tutorial_state', [])
else:
self.logger.error('Error trying to set nickname')
return False
# FIRST_TIME_EXPERIENCE_COMPLETE = 7
if not 7 in tutorial_state:
if self._set_tutorial_state(7):
self.logger.info('Completed first time experience')
else:
return False
return True
def _encounter_tutorial(self):
# You just need to call the API with the pokemon you choose
        # Probably can't get Mewtwo as first pokemon though
first_pokemon_id = random.choice([1, 4, 7])
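        # Pokedex ids of the standard starters: 1 = Bulbasaur,
        # 4 = Charmander, 7 = Squirtle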
response_dict = self.api.encounter_tutorial_complete(
pokemon_id=first_pokemon_id)
try:
if response_dict['responses']['ENCOUNTER_TUTORIAL_COMPLETE']['result'] == 1:
return True
else:
self.logger.error("Error during encouter tutorial")
return False
except KeyError:
self.logger.error("KeyError during encouter tutorial")
return False
def _set_nickname(self, nickname):
response_dict = self.api.claim_codename(codename=nickname)
try:
result = response_dict['responses']['CLAIM_CODENAME']['status']
if result == 1:
self.logger.info(u'Name changed to {}'.format(nickname))
return True
else:
# Would be nice to get the text directly from the proto Enum
error_codes = {
0: 'UNSET',
1: 'SUCCESS',
2: 'CODENAME_NOT_AVAILABLE',
3: 'CODENAME_NOT_VALID',
4: 'CURRENT_OWNER',
5: 'CODENAME_CHANGE_NOT_ALLOWED'
}
self.logger.error(
u'Error while changing nickname : {}'.format(error_codes[result]))
return False
except KeyError:
return False
def _set_tutorial_state(self, completed):
response_dict = self.api.mark_tutorial_complete(tutorials_completed=[
completed], send_marketing_emails=False, send_push_notifications=False)
try:
self._player = response_dict['responses'][
'MARK_TUTORIAL_COMPLETE']['player_data']
return response_dict['responses']['MARK_TUTORIAL_COMPLETE']['success']
except KeyError:
self.logger.error("KeyError while setting tutorial state")
return False
| Compjeff/PokemonGo-Bot | pokemongo_bot/cell_workers/complete_tutorial.py | Python | mit | 4,839 |
import math
import time
t1 = time.time()
# read the words into a list
f = open('pb042_words.txt','r')
words = f.read().split(',')
for i in range(0,len(words)):
words[i] = words[i][1:len(words[i])-1]
f.close()
def valueOf(word):
value = 0
for i in range(0,len(word)):
value += ord(word[i])-64
return value
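# e.g. valueOf("SKY") = 19 + 11 + 25 = 55 = t(10), the worked example from
# the problem statement.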
# n(n+1)/2
def isTriangle(x):
n = math.floor(math.sqrt(2*x))
if n*(n+1) == 2*x:
return True
return False
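# Inversion check: for triangular x = n*(n+1)/2, n = floor(sqrt(2*x)),
# e.g. x = 55 gives floor(sqrt(110)) = 10 and 10*11 == 110.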
count = 0
for i in range(0,len(words)):
if isTriangle(valueOf(words[i])):
count += 1
print(count)
print("time:", time.time() - t1)
| Adamssss/projectEuler | Problem 001-150 Python/pb042.py | Python | mit | 615 |
from test.querybuildertestcase import QueryBuilderTestCase
EXPECTED_TYPES = ['Bank', 'Broke', 'Employment Period', 'Has Address',
'Has Secretarys', 'Important Person', 'Random Interface', 'Range', 'Secretary', 'Thing', 'Types']
class BrowseDataModelTest(QueryBuilderTestCase):
def test_browse_data_model(self):
link = self.findLink("Browse data model")
self.assertIsNotNone(link)
link.click()
help_text = self.wait_for_elem('.body > p').text
self.assertIn("browse the tree", help_text)
for type_name in EXPECTED_TYPES:
self.assertIsNotNone(self.findLink(type_name))
self.findLink('Bank').click()
self.wait().until(lambda d: 'builder' in d.title)
self.assertIn('Query builder', self.browser.title)
self.assertEquals('Bank', self.elem('.typeSelected').text)
| elsiklab/intermine | testmodel/webapp/selenium/test/browse-data-model-test.py | Python | lgpl-2.1 | 860 |
# Copyright (C) 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The dataclasses for representing a Program Graph."""
import enum
from typing import List, Optional, Text
import dataclasses
class NodeType(enum.Enum):
UNSPECIFIED = 0
AST_NODE = 1
AST_LIST = 2
AST_VALUE = 3
SYNTAX_NODE = 4
PLACEHOLDER = 5
@dataclasses.dataclass
class Node:
"""Represents a node in a program graph."""
id: int
type: NodeType
# If an AST node, a string that identifies what type of AST node,
# e.g. "Num" or "Expr". These are defined by the underlying AST for the
# language.
ast_type: Optional[Text] = ""
# Primitive valued AST node, such as:
# - the name of an identifier for a Name node
# - the number attached to a Num node
# The corresponding ast_type value is the Python type of ast_value, not the
# type of the parent AST node.
ast_value_repr: Optional[Text] = ""
# For syntax nodes, the syntax attached to the node.
syntax: Optional[Text] = ""
class EdgeType(enum.Enum):
"""The different kinds of edges that can appear in a program graph."""
UNSPECIFIED = 0
CFG_NEXT = 1
LAST_READ = 2
LAST_WRITE = 3
COMPUTED_FROM = 4
RETURNS_TO = 5
FORMAL_ARG_NAME = 6
FIELD = 7
SYNTAX = 8
NEXT_SYNTAX = 9
LAST_LEXICAL_USE = 10
CALLS = 11
@dataclasses.dataclass
class Edge:
id1: int
id2: int
type: EdgeType
field_name: Optional[Text] = None # For FIELD edges, the field name.
has_back_edge: bool = False
@dataclasses.dataclass
class Graph:
nodes: List[Node]
edges: List[Edge]
root_id: int
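# Illustrative sketch (hypothetical ids and fields chosen here, not produced
# by this module): a minimal graph for the statement `x = 1` could be
# assembled as
#   assign = Node(id=0, type=NodeType.AST_NODE, ast_type='Assign')
#   name = Node(id=1, type=NodeType.AST_NODE, ast_type='Name',
#               ast_value_repr='x')
#   num = Node(id=2, type=NodeType.AST_NODE, ast_type='Num',
#              ast_value_repr='1')
#   edges = [Edge(id1=0, id2=1, type=EdgeType.FIELD, field_name='targets'),
#            Edge(id1=0, id2=2, type=EdgeType.FIELD, field_name='value')]
#   graph = Graph(nodes=[assign, name, num], edges=edges, root_id=0)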
| google-research/python-graphs | python_graphs/program_graph_dataclasses.py | Python | apache-2.0 | 2,074 |
#!/usr/bin/env python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
from collections import namedtuple
import rpmfluff
RPM = namedtuple('RPM', ['name', 'version', 'release', 'epoch', 'recommends'])
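# e.g. RPM('foo', '1.0', '1', None, None) describes foo-1.0-1 with no epoch
# and no weak (Recommends:) dependencies.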
SPECS = [
RPM('dinginessentail', '1.0', '1', None, None),
RPM('dinginessentail', '1.0', '2', '1', None),
RPM('dinginessentail', '1.1', '1', '1', None),
RPM('dinginessentail-olive', '1.0', '1', None, None),
RPM('dinginessentail-olive', '1.1', '1', None, None),
RPM('landsidescalping', '1.0', '1', None, None),
RPM('landsidescalping', '1.1', '1', None, None),
RPM('dinginessentail-with-weak-dep', '1.0', '1', None, ['dinginessentail-weak-dep']),
RPM('dinginessentail-weak-dep', '1.0', '1', None, None),
]
def main():
try:
arch = sys.argv[1]
except IndexError:
arch = 'x86_64'
pkgs = []
for spec in SPECS:
pkg = rpmfluff.SimpleRpmBuild(spec.name, spec.version, spec.release, [arch])
pkg.epoch = spec.epoch
if spec.recommends:
            # Skip packages that require weak deps when the available RPM
            # does not support them
if not hasattr(rpmfluff, "can_use_rpm_weak_deps") or not rpmfluff.can_use_rpm_weak_deps():
continue
for recommend in spec.recommends:
pkg.add_recommends(recommend)
pkgs.append(pkg)
repo = rpmfluff.YumRepoBuild(pkgs)
repo.make(arch)
for pkg in pkgs:
pkg.clean()
print(repo.repoDir)
if __name__ == "__main__":
main()
| dpassante/ansible | test/integration/targets/setup_rpm_repo/files/create-repo.py | Python | gpl-3.0 | 1,583 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Home of estimator related functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
from tensorflow.python.client import session
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import export as export_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import models
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.network import Network
from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics as metrics_module
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import training_util
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.training.checkpointable import data_structures
_DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
def _cast_tensor_to_floatx(x):
"""Cast tensor to keras's floatx dtype if it is not already the same dtype."""
if x.dtype == K.floatx():
return x
else:
return math_ops.cast(x, K.floatx())
def _convert_tensor(x):
"""Create or cast tensor if needed."""
if not tensor_util.is_tensor(x):
# x is a numpy array
x = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(x)
if check_ops.is_numeric_tensor(x):
# is_numeric_tensor returns False if provided with a numpy array
x = _cast_tensor_to_floatx(x)
return x
def _any_weight_initialized(keras_model):
"""Check if any weights has been initialized in the Keras model.
Args:
keras_model: An instance of compiled keras model.
Returns:
boolean, True if at least one weight has been initialized, else False.
  Currently Keras initializes all weights at get_session().
"""
if keras_model is None:
return False
for layer in keras_model.layers:
for weight in layer.weights:
if hasattr(weight, '_keras_initialized'):
return True
return False
def _create_ordered_io(keras_model, estimator_io, is_input=True):
"""Create a list of tensors from IO dictionary based on Keras IO order.
Args:
keras_model: An instance of compiled keras model.
estimator_io: The features or labels (dict or plain array) from model_fn.
is_input: True if dictionary is for inputs.
Returns:
A list of tensors based on Keras IO order.
Raises:
ValueError: if dictionary keys cannot be found in Keras model input_names
or output_names.
"""
if isinstance(estimator_io, (list, tuple)):
# Case currently not supported by most built-in input_fn,
# but it's good to have for sanity
return [_convert_tensor(x) for x in estimator_io]
elif isinstance(estimator_io, dict):
if is_input:
if keras_model._is_graph_network:
keras_io_names = keras_model.input_names
else:
keras_io_names = [
'input_%d' % i for i in range(1, len(estimator_io) + 1)]
else:
if keras_model._is_graph_network:
keras_io_names = keras_model.output_names
else:
keras_io_names = [
'output_%d' % i for i in range(1, len(estimator_io) + 1)]
for key in estimator_io:
if key not in keras_io_names:
raise ValueError(
'Cannot find %s with name "%s" in Keras Model. '
'It needs to match one '
'of the following: %s' % ('input' if is_input else 'output', key,
', '.join(keras_io_names)))
tensors = [_convert_tensor(estimator_io[io_name])
for io_name in keras_io_names]
return tensors
else:
# Plain array.
return _convert_tensor(estimator_io)
def _in_place_subclassed_model_reset(model):
"""Substitute for model cloning that works for subclassed models.
Subclassed models cannot be cloned because their topology is not serializable.
To "instantiate" an identical model in a new TF graph, we reuse the original
model object, but we clear its state.
After calling this function on a model instance, you can use the model
instance as if it were a model clone (in particular you can use it in a new
graph).
This method clears the state of the input model. It is thus destructive.
However the original state can be restored fully by calling
`_in_place_subclassed_model_state_restoration`.
Args:
model: Instance of a Keras model created via subclassing.
Raises:
ValueError: In case the model uses a subclassed model as inner layer.
"""
assert not model._is_graph_network # Only makes sense for subclassed networks
# Retrieve all layers tracked by the model as well as their attribute names
attributes_cache = {}
for name in dir(model):
try:
value = getattr(model, name)
except (AttributeError, ValueError, TypeError):
continue
if isinstance(value, Layer):
attributes_cache[name] = value
assert value in model._layers
elif isinstance(value, (list, tuple)) and name not in ('layers', '_layers'):
# Handle case: list/tuple of layers (also tracked by the Network API).
if value and all(isinstance(val, Layer) for val in value):
raise ValueError('We do not support the use of list-of-layers '
'attributes in subclassed models used with '
'`model_to_estimator` at this time. Found list '
'model: %s' % name)
# Replace layers on the model with fresh layers
layers_to_names = {value: key for key, value in attributes_cache.items()}
original_layers = model._layers[:]
model._layers = data_structures.NoDependency([])
for layer in original_layers: # We preserve layer order.
config = layer.get_config()
# This will not work for nested subclassed models used as layers.
# This would be theoretically possible to support, but would add complexity.
# Only do it if users complain.
if isinstance(layer, Network) and not layer._is_graph_network:
raise ValueError('We do not support the use of nested subclassed models '
'in `model_to_estimator` at this time. Found nested '
'model: %s' % layer)
fresh_layer = layer.__class__.from_config(config)
name = layers_to_names[layer]
setattr(model, name, fresh_layer)
# Cache original model build attributes (in addition to layers)
if (not hasattr(model, '_original_attributes_cache') or
model._original_attributes_cache is None):
if model.built:
attributes_to_cache = [
'inputs',
'outputs',
'_feed_outputs',
'_feed_output_names',
'_feed_output_shapes',
'_feed_loss_fns',
'loss_weights_list',
'targets',
'_feed_targets',
'sample_weight_modes',
'weighted_metrics',
'metrics_names',
'metrics_tensors',
'metrics_updates',
'stateful_metric_names',
'total_loss',
'sample_weights',
'_feed_sample_weights',
'train_function',
'test_function',
'predict_function',
'_collected_trainable_weights',
'_feed_inputs',
'_feed_input_names',
'_feed_input_shapes',
'optimizer',
]
for name in attributes_to_cache:
attributes_cache[name] = getattr(model, name)
model._original_attributes_cache = data_structures.NoDependency(
attributes_cache)
# Reset built state
model.built = False
model.inputs = None
model.outputs = None
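# Hedged illustration of the intended reset/restore pairing (comment only;
# both helpers are defined in this module):
#
#   _in_place_subclassed_model_reset(model)   # clear state, reuse the object
#   ...                                       # rebuild/compile in a fresh graph
#   _in_place_subclassed_model_state_restoration(model)  # undo the reset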
def _in_place_subclassed_model_state_restoration(model):
"""Restores the original state of a model after it was "reset".
This undoes this action of `_in_place_subclassed_model_reset`.
Args:
model: Instance of a Keras model created via subclassing, on which
`_in_place_subclassed_model_reset` was previously called.
"""
assert not model._is_graph_network
# Restore layers and build attributes
if (hasattr(model, '_original_attributes_cache') and
model._original_attributes_cache is not None):
# Models have sticky attribute assignment, so we want to be careful to add
# back the previous attributes and track Layers by their original names
# without adding dependencies on "utility" attributes which Models exempt
# when they're constructed.
model._layers = data_structures.NoDependency([])
for name, value in model._original_attributes_cache.items():
if not isinstance(value, checkpointable.CheckpointableBase):
# If this value is not already checkpointable, it's probably that way
# for a reason; we don't want to start tracking data structures that the
# original Model didn't.
value = data_structures.NoDependency(value)
setattr(model, name, value)
model._original_attributes_cache = None
else:
# Restore to the state of a never-called model.
model.built = False
model.inputs = None
model.outputs = None
def _clone_and_build_model(mode,
keras_model,
custom_objects,
features=None,
labels=None):
"""Clone and build the given keras_model.
Args:
mode: training mode.
keras_model: an instance of compiled keras model.
custom_objects: Dictionary for custom objects.
features: Dict of tensors.
labels: Dict of tensors, or single tensor instance.
Returns:
The newly built model.
"""
# Set to True during training, False for inference.
K.set_learning_phase(mode == model_fn_lib.ModeKeys.TRAIN)
# Get list of inputs.
if features is None:
input_tensors = None
else:
input_tensors = _create_ordered_io(keras_model,
estimator_io=features,
is_input=True)
# Get list of outputs.
if labels is None:
target_tensors = None
elif isinstance(labels, dict):
target_tensors = _create_ordered_io(keras_model,
estimator_io=labels,
is_input=False)
else:
target_tensors = [
_convert_tensor(labels)
]
if keras_model._is_graph_network:
if custom_objects:
with CustomObjectScope(custom_objects):
model = models.clone_model(keras_model, input_tensors=input_tensors)
else:
model = models.clone_model(keras_model, input_tensors=input_tensors)
else:
model = keras_model
_in_place_subclassed_model_reset(model)
if input_tensors is not None:
model._set_inputs(input_tensors)
# Compile/Build model
if mode is model_fn_lib.ModeKeys.PREDICT:
if isinstance(model, models.Sequential):
model.build()
else:
if isinstance(keras_model.optimizer, optimizers.TFOptimizer):
optimizer = keras_model.optimizer
else:
optimizer_config = keras_model.optimizer.get_config()
optimizer = keras_model.optimizer.__class__.from_config(optimizer_config)
optimizer.iterations = training_util.get_or_create_global_step()
model.compile(
optimizer,
keras_model.loss,
metrics=keras_model.metrics,
loss_weights=keras_model.loss_weights,
sample_weight_mode=keras_model.sample_weight_mode,
weighted_metrics=keras_model.weighted_metrics,
target_tensors=target_tensors)
return model
def _create_keras_model_fn(keras_model, custom_objects=None):
"""Creates model_fn for keras Estimator.
Args:
keras_model: an instance of compiled keras model.
custom_objects: Dictionary for custom objects.
Returns:
The model_fn for a keras Estimator.
"""
def model_fn(features, labels, mode):
"""model_fn for keras Estimator."""
model = _clone_and_build_model(mode, keras_model, custom_objects, features,
labels)
model_output_names = []
    # We need to make sure that the output names of the last layer in the model
    # are the same for each of the cloned models. This is required for mirrored
    # strategy when we call regroup.
if distribute_lib.has_distribution_strategy():
for name in model.output_names:
name = re.compile(r'_\d$').sub('', name)
model_output_names.append(name)
else:
model_output_names = model.output_names
# Get inputs to EstimatorSpec
predictions = dict(zip(model_output_names, model.outputs))
loss = None
train_op = None
eval_metric_ops = None
# Set loss and metric only during train and evaluate.
if mode is not model_fn_lib.ModeKeys.PREDICT:
if mode is model_fn_lib.ModeKeys.TRAIN:
model._make_train_function() # pylint: disable=protected-access
else:
model._make_test_function() # pylint: disable=protected-access
loss = model.total_loss
if model.metrics:
# TODO(fchollet): support stateful metrics
eval_metric_ops = {}
# When each metric maps to an output
if isinstance(model.metrics, dict):
for i, output_name in enumerate(model.metrics.keys()):
metric_name = model.metrics[output_name]
if callable(metric_name):
metric_name = metric_name.__name__
# When some outputs use the same metric
if list(model.metrics.values()).count(metric_name) > 1:
metric_name += '_' + output_name
eval_metric_ops[metric_name] = metrics_module.mean(
model.metrics_tensors[i - len(model.metrics)])
else:
for i, metric_name in enumerate(model.metrics):
if callable(metric_name):
metric_name = metric_name.__name__
eval_metric_ops[metric_name] = metrics_module.mean(
model.metrics_tensors[i])
# Set train_op only during train.
if mode is model_fn_lib.ModeKeys.TRAIN:
train_op = model.train_function.updates_op
if not model._is_graph_network:
# Reset model state to original state,
# to avoid `model_fn` being destructive for the initial model argument.
_in_place_subclassed_model_state_restoration(keras_model)
return model_fn_lib.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops,
export_outputs={
_DEFAULT_SERVING_KEY:
export_lib.export_output.PredictOutput(predictions)
})
return model_fn
def _save_first_checkpoint(keras_model, custom_objects, config):
"""Save first checkpoint for the keras Estimator.
Args:
keras_model: an instance of compiled keras model.
custom_objects: Dictionary for custom objects.
config: Estimator config.
Returns:
The path where keras model checkpoint is saved.
"""
# save checkpoint into subdirectory to allow warm start
keras_model_dir = os.path.join(config.model_dir, 'keras')
# Load weights and save to checkpoint if there is no checkpoint
latest_path = checkpoint_management.latest_checkpoint(keras_model_dir)
if not latest_path:
keras_weights = None
if _any_weight_initialized(keras_model):
keras_weights = keras_model.get_weights()
if not gfile.IsDirectory(keras_model_dir):
gfile.MakeDirs(keras_model_dir)
with ops.Graph().as_default():
random_seed.set_random_seed(config.tf_random_seed)
training_util.create_global_step()
model = _clone_and_build_model(model_fn_lib.ModeKeys.TRAIN, keras_model,
custom_objects)
# save to checkpoint
with session.Session(config=config.session_config) as sess:
if keras_weights:
model.set_weights(keras_weights)
# Make update ops and initialize all variables.
if not model.train_function:
# pylint: disable=protected-access
model._make_train_function()
K._initialize_variables(sess)
# pylint: enable=protected-access
saver = saver_lib.Saver()
latest_path = os.path.join(keras_model_dir, 'keras_model.ckpt')
saver.save(sess, latest_path)
return latest_path
def model_to_estimator(keras_model=None,
keras_model_path=None,
custom_objects=None,
model_dir=None,
config=None):
"""Constructs an `Estimator` instance from given keras model.
For usage example, please see
@{$guide/estimators$creating_estimators_from_keras_models}.
Args:
keras_model: A compiled Keras model object. This argument is mutually
exclusive with `keras_model_path`.
keras_model_path: Path to a compiled Keras model saved on disk, in HDF5
format, which can be generated with the `save()` method of a Keras model.
This argument is mutually exclusive with `keras_model`.
custom_objects: Dictionary for custom objects.
model_dir: Directory to save `Estimator` model parameters, graph, summary
files for TensorBoard, etc.
config: `RunConfig` to config `Estimator`.
Returns:
An Estimator from given keras model.
Raises:
ValueError: if neither keras_model nor keras_model_path was given.
    ValueError: if both keras_model and keras_model_path were given.
ValueError: if the keras_model_path is a GCS URI.
ValueError: if keras_model has not been compiled.
"""
if not (keras_model or keras_model_path):
raise ValueError(
'Either `keras_model` or `keras_model_path` needs to be provided.')
if keras_model and keras_model_path:
raise ValueError(
        'Please specify either `keras_model` or `keras_model_path`, '
'but not both.')
if not keras_model:
if keras_model_path.startswith(
'gs://') or 'storage.googleapis.com' in keras_model_path:
raise ValueError(
'%s is not a local path. Please copy the model locally first.' %
keras_model_path)
logging.info('Loading models from %s', keras_model_path)
keras_model = models.load_model(keras_model_path)
else:
logging.info('Using the Keras model provided.')
if not hasattr(keras_model, 'optimizer') or not keras_model.optimizer:
raise ValueError(
'The given keras model has not been compiled yet. '
'Please compile the model with `model.compile()` '
'before calling `model_to_estimator()`.')
config = estimator_lib.maybe_overwrite_model_dir_and_session_config(config,
model_dir)
keras_model_fn = _create_keras_model_fn(keras_model, custom_objects)
if _any_weight_initialized(keras_model):
# Warn if config passed to estimator tries to update GPUOptions. If a
# session has already been created, the GPUOptions passed to the first
# session sticks.
if config.session_config.HasField('gpu_options'):
logging.warning(
'The Keras backend session has already been set. '
'The _session_config passed to model_to_estimator will not be used.')
else:
# Pass the config into keras backend's default session.
sess = session.Session(config=config.session_config)
K.set_session(sess)
warm_start_path = None
if keras_model._is_graph_network:
warm_start_path = _save_first_checkpoint(keras_model, custom_objects,
config)
elif keras_model.built:
logging.warning('You are creating an Estimator from a Keras model manually '
'subclassed from `Model`, that was already called on some '
'inputs (and thus already had weights). We are currently '
'unable to preserve the model\'s state (its weights) as '
'part of the estimator in this case. Be warned that the '
'estimator has been created using a freshly initialized '
'version of your model.\n'
'Note that this doesn\'t affect the state of the model '
'instance you passed as `keras_model` argument.')
estimator = estimator_lib.Estimator(keras_model_fn,
config=config,
warm_start_from=warm_start_path)
return estimator
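if __name__ == '__main__':
  # Hedged usage sketch, not part of the original module. It assumes a
  # TensorFlow build where `tf.keras` and `tf.data` are available; the
  # layer sizes, step count and random data below are illustrative only.
  import numpy as np
  import tensorflow as tf
  demo_model = tf.keras.models.Sequential([
      tf.keras.layers.Dense(8, activation='relu', input_shape=(4,)),
      tf.keras.layers.Dense(1),
  ])
  demo_model.compile(optimizer='rmsprop', loss='mse')
  demo_estimator = model_to_estimator(keras_model=demo_model)
  input_name = demo_model.input_names[0]
  def demo_input_fn():
    # Random numpy data wrapped in a tf.data pipeline, keyed by the
    # model's own input name so _create_ordered_io can match it up.
    features = np.random.rand(32, 4).astype(np.float32)
    labels = np.random.rand(32, 1).astype(np.float32)
    dataset = tf.data.Dataset.from_tensor_slices(
        ({input_name: features}, labels))
    return dataset.batch(8).repeat()
  demo_estimator.train(input_fn=demo_input_fn, steps=5)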
| aselle/tensorflow | tensorflow/python/estimator/keras.py | Python | apache-2.0 | 21,876 |
# -*- coding: utf-8 -*-
#
# PythonVideoAnnotator documentation build configuration file, created by
# sphinx-quickstart on Thu Jan 12 14:10:03 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Home'
copyright = u'2017, Ricardo Jorge Vieira Ribeiro'
author = u'Ricardo Jorge Vieira Ribeiro'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = u'2.x'
# The full version, including alpha/beta/rc tags.
#release = u'2.x'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#import sphinx_bootstrap_theme
#html_theme = 'bootstrap'
#html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'collapse_navigation': False,
'display_version': True,
'navigation_depth': 3
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
#html_context = {
# 'css_files': ['_static/custom.css'],
#}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PythonVideoAnnotatordoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PythonVideoAnnotator.tex', u'PythonVideoAnnotator Documentation',
u'Ricardo Jorge Vieira Ribeiro', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pythonvideoannotator', u'PythonVideoAnnotator Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PythonVideoAnnotator', u'PythonVideoAnnotator Documentation',
author, 'PythonVideoAnnotator', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/3.5': None}
| UmSenhorQualquer/pythonVideoAnnotator | docs/source/conf.py | Python | mit | 5,886 |
"""
Django settings for site_SE project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%%=#g2==@)0y(zz0vit1fezyx%c52zya88f^i$+2j0g#z12qgf'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.comments.urls',
'rest_framework',
'databases',
#'query',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'site_SE.urls'
WSGI_APPLICATION = 'site_SE.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
#'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
#'NAME': os.path.join(BASE_DIR, 'beer.stackexchange.dump.db'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
| collab-uniba/emotions-online-qa | site_SE/site_SE/settings.py | Python | mit | 2,124 |
#!/usr/bin/env python
# Copyright (c) 2016, Daniel Liew
# This file is covered by the license in LICENSE-SVCB.txt
# vim: set sw=4 ts=4 softtabstop=4 expandtab:
"""
Read a result info describing a set of KLEE test case replays.
"""
from load_klee_runner import add_KleeRunner_to_module_search_path
from load_klee_analysis import add_kleeanalysis_to_module_search_path
from load_native_analysis import add_nativeanalysis_to_module_search_path
add_KleeRunner_to_module_search_path()
add_kleeanalysis_to_module_search_path()
add_nativeanalysis_to_module_search_path()
from KleeRunner import ResultInfo
import KleeRunner.DriverUtil as DriverUtil
import KleeRunner.InvocationInfo
import KleeRunner.util
import nativeanalysis.analyse
import argparse
import logging
import os
import pprint
import subprocess
import sys
import yaml
_logger = logging.getLogger(__name__)
def main(args):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('result_info_file',
help='Result info file',
type=argparse.FileType('r'))
parser.add_argument('--dump-unknowns',
dest='dump_unknowns',
action='store_true')
parser.add_argument('--dump-timeouts',
dest='dump_timeouts',
action='store_true')
DriverUtil.parserAddLoggerArg(parser)
pargs = parser.parse_args()
DriverUtil.handleLoggerArgs(pargs, parser)
_logger.info('Loading "{}"...'.format(pargs.result_info_file.name))
resultInfos, resultInfoMisc = ResultInfo.loadResultInfos(pargs.result_info_file)
_logger.info('Loading complete')
# Check the misc data
if resultInfoMisc is None:
_logger.error('Expected result info to have misc data')
return 1
if resultInfoMisc['runner'] != 'NativeReplay':
_logger.error('Expected runner to have been NativeReplay but was "{}"'.format(
resultInfoMisc['runner']))
return 1
errorTypeToErrorListMap = dict()
    multipleOutcomeList = []
for result_index, r in enumerate(resultInfos):
_logger.info('Processing {}/{}'.format(result_index + 1, len(resultInfos)))
raw_result = r.GetInternalRepr()
program_path = r.RawInvocationInfo['program']
outcome = nativeanalysis.analyse.get_test_case_run_outcome(raw_result)
error_list = None
try:
error_list = errorTypeToErrorListMap[type(outcome)]
except KeyError:
error_list = []
errorTypeToErrorListMap[type(outcome)] = error_list
error_list.append(outcome)
# Print report
print('#'*70)
print("# of test cases with multiple outcomes: {}".format(len(multipeOutcomeList)))
for ty, error_list in errorTypeToErrorListMap.items():
print("# of {}: {}".format(ty, len(error_list)))
if ty == nativeanalysis.analyse.UnknownError and pargs.dump_unknowns:
for error in error_list:
print(error)
if ty == nativeanalysis.analyse.TimeoutError and pargs.dump_timeouts:
for error in error_list:
print(error)
# Now emit as YAML
#as_yaml = yaml.dump(program_to_coverage_info, default_flow_style=False)
#pargs.output_yaml.write(as_yaml)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
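# Hedged invocation sketch (the positional argument and flags are the ones
# the argparse parser above defines; the YAML file name is illustrative):
#
#   $ python result-info-native-replay-summary.py result-info.yml \
#         --dump-unknowns --dump-timeouts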
| delcypher/klee-runner | tools/result-info-native-replay-summary.py | Python | mit | 3,311 |
#
# Instant Python
# $Id: tkColorChooser.py,v 1.6 2003/04/06 09:00:52 rhettinger Exp $
#
# tk common colour chooser dialogue
#
# this module provides an interface to the native color dialogue
# available in Tk 4.2 and newer.
#
# written by Fredrik Lundh, May 1997
#
# fixed initialcolor handling in August 1998
#
#
# options (all have default values):
#
# - initialcolor: colour to mark as selected when dialog is displayed
# (given as an RGB triplet or a Tk color string)
#
# - parent: which window to place the dialog on top of
#
# - title: dialog title
#
from tkCommonDialog import Dialog
#
# color chooser class
class Chooser(Dialog):
"Ask for a color"
command = "tk_chooseColor"
def _fixoptions(self):
try:
# make sure initialcolor is a tk color string
color = self.options["initialcolor"]
if type(color) == type(()):
# assume an RGB triplet
self.options["initialcolor"] = "#%02x%02x%02x" % color
except KeyError:
pass
def _fixresult(self, widget, result):
# to simplify application code, the color chooser returns
# an RGB tuple together with the Tk color string
if not result:
return None, None # canceled
r, g, b = widget.winfo_rgb(result)
return (r/256, g/256, b/256), result
#
# convenience stuff
def askcolor(color = None, **options):
"Ask for a color"
if color:
options = options.copy()
options["initialcolor"] = color
return Chooser(**options).show()
# --------------------------------------------------------------------
# test stuff
if __name__ == "__main__":
print "color", askcolor()
| trivoldus28/pulsarch-verilog | tools/local/bas-release/bas,3.9/lib/python/lib/python2.3/lib-tk/tkColorChooser.py | Python | gpl-2.0 | 1,716 |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utility functions."""
import datetime
import json
import os
import urllib
from simian.mac import common
class Error(Exception):
"""Base error."""
class SerializeError(Error):
"""Error during serialization."""
class DeserializeError(Error):
"""Error during deserialization."""
class EpochValueError(Error):
"""Error for epoch to datetime conversion problems."""
class EpochFutureValueError(EpochValueError):
"""An epoch time is in the future."""
class EpochExtremeFutureValueError(EpochFutureValueError):
"""An epoch time extremely too far in the future."""
class Datetime(object):
"""Datetime class for extending utcfromtimestamp()."""
@classmethod
def utcfromtimestamp(cls, timestamp, allow_future=False):
"""Converts a str or int epoch time to datetime.
Note: this method drops ms from timestamps.
Args:
timestamp: str, int, or float epoch timestamp.
allow_future: boolean, default False, True to allow future timestamps.
Returns:
datetime representation of the timestamp.
Raises:
ValueError: timestamp is invalid.
EpochValueError: the timestamp is valid, but unacceptable.
EpochFutureValueError: timestamp under an hour in future.
EpochExtremeFutureValueError: timestamp over an hour in future.
"""
try:
timestamp = int(float(timestamp))
dt = datetime.datetime.utcfromtimestamp(timestamp)
except (TypeError, ValueError):
raise ValueError(
'timestamp is None, empty, or otherwise invalid: %s' % timestamp)
now = datetime.datetime.utcnow()
if not allow_future and dt > now:
msg = 'datetime in the future: %s' % dt
if dt > (now + datetime.timedelta(minutes=66)):
# raise a slightly different exception for > 66mins to allow for more
# verbose logging.
raise EpochExtremeFutureValueError(msg)
raise EpochFutureValueError(msg)
return dt
def Serialize(obj):
"""Return a binary serialized version of object.
Depending on the serialization method, some complex objects or input
formats may not be serializable.
UTF-8 strings (by themselves or in other structures e.g. lists) are always
supported.
Args:
obj: any object
Returns:
str, possibly containing ascii values >127
Raises:
    SerializeError: if an error occurred during serialization
"""
try:
return json.dumps(obj)
except TypeError as e:
raise SerializeError(e)
def Deserialize(s, parse_float=float):
"""Return an object for a binary serialized version.
Depending on the target platform, precision of float values may be lowered
on deserialization. Use parse_float to provide an alternative
floating point translation function, e.g. decimal.Decimal, if retaining
high levels of float precision (> ~10 places) is important.
Args:
s: str
parse_float: callable, optional, to translate floating point values
Returns:
any object that was serialized
Raises:
    DeserializeError: if an error occurred during deserialization
"""
try:
if s is None:
raise DeserializeError('Nothing to deserialize: %s' % type(s))
return json.loads(s, parse_float=parse_float)
except ValueError as e:
raise DeserializeError(e)
def UrlUnquote(s):
"""Return unquoted version of a url string."""
return urllib.unquote(s)
def MakeTrackMatrix(tracks, proposed_tracks=None):
"""Generates dict of tracks with string values indicating track status.
Args:
tracks: list of tracks the package is currently in.
proposed_tracks: list of tracks the package is proposed to be in.
Returns:
A dict of tracks with string values for status, these values are
in turn used by CSS to display the tracks color coded by status. Values
returned: current, proposed_in, proposed_out, not_in. These correspond
to CSS classes .track.current, .track.proposed_in, .track.proposed_out,
and .track.not_in.
"""
track_matrix = {}
tracks = frozenset(tracks)
if proposed_tracks is not None:
proposed_tracks = frozenset(proposed_tracks)
for track in common.TRACKS:
if track in tracks and track in proposed_tracks:
track_matrix[track] = 'current'
elif track in tracks:
track_matrix[track] = 'proposed_out'
elif track in proposed_tracks:
track_matrix[track] = 'proposed_in'
else:
track_matrix[track] = 'not_in'
else:
for track in common.TRACKS:
if track in tracks:
track_matrix[track] = 'current'
else:
track_matrix[track] = 'not_in'
return track_matrix
def GetBlobstoreGSBucket():
"""GS Bucket For Blobsore.
Returns:
GS Bucket Name in case we want to use Blobstore API with Google Cloud
Storage, None otherwise.
"""
return os.environ.get('BLOBSTORE_GS_BUCKET')
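if __name__ == '__main__':
  # Hedged usage sketch, not part of the original module: a JSON round
  # trip, an epoch conversion, and a track matrix for display. The track
  # names assume common.TRACKS includes 'stable' and 'testing'.
  payload = {'tracks': ['stable', 'testing'], 'count': 3}
  assert Deserialize(Serialize(payload)) == payload
  # 2014-01-01T00:00:00 UTC; allow_future is left at its default (False).
  print Datetime.utcfromtimestamp(1388534400)
  print MakeTrackMatrix(['stable'], proposed_tracks=['stable', 'testing'])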
| sillywilly42/simian | src/simian/mac/common/util.py | Python | apache-2.0 | 5,436 |
from .periodic_processor import PeriodicProcessor
from .simple_processor import SimpleProcessor
from .tarantool_processor import TarantoolProcessor
__all__ = ['SimpleProcessor', 'TarantoolProcessor', 'PeriodicProcessor']
| moelius/async-task-processor | async_task_processor/processors/__init__.py | Python | mit | 222 |
def _update_path():
import os, sys
resources = os.environ['RESOURCEPATH']
sys.path.append(os.path.join(
resources, 'lib', 'python%d.%d'%(sys.version_info[:2]), 'lib-dynload'))
sys.path.append(os.path.join(
resources, 'lib', 'python%d.%d'%(sys.version_info[:2])))
sys.path.append(os.path.join(
resources, 'lib', 'python%d.%d'%(sys.version_info[:2]), 'site-packages.zip'))
_update_path()
| orangeYao/twiOpinion | dustbin/py2app/.eggs/py2app-0.12-py2.7.egg/py2app/bootstrap/semi_standalone_path.py | Python | mit | 431 |
SECRET_KEY = ''
| joedborg/tapedeck | tapedeck/secrets.py | Python | gpl-2.0 | 16 |
"""
Django settings for pychart project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("SECRET_KEY", "")
# SECURITY WARNING: don't run with debug turned on in production!
# TODO: Make this an environment variable.
DEBUG = True
ALLOWED_HOSTS = os.environ.get("ALLOWED_HOSTS", "").split()
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pychart',
'pychart_profile',
'pychart_datarender',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pychart.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pychart.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get("PYCHART_DB", "pychart_db"),
'USER': os.environ.get("DB_USERNAME", ""),
'PASSWORD': os.environ.get("DB_PASSWORD", ""),
'HOST': os.environ.get("DB_ENDPOINT", ""),
'PORT': '5432',
'TEST': {
'NAME': os.environ.get("TEST_PYCHART_DB", "test_pychart_db")
}
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Los_Angeles'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'pychart', 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'MEDIA')
MEDIA_URL = "/media/"
LOGIN_REDIRECT_URL = 'pychart_profile:profile'
# Email setup
ACCOUNT_ACTIVATION_DAYS = 7
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = os.environ.get("EMAIL_HOST_PASSWORD", "")
# Set to None to prevent issues with ajax calls.
# TODO: Research best way to make data-heavy AJAX calls to back end.
DATA_UPLOAD_MAX_NUMBER_FIELDS = None
| CCallahanIV/PyChart | pychart/pychart/settings.py | Python | mit | 4,140 |
# -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la classe Espaces détaillée plus bas."""
from abstraits.obase import *
class Espaces(BaseObj):
"""Classe contenant plusieurs espaces de nom.
Chaque espace de nom est un attribut de cet objet.
Chaque espace se manipule comme un dictionnaire.
Exemple :
self.variables["nom_variable"] retourne la valeur de la variable
nom_variable
"""
def __init__(self, evenement):
"""Constructeur d'un espace"""
BaseObj.__init__(self)
self.evenement = evenement
self.variables = {}
self._construire()
def __getnewargs__(self):
return (None, )
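# Hedged usage sketch (comment only; `evenement` is whatever event object
# the scripting system passes in):
#
#   espaces = Espaces(evenement)
#   espaces.variables["nom_variable"] = 5
#   espaces.variables["nom_variable"]   # -> 5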
| stormi/tsunami | src/primaires/scripting/espaces.py | Python | bsd-3-clause | 2,229 |
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities related to bgp data types and models.
"""
import logging
import socket
from ryu.lib.packet.bgp import BGPUpdate
from ryu.lib.packet.bgp import RF_IPv4_UC
from ryu.lib.packet.bgp import RF_IPv6_UC
from ryu.lib.packet.bgp import RF_IPv4_VPN
from ryu.lib.packet.bgp import RF_IPv6_VPN
from ryu.lib.packet.bgp import RF_RTC_UC
from ryu.lib.packet.bgp import RouteTargetMembershipNLRI
from ryu.lib.packet.bgp import BGP_ATTR_TYPE_MULTI_EXIT_DISC
from ryu.lib.packet.bgp import BGPPathAttributeMultiExitDisc
from ryu.lib.packet.bgp import BGPPathAttributeMpUnreachNLRI
from ryu.lib.packet.bgp import BGPPathAttributeUnknown
from ryu.services.protocols.bgp.info_base.rtc import RtcPath
from ryu.services.protocols.bgp.info_base.ipv4 import Ipv4Path
from ryu.services.protocols.bgp.info_base.ipv6 import Ipv6Path
from ryu.services.protocols.bgp.info_base.vpnv4 import Vpnv4Path
from ryu.services.protocols.bgp.info_base.vpnv6 import Vpnv6Path
LOG = logging.getLogger('utils.bgp')
# RouteFamily to path sub-class mapping.
_ROUTE_FAMILY_TO_PATH_MAP = {RF_IPv4_UC: Ipv4Path,
RF_IPv6_UC: Ipv6Path,
RF_IPv4_VPN: Vpnv4Path,
RF_IPv6_VPN: Vpnv6Path,
RF_RTC_UC: RtcPath}
def create_path(src_peer, nlri, **kwargs):
route_family = nlri.ROUTE_FAMILY
assert route_family in _ROUTE_FAMILY_TO_PATH_MAP.keys()
path_cls = _ROUTE_FAMILY_TO_PATH_MAP.get(route_family)
return path_cls(src_peer, nlri, src_peer.version_num, **kwargs)
def clone_path_and_update_med_for_target_neighbor(path, med):
assert path and med
route_family = path.route_family
if route_family not in _ROUTE_FAMILY_TO_PATH_MAP.keys():
raise ValueError('Clone is not supported for address-family %s' %
route_family)
path_cls = _ROUTE_FAMILY_TO_PATH_MAP.get(route_family)
pattrs = path.pathattr_map
pattrs[BGP_ATTR_TYPE_MULTI_EXIT_DISC] = BGPPathAttributeMultiExitDisc(med)
return path_cls(
path.source, path.nlri, path.source_version_num,
pattrs=pattrs, nexthop=path.nexthop,
is_withdraw=path.is_withdraw,
med_set_by_target_neighbor=True
)
def clone_rtcpath_update_rt_as(path, new_rt_as):
"""Clones given RT NLRI `path`, and updates it with new RT_NLRI AS.
Parameters:
- `path`: (Path) RT_NLRI path
- `new_rt_as`: AS value of cloned paths' RT_NLRI
"""
assert path and new_rt_as
if not path or path.route_family != RF_RTC_UC:
raise ValueError('Expected RT_NLRI path')
old_nlri = path.nlri
new_rt_nlri = RouteTargetMembershipNLRI(new_rt_as, old_nlri.route_target)
return RtcPath(path.source, new_rt_nlri, path.source_version_num,
pattrs=path.pathattr_map, nexthop=path.nexthop,
is_withdraw=path.is_withdraw)
def from_inet_ptoi(bgp_id):
"""Convert an IPv4 address string format to a four byte long.
"""
four_byte_id = None
try:
packed_byte = socket.inet_pton(socket.AF_INET, bgp_id)
four_byte_id = long(packed_byte.encode('hex'), 16)
except ValueError:
LOG.debug('Invalid bgp id given for conversion to integer value %s',
bgp_id)
return four_byte_id
def get_unknow_opttrans_attr(path):
"""Utility method that gives a `dict` of unknown optional transitive
path attributes of `path`.
Returns dict: <key> - attribute type code, <value> - unknown path-attr.
"""
path_attrs = path.pathattr_map
unknown_opt_tran_attrs = {}
for _, attr in path_attrs.items():
if (isinstance(attr, BGPPathAttributeUnknown) and
attr.is_optional_transitive()):
unknown_opt_tran_attrs[attr.type_code] = attr
return unknown_opt_tran_attrs
def create_end_of_rib_update():
"""Construct end-of-rib (EOR) Update instance."""
mpunreach_attr = BGPPathAttributeMpUnreachNLRI(RF_IPv4_VPN.afi,
RF_IPv4_VPN.safi,
[])
eor = BGPUpdate(path_attributes=[mpunreach_attr])
return eor
# Bgp update message instance that can be used as End of RIB marker.
UPDATE_EOR = create_end_of_rib_update()
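# Hedged usage sketch (comment only; values are illustrative):
#
#   from_inet_ptoi('10.0.0.1')   # -> 167772161, i.e. 0x0a000001
#   UPDATE_EOR                   # an UPDATE whose MP_UNREACH_NLRI carries an
#                                # empty withdraw list, usable as an EOR marker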
| sivaramakrishnansr/ryu | ryu/services/protocols/bgp/utils/bgp.py | Python | apache-2.0 | 4,919 |
import sys
print(sys.platform)
print(2**100)
x='Spam!'
print(x*8)
| dachuanz/python3 | script1.py | Python | apache-2.0 | 66 |
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This file emits the list of reasons why a particular build needs to be clobbered
(or a list of 'landmines').
"""
import sys
import landmine_utils
builder = landmine_utils.builder
distributor = landmine_utils.distributor
gyp_defines = landmine_utils.gyp_defines
gyp_msvs_version = landmine_utils.gyp_msvs_version
platform = landmine_utils.platform
def print_landmines():
"""
ALL LANDMINES ARE EMITTED FROM HERE.
"""
# DO NOT add landmines as part of a regular CL. Landmines are a last-effort
# bandaid fix if a CL that got landed has a build dependency bug and all bots
# need to be cleaned up. If you're writing a new CL that causes build
# dependency problems, fix the dependency problems instead of adding a
# landmine.
if (distributor() == 'goma' and platform() == 'win32' and
builder() == 'ninja'):
print 'Need to clobber winja goma due to backend cwd cache fix.'
if platform() == 'android':
print 'Clobber: to handle new way of suppressing findbugs failures.'
print 'Clobber to fix gyp not rename package name (crbug.com/457038)'
if platform() == 'win' and builder() == 'ninja':
print 'Compile on cc_unittests fails due to symbols removed in r185063.'
if platform() == 'linux' and builder() == 'ninja':
print 'Builders switching from make to ninja will clobber on this.'
if platform() == 'mac':
print 'Switching from bundle to unbundled dylib (issue 14743002).'
if platform() in ('win', 'mac'):
print ('Improper dependency for create_nmf.py broke in r240802, '
'fixed in r240860.')
if (platform() == 'win' and builder() == 'ninja' and
gyp_msvs_version() == '2012' and
gyp_defines().get('target_arch') == 'x64' and
gyp_defines().get('dcheck_always_on') == '1'):
print "Switched win x64 trybots from VS2010 to VS2012."
if (platform() == 'win' and builder() == 'ninja' and
gyp_msvs_version().startswith('2013')):
print "Switched win from VS2010 to VS2013."
print "Update to VS2013 Update 2."
print "Update to VS2013 Update 4."
if (platform() == 'win' and gyp_msvs_version().startswith('2015')):
print 'Switch to VS2015'
print 'Need to clobber everything due to an IDL change in r154579 (blink)'
print 'Need to clobber everything due to gen file moves in r175513 (Blink)'
if (platform() != 'ios'):
    print 'Clobber to get rid of obsolete test plugin after r248358'
print 'Clobber to rebuild GN files for V8'
print 'Clobber to get rid of stale generated mojom.h files'
print 'Need to clobber everything due to build_nexe change in nacl r13424'
print '[chromium-dev] PSA: clobber build needed for IDR_INSPECTOR_* compil...'
print 'blink_resources.grd changed: crbug.com/400860'
print 'ninja dependency cycle: crbug.com/408192'
print 'Clobber to fix missing NaCl gyp dependencies (crbug.com/427427).'
print 'Another clobber for missing NaCl gyp deps (crbug.com/427427).'
print 'Clobber to fix GN not picking up increased ID range (crbug.com/444902)'
print 'Remove NaCl toolchains from the output dir (crbug.com/456902)'
if platform() == 'ios':
print 'Clobber iOS to workaround Xcode deps bug (crbug.com/485435)'
if platform() == 'win':
print 'Clobber to delete stale generated files (crbug.com/510086)'
def main():
print_landmines()
return 0
if __name__ == '__main__':
sys.exit(main())
| vadimtk/chrome4sdp | build/get_landmines.py | Python | bsd-3-clause | 3,555 |
from pyknon.MidiFile import MIDIFile
from pyknon.music import Note, NoteSeq, Rest
class MidiError(Exception):
pass
class Midi(object):
def __init__(self, number_tracks=1, tempo=60, instrument=0, channel=None):
"""
instrument: can be an integer or a list
channel: can be an integer or a list
"""
self.number_tracks = number_tracks
self.midi_data = MIDIFile(number_tracks)
for track in range(number_tracks):
self.midi_data.addTrackName(track, 0, "Track {0}".format(track))
self.midi_data.addTempo(track, 0, tempo)
instr = instrument[track] if isinstance(instrument, list) else instrument
if channel is None:
_channel = track
elif isinstance(channel, list):
_channel = channel[track]
else:
_channel = channel
self.midi_data.addProgramChange(track, _channel, 0, instr)
def seq_chords(self, seqlist, track=0, time=0, channel=None):
if track + 1 > self.number_tracks:
raise MidiError("You are trying to use more tracks than we have.")
_channel = channel if channel is not None else track
for item in seqlist:
if isinstance(item, NoteSeq):
volume = item[0].volume
dur = item[0].midi_dur
for note in item:
self.midi_data.addNote(track, _channel, note.midi_number, time, dur, volume)
time += dur
elif isinstance(item, Rest):
time += item.midi_dur
else:
raise MidiError("The input should be a list of NoteSeq but yours is a {0}: {1}".format(type(seqlist), seqlist))
return time
def seq_notes(self, noteseq, track=0, time=0, channel=None):
if track + 1 > self.number_tracks:
raise MidiError("You are trying to use more tracks than we have.")
_channel = channel if channel is not None else track
for note in noteseq:
if isinstance(note, Note):
self.midi_data.addNote(track, _channel, note.midi_number, time, note.midi_dur, note.volume)
else:
# we ignore the rests
pass
time += note.midi_dur
return time
def write(self, filename):
if isinstance(filename, str):
with open(filename, 'wb') as midifile:
self.midi_data.writeFile(midifile)
else:
self.midi_data.writeFile(filename)
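if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module; the note string
    # follows pyknon's notation (pitch name plus duration denominator) and
    # the output file name is illustrative.
    notes = NoteSeq("D4 F#8 A Bb4")
    midi = Midi(number_tracks=1, tempo=90)
    midi.seq_notes(notes, track=0)
    midi.write("demo.mid")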
| palmerev/pyknon | pyknon/genmidi.py | Python | mit | 2,565 |
from typing import Any, ClassVar, Dict, List, Optional, Type, Union
from commonmark.blocks import Parser
from . import box
from ._loop import loop_first
from ._stack import Stack
from .console import Console, ConsoleOptions, JustifyMethod, RenderResult
from .containers import Renderables
from .jupyter import JupyterMixin
from .panel import Panel
from .rule import Rule
from .segment import Segment
from .style import Style, StyleStack
from .syntax import Syntax
from .text import Text, TextType
class MarkdownElement:
new_line: ClassVar[bool] = True
@classmethod
def create(cls, markdown: "Markdown", node: Any) -> "MarkdownElement":
"""Factory to create markdown element,
Args:
markdown (Markdown): THe parent Markdown object.
node (Any): A node from Pygments.
Returns:
MarkdownElement: A new markdown element
"""
return cls()
def on_enter(self, context: "MarkdownContext") -> None:
"""Called when the node is entered.
Args:
context (MarkdownContext): The markdown context.
"""
def on_text(self, context: "MarkdownContext", text: TextType) -> None:
"""Called when text is parsed.
Args:
context (MarkdownContext): The markdown context.
"""
def on_leave(self, context: "MarkdownContext") -> None:
"""Called when the parser leaves the element.
Args:
            context (MarkdownContext): The markdown context.
"""
def on_child_close(
self, context: "MarkdownContext", child: "MarkdownElement"
) -> bool:
"""Called when a child element is closed.
This method allows a parent element to take over rendering of its children.
Args:
context (MarkdownContext): The markdown context.
child (MarkdownElement): The child markdown element.
Returns:
bool: Return True to render the element, or False to not render the element.
"""
return True
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
return ()
class UnknownElement(MarkdownElement):
"""An unknown element.
Hopefully there will be no unknown elements, and we will have a MarkdownElement for
everything in the document.
"""
class TextElement(MarkdownElement):
"""Base class for elements that render text."""
style_name = "none"
def on_enter(self, context: "MarkdownContext") -> None:
self.style = context.enter_style(self.style_name)
self.text = Text(justify="left")
def on_text(self, context: "MarkdownContext", text: TextType) -> None:
self.text.append(text, context.current_style if isinstance(text, str) else None)
def on_leave(self, context: "MarkdownContext") -> None:
context.leave_style()
class Paragraph(TextElement):
"""A Paragraph."""
style_name = "markdown.paragraph"
justify: JustifyMethod
@classmethod
def create(cls, markdown: "Markdown", node: MarkdownElement) -> "Paragraph":
return cls(justify=markdown.justify or "left")
def __init__(self, justify: JustifyMethod) -> None:
self.justify = justify
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
self.text.justify = self.justify
yield self.text
class Heading(TextElement):
"""A heading."""
@classmethod
def create(cls, markdown: "Markdown", node: Any) -> "Heading":
heading = cls(node.level)
return heading
def on_enter(self, context: "MarkdownContext") -> None:
self.text = Text()
context.enter_style(self.style_name)
def __init__(self, level: int) -> None:
self.level = level
self.style_name = f"markdown.h{level}"
super().__init__()
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
text = self.text
text.justify = "center"
if self.level == 1:
# Draw a border around h1s
yield Panel(
text,
box=box.DOUBLE,
style="markdown.h1.border",
)
else:
# Styled text for h2 and beyond
if self.level == 2:
yield Text("")
yield text
class CodeBlock(TextElement):
"""A code block with syntax highlighting."""
style_name = "markdown.code_block"
@classmethod
def create(cls, markdown: "Markdown", node: Any) -> "CodeBlock":
node_info = node.info or ""
lexer_name = node_info.partition(" ")[0]
return cls(lexer_name or "default", markdown.code_theme)
def __init__(self, lexer_name: str, theme: str) -> None:
self.lexer_name = lexer_name
self.theme = theme
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
code = str(self.text).rstrip()
syntax = Panel(
Syntax(code, self.lexer_name, theme=self.theme, word_wrap=True),
border_style="dim",
box=box.SQUARE,
)
yield syntax
class BlockQuote(TextElement):
"""A block quote."""
style_name = "markdown.block_quote"
def __init__(self) -> None:
self.elements: Renderables = Renderables()
def on_child_close(
self, context: "MarkdownContext", child: "MarkdownElement"
) -> bool:
self.elements.append(child)
return False
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
render_options = options.update(width=options.max_width - 4)
lines = console.render_lines(self.elements, render_options, style=self.style)
style = self.style
new_line = Segment("\n")
padding = Segment("▌ ", style)
for line in lines:
yield padding
yield from line
yield new_line
class HorizontalRule(MarkdownElement):
"""A horizontal rule to divide sections."""
new_line = False
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
style = console.get_style("markdown.hr", default="none")
yield Rule(style=style)
class ListElement(MarkdownElement):
"""A list element."""
@classmethod
def create(cls, markdown: "Markdown", node: Any) -> "ListElement":
list_data = node.list_data
return cls(list_data["type"], list_data["start"])
def __init__(self, list_type: str, list_start: Optional[int]) -> None:
self.items: List[ListItem] = []
self.list_type = list_type
self.list_start = list_start
def on_child_close(
self, context: "MarkdownContext", child: "MarkdownElement"
) -> bool:
assert isinstance(child, ListItem)
self.items.append(child)
return False
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
if self.list_type == "bullet":
for item in self.items:
yield from item.render_bullet(console, options)
else:
number = 1 if self.list_start is None else self.list_start
last_number = number + len(self.items)
for item in self.items:
yield from item.render_number(console, options, number, last_number)
number += 1
class ListItem(TextElement):
"""An item in a list."""
style_name = "markdown.item"
def __init__(self) -> None:
self.elements: Renderables = Renderables()
def on_child_close(
self, context: "MarkdownContext", child: "MarkdownElement"
) -> bool:
self.elements.append(child)
return False
def render_bullet(self, console: Console, options: ConsoleOptions) -> RenderResult:
render_options = options.update(width=options.max_width - 3)
lines = console.render_lines(self.elements, render_options, style=self.style)
bullet_style = console.get_style("markdown.item.bullet", default="none")
bullet = Segment(" • ", bullet_style)
padding = Segment(" " * 3, bullet_style)
new_line = Segment("\n")
for first, line in loop_first(lines):
yield bullet if first else padding
yield from line
yield new_line
def render_number(
self, console: Console, options: ConsoleOptions, number: int, last_number: int
) -> RenderResult:
number_width = len(str(last_number)) + 2
render_options = options.update(width=options.max_width - number_width)
lines = console.render_lines(self.elements, render_options, style=self.style)
number_style = console.get_style("markdown.item.number", default="none")
new_line = Segment("\n")
padding = Segment(" " * number_width, number_style)
numeral = Segment(f"{number}".rjust(number_width - 1) + " ", number_style)
for first, line in loop_first(lines):
yield numeral if first else padding
yield from line
yield new_line
class ImageItem(TextElement):
"""Renders a placeholder for an image."""
new_line = False
@classmethod
def create(cls, markdown: "Markdown", node: Any) -> "MarkdownElement":
"""Factory to create markdown element,
Args:
markdown (Markdown): THe parent Markdown object.
node (Any): A node from Pygments.
Returns:
MarkdownElement: A new markdown element
"""
return cls(node.destination, markdown.hyperlinks)
def __init__(self, destination: str, hyperlinks: bool) -> None:
self.destination = destination
self.hyperlinks = hyperlinks
self.link: Optional[str] = None
super().__init__()
def on_enter(self, context: "MarkdownContext") -> None:
self.link = context.current_style.link
self.text = Text(justify="left")
super().on_enter(context)
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
link_style = Style(link=self.link or self.destination or None)
title = self.text or Text(self.destination.strip("/").rsplit("/", 1)[-1])
if self.hyperlinks:
title.stylize(link_style)
yield Text.assemble("🌆 ", title, " ", end="")
class MarkdownContext:
"""Manages the console render state."""
def __init__(
self,
console: Console,
options: ConsoleOptions,
style: Style,
inline_code_lexer: Optional[str] = None,
inline_code_theme: str = "monokai",
) -> None:
self.console = console
self.options = options
self.style_stack: StyleStack = StyleStack(style)
self.stack: Stack[MarkdownElement] = Stack()
self._syntax: Optional[Syntax] = None
if inline_code_lexer is not None:
self._syntax = Syntax("", inline_code_lexer, theme=inline_code_theme)
@property
def current_style(self) -> Style:
"""Current style which is the product of all styles on the stack."""
return self.style_stack.current
def on_text(self, text: str, node_type: str) -> None:
"""Called when the parser visits text."""
if node_type in "code" and self._syntax is not None:
highlight_text = self._syntax.highlight(text)
highlight_text.rstrip()
self.stack.top.on_text(
self, Text.assemble(highlight_text, style=self.style_stack.current)
)
else:
self.stack.top.on_text(self, text)
def enter_style(self, style_name: Union[str, Style]) -> Style:
"""Enter a style context."""
style = self.console.get_style(style_name, default="none")
self.style_stack.push(style)
return self.current_style
def leave_style(self) -> Style:
"""Leave a style context."""
style = self.style_stack.pop()
return style
class Markdown(JupyterMixin):
"""A Markdown renderable.
Args:
markup (str): A string containing markdown.
code_theme (str, optional): Pygments theme for code blocks. Defaults to "monokai".
justify (JustifyMethod, optional): Justify value for paragraphs. Defaults to None.
style (Union[str, Style], optional): Optional style to apply to markdown.
hyperlinks (bool, optional): Enable hyperlinks. Defaults to ``True``.
        inline_code_lexer (str, optional): Lexer to use if inline code highlighting is
            enabled. Defaults to None.
        inline_code_theme (str, optional): Pygments theme for inline code
            highlighting. Defaults to None, which falls back to ``code_theme``.
"""
elements: ClassVar[Dict[str, Type[MarkdownElement]]] = {
"paragraph": Paragraph,
"heading": Heading,
"code_block": CodeBlock,
"block_quote": BlockQuote,
"thematic_break": HorizontalRule,
"list": ListElement,
"item": ListItem,
"image": ImageItem,
}
inlines = {"emph", "strong", "code", "strike"}
def __init__(
self,
markup: str,
code_theme: str = "monokai",
justify: Optional[JustifyMethod] = None,
style: Union[str, Style] = "none",
hyperlinks: bool = True,
inline_code_lexer: Optional[str] = None,
inline_code_theme: Optional[str] = None,
) -> None:
self.markup = markup
parser = Parser()
self.parsed = parser.parse(markup)
self.code_theme = code_theme
self.justify: Optional[JustifyMethod] = justify
self.style = style
self.hyperlinks = hyperlinks
self.inline_code_lexer = inline_code_lexer
self.inline_code_theme = inline_code_theme or code_theme
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
"""Render markdown to the console."""
style = console.get_style(self.style, default="none")
options = options.update(height=None)
context = MarkdownContext(
console,
options,
style,
inline_code_lexer=self.inline_code_lexer,
inline_code_theme=self.inline_code_theme,
)
nodes = self.parsed.walker()
inlines = self.inlines
new_line = False
for current, entering in nodes:
node_type = current.t
if node_type in ("html_inline", "html_block", "text"):
context.on_text(current.literal.replace("\n", " "), node_type)
elif node_type == "linebreak":
if entering:
context.on_text("\n", node_type)
elif node_type == "softbreak":
if entering:
context.on_text(" ", node_type)
elif node_type == "link":
if entering:
link_style = console.get_style("markdown.link", default="none")
if self.hyperlinks:
link_style += Style(link=current.destination)
context.enter_style(link_style)
else:
context.leave_style()
if not self.hyperlinks:
context.on_text(" (", node_type)
style = Style(underline=True) + console.get_style(
"markdown.link_url", default="none"
)
context.enter_style(style)
context.on_text(current.destination, node_type)
context.leave_style()
context.on_text(")", node_type)
elif node_type in inlines:
if current.is_container():
if entering:
context.enter_style(f"markdown.{node_type}")
else:
context.leave_style()
else:
context.enter_style(f"markdown.{node_type}")
if current.literal:
context.on_text(current.literal, node_type)
context.leave_style()
else:
element_class = self.elements.get(node_type) or UnknownElement
if current.is_container():
if entering:
element = element_class.create(self, current)
context.stack.push(element)
element.on_enter(context)
else:
element = context.stack.pop()
if context.stack:
if context.stack.top.on_child_close(context, element):
if new_line:
yield Segment("\n")
yield from console.render(element, context.options)
element.on_leave(context)
else:
element.on_leave(context)
else:
element.on_leave(context)
yield from console.render(element, context.options)
new_line = element.new_line
else:
element = element_class.create(self, current)
context.stack.push(element)
element.on_enter(context)
if current.literal:
element.on_text(context, current.literal.rstrip())
context.stack.pop()
if context.stack.top.on_child_close(context, element):
if new_line:
yield Segment("\n")
yield from console.render(element, context.options)
element.on_leave(context)
else:
element.on_leave(context)
new_line = element.new_line
if __name__ == "__main__": # pragma: no cover
import argparse
import sys
parser = argparse.ArgumentParser(
description="Render Markdown to the console with Rich"
)
parser.add_argument(
"path",
metavar="PATH",
help="path to markdown file, or - for stdin",
)
parser.add_argument(
"-c",
"--force-color",
dest="force_color",
action="store_true",
default=None,
help="force color for non-terminals",
)
parser.add_argument(
"-t",
"--code-theme",
dest="code_theme",
default="monokai",
help="pygments code theme",
)
parser.add_argument(
"-i",
"--inline-code-lexer",
dest="inline_code_lexer",
default=None,
help="inline_code_lexer",
)
parser.add_argument(
"-y",
"--hyperlinks",
dest="hyperlinks",
action="store_true",
help="enable hyperlinks",
)
parser.add_argument(
"-w",
"--width",
type=int,
dest="width",
default=None,
help="width of output (default will auto-detect)",
)
parser.add_argument(
"-j",
"--justify",
dest="justify",
action="store_true",
help="enable full text justify",
)
parser.add_argument(
"-p",
"--page",
dest="page",
action="store_true",
help="use pager to scroll output",
)
args = parser.parse_args()
from rich.console import Console
if args.path == "-":
markdown_body = sys.stdin.read()
else:
with open(args.path, "rt", encoding="utf-8") as markdown_file:
markdown_body = markdown_file.read()
markdown = Markdown(
markdown_body,
justify="full" if args.justify else "left",
code_theme=args.code_theme,
hyperlinks=args.hyperlinks,
inline_code_lexer=args.inline_code_lexer,
)
if args.page:
import pydoc
import io
console = Console(
file=io.StringIO(), force_terminal=args.force_color, width=args.width
)
console.print(markdown)
pydoc.pager(console.file.getvalue()) # type: ignore
else:
console = Console(force_terminal=args.force_color, width=args.width)
console.print(markdown)
| willmcgugan/rich | rich/markdown.py | Python | mit | 20,616 |
import os
from flask import render_template, request, send_from_directory, redirect
from threading import Thread
from app import app
from app import twitapp, tagcloud
from app.utils import maintenance, gettoken, process_source, process_uploaded_txt_file, allowed_file_img
@app.route('/',methods=['GET'])
def index():
maintenance()
    if request.args:
return redirect('/')
else:
return render_template('index.html')
@app.route('/',methods=['POST'])
def index_process():
    if 'token' in request.form:
return recolor_tagcloud(request=request)
else:
return create_tagcloud(request=request)
@app.route('/updatetwitter',methods=['GET'])
def update_twitter_tagclouds():
t = Thread(target=twitapp.post_tags_from_url,args=('news.yandex.ru',))
t.start()
return render_template('redirect.html')
#return redirect('/')
def create_tagcloud(request):
token = gettoken()
#SOURCEFILE AND URL
try:
sourcefilename = process_source(token=token,uploadedfile=request.files['source'],weburl=request.form['pageurl'])
    except Exception:
return render_template('index.html', error='nosource')
#STOPWORDSFILE
if request.files['stopwords']:
try:
stopwordsfilename = os.path.join(app.config['UPLOAD_FOLDER'], ''.join(['stopwords', token, '.txt']))
process_uploaded_txt_file(uploadedfile=request.files['stopwords'], targetfilename=stopwordsfilename)
        except Exception:
return render_template('index.html', error='stoptxtfile')
else:
stopwordsfilename = None
#MASKFILE
    mask_file = request.files['mask']
    if mask_file:
        if allowed_file_img(mask_file.filename) and mask_file.content_length < app.config['MAX_FILE_CONTENT_LENGTH']:
            maskfilename = os.path.join(app.config['UPLOAD_FOLDER'], ''.join(['maskfile', token]))
            mask_file.save(maskfilename)
else:
return render_template('index.html', error='maskfile')
else:
maskfilename = None
#MAX_COUNT
if request.form['max_words']:
max_words = int(request.form['max_words'])
else:
max_words = 250
    randomizecolors = 'randomizecolors' in request.form
    ignorebasestopwords = 'ignorebasestopwords' in request.form
outputfilename = os.path.join(app.config['OUTPUT_FOLDER'], ''.join(['tagcloud_', token, '.png']))
layoutfilename = os.path.join(app.config['UPLOAD_FOLDER'], ''.join(['layout', token]))
if tagcloud.createcloud(sourcefilename=sourcefilename,
stopwordsfilename=stopwordsfilename,
ignorebasestopwords=ignorebasestopwords,
outputfilename=outputfilename,
layoutfilename=layoutfilename,
maskfilename=maskfilename,
randomizecolors=randomizecolors,
max_words=max_words):
return render_template('result.html',
filename=''.join(['/output/',''.join(['tagcloud_', token, '.png']),'?',gettoken()]),
randomizecolors=randomizecolors,
token=token)
else:
return render_template('index.html', error='tagcloud')
def recolor_tagcloud(request):
token = request.form['token']
    randomizecolors = request.form['randomizecolors'] == 'True'
maskfilename = os.path.join(app.config['UPLOAD_FOLDER'], ''.join(['maskfile', token]))
if not os.path.isfile(maskfilename):
maskfilename = None
outputfilename = os.path.join(app.config['OUTPUT_FOLDER'], ''.join(['tagcloud_', token, '.png']))
if tagcloud.recolor_cloud(outputfilename=outputfilename,
maskfilename=maskfilename,
randomizecolors=randomizecolors,
token=token):
return render_template('result.html',
filename=''.join(['/output/', ''.join(['tagcloud_', token, '.png']), '?', gettoken()]),
randomizecolors=randomizecolors,
token=token)
else:
return render_template('index.html', error='error')
@app.route('/robots.txt')
@app.route('/sitemap.xml')
def static_from_root():
return send_from_directory(app.static_folder, request.path[1:])
@app.route('/examples')
def see_examples():
return render_template('examples.html')
@app.errorhandler(404)
def page_not_found(e):
return redirect('/')
@app.route('/output/<filename>')
def output_file(filename):
return send_from_directory(os.path.abspath(app.config['OUTPUT_FOLDER']),filename)
| OlegPyatakov/tagclouds | source/app/views.py | Python | mit | 4,926 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from django.conf import settings
from django.core.handlers.wsgi import WSGIRequest
from django.forms import ValidationError
from django.http import QueryDict
from django.test import TestCase
from django.test.client import Client
from paypal.pro.fields import CreditCardField
from paypal.pro.helpers import PayPalWPP, PayPalError
class RequestFactory(Client):
# Used to generate request objects.
def request(self, **request):
environ = {
'HTTP_COOKIE': self.cookies,
'PATH_INFO': '/',
'QUERY_STRING': '',
'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'SERVER_NAME': 'testserver',
'SERVER_PORT': 80,
'SERVER_PROTOCOL': 'HTTP/1.1',
}
environ.update(self.defaults)
environ.update(request)
return WSGIRequest(environ)
RF = RequestFactory()
REQUEST = RF.get("/pay/", REMOTE_ADDR="127.0.0.1:8000")
class DummyPayPalWPP(PayPalWPP):
pass
# """Dummy class for testing PayPalWPP."""
# responses = {
# # @@@ Need some reals data here.
# "DoDirectPayment": """ack=Success×tamp=2009-03-12T23%3A52%3A33Z&l_severitycode0=Error&l_shortmessage0=Security+error&l_longmessage0=Security+header+is+not+valid&version=54.0&build=854529&l_errorcode0=&correlationid=""",
# }
#
# def _request(self, data):
# return self.responses["DoDirectPayment"]
class CreditCardFieldTest(TestCase):
def testCreditCardField(self):
field = CreditCardField()
field.clean('4797503429879309')
        self.assertEqual(field.card_type, "Visa")
self.assertRaises(ValidationError, CreditCardField().clean, '1234567890123455')
class PayPalWPPTest(TestCase):
def setUp(self):
        # Avoiding blasting real requests at PayPal.
self.old_debug = settings.DEBUG
settings.DEBUG = True
self.item = {
'amt': '9.95',
'inv': 'inv',
'custom': 'custom',
'next': 'http://www.example.com/next/',
'returnurl': 'http://www.example.com/pay/',
'cancelurl': 'http://www.example.com/cancel/'
}
self.wpp = DummyPayPalWPP(REQUEST)
def tearDown(self):
settings.DEBUG = self.old_debug
def test_doDirectPayment_missing_params(self):
data = {'firstname': 'Chewbacca'}
self.assertRaises(PayPalError, self.wpp.doDirectPayment, data)
def test_doDirectPayment_valid(self):
data = {
'firstname': 'Brave',
'lastname': 'Star',
'street': '1 Main St',
'city': u'San Jos\xe9',
'state': 'CA',
'countrycode': 'US',
'zip': '95131',
'expdate': '012019',
'cvv2': '037',
'acct': '4797503429879309',
'creditcardtype': 'visa',
'ipaddress': '10.0.1.199',}
data.update(self.item)
self.assertTrue(self.wpp.doDirectPayment(data))
def test_doDirectPayment_invalid(self):
data = {
'firstname': 'Epic',
'lastname': 'Fail',
'street': '100 Georgia St',
'city': 'Vancouver',
'state': 'BC',
'countrycode': 'CA',
'zip': 'V6V 1V1',
'expdate': '012019',
'cvv2': '999',
'acct': '1234567890',
'creditcardtype': 'visa',
'ipaddress': '10.0.1.199',}
data.update(self.item)
self.assertFalse(self.wpp.doDirectPayment(data))
def test_setExpressCheckout(self):
# We'll have to stub out tests for doExpressCheckoutPayment and friends
# because they're behind paypal's doors.
nvp_obj = self.wpp.setExpressCheckout(self.item)
self.assertTrue(nvp_obj.ack == "Success")
### DoExpressCheckoutPayment
# PayPal Request:
# {'amt': '10.00',
# 'cancelurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname',
# 'custom': u'website_id=480&cname=1',
# 'inv': u'website-480-cname',
# 'method': 'DoExpressCheckoutPayment',
# 'next': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname',
# 'payerid': u'BN5JZ2V7MLEV4',
# 'paymentaction': 'Sale',
# 'returnurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname',
# 'token': u'EC-6HW17184NE0084127'}
#
# PayPal Response:
# {'ack': 'Success',
# 'amt': '10.00',
# 'build': '848077',
# 'correlationid': '375f4773c3d34',
# 'currencycode': 'USD',
# 'feeamt': '0.59',
# 'ordertime': '2009-03-04T20:56:08Z',
# 'paymentstatus': 'Completed',
# 'paymenttype': 'instant',
# 'pendingreason': 'None',
# 'reasoncode': 'None',
# 'taxamt': '0.00',
# 'timestamp': '2009-03-04T20:56:09Z',
# 'token': 'EC-6HW17184NE0084127',
# 'transactionid': '3TG42202A7335864V',
# 'transactiontype': 'expresscheckout',
# 'version': '54.0'} | neumerance/deploy | paypal/pro/tests.py | Python | apache-2.0 | 4,946 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from frappe.utils import flt, time_diff_in_hours, now, add_days, cint
from erpnext.stock.doctype.purchase_receipt.test_purchase_receipt import set_perpetual_inventory
from erpnext.manufacturing.doctype.production_order.production_order \
import make_stock_entry, ItemHasVariantError
from erpnext.stock.doctype.stock_entry import test_stock_entry
from erpnext.stock.doctype.item.test_item import get_total_projected_qty
from erpnext.stock.utils import get_bin
from erpnext.selling.doctype.sales_order.test_sales_order import make_sales_order
class TestProductionOrder(unittest.TestCase):
def setUp(self):
self.warehouse = '_Test Warehouse 2 - _TC'
self.item = '_Test Item'
def check_planned_qty(self):
set_perpetual_inventory(0)
planned0 = frappe.db.get_value("Bin", {"item_code": "_Test FG Item",
"warehouse": "_Test Warehouse 1 - _TC"}, "planned_qty") or 0
pro_order = make_prod_order_test_record()
planned1 = frappe.db.get_value("Bin", {"item_code": "_Test FG Item",
"warehouse": "_Test Warehouse 1 - _TC"}, "planned_qty")
self.assertEqual(planned1, planned0 + 10)
# add raw materials to stores
test_stock_entry.make_stock_entry(item_code="_Test Item",
target="Stores - _TC", qty=100, basic_rate=100)
test_stock_entry.make_stock_entry(item_code="_Test Item Home Desktop 100",
target="Stores - _TC", qty=100, basic_rate=100)
# from stores to wip
s = frappe.get_doc(make_stock_entry(pro_order.name, "Material Transfer for Manufacture", 4))
for d in s.get("items"):
d.s_warehouse = "Stores - _TC"
s.insert()
s.submit()
# from wip to fg
s = frappe.get_doc(make_stock_entry(pro_order.name, "Manufacture", 4))
s.insert()
s.submit()
self.assertEqual(frappe.db.get_value("Production Order", pro_order.name, "produced_qty"), 4)
planned2 = frappe.db.get_value("Bin", {"item_code": "_Test FG Item",
"warehouse": "_Test Warehouse 1 - _TC"}, "planned_qty")
self.assertEqual(planned2, planned0 + 6)
return pro_order
def test_over_production(self):
from erpnext.manufacturing.doctype.production_order.production_order import StockOverProductionError
pro_doc = self.check_planned_qty()
test_stock_entry.make_stock_entry(item_code="_Test Item",
target="_Test Warehouse - _TC", qty=100, basic_rate=100)
test_stock_entry.make_stock_entry(item_code="_Test Item Home Desktop 100",
target="_Test Warehouse - _TC", qty=100, basic_rate=100)
s = frappe.get_doc(make_stock_entry(pro_doc.name, "Manufacture", 7))
s.insert()
self.assertRaises(StockOverProductionError, s.submit)
def test_make_time_sheet(self):
from erpnext.manufacturing.doctype.production_order.production_order import make_timesheet
prod_order = make_prod_order_test_record(item="_Test FG Item 2",
planned_start_date=now(), qty=1, do_not_save=True)
prod_order.set_production_order_operations()
prod_order.insert()
prod_order.submit()
d = prod_order.operations[0]
d.completed_qty = flt(d.completed_qty)
name = frappe.db.get_value('Timesheet', {'production_order': prod_order.name}, 'name')
time_sheet_doc = frappe.get_doc('Timesheet', name)
time_sheet_doc.submit()
self.assertEqual(prod_order.name, time_sheet_doc.production_order)
self.assertEqual((prod_order.qty - d.completed_qty), sum([d.completed_qty for d in time_sheet_doc.time_logs]))
manufacturing_settings = frappe.get_doc({
"doctype": "Manufacturing Settings",
"allow_production_on_holidays": 0
})
manufacturing_settings.save()
prod_order.load_from_db()
self.assertEqual(prod_order.operations[0].status, "Completed")
self.assertEqual(prod_order.operations[0].completed_qty, prod_order.qty)
self.assertEqual(prod_order.operations[0].actual_operation_time, 60)
self.assertEqual(prod_order.operations[0].actual_operating_cost, 100)
time_sheet_doc1 = make_timesheet(prod_order.name)
self.assertEqual(len(time_sheet_doc1.get('time_logs')), 0)
time_sheet_doc.cancel()
prod_order.load_from_db()
self.assertEqual(prod_order.operations[0].status, "Pending")
self.assertEqual(flt(prod_order.operations[0].completed_qty), 0)
self.assertEqual(flt(prod_order.operations[0].actual_operation_time), 0)
self.assertEqual(flt(prod_order.operations[0].actual_operating_cost), 0)
def test_planned_operating_cost(self):
prod_order = make_prod_order_test_record(item="_Test FG Item 2",
planned_start_date=now(), qty=1, do_not_save=True)
prod_order.set_production_order_operations()
cost = prod_order.planned_operating_cost
prod_order.qty = 2
prod_order.set_production_order_operations()
self.assertEqual(prod_order.planned_operating_cost, cost*2)
def test_production_item(self):
prod_order = make_prod_order_test_record(item="_Test FG Item", qty=1, do_not_save=True)
frappe.db.set_value("Item", "_Test FG Item", "end_of_life", "2000-1-1")
self.assertRaises(frappe.ValidationError, prod_order.save)
frappe.db.set_value("Item", "_Test FG Item", "end_of_life", None)
frappe.db.set_value("Item", "_Test FG Item", "disabled", 1)
self.assertRaises(frappe.ValidationError, prod_order.save)
frappe.db.set_value("Item", "_Test FG Item", "disabled", 0)
prod_order = make_prod_order_test_record(item="_Test Variant Item", qty=1, do_not_save=True)
self.assertRaises(ItemHasVariantError, prod_order.save)
def test_reserved_qty_for_production_submit(self):
self.bin1_at_start = get_bin(self.item, self.warehouse)
# reset to correct value
self.bin1_at_start.update_reserved_qty_for_production()
self.pro_order = make_prod_order_test_record(item="_Test FG Item", qty=2,
source_warehouse=self.warehouse)
self.bin1_on_submit = get_bin(self.item, self.warehouse)
# reserved qty for production is updated
self.assertEqual(cint(self.bin1_at_start.reserved_qty_for_production) + 2,
cint(self.bin1_on_submit.reserved_qty_for_production))
self.assertEqual(cint(self.bin1_at_start.projected_qty),
cint(self.bin1_on_submit.projected_qty) + 2)
def test_reserved_qty_for_production_cancel(self):
self.test_reserved_qty_for_production_submit()
self.pro_order.cancel()
bin1_on_cancel = get_bin(self.item, self.warehouse)
		# reserved_qty_for_production updated
self.assertEqual(cint(self.bin1_at_start.reserved_qty_for_production),
cint(bin1_on_cancel.reserved_qty_for_production))
self.assertEqual(self.bin1_at_start.projected_qty,
cint(bin1_on_cancel.projected_qty))
def test_projected_qty_for_production_and_sales_order(self):
before_production_order = get_bin(self.item, self.warehouse)
before_production_order.update_reserved_qty_for_production()
self.pro_order = make_prod_order_test_record(item="_Test FG Item", qty=2,
source_warehouse=self.warehouse)
after_production_order = get_bin(self.item, self.warehouse)
sales_order = make_sales_order(item = self.item, qty = 2)
after_sales_order = get_bin(self.item, self.warehouse)
self.assertEqual(cint(before_production_order.reserved_qty_for_production) + 2,
cint(after_sales_order.reserved_qty_for_production))
self.assertEqual(cint(before_production_order.projected_qty),
cint(after_sales_order.projected_qty) + 2)
total_projected_qty = get_total_projected_qty(self.item)
item_doc = frappe.get_doc('Item', self.item)
self.assertEqual(total_projected_qty, item_doc.total_projected_qty)
def test_reserved_qty_for_production_on_stock_entry(self):
test_stock_entry.make_stock_entry(item_code="_Test Item",
target= self.warehouse, qty=100, basic_rate=100)
test_stock_entry.make_stock_entry(item_code="_Test Item Home Desktop 100",
target= self.warehouse, qty=100, basic_rate=100)
self.test_reserved_qty_for_production_submit()
s = frappe.get_doc(make_stock_entry(self.pro_order.name,
"Material Transfer for Manufacture", 2))
s.submit()
bin1_on_start_production = get_bin(self.item, self.warehouse)
		# reserved_qty_for_production updated
self.assertEqual(cint(self.bin1_at_start.reserved_qty_for_production),
cint(bin1_on_start_production.reserved_qty_for_production))
		# projected qty will now be 2 less (because of item movement)
self.assertEqual(cint(self.bin1_at_start.projected_qty),
cint(bin1_on_start_production.projected_qty) + 2)
s = frappe.get_doc(make_stock_entry(self.pro_order.name, "Manufacture", 2))
bin1_on_end_production = get_bin(self.item, self.warehouse)
# no change in reserved / projected
self.assertEqual(cint(bin1_on_end_production.reserved_qty_for_production),
cint(bin1_on_start_production.reserved_qty_for_production))
self.assertEqual(cint(bin1_on_end_production.projected_qty),
			cint(bin1_on_start_production.projected_qty))
# required_items removed
self.pro_order.reload()
self.assertEqual(len(self.pro_order.required_items), 0)
def test_scrap_material_qty(self):
prod_order = make_prod_order_test_record(planned_start_date=now(), qty=2)
# add raw materials to stores
test_stock_entry.make_stock_entry(item_code="_Test Item",
target="Stores - _TC", qty=10, basic_rate=5000.0)
test_stock_entry.make_stock_entry(item_code="_Test Item Home Desktop 100",
target="Stores - _TC", qty=10, basic_rate=1000.0)
s = frappe.get_doc(make_stock_entry(prod_order.name, "Material Transfer for Manufacture", 2))
for d in s.get("items"):
d.s_warehouse = "Stores - _TC"
s.insert()
s.submit()
s = frappe.get_doc(make_stock_entry(prod_order.name, "Manufacture", 2))
s.insert()
s.submit()
prod_order_details = frappe.db.get_value("Production Order", prod_order.name,
["scrap_warehouse", "qty", "produced_qty", "bom_no"], as_dict=1)
scrap_item_details = get_scrap_item_details(prod_order_details.bom_no)
self.assertEqual(prod_order_details.produced_qty, 2)
for item in s.items:
if item.bom_no and item.item_code in scrap_item_details:
self.assertEqual(prod_order_details.scrap_warehouse, item.t_warehouse)
self.assertEqual(flt(prod_order_details.qty)*flt(scrap_item_details[item.item_code]), item.qty)
def get_scrap_item_details(bom_no):
scrap_items = {}
for item in frappe.db.sql("""select item_code, qty from `tabBOM Scrap Item`
where parent = %s""", bom_no, as_dict=1):
scrap_items[item.item_code] = item.qty
return scrap_items
def make_prod_order_test_record(**args):
args = frappe._dict(args)
pro_order = frappe.new_doc("Production Order")
pro_order.production_item = args.production_item or args.item or args.item_code or "_Test FG Item"
pro_order.bom_no = frappe.db.get_value("BOM", {"item": pro_order.production_item,
"is_active": 1, "is_default": 1})
pro_order.qty = args.qty or 10
pro_order.wip_warehouse = args.wip_warehouse or "_Test Warehouse - _TC"
pro_order.fg_warehouse = args.fg_warehouse or "_Test Warehouse 1 - _TC"
	pro_order.scrap_warehouse = args.scrap_warehouse or "_Test Scrap Warehouse - _TC"
pro_order.company = args.company or "_Test Company"
pro_order.stock_uom = args.stock_uom or "_Test UOM"
pro_order.set_production_order_operations()
if args.source_warehouse:
pro_order.source_warehouse = args.source_warehouse
if args.planned_start_date:
pro_order.planned_start_date = args.planned_start_date
if not args.do_not_save:
pro_order.insert()
if not args.do_not_submit:
pro_order.submit()
return pro_order
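# Hypothetical usage sketch (not part of the upstream suite): the factory
# above fills in sensible defaults, so callers override only what a given
# test cares about, e.g.:
#
#   pro_order = make_prod_order_test_record(item="_Test FG Item", qty=5,
#       source_warehouse="_Test Warehouse 2 - _TC", do_not_submit=True)
#   pro_order.submit()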
test_records = frappe.get_test_records('Production Order')
| bhupennewalkar1337/erpnext | erpnext/manufacturing/doctype/production_order/test_production_order.py | Python | gpl-3.0 | 11,486 |
#-*- coding: utf-8 -*-
import os
import logging
import time
from datetime import datetime
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from Common.CreatScreenshot import CreatScreenshot
from Common.CommonFunction import CommonFunction
from Common.CommonFunction import driver
from Common.CreatLog import CreatLog
from UserLogin import UserLogin
filename = CreatScreenshot("d:\\screenshot\\businesscountpic\\") # screenshot save path
clickNum = 100 # number of consecutive clicks
function = CommonFunction(clickNum)
class BusinessCount(object):
def __init__(self):
time.sleep(1)
function.cicleClick(driver.find_element_by_xpath("/html/body/div[2]/div[1]/ul/li[3]/a").click())
logging.info(u"业务统计开始测试")
logging.info(u"连续点击次数:" + str(clickNum) )
def businessMap(self):
try:
time.sleep(1)
if (function.isExsitElement("//li/a[@href='/businessStatistics/areaMap']")):
logging.info(u"######城市信息汇总######")
driver.get_screenshot_as_file(filename.screenShot())
except Exception, e:
logging.error(u"城市信息汇总 网络问题或者找不到内容:")
logging.error(e)
driver.get_screenshot_as_file(filename.screenShot())
def busLineDownload(self):
try:
logging.info(u"######公交线路下载统计######")
time.sleep(1)
function.cicleClick(driver.find_element_by_xpath("//li/a[@href='/businessStatistics/busLine']").click())
driver.get_screenshot_as_file(filename.screenShot())
logging.info(function.importExcle(True, "no"))
driver.get_screenshot_as_file(filename.screenShot())
time.sleep(1)
if(function.isExsitElement("//select/option[@value='3']")):
logging.info(function.keyfindResult(True, "option", "//select/option[@value='3']", "no"))
driver.get_screenshot_as_file(filename.screenShot())
if (function.isExsitElement("//select/option[@value='2']")):
logging.info(function.keyfindResult(True, "option", "//select/option[@value='2']", "no"))
driver.get_screenshot_as_file(filename.screenShot())
except Exception, e:
logging.error(u"公交线路下载统计 网络问题或者找不到内容:")
logging.error(e)
driver.get_screenshot_as_file(filename.screenShot())
def terminalDownload(self, mac, busNo):
try:
logging.info(u"######车载机下载统计######")
time.sleep(1)
function.cicleClick(driver.find_element_by_xpath("//li/a[@href='/businessStatistics/bus']").click())
logging.info(function.importExcle(True, "no"))
driver.get_screenshot_as_file(filename.screenShot())
time.sleep(1)
driver.get_screenshot_as_file(filename.screenShot())
time.sleep(1)
logging.info(function.keyfindResult(True, "key", "//div/input[@name='macAddress']", mac))
driver.get_screenshot_as_file(filename.screenShot())
time.sleep(1)
logging.info(function.keyfindResult(True, "key", "//div/input[@name='busNo']", busNo))
time.sleep(1)
if(function.isExsitElement("//select/option[@value='0']")):
logging.info(function.keyfindResult(True, "option", "//select/option[@value='0']", "no"))
if(function.isExsitElement("//select/option[@value='2']")):
logging.info(function.keyfindResult(True, "option", "//select/option[@value='2']", "no"))
driver.get_screenshot_as_file(filename.screenShot())
if(function.isExsitElement("//select/option[@value='3']")):
logging.info(function.keyfindResult(True, "option", "//select/option[@value='3']", "no"))
driver.get_screenshot_as_file(filename.screenShot())
if (function.isExsitElement("//select/option[@value='4']")):
logging.info(function.keyfindResult(True, "option", "//select/option[@value='4']", "no"))
driver.get_screenshot_as_file(filename.screenShot())
if (function.isExsitElement("//select/option[@value='5']")):
logging.info(function.keyfindResult(True, "option", "//select/option[@value='5']", "no"))
driver.get_screenshot_as_file(filename.screenShot())
if (function.isExsitElement("/html/body/div[2]/div[2]/div[2]/div[2]/form/div/div[1]/div[5]/select/option[3]")):
logging.info(function.findResult(True))
logging.info(u"查找wifi模式")
driver.get_screenshot_as_file(filename.screenShot())
if (function.isExsitElement("/html/body/div[2]/div[2]/div[2]/div[2]/form/div/div[1]/div[4]/select/option[4]")):
logging.info(function.findResult(True))
logging.info(u"查找更新完成")
driver.get_screenshot_as_file(filename.screenShot())
except Exception, e:
logging.error(u"车载机下载统计 网络问题或者找不到内容:")
logging.error(e)
driver.get_screenshot_as_file(filename.screenShot())
def stationDownload(self, stationName):
try:
logging.info(u"######基站下载统计######")
time.sleep(1)
function.cicleClick(driver.find_element_by_xpath("//li/a[@href='/businessStatistics/station']").click())
driver.get_screenshot_as_file(filename.screenShot())
logging.info(function.keyfindResult(True, "key", "//*[@id='searchText']", stationName))
driver.get_screenshot_as_file(filename.screenShot())
time.sleep(1)
if(function.isExsitElement("//select/option[@value='0']")):
logging.info(function.keyfindResult(True, "option", "//select/option[@value='0']", "no"))
if(function.isExsitElement("//select/option[@value='2']")):
logging.info(function.keyfindResult(True, "option", "//select/option[@value='2']", "no"))
driver.get_screenshot_as_file(filename.screenShot())
if(function.isExsitElement("//select/option[@value='3']")):
logging.info(function.keyfindResult(True, "option", "//select/option[@value='3']", "no"))
driver.get_screenshot_as_file(filename.screenShot())
if (function.isExsitElement("//select/option[@value='4']")):
logging.info(function.keyfindResult(True, "option", "//select/option[@value='4']", "no"))
driver.get_screenshot_as_file(filename.screenShot())
if (function.isExsitElement("//select/option[@value='5']")):
logging.info(function.keyfindResult(True, "option", "//select/option[@value='5']", "no"))
driver.get_screenshot_as_file(filename.screenShot())
if (function.isExsitElement("/html/body/div[2]/div[2]/div[2]/div[2]/div/form/div[3]/div[1]/select/option[4]")):
logging.info(function.findResult(True))
logging.info(u"查找更新完成")
driver.get_screenshot_as_file(filename.screenShot())
logging.info(function.importExcle(True, "no"))
driver.get_screenshot_as_file(filename.screenShot())
time.sleep(1)
except Exception, e:
logging.error(u"基站下载统计 网络问题或者找不到内容:")
logging.error(e)
driver.get_screenshot_as_file(filename.screenShot())
if __name__ == '__main__':
    log = CreatLog("d:\\test_log\\businesscountlog\\") # log save path
    #filename = CreatScreenshot("d:\\screenshot\\businesscountpic\\") # screenshot save path
    #clickNum = 10 # number of consecutive clicks
    #function = CommonFunction(clickNum)
    log.testStart()
    # log in
login = UserLogin("http://103.10.85.15:20010/home", "liumeixia", "123456")
BusinessCount = BusinessCount()
BusinessCount.businessMap()
BusinessCount.busLineDownload()
BusinessCount.terminalDownload("3400A31D785C", "01258")
BusinessCount.stationDownload(u"基站")
log.testEnd() | liumeixia/xiaworkspace | pythonProject/automate/busOnline/BusinessCount.py | Python | gpl-2.0 | 8,999 |
import contextlib
import os
import pytest
from dcos import constants, util
from dcos.errors import DCOSException
def test_open_file():
path = 'nonexistant_file_name.txt'
with pytest.raises(DCOSException) as excinfo:
with util.open_file(path):
pass
assert 'Error opening file [{}]: No such file or directory'.format(path) \
in str(excinfo.value)
@contextlib.contextmanager
def env():
"""Context manager for altering env vars in tests """
try:
old_env = dict(os.environ)
yield
finally:
os.environ.clear()
os.environ.update(old_env)
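# Usage sketch (an assumption, not taken from this test suite): mutate
# os.environ freely inside the block; the original environment is
# restored on exit, even if the body raises.
#
#   with env():
#       os.environ['SOME_VAR'] = 'value'
#       ...  # exercise code that reads SOME_VAR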
def add_cluster_dir(cluster_id, dcos_dir):
clusters_dir = os.path.join(dcos_dir, constants.DCOS_CLUSTERS_SUBDIR)
util.ensure_dir_exists(clusters_dir)
cluster_path = os.path.join(clusters_dir, cluster_id)
util.ensure_dir_exists(cluster_path)
os.path.join(cluster_path, "dcos.toml")
return cluster_path
def create_global_config(dcos_dir):
global_toml = os.path.join(dcos_dir, "dcos.toml")
util.ensure_file_exists(global_toml)
return global_toml
| mesosphere/dcos-cli | tests/test_util.py | Python | apache-2.0 | 1,106 |
from collections import defaultdict
import logging
logger = logging.getLogger(__file__)
items_txt = """
:version 27
# Blocks
# ID NAME FILE CORDS DAMAGE
1 Stone terrain.png 1,0
2 Grass terrain.png 3,0
3 Dirt terrain.png 2,0
4 Cobblestone terrain.png 0,1
5 Wooden_Planks terrain.png 4,0
6 Sapling terrain.png 15,0 0
6 Spruce_Sapling terrain.png 15,3 1
6 Birch_Sapling terrain.png 15,4 2
7 Bedrock terrain.png 1,1
8 Water terrain.png 15,13
9 Still_Water terrain.png 15,13
10 Lava terrain.png 15,15
11 Still_Lava terrain.png 15,15
12 Sand terrain.png 2,1
13 Gravel terrain.png 3,1
14 Gold_Ore terrain.png 0,2
15 Iron_Ore terrain.png 1,2
16 Coal_Ore terrain.png 2,2
17 Wood terrain.png 4,1 0
17 Dark_Wood terrain.png 4,7 1
17 Birch_Wood terrain.png 5,7 2
18 Leaves special.png 5,0 0
18 Dark_Leaves special.png 5,1 1
18 Birch_Leaves special.png 5,2 2
19 Sponge terrain.png 0,3
20 Glass terrain.png 1,3
21 Lapis_Lazuli_Ore terrain.png 0,10
22 Lapis_Lazuli_Block terrain.png 0,9
23 Dispenser terrain.png 14,2
24 Sandstone terrain.png 0,12
25 Note_Block terrain.png 10,4
26 Bed_Block terrain.png 6,8
27 Powered_Rail terrain.png 3,10
28 Detector_Rail terrain.png 3,12
29 Sticky_Piston terrain.png 10,6
30 Cobweb terrain.png 11,0
31 Dead_Bush terrain.png 7,3 0
31 Tall_Grass special.png 5,3 1
31 Fern special.png 4,5 2
32 Dead_Bush terrain.png 7,3
33 Piston terrain.png 11,6
34 Piston_(head) terrain.png 11,6
35 Wool terrain.png 0,4 0
35 Orange_Wool terrain.png 2,13 1
35 Magenta_Wool terrain.png 2,12 2
35 Light_Blue_Wool terrain.png 2,11 3
35 Yellow_Wool terrain.png 2,10 4
35 Lime_Wool terrain.png 2,9 5
35 Pink_Wool terrain.png 2,8 6
35 Gray_Wool terrain.png 2,7 7
35 Light_Gray_Wool terrain.png 1,14 8
35 Cyan_Wool terrain.png 1,13 9
35 Purple_Wool terrain.png 1,12 10
35 Blue_Wool terrain.png 1,11 11
35 Brown_Wool terrain.png 1,10 12
35 Green_Wool terrain.png 1,9 13
35 Red_Wool terrain.png 1,8 14
35 Black_Wool terrain.png 1,7 15
37 Flower terrain.png 13,0
38 Rose terrain.png 12,0
39 Brown_Mushroom terrain.png 13,1
40 Red_Mushroom terrain.png 12,1
41 Block_of_Gold terrain.png 7,1
42 Block_of_Iron terrain.png 6,1
43 Double_Stone_Slab terrain.png 5,0 0
43 Double_Sandstone_Slab terrain.png 0,12 1
43 Double_Wooden_Slab terrain.png 4,0 2
43 Double_Stone_Slab terrain.png 0,1 3
44 Stone_Slab special.png 2,2 0
44 Sandstone_Slab special.png 1,2 1
44 Wooden_Slab special.png 3,0 2
44 Stone_Slab special.png 1,0 3
44 Bricks_Slab special.png 0,0 4
44 Stone_Bricks_Slab special.png 2,0 5
45 Bricks terrain.png 7,0
46 TNT terrain.png 8,0
47 Bookshelf terrain.png 3,2
48 Moss_Stone terrain.png 4,2
49 Obsidian terrain.png 5,2
50 Torch terrain.png 0,5
51 Fire special.png 4,0
52 Monster_Spawner terrain.png 1,4
53 Wooden_Stairs special.png 3,1
54 Chest terrain.png 11,1
55 Redstone_Dust terrain.png 4,5
56 Diamond_Ore terrain.png 2,3
57 Block_of_Diamond terrain.png 8,1
58 Workbench terrain.png 12,3
59 Crops terrain.png 15,5
60 Farmland terrain.png 7,5
61 Furnace terrain.png 12,2
62 Lit_Furnace terrain.png 13,3
63 Sign_Block terrain.png 0,0
64 Wooden_Door_Block terrain.png 1,6
65 Ladder terrain.png 3,5
66 Rail terrain.png 0,8
67 Stone_Stairs special.png 1,1
68 Wall_Sign terrain.png 4,0
69 Lever terrain.png 0,6
70 Stone_Pressure_Plate special.png 2,4
71 Iron_Door_Block terrain.png 2,6
72 Wooden_Pressure_Plate special.png 3,4
73 Redstone_Ore terrain.png 3,3
74 Glowing_Redstone_Ore terrain.png 3,3
75 Redstone_Torch_(off) terrain.png 3,7
76 Redstone_Torch terrain.png 3,6
77 Button special.png 2,3
78 Snow_Layer special.png 1,4
79 Ice terrain.png 3,4
80 Snow terrain.png 2,4
81 Cactus terrain.png 6,4
82 Clay terrain.png 8,4
83 Sugar_cane terrain.png 9,4
84 Jukebox terrain.png 10,4
85 Fence special.png 3,2
86 Pumpkin terrain.png 7,7
87 Netherrack terrain.png 7,6
88 Soul_Sand terrain.png 8,6
89 Glowstone terrain.png 9,6
90 Portal special.png 0,5
91 Jack-o'-lantern terrain.png 8,7
92 Cake special.png 0,2
93 Repeater_Block_(off) terrain.png 3,8
94 Repeater_Block terrain.png 3,9
95 Locked_Chest terrain.png 11,1
96 Trapdoor terrain.png 4,5
97 Silverfish_Block terrain.png 1,0
98 Stone_Bricks terrain.png 6,3 0
98 Mossy_Stone_Bricks terrain.png 4,6 1
98 Damaged_Stone_Bricks terrain.png 5,6 2
99 Brown_Mushroom_Block terrain.png 13,7
100 Red_Mushroom_Block terrain.png 14,7
101 Iron_Bars terrain.png 5,5
102 Glass_Pane special.png 1,3
103 Melon terrain.png 8,8
104 Pumpkin_Stem terrain.png 5,5
105 Melon_Stem terrain.png 5,5
106 Vines special.png 5,4
107 Fence_Gate special.png 3,3
108 Brick_Stairs special.png 0,1
109 Stone_Brick_Stairs special.png 2,1
110 Mycelium terrain.png 13,4
111 Lily_Pad special.png 4,4
112 Nether_Brick terrain.png 0,14
113 Nether_Brick_Fence special.png 4,2
114 Nether_Brick_Stairs special.png 4,1
115 Nether_Wart terrain.png 2,14
116 Enchantment_Table terrain.png 6,11
117 Brewing_Stand terrain.png 13,9
118 Cauldron terrain.png 10,9
119 Air_Portal special.png 1,5
120 Air_Portal_Frame terrain.png 15,9
# Items
# ID NAME FILE CORDS DAMAGE
256 Iron_Shovel items.png 2,5 +250
257 Iron_Pickaxe items.png 2,6 +250
258 Iron_Axe items.png 2,7 +250
259 Flint_and_Steel items.png 5,0 +64
260 Apple items.png 10,0 x1
261 Bow items.png 5,1 x1
262 Arrow items.png 5,2
263 Coal items.png 7,0 0
263 Charcoal items.png 7,0 1
264 Diamond items.png 7,3
265 Iron_Ingot items.png 7,1
266 Gold_Ingot items.png 7,2
267 Iron_Sword items.png 2,4 +250
268 Wooden_Sword items.png 0,4 +59
269 Wooden_Shovel items.png 0,5 +59
270 Wooden_Pickaxe items.png 0,6 +59
271 Wooden_Axe items.png 0,7 +59
272 Stone_Sword items.png 1,4 +131
273 Stone_Shovel items.png 1,5 +131
274 Stone_Pickaxe items.png 1,6 +131
275 Stone_Axe items.png 1,7 +131
276 Diamond_Sword items.png 3,4 +1561
277 Diamond_Shovel items.png 3,5 +1561
278 Diamond_Pickaxe items.png 3,6 +1561
279 Diamond_Axe items.png 3,7 +1561
280 Stick items.png 5,3
281 Bowl items.png 7,4 x1
282 Mushroom_Stew items.png 8,4 x1
283 Golden_sword items.png 4,4 +32
284 Golden_shovel items.png 4,5 +32
285 Golden_pickaxe items.png 4,6 +32
286 Golden_axe items.png 4,7 +32
287 String items.png 8,0
288 Feather items.png 8,1
289 Gunpowder items.png 8,2
290 Wooden_Hoe items.png 0,8 +59
291 Stone_Hoe items.png 1,8 +131
292 Iron_Hoe items.png 2,8 +250
293 Diamond_Hoe items.png 3,8 +1561
294 Golden_hoe items.png 4,8 +32
295 Seeds items.png 9,0
296 Wheat items.png 9,1
297 Bread items.png 9,2 x1
298 Leather_Cap items.png 0,0 +34
299 Leather_Tunic items.png 0,1 +48
300 Leather_Pants items.png 0,2 +46
301 Leather_Boots items.png 0,3 +40
302 Chainmail_Helmet items.png 1,0 +68
303 Chainmail_Chestplate items.png 1,1 +96
304 Chainmail_Leggings items.png 1,2 +92
305 Chainmail_Boots items.png 1,3 +80
306 Iron_Helmet items.png 2,0 +136
307 Iron_Chestplate items.png 2,1 +192
308 Iron_Leggings items.png 2,2 +184
309 Iron_Boots items.png 2,3 +160
310 Diamond_Helmet items.png 3,0 +272
311 Diamond_Chestplate items.png 3,1 +384
312 Diamond_Leggings items.png 3,2 +368
313 Diamond_Boots items.png 3,3 +320
314 Golden_Helmet items.png 4,0 +68
315 Golden_Chestplate items.png 4,1 +96
316 Golden_Leggings items.png 4,2 +92
317 Golden_Boots items.png 4,3 +80
318 Flint items.png 6,0
319 Raw_Porkchop items.png 7,5
320 Cooked_Porkchop items.png 8,5
321 Painting items.png 10,1
322 Golden_Apple items.png 11,0 x1
323 Sign items.png 10,2 x1
324 Wooden_Door items.png 11,2 x1
325 Bucket items.png 10,4 x1
326 Water_Bucket items.png 11,4 x1
327 Lava_Bucket items.png 12,4 x1
328 Minecart items.png 7,8 x1
329 Saddle items.png 8,6 x1
330 Iron_Door items.png 12,2 x1
331 Redstone items.png 8,3
332 Snowball items.png 14,0 x16
333 Boat items.png 8,8 x1
334 Leather items.png 7,6
335 Milk items.png 13,4
336 Brick items.png 6,1
337 Clay items.png 9,3
338 Sugar_Canes items.png 11,1
339 Paper items.png 10,3
340 Book items.png 11,3
341 Slimeball items.png 14,1
342 Minecart_with_Chest items.png 7,9 x1
343 Minecart_with_Furnace items.png 7,10 x1
344 Egg items.png 12,0
345 Compass items.png 6,3 (x1)
346 Fishing_Rod items.png 5,4 +64
347 Clock items.png 6,4 (x1)
348 Glowstone_Dust items.png 9,4
349 Raw_Fish items.png 9,5
350 Cooked_Fish items.png 10,5
351 Ink_Sack items.png 14,4 0
351 Rose_Red items.png 14,5 1
351 Cactus_Green items.png 14,6 2
351 Coco_Beans items.png 14,7 3
351 Lapis_Lazuli items.png 14,8 4
351 Purple_Dye items.png 14,9 5
351 Cyan_Dye items.png 14,10 6
351 Light_Gray_Dye items.png 14,11 7
351 Gray_Dye items.png 15,4 8
351 Pink_Dye items.png 15,5 9
351 Lime_Dye items.png 15,6 10
351 Dandelion_Yellow items.png 15,7 11
351 Light_Blue_Dye items.png 15,8 12
351 Magenta_Dye items.png 15,9 13
351 Orange_Dye items.png 15,10 14
351 Bone_Meal items.png 15,11 15
352 Bone items.png 12,1
353 Sugar items.png 13,0
354 Cake items.png 13,1 x1
355 Bed items.png 13,2 x1
356 Redstone_Repeater items.png 6,5
357 Cookie items.png 12,5
358 Map items.png 12,3 x1
359 Shears items.png 13,5 +238
360 Melon items.png 13,6
361 Pumpkin_Seeds items.png 13,3
362 Melon_Seeds items.png 14,3
363 Raw_Beef items.png 9,6
364 Steak items.png 10,6
365 Raw_Chicken items.png 9,7
366 Cooked_Chicken items.png 10,7
367 Rotten_Flesh items.png 11,5
368 Ender_Pearl items.png 11,6
369 Blaze_Rod items.png 12,6
370 Ghast_Tear items.png 11,7
371 Gold_Nugget items.png 12,7
372 Nether_Wart items.png 13,7
373 Potion items.png 12,8
374 Glass_Bottle items.png 12,8
375 Spider_Eye items.png 11,8
376 Fermented_Spider_Eye items.png 10,8
377 Blaze_Powder items.png 13,9
378 Magma_Cream items.png 13,10
379 Brewing_Stand items.png 12,10 x1
380 Cauldron items.png 12,9 x1
381 Eye_of_Ender items.png 11,9
2256 C418_-_13 items.png 0,15 x1
2257 C418_-_cat items.png 1,15 x1
2258 C418_-_blocks items.png 2,15 x1
2259 C418_-_chirp items.png 3,15 x1
2260 C418_-_far items.png 4,15 x1
2261 C418_-_mall items.png 5,15 x1
2262 C418_-_mellohi items.png 6,15 x1
2263 C418_-_stal items.png 7,15 x1
2264 C418_-_strad items.png 8,15 x1
2265 C418_-_ward items.png 9,15 x1
2266 C418_-_11 items.png 10,15 x1
# Groups
# NAME ICON ITEMS
# Column 1
~ Natural 2 2,3,12,24,44~1,13,82,79,80,78
~ Stone 1 1,4,48,67,44~3,98,109,44~5,44~0,45,108,44~4,101
~ Wood 5 17,5,53,44~2,47,85,107,20,102,30
~ Nether 87 87,88,89,348,112,114,113,372
~ Ores 56 16,15,14,56,73,21,49,42,41,57,22,263~0,265,266,264
~ Special 54 46,52,58,54,61,23,25,84,116,379,380,321,323,324,330,355,65,96
~ Plants1 81 31~1,31~2,106,111,18,81,86,91,103,110
~ Plants2 6 295,361,362,6,296,338,37,38,39,40,32
~ Transport 328 66,27,28,328,342,343,333,329
~ Logic 331 331,76,356,69,70,72,77,33,29
~ Wool 35 35~0,35~8,35~7,35~15,35~14,35~12,35~1,35~4,35~5,35~13,35~11,35~3,35~9,35~10,35~2,35~6
~ Dye 351 351~15,351~7,351~8,351~0,351~1,351~3,351~14,351~11,351~10,351~2,351~4,351~12,351~6,351~5,351~13,351~9
# Column 2
~ TierWood 299 298,299,300,301,269,270,271,290,268
~ TierStone 303 302,303,304,305,273,274,275,291,272
~ TierIron 307 306,307,308,309,256,257,258,292,267
~ TierDiam 311 310,311,312,313,277,278,279,293,276
~ TierGold 315 314,315,316,317,284,285,286,294,283
~ Tools 261 50,261,262,259,346,359,345,347,358,325,326,327,335
~ Food 297 260,322,282,297,360,319,320,363,364,365,366,349,350,354,357
~ Items 318 280,281,318,337,336,353,339,340,332
~ Drops 341 344,288,334,287,352,289,367,375,376,341,368,369,377,370,371,378,381
~ Music 2257 2256,2257,2258,2259,2260,2261,2262,2263,2264,2265,2266
"""
class ItemType(object):
def __init__(self, id, name, imagefile=None, imagecoords=None, maxdamage=0, damagevalue=0, stacksize=64):
self.id = id
self.name = name
self.imagefile = imagefile
self.imagecoords = imagecoords
self.maxdamage = maxdamage
self.damagevalue = damagevalue
self.stacksize = stacksize
def __repr__(self):
return "ItemType({0}, '{1}')".format(self.id, self.name)
def __str__(self):
return "ItemType {0}: {1}".format(self.id, self.name)
class Items(object):
items_txt = items_txt
def __init__(self, filename=None):
if filename is None:
items_txt = self.items_txt
else:
try:
                with open(filename) as f:
                    items_txt = f.read()
            except Exception, e:
                logger.info("Error reading items.txt: %s", e)
                logger.info("Using internal data.")
items_txt = self.items_txt
self.itemtypes = {}
self.itemgroups = []
for line in items_txt.split("\n"):
try:
line = line.strip()
if len(line) == 0: continue
if line[0] == "#": continue;
if line[0] == "~":
fields = line.split()
name, icon, items = fields[1:4]
items = items.split(",")
self.itemgroups.append((name, icon, items))
continue
stacksize = 64
damagevalue = None
maxdamage = 0
fields = line.split()
if len(fields) >= 4:
maxdamage = None
id, name, imagefile, imagecoords = fields[0:4]
if len(fields) > 4:
info = fields[4]
if info[0] == '(':
info = info[1:-1]
if info[0] == 'x':
stacksize = int(info[1:])
elif info[0] == '+':
maxdamage = int(info[1:])
else:
damagevalue = int(info)
id = int(id)
name = name.replace("_", " ")
imagecoords = imagecoords.split(",")
self.itemtypes[(id, damagevalue)] = ItemType(id, name, imagefile, imagecoords, maxdamage, damagevalue, stacksize)
except Exception, e:
print "Error reading line:", e
print "Line: ", line
print
self.names = dict((item.name, item.id) for item in self.itemtypes.itervalues())
def findItem(self, id=0, damage=None):
item = self.itemtypes.get((id, damage))
if item: return item
item = self.itemtypes.get((id, None))
if item: return item
item = self.itemtypes.get((id, 0))
if item: return item
return ItemType(id, "Unknown Item {0}:{1}".format(id, damage), damagevalue=damage)
#raise ItemNotFound, "Item {0}:{1} not found".format(id, damage)
class ItemNotFound(KeyError): pass
items = Items()
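# Quick lookup sketch (values taken from the items_txt table above):
#
#   items.findItem(1)        # -> ItemType 1: Stone
#   items.findItem(35, 14)   # -> ItemType 35: Red Wool
#   items.findItem(9999)     # -> fallback "Unknown Item 9999:None"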
| codewarrior0/pymclevel | items.py | Python | isc | 19,714 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
from twisted.python import util
from twisted.trial import unittest
from buildbot import config
from buildbot.scripts import runner
from buildbot.test.util import dirs
from buildbot.test.util.warnings import assertNotProducesWarnings
from buildbot.warnings import DeprecatedApiWarning
class RealConfigs(dirs.DirsMixin, unittest.TestCase):
def setUp(self):
self.setUpDirs('basedir')
self.basedir = os.path.abspath('basedir')
self.filename = os.path.abspath("test.cfg")
def tearDown(self):
self.tearDownDirs()
def test_sample_config(self):
filename = util.sibpath(runner.__file__, 'sample.cfg')
with assertNotProducesWarnings(DeprecatedApiWarning):
config.FileLoader(self.basedir, filename).loadConfig()
def test_0_9_0b5_api_renamed_config(self):
with open(self.filename, "w", encoding='utf-8') as f:
f.write(sample_0_9_0b5_api_renamed)
with assertNotProducesWarnings(DeprecatedApiWarning):
config.FileLoader(self.basedir, self.filename).loadConfig()
# sample.cfg from various versions, with comments stripped. Adjustments made
# for compatibility are marked with comments
# Template for master configuration just after worker renaming.
sample_0_9_0b5_api_renamed = """\
from buildbot.plugins import *
c = BuildmasterConfig = {}
c['workers'] = [worker.Worker("example-worker", "pass")]
c['protocols'] = {'pb': {'port': 9989}}
c['change_source'] = []
c['change_source'].append(changes.GitPoller(
'https://github.com/buildbot/hello-world.git',
workdir='gitpoller-workdir', branch='master',
pollinterval=300))
c['schedulers'] = []
c['schedulers'].append(schedulers.SingleBranchScheduler(
name="all",
change_filter=util.ChangeFilter(branch='master'),
treeStableTimer=None,
builderNames=["runtests"]))
c['schedulers'].append(schedulers.ForceScheduler(
name="force",
builderNames=["runtests"]))
factory = util.BuildFactory()
factory.addStep(steps.Git(repourl='https://github.com/buildbot/hello-world.git', mode='incremental'))
factory.addStep(steps.ShellCommand(command=["trial", "hello"],
env={"PYTHONPATH": "."}))
c['builders'] = []
c['builders'].append(
util.BuilderConfig(name="runtests",
workernames=["example-worker"],
factory=factory))
c['title'] = "Pyflakes"
c['titleURL'] = "https://launchpad.net/pyflakes"
c['buildbotURL'] = "http://localhost:8010/"
c['www'] = dict(port=8010,
plugins=dict(waterfall_view={}, console_view={}))
c['db'] = {
'db_url' : "sqlite:///state.sqlite",
}
""" # noqa pylint: disable=line-too-long
| pmisik/buildbot | master/buildbot/test/integration/test_configs.py | Python | gpl-2.0 | 3,540 |
"""Utility functions used by projects"""
from __future__ import absolute_import
import fnmatch
import logging
import os
import subprocess
import traceback
import redis
import six
from builtins import object
from django.conf import settings
from django.core.cache import cache
from httplib2 import Http
log = logging.getLogger(__name__)
# TODO make this a classmethod of Version
def version_from_slug(slug, version):
from readthedocs.builds.models import Version, APIVersion
from readthedocs.restapi.client import api
if getattr(settings, 'DONT_HIT_DB', True):
version_data = api.version().get(project=slug, slug=version)['results'][0]
v = APIVersion(**version_data)
else:
v = Version.objects.get(project__slug=slug, slug=version)
return v
def find_file(filename):
"""Recursively find matching file from the current working path
:param file: Filename to match
:returns: A list of matching filenames.
"""
matches = []
for root, __, filenames in os.walk('.'):
for match in fnmatch.filter(filenames, filename):
matches.append(os.path.join(root, match))
return matches
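# Example (a sketch): calling find_file('conf.py') from a checkout root
# might return ['./docs/conf.py'].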
def run(*commands):
"""Run one or more commands
Each argument in `commands` can be passed as a string or as a list. Passing
as a list is the preferred method, as space escaping is more explicit and it
avoids the need for executing anything in a shell.
If more than one command is given, then this is equivalent to
chaining them together with ``&&``; if all commands succeed, then
``(status, out, err)`` will represent the last successful command.
If one command failed, then ``(status, out, err)`` will represent
the failed command.
:returns: ``(status, out, err)``
"""
environment = os.environ.copy()
environment['READTHEDOCS'] = 'True'
if 'DJANGO_SETTINGS_MODULE' in environment:
del environment['DJANGO_SETTINGS_MODULE']
if 'PYTHONPATH' in environment:
del environment['PYTHONPATH']
# Remove PYTHONHOME env variable if set, otherwise pip install of requirements
# into virtualenv will install incorrectly
if 'PYTHONHOME' in environment:
del environment['PYTHONHOME']
cwd = os.getcwd()
if not commands:
raise ValueError("run() requires one or more command-line strings")
for command in commands:
# If command is a string, split it up by spaces to pass into Popen.
# Otherwise treat the command as an iterable.
if isinstance(command, six.string_types):
run_command = command.split()
else:
try:
run_command = list(command)
command = ' '.join(command)
except TypeError:
run_command = command
log.debug('Running command: cwd=%s command=%s', cwd, command)
try:
p = subprocess.Popen(
run_command,
cwd=cwd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=environment
)
out, err = p.communicate()
ret = p.returncode
except OSError:
out = ''
err = traceback.format_exc()
ret = -1
log.exception("Command failed")
        if ret != 0:
            # Stop at the first failing command, matching the ``&&``
            # chaining described in the docstring
            break
    return (ret, out, err)
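# Usage sketch (hypothetical commands, not from this module):
#
#   status, out, err = run(
#       ['git', 'fetch', 'origin'],
#       ['git', 'checkout', 'master'],
#   )
#   if status != 0:
#       log.error('command failed: %s', err)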
def safe_write(filename, contents):
"""Normalize and write to filename
Write ``contents`` to the given ``filename``. If the filename's
directory does not exist, it is created. Contents are written as UTF-8,
ignoring any characters that cannot be encoded as UTF-8.
:param filename: Filename to write to
:param contents: File contents to write to file
"""
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
    with open(filename, 'wb') as fh:
        fh.write(contents.encode('utf-8', 'ignore'))
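# Example (a sketch): intermediate directories are created on demand:
#
#   safe_write('/tmp/rtd-demo/nested/output.txt', u'contents')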
def purge_version(version, mainsite=False, subdomain=False, cname=False):
varnish_servers = getattr(settings, 'VARNISH_SERVERS', None)
h = Http()
if varnish_servers:
for server in varnish_servers:
if subdomain:
# Send a request to the Server, to purge the URL of the Host.
host = "%s.readthedocs.org" % version.project.slug
headers = {'Host': host}
url = "/en/%s/*" % version.slug
to_purge = "http://%s%s" % (server, url)
log.info("Purging %s on %s", url, host)
h.request(to_purge, method="PURGE", headers=headers)
if mainsite:
headers = {'Host': "readthedocs.org"}
url = "/docs/%s/en/%s/*" % (version.project.slug, version.slug)
to_purge = "http://%s%s" % (server, url)
log.info("Purging %s on readthedocs.org", url)
h.request(to_purge, method="PURGE", headers=headers)
root_url = "/docs/%s/" % version.project.slug
to_purge = "http://%s%s" % (server, root_url)
log.info("Purging %s on readthedocs.org", root_url)
h.request(to_purge, method="PURGE", headers=headers)
if cname:
try:
redis_client = cache.get_client(None)
for cnamed in redis_client.smembers('rtd_slug:v1:%s'
% version.project.slug):
headers = {'Host': cnamed}
url = "/en/%s/*" % version.slug
to_purge = "http://%s%s" % (server, url)
log.info("Purging %s on %s", url, cnamed)
h.request(to_purge, method="PURGE", headers=headers)
root_url = "/"
to_purge = "http://%s%s" % (server, root_url)
log.info("Purging %s on %s", root_url, cnamed)
h.request(to_purge, method="PURGE", headers=headers)
except (AttributeError, redis.exceptions.ConnectionError):
pass
class DictObj(object):
def __getattr__(self, attr):
return self.__dict__.get(attr)
| pombredanne/readthedocs.org | readthedocs/projects/utils.py | Python | mit | 6,236 |
# Copyright 2011 Omar Shammas
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#Versions:
# 0.1 -- written by Omar Shammas (email : omar DOT shammas [a t ] g m ail DOT com)
# Initial version works with 0.7 version of the BBB API
import urllib.request, urllib.parse, urllib.error, socket
import hashlib, random
from xml.dom import minidom
from xml.dom.minidom import Node
def bbb_wrap_load_file(url):
timeout = 10
socket.setdefaulttimeout(timeout)
try:
req = urllib.request.urlopen(url)
return minidom.parse(req)
    except Exception:
return False
def assign2Dict(xml):
try:
mapping = {}
response = xml.firstChild
for child in response.childNodes:
if( child.hasChildNodes() ):
mapping[child.tagName] = child.firstChild.nodeValue
else:
mapping[child.tagName] = None
return mapping
    except Exception:
return False
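# Round-trip sketch (server URL and salt are placeholder values, and
# isMeetingRunningURL is defined further below):
#
#   xml = bbb_wrap_load_file(isMeetingRunningURL('demo-1', URL, SALT))
#   if xml:
#       response = assign2Dict(xml)   # e.g. response.get('running')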
#------------------------------------------------GET URLs-------------------------------------------------
#
#This method returns the url to join the specified meeting.
#
#@param meetingID -- the unique meeting identifier used to store the meeting in the bigbluebutton server
#@param username -- the display name to be used when the user joins the meeting
#@param PW -- the attendee or moderator password of the meeting
#@param SALT -- the security salt of the bigbluebutton server
#@param URL -- the url of the bigbluebutton server
#
#@return The url to join the meeting
def joinMeetingURL(meetingID, username, PW, URL, SALT):
url_join = URL + "api/join?"
parameters = {'meetingID' : meetingID,
'fullName' : username,
'password' : PW,
}
parameters = urllib.parse.urlencode(parameters)
return url_join + parameters + '&checksum=' + hashlib.sha1(("join" + parameters + SALT).encode('utf-8')).hexdigest()
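# Note how the checksum is formed: SHA-1 over the API call name, the encoded
# query string, and the shared salt. For hypothetical inputs this yields, e.g.:
#   http://bbb.example.com/bigbluebutton/api/join?meetingID=demo&fullName=Alice&password=ap&checksum=<sha1 of "join" + query + salt>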
#
#This method returns the url to create the specified meeting.
#
#@param name -- a name for the meeting
#@param meetingID -- the unique meeting identifier used to store the meeting in the bigbluebutton server
#@param attendeePW -- the attendee password of the meeting
#@param moderatorPW -- the moderator password of the meeting
#@param welcome -- the welcome message that gets displayed on the chat window
#@param logoutURL -- the URL that the bbb client will go to after users log out
#@param SALT -- the security salt of the bigbluebutton server
#@param URL -- the url of the bigbluebutton server
#
#@return The url to create the meeting
def createMeetingURL(name, meetingID, attendeePW, moderatorPW, welcome, logoutURL, URL, SALT):
url_create = URL + "api/create?"
    voiceBridge = 70000 + random.randint(0, 9999)
parameters = {'name': name,
'meetingID' : meetingID,
'attendeePW' : attendeePW,
'moderatorPW' : moderatorPW,
'voiceBridge' : voiceBridge,
'logoutURL' : logoutURL,
}
#if (welcome and welcome != ''):
# parameters.update({'welcome': welcome.strip()})
parameters = urllib.parse.urlencode(parameters)
return url_create + parameters + '&checksum=' + hashlib.sha1(("create" + parameters + SALT).encode('utf-8')).hexdigest()
#
#This method returns the url to check if the specified meeting is running.
#
#@param meetingID -- the unique meeting identifier used to store the meeting in the bigbluebutton server
#@param SALT -- the security salt of the bigbluebutton server
#@param URL -- the url of the bigbluebutton server
#
#@return The url to check if the specified meeting is running.
#
def isMeetingRunningURL( meetingID, URL, SALT ):
base_url = URL + "api/isMeetingRunning?"
parameters = {'meetingID' : meetingID,}
parameters = urllib.parse.urlencode(parameters)
return base_url + parameters + '&checksum=' + hashlib.sha1(("isMeetingRunning" + parameters + SALT).encode('utf-8')).hexdigest()
#
#This method returns the url to getMeetingInfo of the specified meeting.
#
#@param meetingID -- the unique meeting identifier used to store the meeting in the bigbluebutton server
#@param modPW -- the moderator password of the meeting
#@param SALT -- the security salt of the bigbluebutton server
#@param URL -- the url of the bigbluebutton server
#
#@return The url to get the meeting information of the specified meeting.
#
def getMeetingInfoURL( meetingID, modPW, URL, SALT ):
base_url = URL + "api/getMeetingInfo?"
parameters = {'meetingID' : meetingID,
'password' : modPW,
}
parameters = urllib.parse.urlencode(parameters)
return base_url + parameters + '&checksum=' + hashlib.sha1(("getMeetingInfo" + parameters + SALT).encode('utf-8')).hexdigest()
#
#This method returns the url for listing all meetings in the bigbluebutton server.
#
#@param SALT -- the security salt of the bigbluebutton server
#@param URL -- the url of the bigbluebutton server
#
#@return The url of getMeetings.
#
def getMeetingsURL(URL, SALT):
base_url = URL + "api/getMeetings?"
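    # A throwaway random parameter is included, presumably to defeat any
    # intermediate caching of the getMeetings response.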
parameters = {'random' : (random.random() * 1000 ),}
parameters = urllib.parse.urlencode(parameters)
return base_url + parameters + '&checksum=' + hashlib.sha1(("getMeetings" + parameters + SALT).encode('utf-8')).hexdigest()
#
#This method returns the url to end the specified meeting.
#
#@param meetingID -- the unique meeting identifier used to store the meeting in the bigbluebutton server
#@param modPW -- the moderator password of the meeting
#@param SALT -- the security salt of the bigbluebutton server
#@param URL -- the url of the bigbluebutton server
#
#@return The url to end the specified meeting.
#
def endMeetingURL( meetingID, modPW, URL, SALT ):
base_url = URL + "api/end?"
parameters = {'meetingID' : meetingID,
'password' : modPW,
}
parameters = urllib.parse.urlencode(parameters)
return base_url + parameters + '&checksum=' + hashlib.sha1(("end" + parameters + SALT).encode('utf-8')).hexdigest()
#-----------------------------------------------CREATE----------------------------------------------------
#
#This method creates a meeting and return an array of the xml packet
#
#@param name -- a name for the meeting
#@param meeting_id -- the unique meeting identifier used to store the meeting in the bigbluebutton server
#@param welcome_message -- the welcome message to be displayed when a user logs in to the meeting
#@param moderator_pw -- the moderator password of the meeting
#@param attendee_pw -- the attendee password of the meeting
#@param logout_url -- the url the user should be redirected to when they logout of bigbluebutton
#@param url -- the url of the bigbluebutton server
#@param secret -- the shared secret (salt) of the bigbluebutton server
#
#@return
# - None if unable to reach the bigbluebutton server
# - False if an error occurs while parsing
# - Dictionary containing the values of the xml packet
#
def createMeeting(name, meeting_id, welcome_message, moderator_pw, attendee_pw, logout_url, url, secret):
create_url = createMeetingURL(name, meeting_id, attendee_pw, moderator_pw, welcome_message, logout_url, url, secret)
xml = bbb_wrap_load_file( create_url )
if(xml):
return assign2Dict(xml)
#if unable to reach the server
return None
#-------------------------------------------getMeetingInfo---------------------------------------------------
#
#This method calls the getMeetingInfo on the bigbluebutton server and returns an array.
#
#@param meetingID -- the unique meeting identifier used to store the meeting in the bigbluebutton server
#@param modPW -- the moderator password of the meeting
#@param SALT -- the security salt of the bigbluebutton server
#@param URL -- the url of the bigbluebutton server
#
#@return
# - None if unable to reach the bigbluebutton server
# - Dictionary containing the values of the xml packet
# - If the returncode == 'FAILED' it returns a dictionary containing a returncode, messagekey, and message.
# - If the returncode == 'SUCCESS' it returns a dictionary containing a meetingID, moderatorPW, attendeePW,
# hasBeenForciblyEnded, running, startTime, endTime, participantCount, moderatorCount, and attendees.
def getMeetingInfo( meetingID, modPW, URL, SALT ):
getMeetingInfo_url = getMeetingInfoURL(meetingID, modPW, URL, SALT )
xml = bbb_wrap_load_file( getMeetingInfo_url )
if(xml):
mapping = {}
response = xml.firstChild
for child in response.childNodes:
if( child.hasChildNodes() ):
#Makes a dictionary for attendees inside mapping
if(child.tagName == "attendees"):
attendees = {}
#Makes a dictionary for attendee inside attendees
for atnds in child.childNodes:
attendee = {}
#Adds the elements to the attendee dictionary
for atnd in atnds.childNodes:
if( atnd.hasChildNodes() ):
attendee[atnd.tagName] = atnd.firstChild.nodeValue
else:
attendee[atnd.tagName] = None
#Updates the attendees dictionary with the attendee we just parsed
attendees[ attendee["userID"] ] = attendee
#Once completed parsing the attendees we add that dictionary to mapping
mapping[child.tagName] = attendees
else:
mapping[child.tagName] = child.firstChild.nodeValue
else:
mapping[child.tagName] = None
return mapping
#if unable to reach the server
return None
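# A successful mapping looks roughly like (hypothetical values):
#   {'returncode': 'SUCCESS', 'meetingID': 'demo', 'running': 'true', ...,
#    'attendees': {'u1': {'userID': 'u1', 'fullName': 'Alice', 'role': 'MODERATOR', ...}}}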
#-----------------------------------------------getMeetings------------------------------------------------------
#
#This method calls getMeetings on the bigbluebutton server, then calls getMeetingInfo for each meeting and concatenates the result.
#
#@param URL -- the url of the bigbluebutton server
#@param SALT -- the security salt of the bigbluebutton server
#
#@return
# - None if unable to reach the bigbluebutton server
# - Dictionary containing the values of the xml packet
# - If the returncode == 'FAILED' it returns a dictionary containing a returncode, messagekey, and message.
# - If the returncode == 'SUCCESS' it returns a dictionary of all the meetings. Each meeting is itself a
#   dictionary containing a meetingID, moderatorPW, attendeePW, hasBeenForciblyEnded, running, startTime,
#   endTime, participantCount, moderatorCount, and attendees.
#
def getMeetings( URL, SALT ):
getMeetings_url = getMeetingsURL( URL, SALT )
xml = bbb_wrap_load_file( getMeetings_url )
if(xml):
mapping = {}
response = xml.firstChild
for child in response.childNodes:
if( child.hasChildNodes() ):
#Makes a dictionary for meetings inside mapping
if(child.tagName == "meetings"):
meetings = {}
#Makes a dictionary for meeting inside meetings
for mtgs in child.childNodes:
meeting = {}
#Adds the elements to the meeting dictionary
for mtg in mtgs.childNodes:
if( mtg.hasChildNodes() ):
meeting[mtg.tagName] = mtg.firstChild.nodeValue
else:
meeting[mtg.tagName] = None
#Updates the meetings dictionary with the meeting we just parsed
meetings[ meeting["meetingID"] ] = meeting
#Once completed parsing the meetings we add that dictionary to mapping
mapping[child.tagName] = meetings
else:
mapping[child.tagName] = child.firstChild.nodeValue
else:
mapping[child.tagName] = None
return mapping
#if unable to reach the server
return None
#------------------------------------------------End Meeting------------------------------------
#
#This method calls end meeting on the specified meeting in the bigbluebutton server.
#
#@param meetingID -- the unique meeting identifier used to store the meeting in the bigbluebutton server
#@param modPW -- the moderator password of the meeting
#@param SALT -- the security salt of the bigbluebutton server
#@param URL -- the url of the bigbluebutton server
#
#@return
# - None if the server is unreachable
# - A dictionary containing a returncode, messageKey, message.
#
def endMeeting( meetingID, modPW, URL, SALT ):
endMeeting_url = endMeetingURL( meetingID, modPW, URL, SALT )
xml = bbb_wrap_load_file( endMeeting_url )
if(xml):
return assign2Dict(xml)
#if unable to reach the server
return None
#------------------------------------------------isMeetingRunning------------------------------------
#
#This method checks the BigBlueButton server to see if the meeting is running (i.e. there is someone in the meeting)
#
#@param meetingID -- the unique meeting identifier used to store the meeting in the bigbluebutton server
#@param SALT -- the security salt of the bigbluebutton server
#@param URL -- the url of the bigbluebutton server
#
#@return A dictionary containing the parsed response; its 'running' field is the string 'true' if the meeting is running and 'false' otherwise. Returns None if the server is unreachable.
#
def isMeetingRunning( meetingID, URL, SALT ):
isMeetingRunning_url = isMeetingRunningURL( meetingID, URL, SALT )
xml = bbb_wrap_load_file( isMeetingRunning_url )
if(xml):
return assign2Dict(xml)
#if unable to reach the server
return None | ABlogiX/django-bigbluebutton | bbb_api.py | Python | gpl-2.0 | 14,816 |
# Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Simple test script
#
# "m5 test.py"
import os
import optparse
import sys
import m5
from m5.defines import buildEnv
from m5.objects import *
from m5.util import addToPath, fatal
addToPath('/usr/local/lib/gem5/configs/common')
addToPath('/usr/local/lib/gem5/configs/topologies')
import Options
import Simulation
import CacheConfig
from Caches import *
from cpu2000 import *
### Create the Options Parser
parser = optparse.OptionParser()
Options.addCommonOptions(parser)
Options.addSEOptions(parser)
### Parse for command line options
(options, args) = parser.parse_args()
### Override some options values for the desired configuration
options.caches = 1
options.cpu_type = "detailed"
options.l1d_size = "16kB"
options.l1d_assoc = 1
options.cacheline_size = 8
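### These overrides correspond to (what appear to be) the usual se.py flags:
### --caches --cpu-type=detailed --l1d_size=16kB --l1d_assoc=1 --cacheline_size=8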
if args:
print "Error: script doesn't take any positional arguments"
sys.exit(1)
### Setup the workload to execute on the CPUs
multiprocesses = []
apps = []
if options.cmd:
process = LiveProcess()
process.executable = options.cmd
process.cmd = [options.cmd] + options.options.split()
multiprocesses.append(process)
else:
print >> sys.stderr, "No workload specified. Exiting!\n"
    sys.exit(1)
### Optionally pipe output to a file
if options.input != "":
process.input = options.input
if options.output != "":
process.output = options.output
if options.errout != "":
process.errout = options.errout
# By default, set workload to path of user-specified binary
workloads = options.cmd
numThreads = 1
if options.cpu_type == "detailed" or options.cpu_type == "inorder":
#check for Simultaneous Multithreaded workload
workloads = options.cmd.split(';')
if len(workloads) > 1:
process = []
smt_idx = 0
inputs = []
outputs = []
errouts = []
if options.input != "":
inputs = options.input.split(';')
if options.output != "":
outputs = options.output.split(';')
if options.errout != "":
errouts = options.errout.split(';')
for wrkld in workloads:
smt_process = LiveProcess()
smt_process.executable = wrkld
smt_process.cmd = wrkld + " " + options.options
if inputs and inputs[smt_idx]:
smt_process.input = inputs[smt_idx]
if outputs and outputs[smt_idx]:
smt_process.output = outputs[smt_idx]
if errouts and errouts[smt_idx]:
smt_process.errout = errouts[smt_idx]
process += [smt_process, ]
smt_idx += 1
numThreads = len(workloads)
### Using the provided options, setup the CPU and cache configuration
(CPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(options)
CPUClass.clock = options.clock
CPUClass.numThreads = numThreads
### Select the CPU count
np = options.num_cpus
### Assemble the system
system = System(cpu = [CPUClass(cpu_id=i) for i in xrange(np)],
physmem = SimpleMemory(range=AddrRange("512MB")),
membus = CoherentBus(), mem_mode = test_mem_mode)
### Sanity checks
if options.fastmem and (options.caches or options.l2cache):
fatal("You cannot use fastmem in combination with caches!")
for i in xrange(np):
if len(multiprocesses) == 1:
system.cpu[i].workload = multiprocesses[0]
else:
system.cpu[i].workload = multiprocesses[i]
if options.fastmem:
system.cpu[i].fastmem = True
if options.checker:
system.cpu[i].addCheckerCpu()
### Connect up system memory
system.system_port = system.membus.slave
system.physmem.port = system.membus.master
CacheConfig.config_cache(options, system)
### Create the root, instantiate the system, and run the simulation
root = Root(full_system = False, system = system)
Simulation.run(options, root, system, FutureClass)
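### A typical invocation of this script under gem5 (hypothetical paths):
###   gem5.opt machine3.py -c ./my_benchmark --options="arg1 arg2"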
| UliceNix/CMPUT429Assignment3 | machine3.py | Python | gpl-2.0 | 6,047 |