| text | repo_name | path | language | license | size | score |
| stringlengths 6-947k | stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34 |
---|---|---|---|---|---|---|
#!/usr/bin/env python
# Copyright 2014 RPiPasswordGenerator
# file: auth.py
# This file just needs to be run.
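# Outline of the PIN-based (out-of-band) OAuth flow performed below:
#   1. Build an OAuthHandler from the application's consumer key/secret.
#   2. Visit the authorization URL it generates and sign in to Twitter.
#   3. Enter the PIN that Twitter displays; it is exchanged for the access
#      token/secret pair that run.py expects.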
import tweepy
import sys
CONSUMER_KEY = 'aLrazWkhGaRyLe30HWZcCJrnN'
CONSUMER_SECRET = 'jNSbrJ9TkOobJTbzL4bfd7CWg5x0kv6KMLCZKO5FRAMdIaFvmK'
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.secure = True
auth_url = auth.get_authorization_url()
print 'Please visit this URL to get your access key: ' + auth_url
verifier = raw_input('PIN: ').strip()
auth.get_access_token(verifier)
print "\nPlease put these codes where asked, in run.py\n"
print "ACCESS_KEY = '%s'" % auth.access_token.key
print "ACCESS_SECRET = '%s'" % auth.access_token.secret
| RPiPasswordGenerator/Twitter-Password-Generator-for-Python | auth.py | Python | apache-2.0 | 672 | 0 |
#!/usr/bin/python
'''
Oscilloscope for the vLabtool - version 0. \n
Also includes an XY plotting mode, and fitting against standard sine/square functions.\n
'''
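# A minimal sketch of launching this window on its own (assumes a connected
# device and that the v0 package exposes an Interface() factory -- both are
# assumptions; normally this app is started from the vLabtool launcher,
# which supplies the instrument object I):
#   from v0 import interface
#   app = QtGui.QApplication(sys.argv)
#   win = AppWindow(I=interface.Interface())
#   win.show()
#   sys.exit(app.exec_())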
import os
os.environ['QT_API'] = 'pyqt'
import sip
sip.setapi("QString", 2)
sip.setapi("QVariant", 2)
from PyQt4 import QtCore, QtGui
import time, sys, string
from v0.templates import analogScope
from v0.analyticsClass import analyticsClass
import pyqtgraph as pg
import pyqtgraph.opengl as gl
import numpy as np
import scipy.optimize as optimize
import scipy.fftpack as fftpack
err_count=0
trial = 0
start_time = time.time()
fps = None
dacval=0
from v0.commands_proto import *
params = {
'image' : 'scope.png',
'name':'Oscilloscope'
}
class AppWindow(QtGui.QMainWindow, analogScope.Ui_MainWindow):
def __init__(self, parent=None,**kwargs):
super(AppWindow, self).__init__(parent)
self.setupUi(self)
self.I=kwargs.get('I',None)
self.math = analyticsClass()
self.setWindowTitle(self.I.generic_name + ' : ' +self.I.H.version_string)
self.plot=pg.PlotWidget()
#cross hair
self.vLine = pg.InfiniteLine(angle=90, movable=True)
#self.vLine.setPen(color=(135,44,64,150), width=3)
self.plot.addItem(self.vLine, ignoreBounds=False)
self.proxy = pg.SignalProxy(self.vLine.scene().sigMouseMoved, rateLimit=60, slot=self.readCursor)
self.fps=0
self.max_samples_per_channel=[0,self.I.MAX_SAMPLES/4,self.I.MAX_SAMPLES/4,self.I.MAX_SAMPLES/4,self.I.MAX_SAMPLES/4]
self.liss_win=None
self.liss_ready=False
self.liss_animate_arrow1=None
self.liss_animate_arrow2=None
self.liss_animate_arrow3=None
self.liss_anim1=None
self.liss_anim2=None
self.liss_anim3=None
self.samples=self.I.MAX_SAMPLES/4#self.sample_slider.value()
self.active_channels=1
self.channel_states=np.array([1,0,0,0])
self.channels_in_buffer=1
self.chan1remap='CH1'
self.ch123sa = 0
g=1.75
self.timebase = g
self.lastTime=time.time()
self.trace_colors=[(0,255,20),(255,255,0),(255,10,100),(10,255,255)]
self.plot.setLabel('bottom', 'Time -->>', units='S')
labelStyle = {'color': 'rgb%s'%(str(self.trace_colors[0])), 'font-size': '11pt'}
self.plot.setLabel('left','CH1', units='V',**labelStyle)
self.plot.addLegend(offset=(-10,30))
self.plot2 = pg.ViewBox()
self.ax2 = pg.AxisItem('right')
self.plot.plotItem.layout.addItem(self.ax2, 2, 3)
self.plot.plotItem.scene().addItem(self.plot2)
self.ax2.linkToView(self.plot2)
self.plot2.setXLink(self.plot.plotItem)
self.ax2.setZValue(-10000)
labelStyle = {'color': 'rgb%s'%(str(self.trace_colors[1])), 'font-size': '13pt'}
self.ax2.setLabel('CH2', units='V', **labelStyle)
self.plot2.setGeometry(self.plot.plotItem.vb.sceneBoundingRect())
self.plot2.linkedViewChanged(self.plot.plotItem.vb, self.plot2.XAxis)
## Handle view resizing
self.plot.getViewBox().sigStateChanged.connect(self.updateViews)
self.curve1 = self.plot.plot(name='CH1'); self.curve1.setPen(color=self.trace_colors[0], width=1)
self.curve2 = self.plot.plot(name='CH2'); self.curve2.setPen(color=self.trace_colors[1], width=1)
self.curve3 = self.plot.plot(name='CH3'); self.curve3.setPen(color=self.trace_colors[2], width=1)
self.curve4 = self.plot.plot(name='CH4'); self.curve4.setPen(color=self.trace_colors[3], width=1)
self.curve_lis = self.plot.plot(); self.curve_lis.setPen(color=(255,255,255), width=1)
self.curveF=[]
for a in range(2):
self.curveF.append( self.plot.plot() ); self.curveF[-1].setPen(color=(255,255,255), width=1)
self.curveB = pg.PlotDataItem(name='CH2')
self.plot2.addItem(self.curveB)
self.curveB.setPen(color=self.trace_colors[1], width=1)
self.curveFR = pg.PlotDataItem()
self.plot2.addItem(self.curveFR); self.curveFR.setPen(color=(255,255,255), width=1)
self.CH1_ENABLE.setStyleSheet('background-color:rgba'+str(self.trace_colors[0])[:-1]+',3);color:(0,0,0);')
self.CH2_ENABLE.setStyleSheet('background-color:rgba'+str(self.trace_colors[1])[:-1]+',3);color:(0,0,0);')
for a in range(4):
self.trigger_select_box.setItemData(a, QtGui.QColor(*self.trace_colors[a]), QtCore.Qt.BackgroundRole);
self.triggerChannelName='CH1'
self.arrow = pg.ArrowItem(pos=(0, 0), angle=0)
self.plot.addItem(self.arrow)
#markings every 5 Volts
self.voltsperdiv = ['5V/div','3V/div','2V/div','1V/div','500mV/div','400mV/div','300mV/div','100mV/div']
self.trigger_channel=0
self.trigger_level = 0
self.trigtext = pg.TextItem(html=self.trigger_text('CH1'), anchor=(1.2,0.5), border='w', fill=(0, 0, 255, 100),angle=0)
self.plot.addItem(self.trigtext)
self.plot.showGrid(True,False,0.4)
self.scope_type=0
self.plot_area.addWidget(self.plot)
self.CH1_REMAPS.addItems(self.I.allAnalogChannels)
self.showgrid()
self.trigtext.setParentItem(self.arrow)
self.I.configure_trigger(self.trigger_channel,self.triggerChannelName,0)
self.autoRange()
self.timer = QtCore.QTimer()
self.finished=False
self.timer.singleShot(500,self.start_capture)
def updateViews(self):
self.plot2.setGeometry(self.plot.getViewBox().sceneBoundingRect())
self.plot2.linkedViewChanged(self.plot.plotItem.vb, self.plot2.XAxis)
def trigger_text(self,c):
return '<div style="text-align: center"><span style="color: #FFF; font-size: 8pt;">'+c+'</span></div>'
def showgrid(self):
return
def start_capture(self):
if self.finished:
return
if(self.freezeButton.isChecked()):
self.timer.singleShot(200,self.start_capture)
return
temperature=self.I.get_temperature()
self.plot.setTitle('%0.2f fps, %0.1f ^C' % (self.fps,temperature ) )
self.channels_in_buffer=self.active_channels
a = self.CH1_ENABLE.isChecked()
b = self.CH2_ENABLE.isChecked()
c = self.FOURCHAN_ENABLE.isChecked()
if c:
self.active_channels=4
elif b:
self.active_channels=2
elif a:
self.active_channels=1
else:
self.active_channels=0
self.channels_in_buffer=self.active_channels
self.channel_states[0]=a
self.channel_states[1]=b
self.channel_states[2]=c
self.channel_states[3]=c
if self.active_channels:
self.I.configure_trigger(self.trigger_channel,self.triggerChannelName,self.trigger_level,resolution=10)
self.I.capture_traces(self.active_channels,self.samples,self.timebase,self.chan1remap,self.ch123sa)
self.timer.singleShot(self.samples*self.I.timebase*1e-3+10,self.update)
def update(self):
n=0
while(not self.I.oscilloscope_progress()[0]):
time.sleep(0.1)
print self.timebase,'correction required',n
n+=1
if n>10:
self.timer.singleShot(100,self.start_capture)
return
if(self.channels_in_buffer>=1):self.I.__fetch_channel__(1)
if(self.channels_in_buffer>=2):self.I.__fetch_channel__(2)
if(self.channels_in_buffer>=3):self.I.__fetch_channel__(3)
if(self.channels_in_buffer>=4):self.I.__fetch_channel__(4)
self.curve1.clear()
self.curve2.clear()
self.curve3.clear()
self.curve4.clear()
self.curveB.clear()
self.curveF[0].clear()
self.curveF[1].clear()
self.curveFR.clear()
msg='';pos=0
for fitsel in [self.fit_select_box,self.fit_select_box_2]:
if fitsel.currentIndex()<4:
if len(msg)>0:
msg+='\n'
if self.channel_states[fitsel.currentIndex()]:
if fitsel.currentText()=='CH2':
msg+='FIT '+chr(pos+65)+': '+self.fitData(self.I.achans[fitsel.currentIndex()].get_xaxis(),\
self.I.achans[fitsel.currentIndex()].get_yaxis(),self.curveFR)
else:
msg+='FIT '+chr(pos+65)+': '+self.fitData(self.I.achans[fitsel.currentIndex()].get_xaxis(),\
self.I.achans[fitsel.currentIndex()].get_yaxis(),self.curveF[pos])
else:
msg+='FIT '+chr(pos+65)+': Channel Unavailable'
pos+=1
if len(msg):
self.message_label.setText(msg)
pos=0
if self.Liss_show.isChecked():
chans = ['CH1','CH2','CH3','CH4']
lissx = self.Liss_x.currentText()
lissy = self.Liss_y.currentText()
self.liss_x = chans.index(lissx)
self.liss_y = chans.index(lissy)
la=self.I.achans[self.liss_x].get_yaxis()
lb=self.I.achans[self.liss_y].get_yaxis()
if(self.liss_x<self.active_channels and self.liss_y<self.active_channels and len(la)==len(lb)):
self.curve_lis.setData(self.I.achans[self.liss_x].get_yaxis(),self.I.achans[self.liss_y].get_yaxis())
self.liss_ready=True
else:
self.curve_lis.clear()
self.liss_ready=False
self.message_label.setText('Channels for XY display not selected')
#print self.fps,'not available',self.active_channels,self.liss_x,self.liss_y
else:
self.curve_lis.clear()
for a in [self.curve1,self.curveB,self.curve3,self.curve4]:
if self.channel_states[pos]: a.setData(self.I.achans[pos].get_xaxis()*1e-6,self.I.achans[pos].get_yaxis(),connect='finite')
pos+=1
self.readCursor()
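        # Smooth the frame-rate estimate with an exponential moving average:
        # the weight s grows with the frame interval dt, so slower frames pull
        # the estimate toward the most recent 1/dt measurement more strongly.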
now = time.time()
dt = now - self.lastTime
self.lastTime = now
if self.fps is None:
self.fps = 1.0/dt
else:
s = np.clip(dt*3., 0, 1)
self.fps = self.fps * (1-s) + (1.0/dt) * s
self.timer.singleShot(100,self.start_capture)
def readCursor(self):
pos=self.vLine.getPos()
index = int(pos[0]*1e6)/self.I.timebase
if index > 0 and index < self.I.samples:
coords="<span style='color: white'>%0.1f uS</span>: "%(self.I.achans[0].xaxis[index])
for a in range(4):
if self.channel_states[a]:
c=self.trace_colors[a]
coords+="<span style='color: rgb%s'>%0.3fV</span>," %(c, self.I.achans[a].yaxis[index])
self.coord_label.setText(coords)
else:
self.coord_label.setText("")
def fitData(self,xReal,yReal,curve):
if self.fit_type_box.currentIndex()==0: #sine wave
fitres = self.math.sineFit(xReal,yReal)
if fitres:
amp=fitres[0]
freq=fitres[1]
offset=fitres[2]
ph=fitres[3]
frequency = freq/1e6
period = 1./freq/1e6
if(self.collapseButton.isChecked()):
self.collapseButton.setChecked(False)
self.collapse_win = pg.GraphicsWindow(title="Collapsing plot")
xNew=[]
yNew=[]
for a in range(len(xReal)):
x=(xReal[a]%(period*2))*1e-6
xNew.append(x)
yNew.append(yReal[a])
xNew=np.array(xNew)
yNew=np.array(yNew)
s=np.argsort(xNew)
self.p1 = self.collapse_win.addPlot(title="Collapsing plot: %.1f waveforms collapsed on top of each other"%(xReal[-1]/period), x=xNew[s],y=yNew[s])
if(self.collapse_win.windowState() & QtCore.Qt.WindowActive):
print 'opened'
#------------------------------------------------------
if(self.overlay_fit_button.isChecked()):
x=np.linspace(0,xReal[-1],50000)
curve.setData(x*1e-6,self.math.sineFunc(x,amp,frequency,ph*np.pi/180,offset))
return 'Amp = %0.3fV \tFreq=%0.2fHz \tOffset=%0.3fV \tPhase=%0.1f%c'%(amp, freq, offset,ph,176)
else:
return 'fit failed'
elif self.fit_type_box.currentIndex()==1: #square
fitres = self.math.squareFit(xReal,yReal)
if fitres:
amp=fitres[0]
freq=fitres[1]
phase=fitres[2]
dc=fitres[3]
offset=fitres[4]
frequency = freq/1e6
period = 1./freq/1e6
if(self.collapseButton.isChecked()):
self.collapseButton.setChecked(False)
self.collapse_win = pg.GraphicsWindow(title="Collapsing plot")
xNew=[]
yNew=[]
for a in range(len(xReal)):
x=(xReal[a]%(period*2))*1e-6
xNew.append(x)
yNew.append(yReal[a])
xNew=np.array(xNew)
yNew=np.array(yNew)
s=np.argsort(xNew)
self.p1 = self.collapse_win.addPlot(title="Collapsing plot: %.1f waveforms collapsed on top of each other"%(xReal[-1]/period), x=xNew[s],y=yNew[s])
if(self.collapse_win.windowState() & QtCore.Qt.WindowActive):
print 'opened'
#------------------------------------------------------
if(self.overlay_fit_button.isChecked()):
x=np.linspace(0,xReal[-1],50000)
curve.setData(x*1e-6,self.math.squareFunc(x,amp,frequency,phase,dc,offset))
return 'Amp = %0.3fV \tFreq=%0.2fHz \tDC=%0.3fV \tOffset=%0.3fV'%(amp, freq,dc,offset)
else:
return 'fit failed'
else:
return 'fit failed'
def setOffsetAndGainLabels(self):
self.CH1_LABEL.setText('CH1: ')
self.CH2_LABEL.setText('CH2: ')
def setGainCH1(self,g):
self.I.set_gain(self.chan1remap,g)
self.CH1_LABEL.setText('CH1: ')
if not self.Liss_show.isChecked():
chan = self.I.analogInputSources[self.chan1remap]
R = [chan.calPoly10(0),chan.calPoly10(1023)]
R[0]=R[0]*.9;R[1]=R[1]*.9
self.plot.setYRange(min(R),max(R))
self.setOffsetAndGainLabels()
def setGainCH2(self,g):
self.I.set_gain('CH2',g)
self.CH2_LABEL.setText('CH2: ')
if not self.Liss_show.isChecked():
chan = self.I.analogInputSources['CH2']
R = [chan.calPoly10(0),chan.calPoly10(1023)]
R[0]=R[0]*.9;R[1]=R[1]*.9
self.plot2.setYRange(min(R),max(R))
self.setOffsetAndGainLabels()
def setOffset(self,off):
chan = self.I.analogInputSources[self.chan1remap]
print 'no offset on ',chan
def setOffsetCH1(self,g):
cnum=0
self.setOffsetAndGainLabels()
def setOffsetCH2(self,g):
cnum=1
self.setOffsetAndGainLabels()
def setTimeBase(self,g):
timebases = [1.75,2,4,8,16,32,128,256,512,1024,1024]
samplescaling=[1,1,1,1,1,0.5,0.4,0.3,0.2,0.1,0.1]
#print g,len(timebases),len(samplescaling)
self.timebase=timebases[g]
'''
if(self.active_channels==1 and self.timebase<1.0):
self.timebase=1.0
elif(self.active_channels==2 and self.timebase<1.25):
self.timebase=1.25
elif((self.active_channels==3 or self.active_channels==4) and self.timebase<1.75):
self.timebase=1.75
'''
self.autoSetSamples()
self.samples = int(self.samples*samplescaling[g])
self.autoRange()
self.showgrid()
def autoSetSamples(self):
self.samples = self.max_samples_per_channel[self.active_channels]
def setTriggerLevel(self,val):
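        # Map the slider position (0-1000) onto the 10-bit ADC range (0-1023),
        # then convert to volts with the trigger channel's calPoly10
        # calibration; inverted channels flip the slider value first so the
        # on-screen arrow follows the displayed trace.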
if self.trigger_channel==0:self.triggerChannelName=self.chan1remap
else:self.triggerChannelName='CH2'
chan = self.I.analogInputSources[self.triggerChannelName]
if chan.inverted:val=1000-val
levelInVolts=chan.calPoly10(val*1023/1000.)
self.trigger_level=levelInVolts
self.arrow.setPos(0,levelInVolts) #TODO
self.trigger_level_box.setValue(levelInVolts)
def setTriggerChannel(self,val):
self.trigtext.setHtml(self.trigger_text(self.I.achans[val].name))
self.triggerChannel=val
self.trigger_channel = val
c=self.trace_colors[val]
s='QFrame{background-color:rgba'+str(c)[:-1]+',50);}'
self.sender().parentWidget().setStyleSheet(s)
self.arrow.setParentItem(None)
if val==0:
self.plot.addItem(self.arrow)
elif val==1:
self.plot2.addItem(self.arrow)
def setActiveChannels(self,val):
self.active_channels = int(val)
self.autoSetSamples()
def remap_CH0(self,val):
val = str(val)
self.chosa = self.I.__calcCHOSA__(val)
self.chan1remap=val
chan = self.I.analogInputSources[self.chan1remap]
R = [chan.calPoly10(0),chan.calPoly10(1023)]
self.plot.setYRange(min(R),max(R))
def autoRange(self):
if self.Liss_show.isChecked():
X = self.I.analogInputSources[self.chan1remap]
R1 = [X.calPoly10(0),X.calPoly10(1023)]
R1[0]=R1[0]*.9;R1[1]=R1[1]*.9
Y = self.I.analogInputSources['CH2']
R2 = [Y.calPoly10(0),Y.calPoly10(1023)]
R2[0]=R2[0]*.9;R2[1]=R2[1]*.9
self.plot.setXRange(min(R1),max(R1))
self.plot.setYRange(min(R2),max(R2))
else:
chan = self.I.analogInputSources[self.chan1remap]
R = [chan.calPoly10(0),chan.calPoly10(1023)]
R[0]=R[0]*.9;R[1]=R[1]*.9
#print R
self.plot.setYRange(min(R),max(R))
chan = self.I.analogInputSources['CH2']
R = [chan.calPoly10(0),chan.calPoly10(1023)]
R[0]=R[0]*.9;R[1]=R[1]*.9
self.plot2.setYRange(min(R),max(R))
self.plot.setXRange(0,self.timebase*self.samples*1e-6)
#self.plot.setRange(QtCore.QRectF(0, -16.5, self.samples*self.timebase*1e-6, 2*16.5))
def enableXY(self,state):
self.autoRange()
def plot_liss(self):
chans = ['CH1','CH2']
lissx = self.Liss_x.currentText()
lissy = self.Liss_y.currentText()
self.liss_x = chans.index(lissx)
self.liss_y = chans.index(lissy)
self.liss_win = pg.GraphicsWindow(title="Basic plotting examples")
self.liss_win.setWindowTitle('pyqtgraph example: Plotting')
self.p1 = self.liss_win.addPlot(title="Lissajous: x:%s , y:%s"%(lissx,lissy), x=self.I.achans[self.liss_x].get_yaxis(),y=self.I.achans[self.liss_y].get_yaxis())
if(self.liss_win.windowState() & QtCore.Qt.WindowActive):
print 'opened'
def liss_animate(self,val):
if val and self.liss_ready and self.Liss_show.isChecked():
self.freezeButton.setChecked(True)
self.liss_animate_arrow1=pg.CurveArrow(self.curve_lis)
if(self.liss_x==0):
self.liss_animate_arrow2=pg.CurveArrow(self.curve1)
elif(self.liss_x==1):
self.liss_animate_arrow2=pg.CurveArrow(self.curve2)
elif(self.liss_x==2):
self.liss_animate_arrow2=pg.CurveArrow(self.curve3)
elif(self.liss_x==3):
self.liss_animate_arrow2=pg.CurveArrow(self.curve4)
if(self.liss_y==0):
self.liss_animate_arrow3=pg.CurveArrow(self.curve1)
elif(self.liss_y==1):
self.liss_animate_arrow3=pg.CurveArrow(self.curve2)
elif(self.liss_y==2):
self.liss_animate_arrow3=pg.CurveArrow(self.curve3)
elif(self.liss_y==3):
self.liss_animate_arrow3=pg.CurveArrow(self.curve4)
self.plot.addItem(self.liss_animate_arrow1)
self.plot.addItem(self.liss_animate_arrow2)
self.plot.addItem(self.liss_animate_arrow3)
self.liss_anim1 = self.liss_animate_arrow1.makeAnimation(loop=-1)
self.liss_anim2 = self.liss_animate_arrow2.makeAnimation(loop=-1)
self.liss_anim3 = self.liss_animate_arrow3.makeAnimation(loop=-1)
self.liss_anim1.start();self.liss_anim2.start();self.liss_anim3.start()
else:
self.freezeButton.setChecked(False)
try:
self.liss_anim1.stop();self.liss_anim2.stop();self.liss_anim3.stop()
self.plot.removeItem(self.liss_animate_arrow1)
self.plot.removeItem(self.liss_animate_arrow2)
self.plot.removeItem(self.liss_animate_arrow3)
except:
pass
def closeEvent(self, event):
self.timer.stop()
self.finished=True
def __del__(self):
self.timer.stop()
print 'bye'
| jithinbp/vLabtool-v0 | v0/apps/scope.py | Python | gpl-3.0 | 18,018 | 0.051116 |
import logging
from django.views import generic
from django.contrib.auth.models import User
from django.contrib import messages
from django.core.mail import send_mail
from django import http
from django.core.urlresolvers import reverse
from django.template.loader import get_template
from django.template import Context
from apps.gateway import forms
from oscar.apps.customer.forms import generate_username
logger = logging.getLogger('gateway')
class GatewayView(generic.FormView):
template_name = 'gateway/form.html'
form_class = forms.GatewayForm
def form_valid(self, form):
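        # Create a throwaway dashboard account with generated credentials and
        # email them to the address submitted through the gateway form.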
real_email = form.cleaned_data['email']
username = generate_username()
password = generate_username()
        email = 'dashboard-user-%s@example.com' % username
user = self.create_dashboard_user(username, email, password)
self.send_confirmation_email(real_email, user, password)
logger.info("Created dashboard user #%d for %s",
user.id, real_email)
messages.success(
self.request,
"The credentials for a dashboard user have been sent to %s" % real_email)
return http.HttpResponseRedirect(reverse('gateway'))
def create_dashboard_user(self, username, email, password):
user = User.objects.create_user(username, email, password)
user.is_staff = True
user.save()
return user
def send_confirmation_email(self, real_email, user, password):
msg = get_template('gateway/email.txt').render(Context({
'email': user.email,
'password': password
}))
send_mail('Dashboard access to Oscar sandbox',
msg, '[email protected]',
[real_email])
| elliotthill/django-oscar | sites/sandbox/apps/gateway/views.py | Python | bsd-3-clause | 1,784 | 0.001121 |
import logging
import sys
from os import path
import click
from clickclick import AliasedGroup, fatal_error
import connexion
from connexion.mock import MockResolver
logger = logging.getLogger('connexion.cli')
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
def validate_wsgi_server_requirements(ctx, param, value):
if value == 'gevent':
try:
import gevent # NOQA
        except ImportError:
fatal_error('gevent library is not installed')
elif value == 'tornado':
try:
import tornado # NOQA
        except ImportError:
fatal_error('tornado library is not installed')
def print_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo('Connexion {}'.format(connexion.__version__))
ctx.exit()
@click.group(cls=AliasedGroup, context_settings=CONTEXT_SETTINGS)
@click.option('-V', '--version', is_flag=True, callback=print_version, expose_value=False, is_eager=True,
help='Print the current version number and exit.')
def main():
pass
@main.command()
@click.argument('spec_file')
@click.argument('base_module_path', required=False)
@click.option('--port', '-p', default=5000, type=int, help='Port to listen.')
@click.option('--host', '-H', type=str, help='Host interface to bind on.')
@click.option('--wsgi-server', '-w', default='flask',
type=click.Choice(['flask', 'gevent', 'tornado']),
callback=validate_wsgi_server_requirements,
help='Which WSGI server container to use.')
@click.option('--stub',
help='Returns status code 501, and `Not Implemented Yet` payload, for '
                   'endpoints whose handlers are not found.',
is_flag=True, default=False)
@click.option('--mock', metavar='MOCKMODE', type=click.Choice(['all', 'notimplemented']),
              help='Returns example data for all endpoints, or only for those whose handlers are not found.')
@click.option('--hide-spec',
help='Hides the API spec in JSON format which is by default available at `/swagger.json`.',
is_flag=True, default=False)
@click.option('--hide-console-ui',
              help='Hides the API console UI which is by default available at `/ui`.',
is_flag=True, default=False)
@click.option('--console-ui-url', metavar='URL',
              help='Customize the URL path at which the API console UI will be mounted.')
@click.option('--console-ui-from', metavar='PATH',
help='Path to a customized API console UI dashboard.')
@click.option('--auth-all-paths',
help='Enable authentication to paths not defined in the spec.',
is_flag=True, default=False)
@click.option('--validate-responses',
help='Enable validation of response values from operation handlers.',
is_flag=True, default=False)
@click.option('--strict-validation',
help='Enable strict validation of request payloads.',
is_flag=True, default=False)
@click.option('--debug', '-d', help='Show debugging information.',
is_flag=True, default=False)
@click.option('--verbose', '-v', help='Show verbose information.', count=True)
@click.option('--base-path', metavar='PATH',
help='Override the basePath in the API spec.')
def run(spec_file,
base_module_path,
port,
host,
wsgi_server,
stub,
mock,
hide_spec,
hide_console_ui,
console_ui_url,
console_ui_from,
auth_all_paths,
validate_responses,
strict_validation,
debug,
verbose,
base_path):
"""
    Runs a server compliant with an OpenAPI/Swagger 2.0 specification file.
Arguments:
- SPEC_FILE: specification file that describes the server endpoints.
- BASE_MODULE_PATH (optional): filesystem path where the API endpoints handlers are going to be imported from.
"""
logging_level = logging.WARN
if verbose > 0:
logging_level = logging.INFO
if debug or verbose > 1:
logging_level = logging.DEBUG
debug = True
logging.basicConfig(level=logging_level)
spec_file_full_path = path.abspath(spec_file)
py_module_path = base_module_path or path.dirname(spec_file_full_path)
sys.path.insert(1, path.abspath(py_module_path))
logger.debug('Added {} to system path.'.format(py_module_path))
resolver_error = None
if stub:
resolver_error = 501
api_extra_args = {}
if mock:
resolver = MockResolver(mock_all=mock == 'all')
api_extra_args['resolver'] = resolver
app = connexion.FlaskApp(__name__,
swagger_json=not hide_spec,
swagger_ui=not hide_console_ui,
swagger_path=console_ui_from or None,
swagger_url=console_ui_url or None,
auth_all_paths=auth_all_paths,
debug=debug)
app.add_api(spec_file_full_path,
base_path=base_path,
resolver_error=resolver_error,
validate_responses=validate_responses,
strict_validation=strict_validation,
**api_extra_args)
app.run(port=port,
host=host,
server=wsgi_server,
debug=debug)
if __name__ == '__main__': # pragma: no cover
main()
| NeostreamTechnology/Microservices | venv/lib/python2.7/site-packages/connexion/cli.py | Python | mit | 5,463 | 0.002014 |
import os, json, datetime
from ContinuousRegistration.Source.make_registration_scripts import parser
from ContinuousRegistration.Source.util import logging, load_submissions, write_json
from ContinuousRegistration.Source.datasets import load_datasets
def run(parameters):
submissions = load_submissions(parameters)
datasets = load_datasets(parameters)
results = {}
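    # results is keyed as results[team_name][blueprint_name][dataset_name];
    # each leaf is a list with one evaluation entry per registration case.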
for team_name, blueprint_file_names in submissions.items():
for blueprint_file_name in blueprint_file_names:
if not team_name in results:
results[team_name] = {}
blueprint_name, blueprint_ext = os.path.splitext(os.path.basename(blueprint_file_name))
if not blueprint_name in results[team_name]:
results[team_name][blueprint_name] = {}
logging.info('Loading blueprint %s/%s.' % (team_name, os.path.basename(blueprint_name)))
blueprint = json.load(open(blueprint_file_name))
for dataset_name in blueprint['Datasets']:
if not dataset_name in datasets:
continue
dataset = datasets[dataset_name]
results[team_name][blueprint_name][dataset_name] = []
for file_names in dataset.generator():
output_directory = os.path.join(parameters.output_directory, team_name, blueprint_name)
logging.info('Evaluating registration for blueprint %s and images %s.', blueprint_name, file_names['image_file_names'])
try:
results[team_name][blueprint_name][dataset.name].append(dataset.evaluate(
parameters.superelastix, file_names, output_directory))
if hasattr(parameters, 'make_images') and parameters.make_images:
dataset.make_images(parameters.superelastix, file_names, output_directory)
if hasattr(parameters, 'make_labels') and parameters.make_labels:
dataset.make_labels(parameters.superelastix, file_names, output_directory)
if hasattr(parameters, 'make_difference_images') and parameters.make_difference_images:
dataset.make_difference_images(parameters.superelastix, file_names, output_directory)
if hasattr(parameters, 'make_checkerboards') and parameters.make_checkerboards:
dataset.make_checkerboards(parameters.superelastix, file_names, output_directory)
if hasattr(parameters, 'make_image_checkerboards') and parameters.make_image_checkerboards:
dataset.make_image_checkerboards(parameters.superelastix, file_names, output_directory)
if hasattr(parameters, 'make_label_checkerboards') and parameters.make_label_checkerboards and dataset.name in ["CUMC12", "IBSR18", "LPBA40", "MGH10"]:
dataset.make_label_checkerboards(parameters.superelastix, file_names, output_directory)
except Exception as e:
logging.error('Error during evaluation of %s\'s blueprint %s and dataset %s: %s'
% (team_name, blueprint_name, dataset.name, str(e)))
write_json(os.path.join(parameters.output_directory,
'results.json'), results)
return results
if __name__ == '__main__':
parser.add_argument('--make-images', '-mi', type=bool, default=False, help="Warp moving images.")
parser.add_argument('--make-labels', '-ml', type=bool, default=False, help="Warp moving labels.")
parser.add_argument('--make-difference-images', '-mdi', type=bool, default=False, help="Warp moving images and subtract from fixed images.")
parser.add_argument('--make-checkerboards', '-mc', type=bool, default=False, help="Warp checkerboard pattern.")
parser.add_argument('--make-image-checkerboards', '-mic', type=bool, default=False,
help="Warp moving images and make checkerboard with fixed and warped moving image.")
parser.add_argument('--make-label-checkerboards', '-mlc', type=bool, default=False,
help="Warp moving labels and make checkerboard with fixed and warped moving label.")
run(parser.parse_args())
| kaspermarstal/SuperElastix | ContinuousRegistration/Source/make_evaluation.py | Python | apache-2.0 | 4,367 | 0.007786 |
"""Takes a Python AST and converts it to a corresponding StencilModel.
Throws an exception if the input does not represent a valid stencil
kernel program. This is the first stage of processing and is done only
once when a stencil class is initialized.
"""
from stencil_model import *
from assert_utils import *
import ast
from asp.util import *
# class to convert from Python AST to StencilModel
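# A minimal sketch of the intended use (kernel_source, holding the text of a
# stencil kernel method, is assumed here for illustration):
#   frontend = StencilPythonFrontEnd()
#   model = frontend.parse(ast.parse(kernel_source))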
class StencilPythonFrontEnd(ast.NodeTransformer):
def __init__(self):
super(StencilPythonFrontEnd, self).__init__()
def parse(self, ast):
return self.visit(ast)
def visit_Module(self, node):
body = map(self.visit, node.body)
assert len(body) == 1
assert_has_type(body[0], StencilModel)
return body[0]
def visit_FunctionDef(self, node):
assert len(node.decorator_list) == 0
arg_ids = self.visit(node.args)
assert arg_ids[0] == 'self'
self.output_arg_id = arg_ids[-1]
self.input_arg_ids = arg_ids[1:-1]
kernels = map(self.visit, node.body)
interior_kernels = map(lambda x: x['kernel'], filter(lambda x: x['kernel_type'] == 'interior_points', kernels))
border_kernels = map(lambda x: x['kernel'], filter(lambda x: x['kernel_type'] == 'border_points', kernels))
assert len(interior_kernels) <= 1, 'Can only have one loop over interior points'
assert len(border_kernels) <= 1, 'Can only have one loop over border points'
return StencilModel(map(lambda x: Identifier(x), self.input_arg_ids),
interior_kernels[0] if len(interior_kernels) > 0 else Kernel([]),
border_kernels[0] if len(border_kernels) > 0 else Kernel([]))
def visit_arguments(self, node):
assert node.vararg == None, 'kernel function may not take variable argument list'
assert node.kwarg == None, 'kernel function may not take variable argument list'
return map (self.visit, node.args)
def visit_Name(self, node):
return node.id
def visit_For(self, node):
# check if this is the right kind of For loop
if (type(node.iter) is ast.Call and
type(node.iter.func) is ast.Attribute):
if (node.iter.func.attr == "interior_points" or
node.iter.func.attr == "border_points"):
assert node.iter.args == [] and node.iter.starargs == None and node.iter.kwargs == None, 'Invalid argument list for %s()' % node.iter.func.attr
grid_id = self.visit(node.iter.func.value)
assert grid_id == self.output_arg_id, 'Can only iterate over %s of output grid "%s" but "%s" was given' % (node.iter.func.attr, self.output_arg_id, grid_id)
self.kernel_target = self.visit(node.target)
body = map(self.visit, node.body)
self.kernel_target = None
return {'kernel_type': node.iter.func.attr, 'kernel': Kernel(body)}
elif node.iter.func.attr == "neighbors":
assert len(node.iter.args) == 2 and node.iter.starargs == None and node.iter.kwargs == None, 'Invalid argument list for neighbors()'
self.neighbor_grid_id = self.visit(node.iter.func.value)
assert self.neighbor_grid_id in self.input_arg_ids, 'Can only iterate over neighbors in an input grid but "%s" was given' % grid_id
neighbors_of_grid_id = self.visit(node.iter.args[0])
assert neighbors_of_grid_id == self.kernel_target, 'Can only iterate over neighbors of an output grid point but "%s" was given' % neighbors_of_grid_id
self.neighbor_target = self.visit(node.target)
body = map(self.visit, node.body)
self.neighbor_target = None
                self.neighbor_grid_id = None
neighbors_id = self.visit(node.iter.args[1])
return StencilNeighborIter(Identifier(self.neighbor_grid_id), neighbors_id, body)
else:
                assert False, 'Invalid call in For loop argument \'%s\', can only iterate over interior_points, border_points, or neighbors of a grid' % node.iter.func.attr
else:
            assert False, 'Unexpected For loop \'%s\', can only iterate over interior_points, border_points, or neighbors of a grid' % node
def visit_AugAssign(self, node):
target = self.visit(node.target)
assert type(target) is OutputElement, 'Only assignments to current output element permitted'
return OutputAssignment(ScalarBinOp(OutputElement(), node.op, self.visit(node.value)))
def visit_Assign(self, node):
targets = map (self.visit, node.targets)
assert len(targets) == 1 and type(targets[0]) is OutputElement, 'Only assignments to current output element permitted'
return OutputAssignment(self.visit(node.value))
def visit_Subscript(self, node):
if type(node.slice) is ast.Index:
grid_id = self.visit(node.value)
target = self.visit(node.slice.value)
if grid_id == self.output_arg_id and target == self.kernel_target:
return OutputElement()
elif target == self.kernel_target:
return InputElementZeroOffset(Identifier(grid_id))
elif grid_id == self.neighbor_grid_id and target == self.neighbor_target:
return Neighbor()
elif isinstance(target, Expr):
return InputElementExprIndex(Identifier(grid_id), target)
else:
assert False, 'Unexpected subscript index \'%s\' on grid \'%s\'' % (target, grid_id)
else:
assert False, 'Unsupported subscript object \'%s\' on grid \'%s\'' % (node.slice, grid_id)
def visit_BinOp(self, node):
return ScalarBinOp(self.visit(node.left), node.op, self.visit(node.right))
def visit_Num(self, node):
return Constant(node.n)
def visit_Call(self, node):
assert isinstance(node.func, ast.Name), 'Cannot call expression'
if node.func.id == 'distance' and len(node.args) == 2:
if ((node.args[0].id == self.neighbor_target and node.args[1].id == self.kernel_target) or \
(node.args[0].id == self.kernel_target and node.args[1].id == self.neighbor_target)):
return NeighborDistance()
elif ((node.args[0].id == self.neighbor_target and node.args[1].id == self.neighbor_target) or \
(node.args[0].id == self.kernel_target and node.args[1].id == self.kernel_target)):
return Constant(0)
else:
return MathFunction(node.func.id, map(self.visit, node.args))
| mbdriscoll/asp-old | specializers/stencil/stencil_python_front_end.py | Python | bsd-3-clause | 6,699 | 0.00627 |
import re
def anti_vowel(text):
newtext = re.sub('[AEIOUaeiou]', '', text)
print newtext
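# For reference, the calls below should print (vowels stripped):
#   Hy Lk Wrds!
#   TH QCK BRWN FX SLYLY JMPD VR TH LZY DG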
anti_vowel("Hey Look Words!")
anti_vowel("THE QUICK BROWN FOX SLYLY JUMPED OVER THE LAZY DOG") | voiceofrae/Python | antivowel.py | Python | mit | 201 | 0.0199 |
from pycp2k.inputsection import InputSection
from ._each295 import _each295
class _check_spline3(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Section_parameters = None
self.Add_last = None
self.Common_iteration_levels = None
self.Filename = None
self.Log_print_key = None
self.EACH = _each295()
self._name = "CHECK_SPLINE"
self._keywords = {'Log_print_key': 'LOG_PRINT_KEY', 'Filename': 'FILENAME', 'Add_last': 'ADD_LAST', 'Common_iteration_levels': 'COMMON_ITERATION_LEVELS'}
self._subsections = {'EACH': 'EACH'}
self._attributes = ['Section_parameters']
| SINGROUP/pycp2k | pycp2k/classes/_check_spline3.py | Python | lgpl-3.0 | 674 | 0.002967 |
from unittest import TestCase
from mock import MagicMock
from django_auth_lti.verification import is_allowed
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
class TestVerification(TestCase):
def test_is_allowed_config_failure(self):
request = MagicMock(LTI={})
allowed_roles = ["admin", "student"]
self.assertRaises(ImproperlyConfigured, is_allowed,
request, allowed_roles, False)
def test_is_allowed_success(self):
request = MagicMock(LTI={"roles": ["admin"]})
allowed_roles = ["admin", "student"]
user_is_allowed = is_allowed(request, allowed_roles, False)
self.assertTrue(user_is_allowed)
def test_is_allowed_success_one_role(self):
request = MagicMock(LTI={"roles": ["admin"]})
allowed_roles = "admin"
user_is_allowed = is_allowed(request, allowed_roles, False)
self.assertTrue(user_is_allowed)
def test_is_allowed_failure(self):
request = MagicMock(LTI={"roles":[]})
allowed_roles = ["admin", "student"]
user_is_allowed = is_allowed(request, allowed_roles, False)
self.assertFalse(user_is_allowed)
def test_is_allowed_failure_one_role(self):
request = MagicMock(LTI={"roles":[]})
allowed_roles = "admin"
user_is_allowed = is_allowed(request, allowed_roles, False)
self.assertFalse(user_is_allowed)
def test_is_allowed_exception(self):
request = MagicMock(LTI={"roles":["TF"]})
allowed_roles = ["admin", "student"]
self.assertRaises(PermissionDenied, is_allowed,
request, allowed_roles, True)
| UQ-UQx/PerspectivesX | perspectivesx_project/django_auth_lti/tests/test_verification.py | Python | mit | 1,706 | 0.005862 |
"""General utility functions for pyamg"""
__docformat__ = "restructuredtext en"
from warnings import warn
import numpy as np
import scipy as sp
from scipy.sparse import isspmatrix, isspmatrix_csr, isspmatrix_csc, \
isspmatrix_bsr, csr_matrix, csc_matrix, bsr_matrix, coo_matrix, eye
from scipy.sparse.sputils import upcast
from pyamg.util.linalg import norm, cond, pinv_array
from scipy.linalg import eigvals
import pyamg.amg_core
__all__ = ['blocksize', 'diag_sparse', 'profile_solver', 'to_type',
'type_prep', 'get_diagonal', 'UnAmal', 'Coord2RBM',
'hierarchy_spectrum', 'print_table', 'get_block_diag', 'amalgamate',
'symmetric_rescaling', 'symmetric_rescaling_sa',
'relaxation_as_linear_operator', 'filter_operator', 'scale_T',
'get_Cpt_params', 'compute_BtBinv', 'eliminate_diag_dom_nodes',
'levelize_strength_or_aggregation',
'levelize_smooth_or_improve_candidates']
try:
from scipy.sparse._sparsetools import csr_scale_rows, bsr_scale_rows
from scipy.sparse._sparsetools import csr_scale_columns, bsr_scale_columns
except ImportError:
from scipy.sparse.sparsetools import csr_scale_rows, bsr_scale_rows
from scipy.sparse.sparsetools import csr_scale_columns, bsr_scale_columns
def blocksize(A):
# Helper Function: return the blocksize of a matrix
if isspmatrix_bsr(A):
return A.blocksize[0]
else:
return 1
def profile_solver(ml, accel=None, **kwargs):
"""
A quick solver to profile a particular multilevel object
Parameters
----------
ml : multilevel
Fully constructed multilevel object
accel : function pointer
Pointer to a valid Krylov solver (e.g. gmres, cg)
Returns
-------
residuals : array
Array of residuals for each iteration
See Also
--------
multilevel.psolve, multilevel.solve
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags, csr_matrix
>>> from scipy.sparse.linalg import cg
>>> from pyamg.classical import ruge_stuben_solver
>>> from pyamg.util.utils import profile_solver
>>> n=100
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = csr_matrix(spdiags(data,[-1,0,1],n,n))
>>> b = A*np.ones(A.shape[0])
>>> ml = ruge_stuben_solver(A, max_coarse=10)
>>> res = profile_solver(ml,accel=cg)
"""
A = ml.levels[0].A
b = A * sp.rand(A.shape[0], 1)
residuals = []
if accel is None:
x_sol = ml.solve(b, residuals=residuals, **kwargs)
del x_sol
else:
def callback(x):
residuals.append(norm(np.ravel(b) - np.ravel(A*x)))
M = ml.aspreconditioner(cycle=kwargs.get('cycle', 'V'))
accel(A, b, M=M, callback=callback, **kwargs)
return np.asarray(residuals)
def diag_sparse(A):
"""
If A is a sparse matrix (e.g. csr_matrix or csc_matrix)
- return the diagonal of A as an array
Otherwise
- return a csr_matrix with A on the diagonal
Parameters
----------
A : sparse matrix or 1d array
General sparse matrix or array of diagonal entries
Returns
-------
B : array or sparse matrix
Diagonal sparse is returned as csr if A is dense otherwise return an
array of the diagonal
Examples
--------
>>> import numpy as np
>>> from pyamg.util.utils import diag_sparse
>>> d = 2.0*np.ones((3,)).ravel()
>>> print diag_sparse(d).todense()
[[ 2. 0. 0.]
[ 0. 2. 0.]
[ 0. 0. 2.]]
"""
if isspmatrix(A):
return A.diagonal()
else:
if(np.ndim(A) != 1):
raise ValueError('input diagonal array expected to be 1d')
return csr_matrix((np.asarray(A), np.arange(len(A)),
np.arange(len(A)+1)), (len(A), len(A)))
def scale_rows(A, v, copy=True):
"""
Scale the sparse rows of a matrix
Parameters
----------
A : sparse matrix
Sparse matrix with M rows
v : array_like
Array of M scales
copy : {True,False}
- If copy=True, then the matrix is copied to a new and different return
matrix (e.g. B=scale_rows(A,v))
- If copy=False, then the matrix is overwritten deeply (e.g.
scale_rows(A,v,copy=False) overwrites A)
Returns
-------
A : sparse matrix
Scaled sparse matrix in original format
See Also
--------
scipy.sparse._sparsetools.csr_scale_rows, scale_columns
Notes
-----
- if A is a csc_matrix, the transpose A.T is passed to scale_columns
- if A is not csr, csc, or bsr, it is converted to csr and sent
to scale_rows
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags
>>> from pyamg.util.utils import scale_rows
>>> n=5
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = spdiags(data,[-1,0,1],n,n-1).tocsr()
>>> B = scale_rows(A,5*np.ones((A.shape[0],1)))
"""
v = np.ravel(v)
if isspmatrix_csr(A) or isspmatrix_bsr(A):
M, N = A.shape
if M != len(v):
raise ValueError('scale vector has incompatible shape')
if copy:
A = A.copy()
A.data = np.asarray(A.data, dtype=upcast(A.dtype, v.dtype))
else:
v = np.asarray(v, dtype=A.dtype)
if isspmatrix_csr(A):
csr_scale_rows(M, N, A.indptr, A.indices, A.data, v)
else:
R, C = A.blocksize
bsr_scale_rows(M/R, N/C, R, C, A.indptr, A.indices,
np.ravel(A.data), v)
return A
elif isspmatrix_csc(A):
return scale_columns(A.T, v)
else:
return scale_rows(csr_matrix(A), v)
def scale_columns(A, v, copy=True):
"""
Scale the sparse columns of a matrix
Parameters
----------
A : sparse matrix
Sparse matrix with N rows
v : array_like
Array of N scales
copy : {True,False}
- If copy=True, then the matrix is copied to a new and different return
matrix (e.g. B=scale_columns(A,v))
- If copy=False, then the matrix is overwritten deeply (e.g.
scale_columns(A,v,copy=False) overwrites A)
Returns
-------
A : sparse matrix
Scaled sparse matrix in original format
See Also
--------
scipy.sparse._sparsetools.csr_scale_columns, scale_rows
Notes
-----
- if A is a csc_matrix, the transpose A.T is passed to scale_rows
- if A is not csr, csc, or bsr, it is converted to csr and sent to
scale_rows
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags
>>> from pyamg.util.utils import scale_columns
>>> n=5
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = spdiags(data,[-1,0,1],n,n-1).tocsr()
>>> print scale_columns(A,5*np.ones((A.shape[1],1))).todense()
[[ 10. -5. 0. 0.]
[ -5. 10. -5. 0.]
[ 0. -5. 10. -5.]
[ 0. 0. -5. 10.]
[ 0. 0. 0. -5.]]
"""
v = np.ravel(v)
if isspmatrix_csr(A) or isspmatrix_bsr(A):
M, N = A.shape
if N != len(v):
raise ValueError('scale vector has incompatible shape')
if copy:
A = A.copy()
A.data = np.asarray(A.data, dtype=upcast(A.dtype, v.dtype))
else:
v = np.asarray(v, dtype=A.dtype)
if isspmatrix_csr(A):
csr_scale_columns(M, N, A.indptr, A.indices, A.data, v)
else:
R, C = A.blocksize
bsr_scale_columns(M/R, N/C, R, C, A.indptr, A.indices,
np.ravel(A.data), v)
return A
elif isspmatrix_csc(A):
return scale_rows(A.T, v)
else:
return scale_rows(csr_matrix(A), v)
def symmetric_rescaling(A, copy=True):
"""
Scale the matrix symmetrically::
A = D^{-1/2} A D^{-1/2}
where D=diag(A).
The left multiplication is accomplished through scale_rows and the right
    multiplication is done through scale_columns.
Parameters
----------
A : sparse matrix
Sparse matrix with N rows
copy : {True,False}
- If copy=True, then the matrix is copied to a new and different return
matrix (e.g. B=symmetric_rescaling(A))
- If copy=False, then the matrix is overwritten deeply (e.g.
symmetric_rescaling(A,copy=False) overwrites A)
Returns
-------
D_sqrt : array
Array of sqrt(diag(A))
D_sqrt_inv : array
Array of 1/sqrt(diag(A))
DAD : csr_matrix
Symmetrically scaled A
Notes
-----
- if A is not csr, it is converted to csr and sent to scale_rows
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags
>>> from pyamg.util.utils import symmetric_rescaling
>>> n=5
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = spdiags(data,[-1,0,1],n,n).tocsr()
>>> Ds, Dsi, DAD = symmetric_rescaling(A)
>>> print DAD.todense()
[[ 1. -0.5 0. 0. 0. ]
[-0.5 1. -0.5 0. 0. ]
[ 0. -0.5 1. -0.5 0. ]
[ 0. 0. -0.5 1. -0.5]
[ 0. 0. 0. -0.5 1. ]]
"""
if isspmatrix_csr(A) or isspmatrix_csc(A) or isspmatrix_bsr(A):
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix')
D = diag_sparse(A)
mask = (D != 0)
if A.dtype != complex:
D_sqrt = np.sqrt(abs(D))
else:
# We can take square roots of negative numbers
D_sqrt = np.sqrt(D)
D_sqrt_inv = np.zeros_like(D_sqrt)
D_sqrt_inv[mask] = 1.0/D_sqrt[mask]
DAD = scale_rows(A, D_sqrt_inv, copy=copy)
DAD = scale_columns(DAD, D_sqrt_inv, copy=False)
return D_sqrt, D_sqrt_inv, DAD
else:
return symmetric_rescaling(csr_matrix(A))
def symmetric_rescaling_sa(A, B, BH=None):
"""
Scale the matrix symmetrically::
A = D^{-1/2} A D^{-1/2}
where D=diag(A). The left multiplication is accomplished through
    scale_rows and the right multiplication is done through scale_columns.
The candidates B and BH are scaled accordingly::
B = D^{1/2} B
BH = D^{1/2} BH
Parameters
----------
A : {sparse matrix}
Sparse matrix with N rows
B : {array}
N x m array
BH : {None, array}
        If A.symmetry == 'nonsymmetric', then BH must be an N x m array.
Otherwise, BH is ignored.
Returns
-------
Appropriately scaled A, B and BH, i.e.,
A = D^{-1/2} A D^{-1/2}, B = D^{1/2} B, and BH = D^{1/2} BH
Notes
-----
- if A is not csr, it is converted to csr and sent to scale_rows
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags
>>> from pyamg.util.utils import symmetric_rescaling_sa
>>> n=5
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = spdiags(data,[-1,0,1],n,n).tocsr()
>>> B = e.copy().reshape(-1,1)
>>> [DAD, DB, DBH] = symmetric_rescaling_sa(A,B,BH=None)
>>> print DAD.todense()
[[ 1. -0.5 0. 0. 0. ]
[-0.5 1. -0.5 0. 0. ]
[ 0. -0.5 1. -0.5 0. ]
[ 0. 0. -0.5 1. -0.5]
[ 0. 0. 0. -0.5 1. ]]
>>> print DB
[[ 1.41421356]
[ 1.41421356]
[ 1.41421356]
[ 1.41421356]
[ 1.41421356]]
"""
# rescale A
[D_sqrt, D_sqrt_inv, A] = symmetric_rescaling(A, copy=False)
# scale candidates
for i in range(B.shape[1]):
B[:, i] = np.ravel(B[:, i])*np.ravel(D_sqrt)
if hasattr(A, 'symmetry'):
if A.symmetry == 'nonsymmetric':
if BH is None:
raise ValueError("BH should be an n x m array")
else:
for i in range(BH.shape[1]):
BH[:, i] = np.ravel(BH[:, i])*np.ravel(D_sqrt)
return [A, B, BH]
def type_prep(upcast_type, varlist):
"""
    Loop over all elements of varlist and convert them to upcast_type
    The only difference from pyamg.util.utils.to_type(...) is that scalars
    are wrapped into length-1 arrays.  This is desirable when passing
    the numpy complex data type to C routines that do not handle complex
    scalars correctly
Parameters
----------
upcast_type : data type
e.g. complex, float64 or complex128
varlist : list
list may contain arrays, mat's, sparse matrices, or scalars
the elements may be float, int or complex
Returns
-------
Returns upcast-ed varlist to upcast_type
Notes
-----
Useful when harmonizing the types of variables, such as
if A and b are complex, but x,y and z are not.
Examples
--------
>>> import numpy as np
>>> from pyamg.util.utils import type_prep
>>> from scipy.sparse.sputils import upcast
>>> x = np.ones((5,1))
>>> y = 2.0j*np.ones((5,1))
>>> z = 2.3
>>> varlist = type_prep(upcast(x.dtype, y.dtype), [x, y, z])
"""
varlist = to_type(upcast_type, varlist)
for i in range(len(varlist)):
if np.isscalar(varlist[i]):
varlist[i] = np.array([varlist[i]])
return varlist
def to_type(upcast_type, varlist):
"""
    Loop over all elements of varlist and convert them to upcast_type
Parameters
----------
upcast_type : data type
e.g. complex, float64 or complex128
varlist : list
list may contain arrays, mat's, sparse matrices, or scalars
the elements may be float, int or complex
Returns
-------
Returns upcast-ed varlist to upcast_type
Notes
-----
Useful when harmonizing the types of variables, such as
if A and b are complex, but x,y and z are not.
Examples
--------
>>> import numpy as np
>>> from pyamg.util.utils import to_type
>>> from scipy.sparse.sputils import upcast
>>> x = np.ones((5,1))
>>> y = 2.0j*np.ones((5,1))
>>> varlist = to_type(upcast(x.dtype, y.dtype), [x, y])
"""
# convert_type = type(np.array([0], upcast_type)[0])
for i in range(len(varlist)):
# convert scalars to complex
if np.isscalar(varlist[i]):
varlist[i] = np.array([varlist[i]], upcast_type)[0]
else:
# convert sparse and dense mats to complex
try:
if varlist[i].dtype != upcast_type:
varlist[i] = varlist[i].astype(upcast_type)
except AttributeError:
warn('Failed to cast in to_type')
pass
return varlist
def get_diagonal(A, norm_eq=False, inv=False):
""" Return the diagonal or inverse of diagonal for
A, (A.H A) or (A A.H)
Parameters
----------
A : {dense or sparse matrix}
e.g. array, matrix, csr_matrix, ...
norm_eq : {0, 1, 2}
0 ==> D = diag(A)
1 ==> D = diag(A.H A)
2 ==> D = diag(A A.H)
inv : {True, False}
If True, D = 1.0/D
Returns
-------
diagonal, D, of appropriate system
Notes
-----
This function is especially useful for its fast methods
of obtaining diag(A A.H) and diag(A.H A). Dinv is zero
wherever D is zero
Examples
--------
>>> from pyamg.util.utils import get_diagonal
>>> from pyamg.gallery import poisson
>>> A = poisson( (5,), format='csr' )
>>> D = get_diagonal(A)
>>> print D
[ 2. 2. 2. 2. 2.]
>>> D = get_diagonal(A, norm_eq=1, inv=True)
>>> print D
[ 0.2 0.16666667 0.16666667 0.16666667 0.2 ]
"""
# if not isspmatrix(A):
if not (isspmatrix_csr(A) or isspmatrix_csc(A) or isspmatrix_bsr(A)):
warn('Implicit conversion to sparse matrix')
A = csr_matrix(A)
# critical to sort the indices of A
A.sort_indices()
if norm_eq == 1:
# This transpose involves almost no work, use csr data structures as
# csc, or vice versa
At = A.T
D = (At.multiply(At.conjugate()))*np.ones((At.shape[0],))
elif norm_eq == 2:
D = (A.multiply(A.conjugate()))*np.ones((A.shape[0],))
else:
D = A.diagonal()
if inv:
Dinv = np.zeros_like(D)
mask = (D != 0.0)
Dinv[mask] = 1.0 / D[mask]
return Dinv
else:
return D
def get_block_diag(A, blocksize, inv_flag=True):
"""
Return the block diagonal of A, in array form
Parameters
----------
A : csr_matrix
assumed to be square
blocksize : int
square block size for the diagonal
inv_flag : bool
if True, return the inverse of the block diagonal
Returns
-------
block_diag : array
block diagonal of A in array form,
array size is (A.shape[0]/blocksize, blocksize, blocksize)
Examples
--------
>>> from scipy import arange
>>> from scipy.sparse import csr_matrix
>>> from pyamg.util import get_block_diag
>>> A = csr_matrix(arange(36).reshape(6,6))
>>> block_diag_inv = get_block_diag(A, blocksize=2, inv_flag=False)
>>> print block_diag_inv
[[[ 0. 1.]
[ 6. 7.]]
<BLANKLINE>
[[ 14. 15.]
[ 20. 21.]]
<BLANKLINE>
[[ 28. 29.]
[ 34. 35.]]]
>>> block_diag_inv = get_block_diag(A, blocksize=2, inv_flag=True)
"""
if not isspmatrix(A):
raise TypeError('Expected sparse matrix')
if A.shape[0] != A.shape[1]:
raise ValueError("Expected square matrix")
if sp.mod(A.shape[0], blocksize) != 0:
raise ValueError("blocksize and A.shape must be compatible")
# If the block diagonal of A already exists, return that
if hasattr(A, 'block_D_inv') and inv_flag:
if (A.block_D_inv.shape[1] == blocksize) and\
(A.block_D_inv.shape[2] == blocksize) and \
(A.block_D_inv.shape[0] == A.shape[0]/blocksize):
return A.block_D_inv
elif hasattr(A, 'block_D') and (not inv_flag):
if (A.block_D.shape[1] == blocksize) and\
(A.block_D.shape[2] == blocksize) and \
(A.block_D.shape[0] == A.shape[0]/blocksize):
return A.block_D
# Convert to BSR
if not isspmatrix_bsr(A):
A = bsr_matrix(A, blocksize=(blocksize, blocksize))
if A.blocksize != (blocksize, blocksize):
A = A.tobsr(blocksize=(blocksize, blocksize))
# Peel off block diagonal by extracting block entries from the now BSR
# matrix A
A = A.asfptype()
block_diag = sp.zeros((A.shape[0]/blocksize, blocksize, blocksize),
dtype=A.dtype)
AAIJ = (sp.arange(1, A.indices.shape[0]+1), A.indices, A.indptr)
shape = (A.shape[0]/blocksize, A.shape[0]/blocksize)
diag_entries = csr_matrix(AAIJ, shape=shape).diagonal()
diag_entries -= 1
nonzero_mask = (diag_entries != -1)
diag_entries = diag_entries[nonzero_mask]
if diag_entries.shape != (0,):
block_diag[nonzero_mask, :, :] = A.data[diag_entries, :, :]
if inv_flag:
# Invert each block
if block_diag.shape[1] < 7:
# This specialized routine lacks robustness for large matrices
pyamg.amg_core.pinv_array(block_diag, block_diag.shape[0],
block_diag.shape[1], 'T')
else:
pinv_array(block_diag)
A.block_D_inv = block_diag
else:
A.block_D = block_diag
return block_diag
def amalgamate(A, blocksize):
"""
Amalgamate matrix A
Parameters
----------
A : csr_matrix
Matrix to amalgamate
blocksize : int
blocksize to use while amalgamating
Returns
-------
A_amal : csr_matrix
        Amalgamated matrix A. First, A is converted to BSR with a square
        blocksize, then a CSR matrix of ones is returned using the resulting
        BSR indptr and indices
Notes
-----
inverse operation of UnAmal for square matrices
Examples
--------
>>> from numpy import array
>>> from scipy.sparse import csr_matrix
>>> from pyamg.util.utils import amalgamate
>>> row = array([0,0,1])
>>> col = array([0,2,1])
>>> data = array([1,2,3])
>>> A = csr_matrix( (data,(row,col)), shape=(4,4) )
>>> A.todense()
matrix([[1, 0, 2, 0],
[0, 3, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
>>> amalgamate(A,2).todense()
matrix([[ 1., 1.],
[ 0., 0.]])
"""
if blocksize == 1:
return A
elif sp.mod(A.shape[0], blocksize) != 0:
raise ValueError("Incompatible blocksize")
A = A.tobsr(blocksize=(blocksize, blocksize))
A.sort_indices()
subI = (np.ones(A.indices.shape), A.indices, A.indptr)
shape = (A.shape[0]/A.blocksize[0], A.shape[1]/A.blocksize[1])
return csr_matrix(subI, shape=shape)
def UnAmal(A, RowsPerBlock, ColsPerBlock):
"""
    Unamalgamate a CSR matrix A into blocks of 1's.  This operation replaces
    each entry of A with ones(RowsPerBlock, ColsPerBlock); i.e., it is
    equivalent to setting all of A's nonzeros to 1 and then taking the
    Kronecker product of A with ones(RowsPerBlock, ColsPerBlock).
Parameters
----------
A : csr_matrix
Amalgamted matrix
RowsPerBlock : int
Give A blocks of size (RowsPerBlock, ColsPerBlock)
ColsPerBlock : int
Give A blocks of size (RowsPerBlock, ColsPerBlock)
Returns
-------
A : bsr_matrix
Returns A.data[:] = 1, followed by a Kronecker product of A and
ones(RowsPerBlock, ColsPerBlock)
Examples
--------
>>> from numpy import array
>>> from scipy.sparse import csr_matrix
>>> from pyamg.util.utils import UnAmal
>>> row = array([0,0,1,2,2,2])
>>> col = array([0,2,2,0,1,2])
>>> data = array([1,2,3,4,5,6])
>>> A = csr_matrix( (data,(row,col)), shape=(3,3) )
>>> A.todense()
matrix([[1, 0, 2],
[0, 0, 3],
[4, 5, 6]])
>>> UnAmal(A,2,2).todense()
matrix([[ 1., 1., 0., 0., 1., 1.],
[ 1., 1., 0., 0., 1., 1.],
[ 0., 0., 0., 0., 1., 1.],
[ 0., 0., 0., 0., 1., 1.],
[ 1., 1., 1., 1., 1., 1.],
[ 1., 1., 1., 1., 1., 1.]])
"""
data = np.ones((A.indices.shape[0], RowsPerBlock, ColsPerBlock))
blockI = (data, A.indices, A.indptr)
shape = (RowsPerBlock*A.shape[0], ColsPerBlock*A.shape[1])
return bsr_matrix(blockI, shape=shape)
def print_table(table, title='', delim='|', centering='center', col_padding=2,
header=True, headerchar='-'):
"""
Print a table from a list of lists representing the rows of a table
Parameters
----------
table : list
list of lists, e.g. a table with 3 columns and 2 rows could be
[ ['0,0', '0,1', '0,2'], ['1,0', '1,1', '1,2'] ]
title : string
Printed centered above the table
delim : string
character to delimit columns
centering : {'left', 'right', 'center'}
chooses justification for columns
col_padding : int
number of blank spaces to add to each column
header : {True, False}
Does the first entry of table contain column headers?
headerchar : {string}
character to separate column headers from rest of table
Returns
-------
string representing table that's ready to be printed
Notes
-----
The string for the table will have correctly justified columns
with extra padding added into each column entry to ensure columns align.
The characters to delimit the columns can be user defined. This
should be useful for printing convergence data from tests.
Examples
--------
>>> from pyamg.util.utils import print_table
>>> table = [ ['cos(0)', 'cos(pi/2)', 'cos(pi)'], ['0.0', '1.0', '0.0'] ]
>>> table1 = print_table(table) # string to print
>>> table2 = print_table(table, delim='||')
>>> table3 = print_table(table, headerchar='*')
>>> table4 = print_table(table, col_padding=6, centering='left')
"""
table_str = '\n'
# sometimes, the table will be passed in as (title, table)
if isinstance(table, tuple):
title = table[0]
table = table[1]
# Calculate each column's width
colwidths = []
for i in range(len(table)):
# extend colwidths for row i
for k in range(len(table[i]) - len(colwidths)):
colwidths.append(-1)
# Update colwidths if table[i][j] is wider than colwidth[j]
for j in range(len(table[i])):
if len(table[i][j]) > colwidths[j]:
colwidths[j] = len(table[i][j])
# Factor in extra column padding
for i in range(len(colwidths)):
colwidths[i] += col_padding
# Total table width
ttwidth = sum(colwidths) + len(delim)*(len(colwidths)-1)
# Print Title
if len(title) > 0:
title = title.split("\n")
for i in range(len(title)):
table_str += str.center(title[i], ttwidth) + '\n'
table_str += "\n"
# Choose centering scheme
centering = centering.lower()
if centering == 'center':
centering = str.center
if centering == 'right':
centering = str.rjust
if centering == 'left':
centering = str.ljust
if header:
# Append Column Headers
for elmt, elmtwidth in zip(table[0], colwidths):
table_str += centering(str(elmt), elmtwidth) + delim
if table[0] != []:
table_str = table_str[:-len(delim)] + '\n'
# Append Header Separator
# Total Column Width Total Col Delimiter Widths
if len(headerchar) == 0:
headerchar = ' '
table_str += headerchar *\
int(sp.ceil(float(ttwidth)/float(len(headerchar)))) + '\n'
table = table[1:]
for row in table:
for elmt, elmtwidth in zip(row, colwidths):
table_str += centering(str(elmt), elmtwidth) + delim
if row != []:
table_str = table_str[:-len(delim)] + '\n'
else:
table_str += '\n'
return table_str
def hierarchy_spectrum(mg, filter=True, plot=False):
"""
Examine a multilevel hierarchy's spectrum
Parameters
----------
    mg : {pyamg multilevel hierarchy}
e.g. generated with smoothed_aggregation_solver(...) or
ruge_stuben_solver(...)
Returns
-------
(1) table to standard out detailing the spectrum of each level in mg
(2) if plot==True, a sequence of plots in the complex plane of the
spectrum at each level
Notes
-----
This can be useful for troubleshooting and when examining how your
problem's nature changes from level to level
Examples
--------
>>> from pyamg import smoothed_aggregation_solver
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import hierarchy_spectrum
>>> A = poisson( (1,), format='csr' )
>>> ml = smoothed_aggregation_solver(A)
>>> hierarchy_spectrum(ml)
<BLANKLINE>
Level min(re(eig)) max(re(eig)) num re(eig) < 0 num re(eig) > 0 cond_2(A)
---------------------------------------------------------------------------
0 2.000 2.000 0 1 1.00e+00
<BLANKLINE>
<BLANKLINE>
Level min(im(eig)) max(im(eig)) num im(eig) < 0 num im(eig) > 0 cond_2(A)
---------------------------------------------------------------------------
0 0.000 0.000 0 0 1.00e+00
<BLANKLINE>
"""
real_table = [['Level', 'min(re(eig))', 'max(re(eig))', 'num re(eig) < 0',
'num re(eig) > 0', 'cond_2(A)']]
imag_table = [['Level', 'min(im(eig))', 'max(im(eig))', 'num im(eig) < 0',
'num im(eig) > 0', 'cond_2(A)']]
for i in range(len(mg.levels)):
A = mg.levels[i].A.tocsr()
if filter is True:
# Filter out any zero rows and columns of A
A.eliminate_zeros()
nnz_per_row = A.indptr[0:-1] - A.indptr[1:]
nonzero_rows = (nnz_per_row != 0).nonzero()[0]
A = A.tocsc()
nnz_per_col = A.indptr[0:-1] - A.indptr[1:]
nonzero_cols = (nnz_per_col != 0).nonzero()[0]
nonzero_rowcols = sp.union1d(nonzero_rows, nonzero_cols)
A = np.mat(A.todense())
A = A[nonzero_rowcols, :][:, nonzero_rowcols]
else:
A = np.mat(A.todense())
e = eigvals(A)
c = cond(A)
lambda_min = min(sp.real(e))
lambda_max = max(sp.real(e))
num_neg = max(e[sp.real(e) < 0.0].shape)
num_pos = max(e[sp.real(e) > 0.0].shape)
real_table.append([str(i), ('%1.3f' % lambda_min),
('%1.3f' % lambda_max),
str(num_neg), str(num_pos), ('%1.2e' % c)])
lambda_min = min(sp.imag(e))
lambda_max = max(sp.imag(e))
num_neg = max(e[sp.imag(e) < 0.0].shape)
num_pos = max(e[sp.imag(e) > 0.0].shape)
imag_table.append([str(i), ('%1.3f' % lambda_min),
('%1.3f' % lambda_max),
str(num_neg), str(num_pos), ('%1.2e' % c)])
if plot:
import pylab
pylab.figure(i+1)
pylab.plot(sp.real(e), sp.imag(e), 'kx')
handle = pylab.title('Level %d Spectrum' % i)
handle.set_fontsize(19)
handle = pylab.xlabel('real(eig)')
handle.set_fontsize(17)
handle = pylab.ylabel('imag(eig)')
handle.set_fontsize(17)
    print(print_table(real_table))
    print(print_table(imag_table))
if plot:
pylab.show()
def Coord2RBM(numNodes, numPDEs, x, y, z):
"""
Convert 2D or 3D coordinates into Rigid body modes for use as near
nullspace modes in elasticity AMG solvers
Parameters
----------
numNodes : int
Number of nodes
    numPDEs : int
Number of dofs per node
x,y,z : array_like
Coordinate vectors
Returns
-------
rbm : matrix
A matrix of size (numNodes*numPDEs) x (1 | 6) containing the 6 rigid
body modes
Examples
--------
>>> import numpy as np
>>> from pyamg.util.utils import Coord2RBM
>>> a = np.array([0,1,2])
>>> Coord2RBM(3,6,a,a,a)
matrix([[ 1., 0., 0., 0., 0., -0.],
[ 0., 1., 0., -0., 0., 0.],
[ 0., 0., 1., 0., -0., 0.],
[ 0., 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 1., 0.],
[ 0., 0., 0., 0., 0., 1.],
[ 1., 0., 0., 0., 1., -1.],
[ 0., 1., 0., -1., 0., 1.],
[ 0., 0., 1., 1., -1., 0.],
[ 0., 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 1., 0.],
[ 0., 0., 0., 0., 0., 1.],
[ 1., 0., 0., 0., 2., -2.],
[ 0., 1., 0., -2., 0., 2.],
[ 0., 0., 1., 2., -2., 0.],
[ 0., 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 1., 0.],
[ 0., 0., 0., 0., 0., 1.]])
"""
# check inputs
if(numPDEs == 1):
numcols = 1
elif((numPDEs == 3) or (numPDEs == 6)):
numcols = 6
else:
raise ValueError("Coord2RBM(...) only supports 1, 3 or 6 PDEs per\
spatial location,i.e. numPDEs = [1 | 3 | 6].\
You've entered " + str(numPDEs) + ".")
if((max(x.shape) != numNodes) or
(max(y.shape) != numNodes) or
(max(z.shape) != numNodes)):
raise ValueError("Coord2RBM(...) requires coordinate vectors of equal\
length. Length must be numNodes = " + str(numNodes))
# if( (min(x.shape) != 1) or (min(y.shape) != 1) or (min(z.shape) != 1) ):
# raise ValueError("Coord2RBM(...) requires coordinate vectors that are
# (numNodes x 1) or (1 x numNodes).")
# preallocate rbm
rbm = np.mat(np.zeros((numNodes*numPDEs, numcols)))
for node in range(numNodes):
dof = node*numPDEs
if(numPDEs == 1):
rbm[node] = 1.0
if(numPDEs == 6):
for ii in range(3, 6): # lower half = [ 0 I ]
for jj in range(0, 6):
if(ii == jj):
rbm[dof+ii, jj] = 1.0
else:
rbm[dof+ii, jj] = 0.0
if((numPDEs == 3) or (numPDEs == 6)):
for ii in range(0, 3): # upper left = [ I ]
for jj in range(0, 3):
if(ii == jj):
rbm[dof+ii, jj] = 1.0
else:
rbm[dof+ii, jj] = 0.0
for ii in range(0, 3): # upper right = [ Q ]
for jj in range(3, 6):
if(ii == (jj-3)):
rbm[dof+ii, jj] = 0.0
else:
if((ii+jj) == 4):
rbm[dof+ii, jj] = z[node]
elif((ii+jj) == 5):
rbm[dof+ii, jj] = y[node]
elif((ii+jj) == 6):
rbm[dof+ii, jj] = x[node]
else:
rbm[dof+ii, jj] = 0.0
ii = 0
jj = 5
rbm[dof+ii, jj] *= -1.0
ii = 1
jj = 3
rbm[dof+ii, jj] *= -1.0
ii = 2
jj = 4
rbm[dof+ii, jj] *= -1.0
return rbm
def relaxation_as_linear_operator(method, A, b):
"""
Create a linear operator that applies a relaxation method for the
given right-hand-side
Parameters
----------
    method : {tuple or string}
Relaxation descriptor: Each tuple must be of the form ('method','opts')
where 'method' is the name of a supported smoother, e.g., gauss_seidel,
and 'opts' a dict of keyword arguments to the smoother, e.g., opts =
{'sweep':symmetric}. If string, must be that of a supported smoother,
        e.g., gauss_seidel.
    A : {sparse matrix}
        Sparse matrix that the relaxation method is applied to
    b : {array}
        Right-hand side used for each relaxation sweep
Returns
-------
linear operator that applies the relaxation method to a vector for a
fixed right-hand-side, b.
Notes
-----
This method is primarily used to improve B during the aggregation setup
phase. Here b = 0, and each relaxation call can improve the quality of B,
especially near the boundaries.
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import relaxation_as_linear_operator
>>> import numpy as np
>>> A = poisson((100,100), format='csr') # matrix
>>> B = np.ones((A.shape[0],1)) # Candidate vector
>>> b = np.zeros((A.shape[0])) # RHS
>>> relax = relaxation_as_linear_operator('gauss_seidel', A, b)
>>> B = relax*B
"""
from pyamg import relaxation
from scipy.sparse.linalg.interface import LinearOperator
import pyamg.multilevel
def unpack_arg(v):
if isinstance(v, tuple):
return v[0], v[1]
else:
return v, {}
# setup variables
accepted_methods = ['gauss_seidel', 'block_gauss_seidel', 'sor',
'gauss_seidel_ne', 'gauss_seidel_nr', 'jacobi',
'block_jacobi', 'richardson', 'schwarz',
'strength_based_schwarz']
b = np.array(b, dtype=A.dtype)
fn, kwargs = unpack_arg(method)
lvl = pyamg.multilevel_solver.level()
lvl.A = A
# Retrieve setup call from relaxation.smoothing for this relaxation method
    if fn not in accepted_methods:
raise NameError("invalid relaxation method: ", fn)
try:
setup_smoother = getattr(relaxation.smoothing, 'setup_' + fn)
    except AttributeError:
raise NameError("invalid presmoother method: ", fn)
# Get relaxation routine that takes only (A, x, b) as parameters
relax = setup_smoother(lvl, **kwargs)
# Define matvec
def matvec(x):
xcopy = x.copy()
relax(A, xcopy, b)
return xcopy
return LinearOperator(A.shape, matvec, dtype=A.dtype)
def filter_operator(A, C, B, Bf, BtBinv=None):
"""
Filter the matrix A according to the matrix graph of C,
while ensuring that the new, filtered A satisfies: A_new*B = Bf.
    Parameters
    ----------
    A : {csr_matrix, bsr_matrix}
n x m matrix to filter
C : {csr_matrix, bsr_matrix}
n x m matrix representing the couplings in A to keep
B : {array}
m x k array of near nullspace vectors
Bf : {array}
n x k array of near nullspace vectors to place in span(A)
BtBinv : {None, array}
3 dimensional array such that,
BtBinv[i] = pinv(B_i.H Bi), and B_i is B restricted
to the neighborhood (with respect to the matrix graph
of C) of dof of i. If None is passed in, this array is
computed internally.
Returns
-------
A : sparse matrix updated such that sparsity structure of A now matches
that of C, and that the relationship A*B = Bf holds.
Notes
-----
This procedure allows for certain important modes (i.e., Bf) to be placed
in span(A) by way of row-wise l2-projections that enforce the relationship
A*B = Bf. This is useful for maintaining certain modes (e.g., the
constant) in the span of prolongation.
Examples
--------
>>> from numpy import ones, array
>>> from scipy.sparse import csr_matrix
>>> from pyamg.util.utils import filter_operator
>>> A = array([ [1.,1,1],[1,1,1],[0,1,0],[0,1,0],[0,0,1],[0,0,1]])
>>> C = array([ [1.,1,0],[1,1,0],[0,1,0],[0,1,0],[0,0,1],[0,0,1]])
>>> B = ones((3,1))
>>> Bf = ones((6,1))
>>> filter_operator(csr_matrix(A), csr_matrix(C), B, Bf).todense()
matrix([[ 0.5, 0.5, 0. ],
[ 0.5, 0.5, 0. ],
[ 0. , 1. , 0. ],
[ 0. , 1. , 0. ],
[ 0. , 0. , 1. ],
[ 0. , 0. , 1. ]])
Notes
-----
This routine is primarily used in
pyamg.aggregation.smooth.energy_prolongation_smoother, where it is used to
generate a suitable initial guess for the energy-minimization process, when
root-node style SA is used. Essentially, the tentative prolongator, T, is
processed by this routine to produce fine-grid nullspace vectors when
multiplying coarse-grid nullspace vectors, i.e., T*B = Bf. This is
possible for any arbitrary vectors B and Bf, so long as the sparsity
structure of T is rich enough.
When generating initial guesses for root-node style prolongation operators,
this function is usually called before pyamg.uti.utils.scale_T
"""
# First preprocess the parameters
Nfine = A.shape[0]
if A.shape[0] != C.shape[0]:
raise ValueError('A and C must be the same size')
if A.shape[1] != C.shape[1]:
raise ValueError('A and C must be the same size')
if isspmatrix_bsr(C):
isBSR = True
ColsPerBlock = C.blocksize[1]
RowsPerBlock = C.blocksize[0]
Nnodes = Nfine/RowsPerBlock
if not isspmatrix_bsr(A):
raise ValueError('A and C must either both be CSR or BSR')
elif (ColsPerBlock != A.blocksize[1]) or\
(RowsPerBlock != A.blocksize[0]):
raise ValueError('A and C must have same BSR blocksizes')
elif isspmatrix_csr(C):
isBSR = False
ColsPerBlock = 1
RowsPerBlock = 1
Nnodes = Nfine/RowsPerBlock
if not isspmatrix_csr(A):
raise ValueError('A and C must either both be CSR or BSR')
else:
raise ValueError('A and C must either both be CSR or BSR')
if len(Bf.shape) == 1:
Bf = Bf.reshape(-1, 1)
if Bf.shape[0] != A.shape[0]:
raise ValueError('A and Bf must have the same first dimension')
if len(B.shape) == 1:
B = B.reshape(-1, 1)
if B.shape[0] != A.shape[1]:
raise ValueError('A and B must have matching dimensions such\
that A*B is computable')
if B.shape[1] != Bf.shape[1]:
raise ValueError('B and Bf must have the same second\
dimension')
else:
NullDim = B.shape[1]
if A.dtype == int:
A.data = np.array(A.data, dtype=float)
if B.dtype == int:
B.data = np.array(B.data, dtype=float)
if Bf.dtype == int:
Bf.data = np.array(Bf.data, dtype=float)
if (A.dtype != B.dtype) or (A.dtype != Bf.dtype):
raise TypeError('A, B and Bf must of the same dtype')
# First, preprocess some values for filtering. Construct array of
# inv(Bi'Bi), where Bi is B restricted to row i's sparsity pattern in
# C. This array is used multiple times in Satisfy_Constraints(...).
if BtBinv is None:
BtBinv = compute_BtBinv(B, C)
# Filter A according to C's matrix graph
C = C.copy()
C.data[:] = 1
A = A.multiply(C)
# add explicit zeros to A wherever C is nonzero, but A is zero
A = A.tocoo()
C = C.tocoo()
A.data = np.hstack((np.zeros(C.data.shape, dtype=A.dtype), A.data))
A.row = np.hstack((C.row, A.row))
A.col = np.hstack((C.col, A.col))
if isBSR:
A = A.tobsr((RowsPerBlock, ColsPerBlock))
else:
A = A.tocsr()
# Calculate difference between A*B and Bf
diff = A*B - Bf
# Right multiply each row i of A with
# A_i <--- A_i - diff_i*inv(B_i.T B_i)*Bi.T
# where A_i, and diff_i denote restriction to just row i, and B_i denotes
    # restriction to multiple rows corresponding to the allowed nz's for
# row i in A_i. A_i also represents just the nonzeros for row i.
pyamg.amg_core.satisfy_constraints_helper(RowsPerBlock, ColsPerBlock,
Nnodes, NullDim,
np.conjugate(np.ravel(B)),
np.ravel(diff),
np.ravel(BtBinv), A.indptr,
A.indices, np.ravel(A.data))
A.eliminate_zeros()
return A
def scale_T(T, P_I, I_F):
'''
Helper function that scales T with a right multiplication by a block
diagonal inverse, so that T is the identity at C-node rows.
Parameters
----------
T : {bsr_matrix}
Tentative prolongator, with square blocks in the BSR data structure,
and a non-overlapping block-diagonal structure
P_I : {bsr_matrix}
Interpolation operator that carries out only simple injection from the
coarse grid to fine grid Cpts nodes
I_F : {bsr_matrix}
Identity operator on Fpts, i.e., the action of this matrix zeros
out entries in a vector at all Cpts, leaving Fpts untouched
Returns
-------
T : {bsr_matrix}
Tentative prolongator scaled to be identity at C-pt nodes
Examples
--------
>>> from scipy.sparse import csr_matrix, bsr_matrix
>>> from scipy import matrix, array
>>> from pyamg.util.utils import scale_T
>>> T = matrix([[ 1.0, 0., 0. ],
... [ 0.5, 0., 0. ],
... [ 0. , 1., 0. ],
... [ 0. , 0.5, 0. ],
... [ 0. , 0., 1. ],
... [ 0. , 0., 0.25 ]])
>>> P_I = matrix([[ 0., 0., 0. ],
... [ 1., 0., 0. ],
... [ 0., 1., 0. ],
... [ 0., 0., 0. ],
... [ 0., 0., 0. ],
... [ 0., 0., 1. ]])
>>> I_F = matrix([[ 1., 0., 0., 0., 0., 0.],
... [ 0., 0., 0., 0., 0., 0.],
... [ 0., 0., 0., 0., 0., 0.],
... [ 0., 0., 0., 1., 0., 0.],
... [ 0., 0., 0., 0., 1., 0.],
... [ 0., 0., 0., 0., 0., 0.]])
>>> scale_T(bsr_matrix(T), bsr_matrix(P_I), bsr_matrix(I_F)).todense()
matrix([[ 2. , 0. , 0. ],
[ 1. , 0. , 0. ],
[ 0. , 1. , 0. ],
[ 0. , 0.5, 0. ],
[ 0. , 0. , 4. ],
[ 0. , 0. , 1. ]])
Notes
-----
This routine is primarily used in
pyamg.aggregation.smooth.energy_prolongation_smoother, where it is used to
generate a suitable initial guess for the energy-minimization process, when
root-node style SA is used. This function, scale_T, takes an existing
tentative prolongator and ensures that it injects from the coarse-grid to
fine-grid root-nodes.
When generating initial guesses for root-node style prolongation operators,
this function is usually called after pyamg.uti.utils.filter_operator
This function assumes that the eventual coarse-grid nullspace vectors
equal coarse-grid injection applied to the fine-grid nullspace vectors.
'''
if not isspmatrix_bsr(T):
raise TypeError('Expected BSR matrix T')
elif T.blocksize[0] != T.blocksize[1]:
raise TypeError('Expected BSR matrix T with square blocks')
if not isspmatrix_bsr(P_I):
raise TypeError('Expected BSR matrix P_I')
elif P_I.blocksize[0] != P_I.blocksize[1]:
raise TypeError('Expected BSR matrix P_I with square blocks')
if not isspmatrix_bsr(I_F):
raise TypeError('Expected BSR matrix I_F')
elif I_F.blocksize[0] != I_F.blocksize[1]:
raise TypeError('Expected BSR matrix I_F with square blocks')
if (I_F.blocksize[0] != P_I.blocksize[0]) or\
(I_F.blocksize[0] != T.blocksize[0]):
raise TypeError('Expected identical blocksize in I_F, P_I and T')
# Only do if we have a non-trivial coarse-grid
if P_I.nnz > 0:
# Construct block diagonal inverse D
D = P_I.T*T
if D.nnz > 0:
# changes D in place
pinv_array(D.data)
# Scale T to be identity at root-nodes
T = T*D
# Ensure coarse-grid injection
T = I_F*T + P_I
return T
def get_Cpt_params(A, Cnodes, AggOp, T):
''' Helper function that returns a dictionary of sparse matrices and arrays
which allow us to easily operate on Cpts and Fpts separately.
Parameters
----------
A : {csr_matrix, bsr_matrix}
Operator
Cnodes : {array}
Array of all root node indices. This is an array of nodal indices,
not degree-of-freedom indices. If the blocksize of T is 1, then
nodal indices and degree-of-freedom indices coincide.
AggOp : {csr_matrix}
Aggregation operator corresponding to A
T : {bsr_matrix}
Tentative prolongator based on AggOp
Returns
-------
Dictionary containing these parameters:
P_I : {bsr_matrix}
Interpolation operator that carries out only simple injection from the
coarse grid to fine grid Cpts nodes
I_F : {bsr_matrix}
Identity operator on Fpts, i.e., the action of this matrix zeros
out entries in a vector at all Cpts, leaving Fpts untouched
I_C : {bsr_matrix}
Identity operator on Cpts nodes, i.e., the action of this matrix zeros
out entries in a vector at all Fpts, leaving Cpts untouched
Cpts : {array}
An array of all root node dofs, corresponding to the F/C splitting
Fpts : {array}
An array of all non root node dofs, corresponding to the F/C splitting
Example
-------
>>> from numpy import array
>>> from pyamg.util.utils import get_Cpt_params
>>> from pyamg.gallery import poisson
>>> from scipy.sparse import csr_matrix, bsr_matrix
>>> A = poisson((10,), format='csr')
>>> Cpts = array([3, 7])
>>> AggOp = ([[ 1., 0.], [ 1., 0.],
... [ 1., 0.], [ 1., 0.],
... [ 1., 0.], [ 0., 1.],
... [ 0., 1.], [ 0., 1.],
... [ 0., 1.], [ 0., 1.]])
>>> AggOp = csr_matrix(AggOp)
>>> T = AggOp.copy().tobsr()
>>> params = get_Cpt_params(A, Cpts, AggOp, T)
>>> params['P_I'].todense()
matrix([[ 0., 0.],
[ 0., 0.],
[ 0., 0.],
[ 1., 0.],
[ 0., 0.],
[ 0., 0.],
[ 0., 0.],
[ 0., 1.],
[ 0., 0.],
[ 0., 0.]])
Notes
-----
The principal calling routine is
aggregation.smooth.energy_prolongation_smoother,
which uses the Cpt_param dictionary for root-node style
prolongation smoothing
'''
if not isspmatrix_bsr(A) and not isspmatrix_csr(A):
raise TypeError('Expected BSR or CSR matrix A')
if not isspmatrix_csr(AggOp):
raise TypeError('Expected CSR matrix AggOp')
if not isspmatrix_bsr(T):
raise TypeError('Expected BSR matrix T')
if T.blocksize[0] != T.blocksize[1]:
raise TypeError('Expected square blocksize for BSR matrix T')
if A.shape[0] != A.shape[1]:
raise TypeError('Expected square matrix A')
if T.shape[0] != A.shape[0]:
raise TypeError('Expected compatible dimensions for T and A,\
T.shape[0] = A.shape[0]')
if Cnodes.shape[0] != AggOp.shape[1]:
if AggOp.shape[1] > 1:
raise TypeError('Number of columns in AggOp must equal number\
of Cnodes')
if isspmatrix_bsr(A) and A.blocksize[0] > 1:
# Expand the list of Cpt nodes to a list of Cpt dofs
blocksize = A.blocksize[0]
Cpts = np.repeat(blocksize*Cnodes, blocksize)
for k in range(1, blocksize):
Cpts[range(k, Cpts.shape[0], blocksize)] += k
else:
blocksize = 1
Cpts = Cnodes
Cpts = np.array(Cpts, dtype=int)
# More input checking
if Cpts.shape[0] != T.shape[1]:
if T.shape[1] > blocksize:
raise ValueError('Expected number of Cpts to match T.shape[1]')
if blocksize != T.blocksize[0]:
raise ValueError('Expected identical blocksize in A and T')
if AggOp.shape[0] != T.shape[0]/blocksize:
raise ValueError('Number of rows in AggOp must equal number of\
fine-grid nodes')
# Create two maps, one for F points and one for C points
ncoarse = T.shape[1]
I_C = eye(A.shape[0], A.shape[1], format='csr')
I_F = I_C.copy()
I_F.data[Cpts] = 0.0
I_F.eliminate_zeros()
I_C = I_C - I_F
I_C.eliminate_zeros()
# Find Fpts, the complement of Cpts
Fpts = I_F.indices.copy()
# P_I only injects from Cpts on the coarse grid to the fine grid, but
    # because of its later uses, it must have the CSC indices ordered as
# in Cpts
if I_C.nnz > 0:
indices = Cpts.copy()
indptr = np.arange(indices.shape[0]+1)
else:
indices = np.zeros((0,), dtype=T.indices.dtype)
indptr = np.zeros((ncoarse+1,), dtype=T.indptr.dtype)
P_I = csc_matrix((I_C.data.copy(), indices, indptr),
shape=(I_C.shape[0], ncoarse))
P_I = P_I.tobsr(T.blocksize)
# Use same blocksize as A
if isspmatrix_bsr(A):
I_C = I_C.tobsr(A.blocksize)
I_F = I_F.tobsr(A.blocksize)
else:
I_C = I_C.tobsr(blocksize=(1, 1))
I_F = I_F.tobsr(blocksize=(1, 1))
return {'P_I': P_I, 'I_F': I_F, 'I_C': I_C, 'Cpts': Cpts, 'Fpts': Fpts}
def compute_BtBinv(B, C):
''' Helper function that creates inv(B_i.T B_i) for each block row i in C,
where B_i is B restricted to the sparsity pattern of block row i.
Parameters
----------
B : {array}
(M,k) array, typically near-nullspace modes for coarse grid, i.e., B_c.
C : {csr_matrix, bsr_matrix}
Sparse NxM matrix, whose sparsity structure (i.e., matrix graph)
is used to determine BtBinv.
Returns
-------
BtBinv : {array}
BtBinv[i] = inv(B_i.T B_i), where B_i is B restricted to the nonzero
pattern of block row i in C.
Example
-------
>>> from numpy import array
>>> from scipy.sparse import bsr_matrix
>>> from pyamg.util.utils import compute_BtBinv
>>> T = array([[ 1., 0.],
... [ 1., 0.],
... [ 0., .5],
... [ 0., .25]])
>>> T = bsr_matrix(T)
>>> B = array([[1.],[2.]])
>>> compute_BtBinv(B, T)
array([[[ 1. ]],
<BLANKLINE>
[[ 1. ]],
<BLANKLINE>
[[ 0.25]],
<BLANKLINE>
[[ 0.25]]])
Notes
-----
The principal calling routines are
aggregation.smooth.energy_prolongation_smoother, and
util.utils.filter_operator.
BtBinv is used in the prolongation smoothing process that incorporates B
into the span of prolongation with row-wise projection operators. It is
these projection operators that BtBinv is part of.
'''
if not isspmatrix_bsr(C) and not isspmatrix_csr(C):
raise TypeError('Expected bsr_matrix or csr_matrix for C')
if C.shape[1] != B.shape[0]:
raise TypeError('Expected matching dimensions such that C*B')
# Problem parameters
if isspmatrix_bsr(C):
ColsPerBlock = C.blocksize[1]
RowsPerBlock = C.blocksize[0]
else:
ColsPerBlock = 1
RowsPerBlock = 1
Ncoarse = C.shape[1]
Nfine = C.shape[0]
NullDim = B.shape[1]
Nnodes = Nfine/RowsPerBlock
# Construct BtB
BtBinv = np.zeros((Nnodes, NullDim, NullDim), dtype=B.dtype)
BsqCols = sum(range(NullDim+1))
Bsq = np.zeros((Ncoarse, BsqCols), dtype=B.dtype)
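    # Bsq holds the entrywise products conj(B[:, i]) * B[:, j] for i <= j, one
    # column per (i, j) pair; amg_core.calc_BtB reads these columns to assemble
    # each block row's B_i.H B_i, which is then (pseudo)inverted below.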
counter = 0
for i in range(NullDim):
for j in range(i, NullDim):
Bsq[:, counter] = np.conjugate(np.ravel(np.asarray(B[:, i]))) * \
np.ravel(np.asarray(B[:, j]))
counter = counter + 1
# This specialized C-routine calculates (B.T B) for each row using Bsq
pyamg.amg_core.calc_BtB(NullDim, Nnodes, ColsPerBlock,
np.ravel(np.asarray(Bsq)),
BsqCols, np.ravel(np.asarray(BtBinv)),
C.indptr, C.indices)
# Invert each block of BtBinv, noting that amg_core.calc_BtB(...) returns
# values in column-major form, thus necessitating the deep transpose
# This is the old call to a specialized routine, but lacks robustness
# pyamg.amg_core.pinv_array(np.ravel(BtBinv), Nnodes, NullDim, 'F')
BtBinv = BtBinv.transpose((0, 2, 1)).copy()
pinv_array(BtBinv)
return BtBinv
def eliminate_diag_dom_nodes(A, C, theta=1.02):
''' Helper function that eliminates diagonally dominant rows and cols from A
in the separate matrix C. This is useful because it eliminates nodes in C
which we don't want coarsened. These eliminated nodes in C just become
the rows and columns of the identity.
Parameters
----------
A : {csr_matrix, bsr_matrix}
Sparse NxN matrix
C : {csr_matrix}
Sparse MxM matrix, where M is the number of nodes in A. M=N if A
is CSR or is BSR with blocksize 1. Otherwise M = N/blocksize.
theta : {float}
        determines the diagonal dominance threshold
Returns
-------
C : {csr_matrix}
C updated such that the rows and columns corresponding to diagonally
dominant rows in A have been eliminated and replaced with rows and
columns of the identity.
Notes
-----
Diagonal dominance is defined as
|| (e_i, A) - a_ii ||_1 < theta a_ii
that is, the 1-norm of the off diagonal elements in row i must be less than
theta times the diagonal element.
Example
-------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import eliminate_diag_dom_nodes
>>> A = poisson( (4,), format='csr' )
>>> C = eliminate_diag_dom_nodes(A, A.copy(), 1.1)
>>> C.todense()
matrix([[ 1., 0., 0., 0.],
[ 0., 2., -1., 0.],
[ 0., -1., 2., 0.],
[ 0., 0., 0., 1.]])
'''
# Find the diagonally dominant rows in A.
A_abs = A.copy()
A_abs.data = np.abs(A_abs.data)
D_abs = get_diagonal(A_abs, norm_eq=0, inv=False)
diag_dom_rows = (D_abs > (theta*(A_abs*np.ones((A_abs.shape[0],),
                     dtype=A_abs.dtype) - D_abs)))
# Account for BSR matrices and translate diag_dom_rows from dofs to nodes
bsize = blocksize(A_abs)
if bsize > 1:
diag_dom_rows = np.array(diag_dom_rows, dtype=int)
diag_dom_rows = diag_dom_rows.reshape(-1, bsize)
diag_dom_rows = np.sum(diag_dom_rows, axis=1)
diag_dom_rows = (diag_dom_rows == bsize)
    # Replace these rows/cols in C with rows/cols of the identity.
I = eye(C.shape[0], C.shape[1], format='csr')
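    # I is the CSR identity, so I.data[i] is the diagonal entry of row i.
    # Zeroing those entries and forming I*C*I wipes the rows and columns of C
    # belonging to diagonally dominant rows of A; the lines below then put ones
    # back on the diagonal for exactly those rows.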
I.data[diag_dom_rows] = 0.0
C = I*C*I
I.data[diag_dom_rows] = 1.0
I.data[np.where(diag_dom_rows == 0)[0]] = 0.0
C = C + I
del A_abs
return C
def remove_diagonal(S):
""" Removes the diagonal of the matrix S
Parameters
----------
S : csr_matrix
Square matrix
Returns
-------
S : csr_matrix
Strength matrix with the diagonal removed
Notes
-----
This is needed by all the splitting routines which operate on matrix graphs
with an assumed zero diagonal
Example
-------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import remove_diagonal
>>> A = poisson( (4,), format='csr' )
>>> C = remove_diagonal(A)
>>> C.todense()
matrix([[ 0., -1., 0., 0.],
[-1., 0., -1., 0.],
[ 0., -1., 0., -1.],
[ 0., 0., -1., 0.]])
"""
if not isspmatrix_csr(S):
raise TypeError('expected csr_matrix')
if S.shape[0] != S.shape[1]:
raise ValueError('expected square matrix, shape=%s' % (S.shape,))
S = coo_matrix(S)
mask = S.row != S.col
S.row = S.row[mask]
S.col = S.col[mask]
S.data = S.data[mask]
return S.tocsr()
def scale_rows_by_largest_entry(S):
""" Scale each row in S by it's largest in magnitude entry
Parameters
----------
S : csr_matrix
Returns
-------
S : csr_matrix
        Each row has been scaled by its largest-in-magnitude entry
Example
-------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import scale_rows_by_largest_entry
>>> A = poisson( (4,), format='csr' )
>>> A.data[1] = 5.0
>>> A = scale_rows_by_largest_entry(A)
>>> A.todense()
matrix([[ 0.4, 1. , 0. , 0. ],
[-0.5, 1. , -0.5, 0. ],
[ 0. , -0.5, 1. , -0.5],
[ 0. , 0. , -0.5, 1. ]])
"""
if not isspmatrix_csr(S):
raise TypeError('expected csr_matrix')
# Scale S by the largest magnitude entry in each row
largest_row_entry = np.zeros((S.shape[0],), dtype=S.dtype)
pyamg.amg_core.maximum_row_value(S.shape[0], largest_row_entry,
S.indptr, S.indices, S.data)
largest_row_entry[largest_row_entry != 0] =\
1.0 / largest_row_entry[largest_row_entry != 0]
S = scale_rows(S, largest_row_entry, copy=True)
return S
def levelize_strength_or_aggregation(to_levelize, max_levels, max_coarse):
"""
Helper function to preprocess the strength and aggregation parameters
passed to smoothed_aggregation_solver and rootnode_solver.
Parameters
----------
to_levelize : {string, tuple, list}
Parameter to preprocess, i.e., levelize and convert to a level-by-level
list such that entry i specifies the parameter at level i
max_levels : int
Defines the maximum number of levels considered
max_coarse : int
Defines the maximum coarse grid size allowed
Returns
-------
(max_levels, max_coarse, to_levelize) : tuple
New max_levels and max_coarse values and then the parameter list
to_levelize, such that entry i specifies the parameter choice at level
i. max_levels and max_coarse are returned, because they may be updated
if strength or aggregation set a predefined coarsening and possibly
change these values.
Notes
    -----
This routine is needed because the user will pass in a parameter option
such as smooth='jacobi', or smooth=['jacobi', None], and this option must
be "levelized", or converted to a list of length max_levels such that entry
[i] in that list is the parameter choice for level i.
The parameter choice in to_levelize can be a string, tuple or list. If
it is a string or tuple, then that option is assumed to be the
    parameter setting at every level.  If to_levelize is initially a list and
    the length of the list is less than max_levels, the last entry in the
list defines that parameter for all subsequent levels.
Examples
--------
>>> from pyamg.util.utils import levelize_strength_or_aggregation
>>> strength = ['evolution', 'classical']
>>> levelize_strength_or_aggregation(strength, 4, 10)
(4, 10, ['evolution', 'classical', 'classical'])
"""
if isinstance(to_levelize, tuple):
if to_levelize[0] == 'predefined':
to_levelize = [to_levelize]
max_levels = 2
max_coarse = 0
else:
to_levelize = [to_levelize for i in range(max_levels-1)]
elif isinstance(to_levelize, str):
if to_levelize == 'predefined':
raise ValueError('predefined to_levelize requires a user-provided\
CSR matrix representing strength or aggregation\
i.e., (\'predefined\', {\'C\' : CSR_MAT}).')
else:
to_levelize = [to_levelize for i in range(max_levels-1)]
elif isinstance(to_levelize, list):
if isinstance(to_levelize[-1], tuple) and\
(to_levelize[-1][0] == 'predefined'):
# to_levelize is a list that ends with a predefined operator
max_levels = len(to_levelize) + 1
max_coarse = 0
else:
            # to_levelize is a list that __doesn't__ end with 'predefined'
if len(to_levelize) < max_levels-1:
mlz = max_levels - 1 - len(to_levelize)
toext = [to_levelize[-1] for i in range(mlz)]
to_levelize.extend(toext)
elif to_levelize is None:
to_levelize = [(None, {}) for i in range(max_levels-1)]
else:
raise ValueError('invalid to_levelize')
return max_levels, max_coarse, to_levelize
def levelize_smooth_or_improve_candidates(to_levelize, max_levels):
"""
Helper function to preprocess the smooth and improve_candidates
parameters passed to smoothed_aggregation_solver and rootnode_solver.
Parameters
----------
to_levelize : {string, tuple, list}
Parameter to preprocess, i.e., levelize and convert to a level-by-level
list such that entry i specifies the parameter at level i
max_levels : int
Defines the maximum number of levels considered
Returns
-------
to_levelize : list
The parameter list such that entry i specifies the parameter choice
at level i.
Notes
    -----
This routine is needed because the user will pass in a parameter option
such as smooth='jacobi', or smooth=['jacobi', None], and this option must
be "levelized", or converted to a list of length max_levels such that entry
[i] in that list is the parameter choice for level i.
The parameter choice in to_levelize can be a string, tuple or list. If
it is a string or tuple, then that option is assumed to be the
    parameter setting at every level.  If to_levelize is initially a list and
    the length of the list is less than max_levels, the last entry in the
list defines that parameter for all subsequent levels.
Examples
--------
>>> from pyamg.util.utils import levelize_smooth_or_improve_candidates
>>> improve_candidates = ['gauss_seidel', None]
>>> levelize_smooth_or_improve_candidates(improve_candidates, 4)
['gauss_seidel', None, None, None]
"""
if isinstance(to_levelize, tuple) or isinstance(to_levelize, str):
to_levelize = [to_levelize for i in range(max_levels)]
elif isinstance(to_levelize, list):
if len(to_levelize) < max_levels:
mlz = max_levels - len(to_levelize)
toext = [to_levelize[-1] for i in range(mlz)]
to_levelize.extend(toext)
elif to_levelize is None:
to_levelize = [(None, {}) for i in range(max_levels)]
return to_levelize
# from functools import partial, update_wrapper
# def dispatcher(name_to_handle):
# def dispatcher(arg):
# if isinstance(arg,tuple):
# fn,opts = arg[0],arg[1]
# else:
# fn,opts = arg,{}
#
# if fn in name_to_handle:
# # convert string into function handle
# fn = name_to_handle[fn]
# #elif isinstance(fn, type(numpy.ones)):
# # pass
# elif callable(fn):
# # if fn is itself a function handle
# pass
# else:
# raise TypeError('Expected function')
#
# wrapped = partial(fn, **opts)
# update_wrapper(wrapped, fn)
#
# return wrapped
#
# return dispatcher
| huahbo/pyamg | pyamg/util/utils.py | Python | mit | 65,500 | 0.000031 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
VERSION = '0.0.50'
| sameerparekh/pants | src/python/pants/version.py | Python | apache-2.0 | 320 | 0.003125 |
#!/usr/bin/env python
def iterate_list():
for item in [ 1, 2, 3 ]:
yield item
def identity(object):
return object
def simple_callback(callback, value):
return callback(value)
def simple_generator(callback):
output = []
for i in callback():
output.append(i)
return output
def named_args(arg1, arg2):
return [arg1, arg2]
def expects_tuple(tvalue):
return isinstance(tvalue, tuple)
| newcontext/rubypython | spec/python_helpers/basics.py | Python | mit | 409 | 0.036675 |
from .fields import JSONField, JSONCharField # noqa
| bradjasper/django-jsonfield | jsonfield/__init__.py | Python | mit | 53 | 0 |
#! /usr/bin/env python
import rospy
import math
import numpy
import cv2
from cv_bridge import CvBridge
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import Image
from std_msgs.msg import Header
from multiprocessing import Pool
from multiprocessing import cpu_count
class Line:
def __init__(self, points):
assert isinstance(points, list)
self.points = points
class SeaGoatRosClient:
def __init__(self):
self.publisher = rospy.Publisher('VisionScan', LaserScan)
self.subscriber = rospy.Subscriber('ImageArray', Image, self.image_callback)
rospy.init_node('SeaGoatRosClient')
self.r = rospy.Rate(15)
self.vision_raw_scan = numpy.array([[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [0, 0, 0, 0]])
#Init lines intersect
self.range_max = 0
self.angle_max = math.radians(180.0)
self.angle_increment = math.radians(0.5)
self.number_lines = int(self.angle_max/self.angle_increment)+1
self.init = False
self.max_pixel_dist = 0
self.pool = Pool(cpu_count()/2)
self.resolution = 3.0
self.image_height_centimeter = 300
#self._init_lines()
self.tasks = list()
self.id = 0
print self.vision_raw_scan
def publish_loop(self):
while not rospy.is_shutdown():
vision_scan = self.convert_array_to_laser_scan(self.vision_raw_scan)
if vision_scan is not None:
self.publisher.publish(vision_scan)
self.r.sleep()
def convert_array_to_laser_scan(self, vision_raw_scan):
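        # Cast the precomputed rays over the (grayscale) obstacle image and
        # fill a LaserScan whose ranges are the distances, in meters, to the
        # first occupied pixel along each beam (0.0 when nothing is hit).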
if vision_raw_scan.size < 100:
return None
header = Header()
header.frame_id = "vision_scan"
#header.stamp = time()
laser_scan = LaserScan()
laser_scan.angle_min = 0.0
laser_scan.angle_max = self.angle_max
laser_scan.angle_increment = self.angle_increment
laser_scan.range_min = 0.0
laser_scan.range_max = self.range_max
#laser_scan.ranges = [0]*360
image_size = vision_raw_scan.shape
if len(image_size) == 3:
vision_raw_scan = cv2.cvtColor(vision_raw_scan, cv2.COLOR_BGR2GRAY)
image_size = vision_raw_scan.shape
if self.init is False:
self._init_lines(image_size)
self.init = True
tasks = list()
for line in range(self.number_lines):
tasks.append((vision_raw_scan, self.lines[line]))
laser_scan.ranges = self.pool.map(_getObstacle, tasks)
#pool.close()
laser_scan.header = header
#laser_scan.scan_time = 1.0/5.0
#laser_scan.time_increment = 1.0/5.0
return laser_scan
def image_callback(self, msg):
image = CvBridge().imgmsg_to_cv2(msg)
self.vision_raw_scan = numpy.asanyarray(image)
#cv2.imshow("Image", image)
#cv2.waitKey(10)
def _init_lines(self, image_size):
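        # Precompute one ray per scan angle: starting from a fixed origin near
        # the bottom-center of the image, step outward by `resolution` pixels
        # and store (x, y, distance_in_cm) for each sample point.  A trailing
        # (-1, -1, -1) entry marks the end of rays shorter than max_points;
        # _getObstacle later walks these tables instead of redoing the trig.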
origin_x = int(image_size[1] / 2)
origin_y = (image_size[0] - 1)-50
current_angle = 0
self.centimeter_by_pixel= float(self.image_height_centimeter)/float(image_size[0])
self.max_points = int(math.sqrt(math.pow(image_size[0], 2) + math.pow(image_size[1], 2)))
self.lines = numpy.ndarray((self.number_lines, self.max_points, 3), dtype=int)
for line_id in range(self.number_lines):
current_x = origin_x
current_y = origin_y
current_pixel_dist = 0
line = self.lines[line_id]
point_id = -1
while current_x < image_size[1] and current_y < image_size[0] and current_x >= 0 and current_y >= 0:
if (current_pixel_dist > 0):
point = line[point_id]
point[0] = current_x
point[1] = current_y
point[2] = int(current_pixel_dist*self.centimeter_by_pixel)
if point[2] > self.range_max:
self.range_max = point[2]
current_pixel_dist += self.resolution
current_x = int(current_pixel_dist * math.cos(current_angle)) + origin_x
current_y = int(current_pixel_dist * math.sin(-1 * current_angle)) + origin_y
point_id += 1
if point_id < self.max_points:
end_point = line[point_id]
end_point[0] = -1
end_point[1] = -1
end_point[2] = -1
#self.lines = self.lines + (line,)
#self.tasks.append(list([self.vision_raw_scan, line]))
current_angle += self.angle_increment
def _getObstacle(args):
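    # Walk one precomputed ray over the image and return the range, in meters,
    # of the first occupied pixel (the stored distances are in centimeters);
    # 0.0 means no obstacle was found along this beam.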
#line = lines[i]
image = args[0]
line = args[1]
for point_id in range(len(line)):
point = line[point_id]
if point[0] == -1 and point[1] == -1 and point[2] == -1:
break
if image[point[1]][point[0]] > 0:
return float(point[2])/100.0
return 0.0
if __name__ == '__main__':
sgrc = SeaGoatRosClient()
sgrc.publish_loop() | clubcapra/capra_seagoat | src/seagoat_ros_client/seagoat_ros_client.py | Python | gpl-3.0 | 5,086 | 0.004915 |
#!/usr/bin/env python
__author__ = 'Ben "TheX1le" Smith, Marfi'
__email__ = '[email protected]'
__website__= ''
__date__ = '04/26/2011'
__version__ = '2011.4.26'
__file__ = 'ouiParse.py'
__data__ = 'a class for dealing with the oui txt file'
"""
########################################
#
# This program and its support programs are free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation; version 2.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
#########################################
"""
import re
import sys
if sys.version_info[0] >= 3:
import requests
else:
import urllib
import os
import pdb
#this lib is crap and needs to be rewritten -Textile
if os.getenv('AIRGRAPH_HOME') is not None and os.path.isdir(os.getenv('AIRGRAPH_HOME')):
path=os.getenv('AIRGRAPH_HOME') + '/support/'
if not os.path.isdir(path):
try:
os.mkdir(path)
except:
raise Exception("Can't create destination directory (%s)!" % path)
elif os.path.isdir('./support/'):
path='./support/'
elif os.path.isdir('/usr/local/share/airgraph-ng/'):
path='/usr/local/share/airgraph-ng/'
elif os.path.isdir('/usr/share/airgraph-ng/'):
path='/usr/share/airgraph-ng/'
else:
raise Exception("Could not determine path, please, check your installation or set AIRGRAPH_HOME environment variable")
class macOUI_lookup:
"""
    A class for dealing with OUIs and determining device type
"""
def __init__(self, oui=False):
"""
        generate the lookup dictionaries used by the class
"""
#a poor fix where if we have no file it trys to download it
self.ouiTxtUrl = "http://standards-oui.ieee.org/oui.txt"
self.ouiTxt = oui
if not oui or not os.path.isfile(self.ouiTxt):
self.ouiUpdate()
self.ouiTxt = path + "oui.txt"
self.last_error = None
self.identDeviceDict(path + 'ouiDevice.txt')
self.identDeviceDictWhacMac(path + 'whatcDB.csv')
self.ouiRaw = self.ouiOpen(self.ouiTxt)
self.oui_company = self.ouiParse() #dict where oui's are the keys to company names
self.company_oui = self.companyParse() #dict where company name is the key to oui's
def compKeyChk(self,name):
"""
check for valid company name key
"""
compMatch = re.compile(name,re.I)
if name in self.company_oui:
return True
for key in list(self.company_oui.keys()):
if compMatch.search(key) is not None:
return True
return False
def ouiKeyChk(self,name):
"""
check for a valid oui prefix
"""
if name in self.oui_company:
return True
else:
return False
def lookup_OUI(self,mac):
"""
Lookup a oui and return the company name
"""
if self.ouiKeyChk(mac) is not False:
return self.oui_company[mac]
else:
return False
def lookup_company(self,companyLst):
"""
look up a company name and return their OUI's
"""
oui = []
if type(companyLst) is list:
for name in companyLst:
compMatch = re.compile(name,re.I)
if name in self.company_oui:
oui.extend(self.company_oui[name])
else:
for key in self.company_oui:
if compMatch.search(key) is not None:
oui.extend(self.company_oui[key])
elif type(companyLst) is str:
if companyLst in self.company_oui:
oui = self.company_oui[companyLst]
else:
compMatch = re.compile(companyLst,re.I)
for key in self.company_oui:
if compMatch.search(key) is not None:
oui.extend(self.company_oui[key]) #return the oui for that key
return oui
def ouiOpen(self,fname,flag='R'):
"""
open the file and read it in
flag denotes use of read or readlines
"""
try:
with open(fname, "r") as fid:
if flag == 'RL':
text = fid.readlines()
elif flag == 'R':
text = fid.read()
return text
except IOError:
return False
def ouiParse(self):
"""
generate a oui to company lookup dict
"""
HexOui= {}
Hex = re.compile('.*(hex).*')
#matches the following example "00-00-00 (hex)\t\tXEROX CORPORATION"
ouiLines = self.ouiRaw.split("\n")
#split each company into a list one company per position
for line in ouiLines:
if Hex.search(line) is not None:
lineList = Hex.search(line).group().replace("\t"," ").split(" ")
#return the matched text and build a list out of it
HexOui[lineList[0].replace("-",":")] = lineList[2].strip()
#build a dict in the format of mac:company name
return HexOui
def companyParse(self):
"""
generate a company to oui lookup dict
"""
company_oui = {}
for oui in self.oui_company:
if self.oui_company[oui] in company_oui:
company_oui[self.oui_company[oui]].append(oui)
else:
company_oui[self.oui_company[oui]] = [oui]
return company_oui
def ouiUpdate(self):
"""
Grab the oui txt file off the ieee.org website
"""
try:
print(("Getting OUI file from %s to %s" %(self.ouiTxtUrl, path)))
if sys.version_info[0] == 2:
                urllib.urlretrieve(self.ouiTxtUrl, path + "oui.txt")
else:
response = requests.get(self.ouiTxtUrl)
with open(path + "oui.txt", "wb") as file:
bytes_written = file.write(response.content)
print("Completed Successfully")
except Exception as error:
print(("Could not download file:\n %s\n Exiting airgraph-ng" %(error)))
sys.exit(0)
def identDeviceDict(self,fname):
"""
Create two dicts allowing device type lookup
one for oui to device and one from device to OUI group
"""
self.ouitodevice = {}
self.devicetooui = {}
data = self.ouiOpen(fname,'RL')
if data == False:
self.last_error = "Unable to open lookup file for parsing"
return False
for line in data:
dat = line.strip().split(',')
self.ouitodevice[dat[1]] = dat[0]
if dat[0] in list(self.devicetooui.keys()):
self.devicetooui[dat[0]].append(dat[1])
else:
self.devicetooui[dat[0]] = [dat[1]]
def identDeviceDictWhacMac(self,fname):
"""
        Create dicts allowing device type lookup from the whatmac DB:
        OUI to device (full and 3-byte prefix) and device to OUI group
"""
self.ouitodeviceWhatmac3 = {}
self.ouitodeviceWhatmac = {}
self.devicetoouiWhacmac = {}
data = self.ouiOpen(fname,'RL')
if data == False:
self.last_error = "Unble to open lookup file for parsing"
return False
for line in data:
dat = line.strip().split(',')
dat[0] = dat[0].upper()
self.ouitodeviceWhatmac[dat[0]] = dat[1]
self.ouitodeviceWhatmac3[dat[0][0:8]] = dat[1] # a db to support the 3byte lookup from whatmac
if dat[1] in list(self.devicetoouiWhacmac.keys()):
self.devicetoouiWhacmac[dat[1]].append(dat[0])
else:
self.devicetoouiWhacmac[dat[1]] = [dat[0]]
| creaktive/aircrack-ng | scripts/airgraph-ng/airgraphviz/libOuiParse.py | Python | gpl-2.0 | 8,152 | 0.008342 |
import sys, os, gzip, pickle
from xml.etree.ElementTree import ParseError
from traceback import print_tb
from multiprocessing import Pool # @UnresolvedImport
from pydmrs.components import RealPred, GPred
from pydmrs.core import ListDmrs as Dmrs
PROC = 50
def is_verb(pred):
# Ignore GPreds
if not isinstance(pred, RealPred):
return False
if pred.pos == 'v':
# For verbs in the lexicon, ignore modals
if pred.sense == 'modal':
return False
else:
return True
if pred.pos == 'u':
# For unknown words, use the PoS-tag
tag = pred.lemma.rsplit('/', 1)[-1]
if tag[0] == 'v':
return True
return False
def is_noun(pred):
# Assumes not a GPred
if pred.pos == 'n':
return True
if pred.pos == 'u':
# For unknown words, use the PoS-tag
tag = pred.lemma.rsplit('/', 1)[-1]
if tag[0] == 'n':
return True
return False
def find_sit(dmrs, node):
"""
    Find whether a node represents a situation
:param dmrs: a Dmrs object
:param node: a Node object
    :return: (verb, agent, patient), noun_only (True when every recorded
        argument is a RealPred noun), or None, None if no situation is found
"""
# Only consider verbal nodes
if not is_verb(node.pred):
return None, None
# Output of the form (verb, agent, patient)
output = [node.pred, None, None]
# Record if arguments are RealPreds
noun_only = True
# Look for both ARG1 and ARG2
for i in (1,2):
try: # See if the argument is there
arglink = dmrs.get_out(node.nodeid, 'ARG'+str(i)).pop()
except KeyError:
continue
# Get argument's pred
end = dmrs[arglink.end]
pred = end.pred
# Deal with different pred types
if type(pred) == RealPred:
# Ignore coordinations
if pred.pos == 'c':
continue
# Record the pred
output[i] = pred
# Record if it's not a noun
if not is_noun(pred):
noun_only = False
else:
# Note that this pred is not a RealPred
noun_only = False
# Ignore coordinations
if pred == GPred('implicit_conj'):
continue
# Record information about pronouns
elif pred == GPred('pron'):
pronstring = end.sortinfo['pers']
try:
pronstring += end.sortinfo['num']
except TypeError: # info is None
pass
try:
pronstring += end.sortinfo['gend']
except TypeError: # info is None
pass
output[i] = pronstring
elif pred == GPred('named'):
output[i] = end.carg
else:
output[i] = pred
# Check if an argument was found
if output[1] or output[2]:
return output, noun_only
else:
return None, None
def extract(xmlstring, sits, extra_sits, filename):
"""
Extract situations from a DMRS in XML form
:param xmlstring: the input XML
:param sits: the list of situations to append to
:param extra_sits: the list of extra situations (including GPreds) to append to
:param filename: the filename to log errors to
"""
try:
dmrs = Dmrs.loads_xml(xmlstring)
except ParseError as e: # badly formed XML
print("ParseError!")
with open('wikiwoods_extractcore.log', 'a') as f:
f.write(filename + ':\n' + xmlstring.decode() + '\n' + str(e) + '\n\n')
return None
# Look for situations
for n in dmrs.iter_nodes():
situation, realpred_only = find_sit(dmrs, n)
if situation:
if realpred_only:
sits.append(situation)
else:
extra_sits.append(situation)
# Directory of DMRSs, and directory to save triples
SOURCE = '/usr/groups/corpora/wikiwoods-1212-tmp/dmrs/'
TARGET = '/anfs/bigdisc/gete2/wikiwoods/core'
EXTRA = '/anfs/bigdisc/gete2/wikiwoods/core-extra'
if not os.path.exists(TARGET):
os.mkdir(TARGET)
if not os.path.exists(EXTRA):
os.mkdir(EXTRA)
def extract_file(filename):
"Extract all situations from a file"
newname = os.path.splitext(filename)[0]+'.pkl'
if os.path.exists(os.path.join(TARGET, newname)):
print('skipping '+filename)
return
try:
with gzip.open(os.path.join(SOURCE, filename),'rb') as f:
print(filename)
# List of situation triples
situations = []
extra_sits = []
# Each xml will span multiple lines,
# separated by an empty line
f.readline() # The first line is blank, for some reason
xml = b''
for line in f:
# Keep adding new lines until we get a blank line
if line != b'\n':
xml += line
else: # Once we've found a blank line, extract the DMRS
extract(xml, situations, extra_sits, filename)
# Reset the xml string
xml = b''
# If the file does not end with a blank line:
if xml != b'':
extract(xml, situations, extra_sits, filename)
# Save the triples in TARGET
with open(os.path.join(TARGET, newname), 'wb') as f:
pickle.dump(situations, f)
with open(os.path.join(EXTRA, newname), 'wb') as f:
pickle.dump(extra_sits, f)
except:
print("Error!")
with open('wikiwoods_extractcore.log', 'a') as f:
f.write(filename+'\n')
_, error, trace = sys.exc_info()
f.write(str(error)+'\n')
print_tb(trace, file=f)
f.write('\n\n')
# Process each file in SOURCE
all_files = sorted(os.listdir(SOURCE))
with Pool(PROC) as p:
p.map(extract_file, all_files)
| guyemerson/sem-func | src/preprocess/wikiwoods_extractcore.py | Python | mit | 6,001 | 0.002333 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, os, re
from frappe.utils import touch_file, encode, cstr
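# Illustrative call (hypothetical destination path):
#   make_boilerplate("/home/frappe/frappe-bench/apps", "library_management")
# prompts for the app metadata on stdin and then writes the app skeleton
# (hooks.py, setup.py, MANIFEST.in, modules.txt, config/ and public/ folders)
# under <dest>/<app_name>/.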
def make_boilerplate(dest, app_name):
if not os.path.exists(dest):
print "Destination directory does not exist"
return
# app_name should be in snake_case
app_name = frappe.scrub(app_name)
hooks = frappe._dict()
hooks.app_name = app_name
app_title = hooks.app_name.replace("_", " ").title()
for key in ("App Title (default: {0})".format(app_title),
"App Description", "App Publisher", "App Email",
"App Icon (default 'octicon octicon-file-directory')",
"App Color (default 'grey')",
"App License (default 'MIT')"):
hook_key = key.split(" (")[0].lower().replace(" ", "_")
hook_val = None
while not hook_val:
hook_val = cstr(raw_input(key + ": "))
if not hook_val:
defaults = {
"app_title": app_title,
"app_icon": "octicon octicon-file-directory",
"app_color": "grey",
"app_license": "MIT"
}
if hook_key in defaults:
hook_val = defaults[hook_key]
if hook_key=="app_name" and hook_val.lower().replace(" ", "_") != hook_val:
print "App Name must be all lowercase and without spaces"
hook_val = ""
elif hook_key=="app_title" and not re.match("^(?![\W])[^\d_\s][\w -]+$", hook_val, re.UNICODE):
print "App Title should start with a letter and it can only consist of letters, numbers, spaces and underscores"
hook_val = ""
hooks[hook_key] = hook_val
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, frappe.scrub(hooks.app_title)),
with_init=True)
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "templates"), with_init=True)
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "www"))
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "templates",
"pages"), with_init=True)
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "templates",
"includes"))
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "config"), with_init=True)
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "public",
"css"))
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "public",
"js"))
with open(os.path.join(dest, hooks.app_name, hooks.app_name, "__init__.py"), "w") as f:
f.write(encode(init_template))
with open(os.path.join(dest, hooks.app_name, "MANIFEST.in"), "w") as f:
f.write(encode(manifest_template.format(**hooks)))
with open(os.path.join(dest, hooks.app_name, ".gitignore"), "w") as f:
f.write(encode(gitignore_template.format(app_name = hooks.app_name)))
with open(os.path.join(dest, hooks.app_name, "setup.py"), "w") as f:
f.write(encode(setup_template.format(**hooks)))
with open(os.path.join(dest, hooks.app_name, "requirements.txt"), "w") as f:
f.write("frappe")
with open(os.path.join(dest, hooks.app_name, "README.md"), "w") as f:
f.write(encode("## {0}\n\n{1}\n\n#### License\n\n{2}".format(hooks.app_title,
hooks.app_description, hooks.app_license)))
with open(os.path.join(dest, hooks.app_name, "license.txt"), "w") as f:
f.write(encode("License: " + hooks.app_license))
with open(os.path.join(dest, hooks.app_name, hooks.app_name, "modules.txt"), "w") as f:
f.write(encode(hooks.app_title))
with open(os.path.join(dest, hooks.app_name, hooks.app_name, "hooks.py"), "w") as f:
f.write(encode(hooks_template.format(**hooks)))
touch_file(os.path.join(dest, hooks.app_name, hooks.app_name, "patches.txt"))
with open(os.path.join(dest, hooks.app_name, hooks.app_name, "config", "desktop.py"), "w") as f:
f.write(encode(desktop_template.format(**hooks)))
with open(os.path.join(dest, hooks.app_name, hooks.app_name, "config", "docs.py"), "w") as f:
f.write(encode(docs_template.format(**hooks)))
print "'{app}' created at {path}".format(app=app_name, path=os.path.join(dest, app_name))
manifest_template = """include MANIFEST.in
include requirements.txt
include *.json
include *.md
include *.py
include *.txt
recursive-include {app_name} *.css
recursive-include {app_name} *.csv
recursive-include {app_name} *.html
recursive-include {app_name} *.ico
recursive-include {app_name} *.js
recursive-include {app_name} *.json
recursive-include {app_name} *.md
recursive-include {app_name} *.png
recursive-include {app_name} *.py
recursive-include {app_name} *.svg
recursive-include {app_name} *.txt
recursive-exclude {app_name} *.pyc"""
init_template = """# -*- coding: utf-8 -*-
from __future__ import unicode_literals
__version__ = '0.0.1'
"""
hooks_template = """# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "{app_name}"
app_title = "{app_title}"
app_publisher = "{app_publisher}"
app_description = "{app_description}"
app_icon = "{app_icon}"
app_color = "{app_color}"
app_email = "{app_email}"
app_license = "{app_license}"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/{app_name}/css/{app_name}.css"
# app_include_js = "/assets/{app_name}/js/{app_name}.js"
# include js, css files in header of web template
# web_include_css = "/assets/{app_name}/css/{app_name}.css"
# web_include_js = "/assets/{app_name}/js/{app_name}.js"
# include js in page
# page_js = {{"page" : "public/js/file.js"}}
# include js in doctype views
# doctype_js = {{"doctype" : "public/js/doctype.js"}}
# doctype_list_js = {{"doctype" : "public/js/doctype_list.js"}}
# doctype_tree_js = {{"doctype" : "public/js/doctype_tree.js"}}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {{
# "Role": "home_page"
# }}
# Website user home page (by function)
# get_website_user_home_page = "{app_name}.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "{app_name}.install.before_install"
# after_install = "{app_name}.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "{app_name}.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {{
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }}
#
# has_permission = {{
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }}
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {{
# "*": {{
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }}
# }}
# Scheduled Tasks
# ---------------
# scheduler_events = {{
# "all": [
# "{app_name}.tasks.all"
# ],
# "daily": [
# "{app_name}.tasks.daily"
# ],
# "hourly": [
# "{app_name}.tasks.hourly"
# ],
# "weekly": [
# "{app_name}.tasks.weekly"
# ]
# "monthly": [
# "{app_name}.tasks.monthly"
# ]
# }}
# Testing
# -------
# before_tests = "{app_name}.install.before_tests"
# Overriding Whitelisted Methods
# ------------------------------
#
# override_whitelisted_methods = {{
# "frappe.desk.doctype.event.event.get_events": "{app_name}.event.get_events"
# }}
"""
desktop_template = """# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{{
"module_name": "{app_title}",
"color": "{app_color}",
"icon": "{app_icon}",
"type": "module",
"label": _("{app_title}")
}}
]
"""
setup_template = """# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from pip.req import parse_requirements
import re, ast
# get version from __version__ variable in {app_name}/__init__.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('{app_name}/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
requirements = parse_requirements("requirements.txt", session="")
setup(
name='{app_name}',
version=version,
description='{app_description}',
author='{app_publisher}',
author_email='{app_email}',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=[str(ir.req) for ir in requirements],
dependency_links=[str(ir._link) for ir in requirements if ir._link]
)
"""
gitignore_template = """.DS_Store
*.pyc
*.egg-info
*.swp
tags
{app_name}/docs/current"""
docs_template = '''"""
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/{app_name}"
# docs_base_url = "https://[org_name].github.io/{app_name}"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
context.brand_html = "{app_title}"
'''
| rohitwaghchaure/frappe | frappe/utils/boilerplate.py | Python | mit | 9,035 | 0.014721 |
from __future__ import annotations
import argparse
import dataclasses
from datetime import datetime, timedelta, timezone
from typing import NamedTuple, Optional, Tuple
from redbot.core.commands import BadArgument, Context
from .time_utils import parse_time, parse_timedelta
class NonNumeric(NamedTuple):
parsed: str
@classmethod
async def convert(cls, context: Context, argument: str):
if argument.isdigit():
raise BadArgument("Event names must contain at least 1 non-numeric value")
return cls(argument)
class NoExitParser(argparse.ArgumentParser):
def error(self, message):
raise BadArgument()
@dataclasses.dataclass()
class Schedule:
start: datetime
command: str
recur: Optional[timedelta] = None
quiet: bool = False
def to_tuple(self) -> Tuple[str, datetime, Optional[timedelta]]:
return self.command, self.start, self.recur
@classmethod
async def convert(cls, ctx: Context, argument: str):
start: datetime
command: Optional[str] = None
recur: Optional[timedelta] = None
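        # Text before a literal " -- " separator is the command to schedule; the
        # remainder is parsed below for flags. Without a separator, the command
        # comes from the positional "command" argument added further down.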
command, *arguments = argument.split(" -- ")
if arguments:
argument = " -- ".join(arguments)
else:
command = None
parser = NoExitParser(description="Scheduler event parsing", add_help=False)
parser.add_argument(
"-q", "--quiet", action="store_true", dest="quiet", default=False
)
parser.add_argument("--every", nargs="*", dest="every", default=[])
if not command:
parser.add_argument("command", nargs="*")
at_or_in = parser.add_mutually_exclusive_group()
at_or_in.add_argument("--start-at", nargs="*", dest="at", default=[])
at_or_in.add_argument("--start-in", nargs="*", dest="in", default=[])
try:
vals = vars(parser.parse_args(argument.split(" ")))
except Exception as exc:
raise BadArgument() from exc
if not (vals["at"] or vals["in"]):
raise BadArgument("You must provide one of `--start-in` or `--start-at`")
if not command and not vals["command"]:
raise BadArgument("You have to provide a command to run")
command = command or " ".join(vals["command"])
for delta in ("in", "every"):
if vals[delta]:
parsed = parse_timedelta(" ".join(vals[delta]))
if not parsed:
raise BadArgument("I couldn't understand that time interval")
if delta == "in":
start = datetime.now(timezone.utc) + parsed
else:
recur = parsed
if recur.total_seconds() < 60:
raise BadArgument(
"You can't schedule something to happen that frequently, "
"I'll get ratelimited."
)
if vals["at"]:
try:
start = parse_time(" ".join(vals["at"]))
except Exception:
raise BadArgument("I couldn't understand that starting time.") from None
return cls(command=command, start=start, recur=recur, quiet=vals["quiet"])
class TempMute(NamedTuple):
reason: Optional[str]
start: datetime
@classmethod
async def convert(cls, ctx: Context, argument: str):
start: datetime
reason: str
parser = NoExitParser(description="Scheduler event parsing", add_help=False)
parser.add_argument("reason", nargs="*")
at_or_in = parser.add_mutually_exclusive_group()
at_or_in.add_argument("--until", nargs="*", dest="until", default=[])
at_or_in.add_argument("--for", nargs="*", dest="for", default=[])
try:
vals = vars(parser.parse_args(argument.split()))
except Exception as exc:
raise BadArgument() from exc
if not (vals["until"] or vals["for"]):
raise BadArgument("You must provide one of `--until` or `--for`")
reason = " ".join(vals["reason"])
if vals["for"]:
parsed = parse_timedelta(" ".join(vals["for"]))
if not parsed:
raise BadArgument("I couldn't understand that time interval")
start = datetime.now(timezone.utc) + parsed
if vals["until"]:
try:
                start = parse_time(" ".join(vals["until"]))
except Exception:
raise BadArgument("I couldn't understand that unmute time.") from None
return cls(reason, start)
| mikeshardmind/SinbadCogs | scheduler/converters.py | Python | mit | 4,607 | 0.001954 |
import pytest
import responses
from document import Document
from scrapers.knox_tn_agendas_scraper import KnoxCoTNAgendaScraper
from . import common
from . import utils
class TestKnoxAgendaScraper(object):
session = None
page_str = ""
def test_get_docs_from_page(self):
scraper = KnoxCoTNAgendaScraper()
docs = scraper._get_docs_from_schedule(self.page_str)
assert len(docs) == 4
for doc in docs:
# All URLs should be absolute.
assert doc.url.startswith('https://')
actual_titles = [doc.title for doc in docs]
expected_titles = [
'June 28, 2017: BZA Agenda',
'June 26, 2017: Beer Board',
'June 19, 2017: Work Session',
'June 7, 2017: AGENDA COMMITTEE MEETING',
]
assert expected_titles == actual_titles
@responses.activate
def test_full_scraper(self):
self.session.query(Document).delete()
count = self.session.query(Document).count()
assert count == 0
responses.add(
responses.GET,
KnoxCoTNAgendaScraper.MEETING_SCHEDULE_URL,
body=self.page_str,
status=200,
match_querystring=True
)
scraper = KnoxCoTNAgendaScraper()
scraper.scrape(self.session)
docs = self.session.query(Document).all()
assert len(docs) == 4
expected_titles = {
'June 28, 2017: BZA Agenda',
'June 26, 2017: Beer Board',
'June 19, 2017: Work Session',
'June 7, 2017: AGENDA COMMITTEE MEETING',
}
for doc in docs:
assert doc.title in expected_titles
@classmethod
def setup_class(cls):
cls.session = common.Session()
with open(utils.get_abs_filename('knox-co-results-page.html'), 'r') as page:
cls.page_str = page.read()
@classmethod
def teardown_class(cls):
common.Session.remove()
def setup_method(self, test_method):
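        # Each test runs inside a nested transaction (SAVEPOINT) that teardown_method rolls back.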
self.session.begin_nested()
def teardown_method(self, test_method):
self.session.rollback()
| RagtagOpen/bidwire | bidwire/tests/test_knox_co_agenda_scraper.py | Python | mit | 2,136 | 0.000468 |
#!/bin/false
# This file is part of Espruino, a JavaScript interpreter for Microcontrollers
#
# Copyright (C) 2013 Gordon Williams <[email protected]>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ----------------------------------------------------------------------------------------
# Reads board information from boards/BOARDNAME.py - used by build_board_docs,
# build_pininfo, and build_platform_config
# ----------------------------------------------------------------------------------------
import subprocess;
import re;
import json;
import sys;
import os;
import importlib;
silent = os.getenv("SILENT");
if silent:
class Discarder(object):
def write(self, text):
pass # do nothing
# now discard everything coming out of stdout
sys.stdout = Discarder()
# http://stackoverflow.com/questions/4814970/subprocess-check-output-doesnt-seem-to-exist-python-2-6-5
if "check_output" not in dir( subprocess ):
def f(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd)
return output
subprocess.check_output = f
# Scans files for comments of the form /*JSON......*/
#
# Comments look like:
#
#/*JSON{ "type":"staticmethod|staticproperty|constructor|method|property|function|variable|class|library|idle|init|kill",
# // class = built-in class that does not require instantiation
# // library = built-in class that needs require('classname')
# // idle = function to run on idle regardless
# // init = function to run on initialisation
# // kill = function to run on deinitialisation
# "class" : "Double", "name" : "doubleToIntBits",
# "needs_parentName":true, // optional - if for a method, this makes the first 2 args parent+parentName (not just parent)
# "generate_full|generate|wrap" : "*(JsVarInt*)&x",
# "description" : " Convert the floating point value given into an integer representing the bits contained in it",
# "params" : [ [ "x" , "float|int|int32|bool|pin|JsVar|JsVarName|JsVarArray", "A floating point number"] ],
# // float - parses into a JsVarFloat which is passed to the function
# // int - parses into a JsVarInt which is passed to the function
# // int32 - parses into a 32 bit int
# // bool - parses into a boolean
# // pin - parses into a pin
# // JsVar - passes a JsVar* to the function (after skipping names)
# // JsVarArray - parses this AND ANY SUBSEQUENT ARGUMENTS into a JsVar of type JSV_ARRAY. THIS IS ALWAYS DEFINED, EVEN IF ZERO LENGTH. Currently it must be the only parameter
# "return" : ["int|float|JsVar", "The integer representation of x"],
# "return_object" : "ObjectName", // optional - used for tern's code analysis - so for example we can do hints for openFile(...).yyy
# "no_create_links":1 // optional - if this is set then hyperlinks are not created when this name is mentioned (good example = bit() )
# "not_real_object" : "anything", // optional - for classes, this means we shouldn't treat this as a built-in object, as internally it isn't stored in a JSV_OBJECT
# "prototype" : "Object", // optional - for classes, this is what their prototype is. It's particlarly helpful if not_real_object, because there is no prototype var in that case
# "check" : "jsvIsFoo(var)", // for classes - this is code that returns true if 'var' is of the given type
# "ifndef" : "SAVE_ON_FLASH", // if the given preprocessor macro is defined, don't implement this
# "ifdef" : "USE_LCD_FOO", // if the given preprocessor macro isn't defined, don't implement this
# "#if" : "A>2", // add a #if statement in the generated C file (ONLY if type==object)
#}*/
#
# description can be an array of strings as well as a simple string (in which case each element is separated by a newline),
# and adding ```sometext``` in the description surrounds it with HTML code tags
#
def get_jsondata(is_for_document, parseArgs = True, board = False):
scriptdir = os.path.dirname (os.path.realpath(__file__))
print("Script location "+scriptdir)
os.chdir(scriptdir+"/..")
jswraps = []
defines = []
if board and ("build" in board.info) and ("defines" in board.info["build"]):
for i in board.info["build"]["defines"]:
print("Got define from board: " + i);
defines.append(i)
if parseArgs and len(sys.argv)>1:
print("Using files from command line")
for i in range(1,len(sys.argv)):
arg = sys.argv[i]
if arg[0]=="-":
if arg[1]=="D":
defines.append(arg[2:])
elif arg[1]=="B":
board = importlib.import_module(arg[2:])
if "usart" in board.chip: defines.append("USART_COUNT="+str(board.chip["usart"]));
if "spi" in board.chip: defines.append("SPI_COUNT="+str(board.chip["spi"]));
if "i2c" in board.chip: defines.append("I2C_COUNT="+str(board.chip["i2c"]));
if "USB" in board.devices: defines.append("defined(USB)=True");
else: defines.append("defined(USB)=False");
elif arg[1]=="F":
"" # -Fxxx.yy in args is filename xxx.yy, which is mandatory for build_jswrapper.py
else:
print("Unknown command-line option")
exit(1)
else:
jswraps.append(arg)
else:
print("Scanning for jswrap.c files")
jswraps = subprocess.check_output(["find", ".", "-name", "jswrap*.c"]).strip().split("\n")
if len(defines)>1:
print("Got #DEFINES:")
for d in defines: print(" "+d)
jsondatas = []
for jswrap in jswraps:
# ignore anything from archives
if jswrap.startswith("./archives/"): continue
# now scan
print("Scanning "+jswrap)
code = open(jswrap, "r").read()
if is_for_document and "DO_NOT_INCLUDE_IN_DOCS" in code:
print("FOUND 'DO_NOT_INCLUDE_IN_DOCS' IN FILE "+jswrap)
continue
for comment in re.findall(r"/\*JSON.*?\*/", code, re.VERBOSE | re.MULTILINE | re.DOTALL):
charnumber = code.find(comment)
linenumber = 1+code.count("\n", 0, charnumber)
# Strip off /*JSON .. */ bit
comment = comment[6:-2]
endOfJson = comment.find("\n}")+2;
jsonstring = comment[0:endOfJson];
description = comment[endOfJson:].strip();
# print("Parsing "+jsonstring)
try:
jsondata = json.loads(jsonstring)
if len(description): jsondata["description"] = description;
jsondata["filename"] = jswrap
jsondata["include"] = jswrap[:-2]+".h"
jsondata["githublink"] = "https://github.com/espruino/Espruino/blob/master/"+jswrap+"#L"+str(linenumber)
dropped_prefix = "Dropped "
if "name" in jsondata: dropped_prefix += jsondata["name"]+" "
elif "class" in jsondata: dropped_prefix += jsondata["class"]+" "
drop = False
if not is_for_document:
if ("ifndef" in jsondata) and (jsondata["ifndef"] in defines):
print(dropped_prefix+" because of #ifndef "+jsondata["ifndef"])
drop = True
if ("ifdef" in jsondata) and not (jsondata["ifdef"] in defines):
print(dropped_prefix+" because of #ifdef "+jsondata["ifdef"])
drop = True
if ("#if" in jsondata):
expr = jsondata["#if"]
for defn in defines:
if defn.find('=')!=-1:
dname = defn[:defn.find('=')]
dkey = defn[defn.find('=')+1:]
expr = expr.replace(dname, dkey);
try:
r = eval(expr)
except:
print("WARNING: error evaluating '"+expr+"' - from '"+jsondata["#if"]+"'")
r = True
if not r:
print(dropped_prefix+" because of #if "+jsondata["#if"]+ " -> "+expr)
drop = True
if not drop:
jsondatas.append(jsondata)
except ValueError as e:
sys.stderr.write( "JSON PARSE FAILED for " + jsonstring + " - "+ str(e) + "\n")
exit(1)
except:
sys.stderr.write( "JSON PARSE FAILED for " + jsonstring + " - "+str(sys.exc_info()[0]) + "\n" )
exit(1)
print("Scanning finished.")
return jsondatas
# Takes the data from get_jsondata and restructures it in preparation for output as JS
#
# Results look like:,
#{
# "Pin": {
# "desc": [
# "This is the built-in class for Pins, such as D0,D1,LED1, or BTN",
# "You can call the methods on Pin, or you can use Wiring-style functions such as digitalWrite"
# ],
# "methods": {
# "read": {
# "desc": "Returns the input state of the pin as a boolean",
# "params": [],
# "return": [
# "bool",
# "Whether pin is a logical 1 or 0"
# ]
# },
# "reset": {
# "desc": "Sets the output state of the pin to a 0",
# "params": [],
# "return": []
# },
# ...
# },
# "props": {},
# "staticmethods": {},
# "staticprops": {}
# },
# "print": {
# "desc": "Print the supplied string",
# "return": []
# },
# ...
#}
#
def get_struct_from_jsondata(jsondata):
context = {"modules": {}}
def checkClass(details):
cl = details["class"]
if not cl in context:
context[cl] = {"type": "class", "methods": {}, "props": {}, "staticmethods": {}, "staticprops": {}, "desc": details.get("description", "")}
return cl
def addConstructor(details):
cl = checkClass(details)
context[cl]["constructor"] = {"params": details.get("params", []), "return": details.get("return", []), "desc": details.get("description", "")}
def addMethod(details, type = ""):
cl = checkClass(details)
context[cl][type + "methods"][details["name"]] = {"params": details.get("params", []), "return": details.get("return", []), "desc": details.get("description", "")}
def addProp(details, type = ""):
cl = checkClass(details)
context[cl][type + "props"][details["name"]] = {"return": details.get("return", []), "desc": details.get("description", "")}
def addFunc(details):
context[details["name"]] = {"type": "function", "return": details.get("return", []), "desc": details.get("description", "")}
def addObj(details):
context[details["name"]] = {"type": "object", "instanceof": details.get("instanceof", ""), "desc": details.get("description", "")}
def addLib(details):
context["modules"][details["class"]] = {"desc": details.get("description", "")}
def addVar(details):
return
for data in jsondata:
type = data["type"]
if type=="class":
checkClass(data)
elif type=="constructor":
addConstructor(data)
elif type=="method":
addMethod(data)
elif type=="property":
addProp(data)
elif type=="staticmethod":
addMethod(data, "static")
elif type=="staticproperty":
addProp(data, "static")
elif type=="function":
addFunc(data)
elif type=="object":
addObj(data)
elif type=="library":
addLib(data)
elif type=="variable":
addVar(data)
else:
print(json.dumps(data, sort_keys=True, indent=2))
return context
def get_includes_from_jsondata(jsondatas):
includes = []
for jsondata in jsondatas:
include = jsondata["include"]
if not include in includes:
includes.append(include)
return includes
def is_property(jsondata):
return jsondata["type"]=="property" or jsondata["type"]=="staticproperty" or jsondata["type"]=="variable"
def is_function(jsondata):
return jsondata["type"]=="function" or jsondata["type"]=="method"
def get_prefix_name(jsondata):
if jsondata["type"]=="event": return "event"
if jsondata["type"]=="constructor": return "constructor"
if jsondata["type"]=="function": return "function"
if jsondata["type"]=="method": return "function"
if jsondata["type"]=="variable": return "variable"
if jsondata["type"]=="property": return "property"
return ""
def get_ifdef_description(d):
if d=="SAVE_ON_FLASH": return "devices with low flash memory"
if d=="STM32F1": return "STM32F1 devices (including Espruino Board)"
if d=="USE_LCD_SDL": return "Linux with SDL support compiled in"
if d=="USE_TLS": return "devices with TLS and SSL support (Espruino Pico only)"
if d=="RELEASE": return "release builds"
if d=="LINUX": return "Linux-based builds"
if d=="USE_USB_HID": return "devices that support USB HID (Espruino Pico)"
if d=="USE_AES": return "devices that support AES (Espruino Pico, Espruino Wifi or Linux)"
if d=="USE_CRYPTO": return "devices that support Crypto Functionality (Espruino Pico, Espruino Wifi, Linux or ESP8266)"
print("WARNING: Unknown ifdef '"+d+"' in common.get_ifdef_description")
return d
def get_script_dir():
return os.path.dirname(os.path.realpath(__file__))
def get_version():
# Warning: the same release label derivation is also in the Makefile
scriptdir = get_script_dir()
jsutils = scriptdir+"/../src/jsutils.h"
version = re.compile("^.*JS_VERSION.*\"(.*)\"");
alt_release = os.getenv("ALT_RELEASE")
if alt_release == None:
# Default release labeling based on commits since last release tag
latest_release = subprocess.check_output('git tag 2>nul | grep RELEASE_ | sort | tail -1', shell=True).strip()
commits_since_release = subprocess.check_output('git log --oneline 2>nul '+latest_release.decode("utf-8")+'..HEAD | wc -l', shell=True).decode("utf-8").strip()
else:
# Alternate release labeling with fork name (in ALT_RELEASE env var) plus branch
# name plus commit SHA
sha = subprocess.check_output('git rev-parse --short HEAD 2>nul', shell=True).strip()
branch = subprocess.check_output('git name-rev --name-only HEAD 2>nul', shell=True).strip()
commits_since_release = alt_release + '_' + branch + '_' + sha
for line in open(jsutils):
match = version.search(line);
if (match != None):
v = match.group(1);
if commits_since_release=="0": return v
else: return v+"."+commits_since_release
return "UNKNOWN"
def get_name_or_space(jsondata):
if "name" in jsondata: return jsondata["name"]
return ""
def get_bootloader_size(board):
if board.chip["family"]=="STM32F4": return 16*1024; # 16kb Pages, so we have no choice
return 10*1024;
# On normal chips this is 0x00000000
# On boards with bootloaders it's generally + 10240
# On F401, because of the setup of pages we put the bootloader in the first 16k, then in the 16+16+16 we put the saved code, and then finally we but the binary somewhere else
def get_espruino_binary_address(board):
if "place_text_section" in board.chip:
return board.chip["place_text_section"]
if "bootloader" in board.info and board.info["bootloader"]==1:
return get_bootloader_size(board);
return 0;
def get_board_binary_name(board):
return board.info["binary_name"].replace("%v", get_version());
| redbear/Espruino | scripts/common.py | Python | mpl-2.0 | 16,565 | 0.020344 |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from marshmallow import fields, validate
import polyaxon_sdk
from polyaxon.contexts import refs as contexts_refs
from polyaxon.lifecycle import V1Statuses
from polyaxon.schemas.base import BaseCamelSchema, BaseConfig
class V1EventKind(polyaxon_sdk.V1EventKind):
events_statuses_mapping = {
polyaxon_sdk.V1EventKind.RUN_STATUS_CREATED: V1Statuses.CREATED,
polyaxon_sdk.V1EventKind.RUN_STATUS_RESUMING: V1Statuses.RESUMING,
polyaxon_sdk.V1EventKind.RUN_STATUS_ON_SCHEDULE: V1Statuses.ON_SCHEDULE,
polyaxon_sdk.V1EventKind.RUN_STATUS_COMPILED: V1Statuses.COMPILED,
polyaxon_sdk.V1EventKind.RUN_STATUS_QUEUED: V1Statuses.QUEUED,
polyaxon_sdk.V1EventKind.RUN_STATUS_SCHEDULED: V1Statuses.SCHEDULED,
polyaxon_sdk.V1EventKind.RUN_STATUS_STARTING: V1Statuses.STARTING,
polyaxon_sdk.V1EventKind.RUN_STATUS_RUNNING: V1Statuses.RUNNING,
polyaxon_sdk.V1EventKind.RUN_STATUS_PROCESSING: V1Statuses.PROCESSING,
polyaxon_sdk.V1EventKind.RUN_STATUS_STOPPING: V1Statuses.STOPPING,
polyaxon_sdk.V1EventKind.RUN_STATUS_FAILED: V1Statuses.FAILED,
polyaxon_sdk.V1EventKind.RUN_STATUS_STOPPED: V1Statuses.STOPPED,
polyaxon_sdk.V1EventKind.RUN_STATUS_SUCCEEDED: V1Statuses.SUCCEEDED,
polyaxon_sdk.V1EventKind.RUN_STATUS_SKIPPED: V1Statuses.SKIPPED,
polyaxon_sdk.V1EventKind.RUN_STATUS_WARNING: V1Statuses.WARNING,
polyaxon_sdk.V1EventKind.RUN_STATUS_UNSCHEDULABLE: V1Statuses.UNSCHEDULABLE,
polyaxon_sdk.V1EventKind.RUN_STATUS_UPSTREAM_FAILED: V1Statuses.UPSTREAM_FAILED,
polyaxon_sdk.V1EventKind.RUN_STATUS_RETRYING: V1Statuses.RETRYING,
polyaxon_sdk.V1EventKind.RUN_STATUS_UNKNOWN: V1Statuses.UNKNOWN,
polyaxon_sdk.V1EventKind.RUN_STATUS_DONE: V1Statuses.DONE,
}
class EventTriggerSchema(BaseCamelSchema):
kinds = fields.List(
fields.Str(validate=validate.OneOf(V1EventKind.allowable_values)),
required=True,
)
ref = fields.Str(required=True)
@staticmethod
def schema_config():
return V1EventTrigger
class V1EventTrigger(BaseConfig, contexts_refs.RefMixin, polyaxon_sdk.V1EventTrigger):
"""Events are an advanced triggering logic that users can take advantage of in addition to:
* Manual triggers via API/CLI/UI.
* Time-based triggers with schedules and crons.
* Upstream triggers with upstream runs or upstream ops in DAGs.
Events can be attached to an operation in the context of a DAG
to extend the simple trigger process,
this is generally important when the user defines a dependency between two operations
and needs a run to start as soon as
the upstream run generates an event instead of waiting until it reaches a final state.
For instance, a usual use-case is to start a tensorboard as soon as training starts.
In that case the downstream operation will watch for the `running` status.
    Events can also be attached to a single operation
to wait for an internal alert or external events,
for instance if a user integrates Polyaxon with Github,
they can trigger training as soon as Polyaxon is notified that a new git commit was created.
Polyaxon provides several internal and external events that users
can leverage to fully automate their usage of the platform:
* "run_status_created"
* "run_status_resuming"
* "run_status_compiled"
* "run_status_queued"
* "run_status_scheduled"
* "run_status_starting"
* "run_status_initializing"
* "run_status_running"
* "run_status_processing"
* "run_status_stopping"
* "run_status_failed"
* "run_status_stopped"
* "run_status_succeeded"
* "run_status_skipped"
* "run_status_warning"
* "run_status_unschedulable"
* "run_status_upstream_failed"
* "run_status_retrying"
* "run_status_unknown"
* "run_status_done"
* "run_approved_actor"
* "run_invalidated_actor"
* "run_new_artifacts"
* "connection_git_commit"
* "connection_dataset_version"
* "connection_registry_image"
* "alert_info"
* "alert_warning"
* "alert_critical"
* "model_version_new_metric"
* "project_custom_event"
* "org_custom_event"
Args:
kinds: List[str]
ref: str
> **Important**: Currently only events with prefix `run_status_*` are supported.
## YAML usage
```yaml
>>> events:
>>> ref: {{ ops.upstream-operation }}
>>> kinds: [run_status_running]
```
```yaml
>>> event:
>>> ref: {{ connections.git-repo-connection-name }}
>>> kinds: [connection_git_commit]
```
## Python usage
```python
>>> from polyaxon.polyflow import V1EventKind, V1EventTrigger
>>> event1 = V1EventTrigger(
>>> ref="{{ ops.upstream-operation }}",
    >>>     kinds=[V1EventKind.RUN_STATUS_RUNNING],
>>> )
>>> event2 = V1EventTrigger(
>>> ref="{{ connections.git-repo-connection-name }}",
    >>>     kinds=[V1EventKind.CONNECTION_GIT_COMMIT],
>>> )
```
## Fields
### kinds
The trigger event kinds to watch, if any event is detected the operation defining the `events`
section will be initiated.
```yaml
>>> event:
>>> kinds: [run_status_running, run_status_done]
```
> **Note**: Similar to trigger in DAGs, after an operation is initiated,
> it will still have to validate the rest of the Polyaxonfile,
> i.e. conditions, contexts, connections, ...
### ref
A valid reference that Polyaxon can resolve the objects that will send the events to watch for.
All supported events are prefixed with the object reference that can send such events.
The `run_*` events can be referenced both by `runs.UUID` or
`ops.OPERATION_NAME` if defined in the context of a DAG.
```yaml
>>> event:
>>> ref: ops.upstream_operation_name
```
"""
IDENTIFIER = "event_trigger"
SCHEMA = EventTriggerSchema
REDUCED_ATTRIBUTES = [
"ref",
]
| polyaxon/polyaxon | core/polyaxon/polyflow/events/__init__.py | Python | apache-2.0 | 6,731 | 0.001931 |
__all__ = ['Constraint', 'ConstraintGroup', 'TotalSumValueConstraint', 'UniqueValueConstraint']
from .constraint import Constraint
from .constraintgroup import ConstraintGroup
from .totalsumvalueconstraint import TotalSumValueConstraint
from .uniquevalueconstraint import UniqueValueConstraint
| JoostvanPinxten/ConstraintPuzzler | constraints/__init__.py | Python | mit | 296 | 0.006757 |
#!/usr/bin/env python
#
# Copyright (c) 2018, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from wpan import verify
import wpan
import time
#-----------------------------------------------------------------------------------------------------------------------
# Test description: Orphaned node attach through MLE Announcement
test_name = __file__[:-3] if __file__.endswith('.py') else __file__
print '-' * 120
print 'Starting \'{}\''.format(test_name)
def verify_channel(nodes, new_channel, wait_time=20):
"""
This function checks the channel on a given list of `nodes` and verifies that all nodes
switch to a given `new_channel` (as int) within certain `wait_time` (int and in seconds)
"""
start_time = time.time()
while not all([ (new_channel == int(node.get(wpan.WPAN_CHANNEL), 0)) for node in nodes ]):
if time.time() - start_time > wait_time:
print 'Took too long to switch to channel {} ({}>{} sec)'.format(new_channel, time.time() - start_time,
wait_time)
exit(1)
time.sleep(0.1)
#-----------------------------------------------------------------------------------------------------------------------
# Creating `wpan.Nodes` instances
router = wpan.Node()
c1 = wpan.Node()
c2 = wpan.Node()
all_nodes = [router, c1, c2]
#-----------------------------------------------------------------------------------------------------------------------
# Init all nodes
wpan.Node.init_all_nodes()
#-----------------------------------------------------------------------------------------------------------------------
# Build network topology
router.form('announce-tst', channel=11)
c1.join_node(router, node_type=wpan.JOIN_TYPE_SLEEPY_END_DEVICE)
c2.join_node(router, node_type=wpan.JOIN_TYPE_SLEEPY_END_DEVICE)
c1.set(wpan.WPAN_POLL_INTERVAL, '500')
c2.set(wpan.WPAN_POLL_INTERVAL, '500')
c1.set(wpan.WPAN_THREAD_DEVICE_MODE,'5')
c2.set(wpan.WPAN_THREAD_DEVICE_MODE,'5')
#-----------------------------------------------------------------------------------------------------------------------
# Test implementation
# Reset c2 and keep it in detached state
c2.set('Daemon:AutoAssociateAfterReset', 'false')
c2.reset();
# Switch the rest of network to channel 26
router.set(wpan.WPAN_CHANNEL_MANAGER_NEW_CHANNEL, '26')
verify_channel([router, c1], 26)
# Now re-enable c2 and verify that it does attach to router and is on channel 26
# c2 would go through the ML Announce recovery.
c2.set('Daemon:AutoAssociateAfterReset', 'true')
c2.reset();
verify(int(c2.get(wpan.WPAN_CHANNEL), 0) == 11)
# wait for 20s for c2 to be attached/associated
start_time = time.time()
wait_time = 20
while not c2.is_associated():
if time.time() - start_time > wait_time:
print 'Took too long to recover through ML Announce ({}>{} sec)'.format(time.time() - start_time, wait_time)
exit(1)
time.sleep(0.1)
# Check that c2 did attach and is on channel 26.
verify(int(c2.get(wpan.WPAN_CHANNEL), 0) == 26)
#-----------------------------------------------------------------------------------------------------------------------
# Test finished
wpan.Node.finalize_all_nodes()
print '\'{}\' passed.'.format(test_name)
| LeZhang2016/openthread | tests/toranj/test-603-channel-manager-announce-recovery.py | Python | bsd-3-clause | 4,783 | 0.005854 |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2016 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
| alirizakeles/zato | code/zato-scheduler/src/zato/scheduler/__init__.py | Python | gpl-3.0 | 238 | 0.004202 |
# -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
#
# Author: Doug Hellmann <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Access Control Lists (ACL's) control access the API server."""
from ceilometer.openstack.common import policy
from keystoneclient.middleware import auth_token
from oslo.config import cfg
_ENFORCER = None
OPT_GROUP_NAME = 'keystone_authtoken'
def register_opts(conf):
"""Register keystoneclient middleware options
"""
conf.register_opts(auth_token.opts,
group=OPT_GROUP_NAME)
auth_token.CONF = conf
register_opts(cfg.CONF)
def install(app, conf):
"""Install ACL check on application."""
return auth_token.AuthProtocol(app,
conf=dict(conf.get(OPT_GROUP_NAME)))
def get_limited_to(headers):
"""Return the user and project the request should be limited to.
:param headers: HTTP headers dictionary
:return: A tuple of (user, project), set to None if there's no limit on
one of these.
"""
global _ENFORCER
if not _ENFORCER:
_ENFORCER = policy.Enforcer()
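    # Non-admin callers are limited to their own user and project ids from the
    # auth headers; admins (per the context_is_admin policy rule) are not limited.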
if not _ENFORCER.enforce('context_is_admin',
{},
{'roles': headers.get('X-Roles', "").split(",")}):
return headers.get('X-User-Id'), headers.get('X-Project-Id')
return None, None
def get_limited_to_project(headers):
"""Return the project the request should be limited to.
:param headers: HTTP headers dictionary
:return: A project, or None if there's no limit on it.
"""
return get_limited_to(headers)[1]
| NeCTAR-RC/ceilometer | ceilometer/api/acl.py | Python | apache-2.0 | 2,172 | 0 |
#!/usr/bin/env python3
# Copyright (c) 2008-9 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 2 of the License, or
# version 3 of the License, or (at your option) any later version. It is
# provided for educational purposes and is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License for more details.
from PyQt4.QtCore import (QDate, QString, Qt, SIGNAL, pyqtSignature)
from PyQt4.QtGui import (QApplication, QDialog, QDialogButtonBox)
import moviedata_ans as moviedata
import ui_addeditmoviedlg_ans as ui_addeditmoviedlg
class AddEditMovieDlg(QDialog,
ui_addeditmoviedlg.Ui_AddEditMovieDlg):
def __init__(self, movies, movie=None, parent=None):
super(AddEditMovieDlg, self).__init__(parent)
self.setupUi(self)
self.movies = movies
self.movie = movie
self.acquiredDateEdit.setDisplayFormat(moviedata.DATEFORMAT)
if movie is not None:
self.titleLineEdit.setText(movie.title)
self.yearSpinBox.setValue(movie.year)
self.minutesSpinBox.setValue(movie.minutes)
self.acquiredDateEdit.setDate(movie.acquired)
self.acquiredDateEdit.setEnabled(False)
self.locationLineEdit.setText(movie.location)
self.notesTextEdit.setPlainText(movie.notes)
self.notesTextEdit.setFocus()
self.buttonBox.button(QDialogButtonBox.Ok).setText(
"&Accept")
self.setWindowTitle("My Movies - Edit Movie")
else:
today = QDate.currentDate()
self.acquiredDateEdit.setDateRange(today.addDays(-5),
today)
self.acquiredDateEdit.setDate(today)
self.titleLineEdit.setFocus()
self.on_titleLineEdit_textEdited(QString())
@pyqtSignature("QString")
def on_titleLineEdit_textEdited(self, text):
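        # Keep the OK/Accept button disabled while the title field is empty.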
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(
not self.titleLineEdit.text().isEmpty())
def accept(self):
title = self.titleLineEdit.text()
year = self.yearSpinBox.value()
minutes = self.minutesSpinBox.value()
location = self.locationLineEdit.text()
notes = self.notesTextEdit.toPlainText()
if self.movie is None:
acquired = self.acquiredDateEdit.date()
self.movie = moviedata.Movie(title, year, minutes,
acquired, location, notes)
self.movies.add(self.movie)
else:
self.movies.updateMovie(self.movie, title, year,
minutes, location, notes)
QDialog.accept(self)
if __name__ == "__main__":
import sys
app = QApplication(sys.argv)
form = AddEditMovieDlg(0)
form.show()
app.exec_()
| paradiseOffice/Bash_and_Cplus-plus | CPP/full_examples/pyqt/chap08/addeditmoviedlg_ans.py | Python | gpl-2.0 | 3,155 | 0.001268 |
'''
Created on Aug 21, 2014
@author: Dean4Devil
'''
import mysql.connector
from pycore.sql_util import MySQLHelper
class SubmitTree():
'A tree of all submits to that standard. I.e. OpenDriver is a tree, OpenDriver 0.2 is a submit.'
def __init__(self, identifier):
'Create a new Tree in memory.'
self.sql_helper = MySQLHelper("oetf_submits")
if self.sql_helper.check_exists(identifier):
self.tree = self.sql_helper.query_data(identifier, "*", delimiter="", order="id", row_num=0)
else:
# First submit in that tree. Table does not exist yet.
            table = (
                "CREATE TABLE IF NOT EXISTS `{}` (".format(identifier) +
                "`id` int(11) NOT NULL AUTO_INCREMENT, "
                "`version` varchar(32) COLLATE utf8mb4_bin NOT NULL, "
                "`comment` text COLLATE utf8mb4_bin NOT NULL, "
                "`content` text COLLATE utf8mb4_bin NOT NULL, "
                "`published_date` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, "
                "PRIMARY KEY (`id`)"
                ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin AUTO_INCREMENT=1 ;")
con = self.sql_helper.return_con()
cur = con.cursor()
cur.execute(table)
self.tree = []
cur.close()
con.close()
class Submit():
'Submit element'
| OpenEngeneeringTaskForce/OETFDev | pycore/submit_util.py | Python | mit | 1,401 | 0.004996 |
import filecmp
from transfert import Resource
from transfert.actions import copy
def estimate_nb_cycles(len_data, chunk_size):
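    # Ceiling division: one extra chunk whenever the data does not divide evenly.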
return (len_data // chunk_size) + [0, 1][(len_data % chunk_size) > 0]
def test_simple_local_copy(tmpdir):
src = tmpdir.join('alpha')
dst = tmpdir.join('beta')
src.write('some data')
assert src.check()
assert not dst.check()
copy(Resource('file://' + src.strpath),
Resource('file://' + dst.strpath))
assert src.check()
assert dst.check()
assert filecmp.cmp(src.strpath, dst.strpath)
def test_simple_local_copy_with_callback(tmpdir):
def wrapper(size):
nonlocal count
count += 1
count = 0
src = tmpdir.join('alpha')
dst = tmpdir.join('beta')
data = b'some data'
src.write(data)
chunk_size = 1
assert src.check()
assert not dst.check()
copy(Resource('file://' + src.strpath),
Resource('file://' + dst.strpath,),
size=chunk_size,
callback_freq=1,
callback=wrapper)
assert src.check()
assert dst.check()
assert filecmp.cmp(src.strpath, dst.strpath)
assert count == estimate_nb_cycles(len(data), chunk_size)
dst.remove()
count = 0
chunk_size = 2
assert src.check()
assert not dst.check()
copy(Resource('file://' + src.strpath),
Resource('file://' + dst.strpath,),
size=chunk_size,
callback_freq=1,
callback=wrapper)
assert src.check()
assert dst.check()
assert filecmp.cmp(src.strpath, dst.strpath)
assert count == estimate_nb_cycles(len(data), chunk_size)
| rbernand/transfert | tests/unit/test_copy.py | Python | mit | 1,619 | 0 |
#####################################################################
# -*- coding: iso-8859-1 -*- #
# #
# Frets on Fire X (FoFiX) #
# Copyright (C) 2009 Team FoFiX #
# 2009 John Stumpo #
# #
# This program is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation; either version 2 #
# of the License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, #
# MA 02110-1301, USA. #
#####################################################################
from util import Log
from core import Audio
import math
import numpy as np
try:
import pyaudio
from audio import pypitch
supported = True
except ImportError:
Log.warn('Missing pyaudio or pypitch - microphone support will not be possible')
supported = False
from core.Task import Task
from core.Language import _
if supported:
pa = pyaudio.PyAudio()
# Precompute these in the interest of saving CPU time in the note analysis loop
LN_2 = math.log(2.0)
LN_440 = math.log(440.0)
#stump: return dictionary mapping indices to device names
# -1 is magic for the default device and will be replaced by None when actually opening the mic.
def getAvailableMics():
result = {-1: _('[Default Microphone]')}
for devnum in range(pa.get_device_count()):
devinfo = pa.get_device_info_by_index(devnum)
if devinfo['maxInputChannels'] > 0:
result[devnum] = devinfo['name']
return result
class Microphone(Task):
def __init__(self, engine, controlnum, samprate=44100):
Task.__init__(self)
self.engine = engine
self.controlnum = controlnum
devnum = self.engine.input.controls.micDevice[controlnum]
if devnum == -1:
devnum = None
self.devname = pa.get_default_input_device_info()['name']
else:
self.devname = pa.get_device_info_by_index(devnum)['name']
self.mic = pa.open(samprate, 1, pyaudio.paFloat32, input=True, input_device_index=devnum, start=False)
self.analyzer = pypitch.Analyzer(samprate)
self.mic_started = False
self.lastPeak = 0
self.detectTaps = True
self.tapStatus = False
self.tapThreshold = -self.engine.input.controls.micTapSensitivity[controlnum]
self.passthroughQueue = []
passthroughVolume = self.engine.input.controls.micPassthroughVolume[controlnum]
if passthroughVolume > 0.0:
Log.debug('Microphone: creating passthrough stream at %d%% volume' % round(passthroughVolume * 100))
self.passthroughStream = Audio.MicrophonePassthroughStream(engine, self)
self.passthroughStream.setVolume(passthroughVolume)
else:
Log.debug('Microphone: not creating passthrough stream')
self.passthroughStream = None
def __del__(self):
self.stop()
self.mic.close()
def start(self):
if not self.mic_started:
self.mic_started = True
self.mic.start_stream()
self.engine.addTask(self, synchronized=False)
Log.debug('Microphone: started %s' % self.devname)
if self.passthroughStream is not None:
Log.debug('Microphone: starting passthrough stream')
self.passthroughStream.play()
def stop(self):
if self.mic_started:
if self.passthroughStream is not None:
Log.debug('Microphone: stopping passthrough stream')
self.passthroughStream.stop()
self.engine.removeTask(self)
self.mic.stop_stream()
self.mic_started = False
Log.debug('Microphone: stopped %s' % self.devname)
# Called by the Task machinery: pump the mic and shove the data through the analyzer.
def run(self, ticks):
while self.mic.get_read_available() > 1024:
try:
chunk = self.mic.read(1024)
except IOError, e:
if e.args[1] == pyaudio.paInputOverflowed:
Log.notice('Microphone: ignoring input buffer overflow')
chunk = '\x00' * 4096
else:
raise
if self.passthroughStream is not None:
self.passthroughQueue.append(chunk)
self.analyzer.input(np.frombuffer(chunk, dtype=np.float32))
self.analyzer.process()
pk = self.analyzer.getPeak()
if self.detectTaps:
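        # Register a tap when the peak clears the sensitivity threshold and jumps
        # at least 5 dB above the previous window's peak.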
if pk > self.tapThreshold and pk > self.lastPeak + 5.0:
self.tapStatus = True
self.lastPeak = pk
# Get the amplitude (in dB) of the peak of the most recent input window.
def getPeak(self):
return self.analyzer.getPeak()
# Get the microphone tap status.
# When a tap occurs, it is remembered until this function is called.
def getTap(self):
retval = self.tapStatus
self.tapStatus = False
return retval
def getFormants(self):
return self.analyzer.getFormants()
# Get the note currently being sung.
# Returns None if there isn't one or a pypitch.Tone object if there is.
def getTone(self):
return self.analyzer.findTone()
# Get the note currently being sung, as an integer number of semitones above A.
# The frequency is rounded to the nearest semitone, then shifted by octaves until
# the result is between 0 and 11 (inclusive). Returns None is no note is being sung.
def getSemitones(self):
tone = self.analyzer.findTone()
if tone is None:
return tone
return int(round((math.log(tone.freq) - LN_440) * 12.0 / LN_2) % 12)
# Work out how accurately the note (passed in as a MIDI note number) is being
# sung. Return a float in the range [-6.0, 6.0] representing the number of
# semitones difference there is from the nearest occurrence of the note. The
# octave doesn't matter. Or return None if there's no note being sung.
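    # Example: with midiNote 60 (a C) and a sung tone of 440 Hz (an A),
    # semitonesFromA440 is 0.0, so the deviation returned is -3.0 semitones.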
def getDeviation(self, midiNote):
tone = self.analyzer.findTone()
if tone is None:
return tone
# Convert to semitones from A-440.
semitonesFromA440 = (math.log(tone.freq) - LN_440) * 12.0 / LN_2
# midiNote % 12 = semitones above C, which is 3 semitones above A
semitoneDifference = (semitonesFromA440 - 3.0) - float(midiNote % 12)
# Adjust to the proper range.
acc = math.fmod(semitoneDifference, 12.0)
if acc > 6.0:
acc -= 12.0
elif acc < -6.0:
acc += 12.0
return acc
else:
def getAvailableMics():
return {-1: _('[Microphones not supported]')}
class Microphone(object):
def __new__(self, *args, **kw):
raise RuntimeError, 'Tried to instantiate Microphone when it is unsupported!'
# Turn a number of semitones above A into a human-readable note name.
def getNoteName(semitones):
return ['A', 'Bb', 'B', 'C', 'C#', 'D', 'Eb', 'E', 'F', 'F#', 'G', 'Ab'][semitones]
| mecwerks/fofix | src/audio/Microphone.py | Python | gpl-2.0 | 8,551 | 0.003158 |
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simple rules when using an explicit build target of 'all'.
"""
import TestGyp
import os
import sys
test = TestGyp.TestGyp(formats=['make', 'ninja', 'android', 'xcode', 'msvs'])
test.run_gyp('actions.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('actions.gyp', chdir='relocate/src')
expect = """\
no dir here
hi c
hello baz
"""
if test.format == 'xcode':
chdir = 'relocate/src/subdir'
else:
chdir = 'relocate/src'
test.run_built_executable('gencc_int_output', chdir=chdir, stdout=expect)
if test.format == 'msvs':
test.run_built_executable('gencc_int_output_external', chdir=chdir,
stdout=expect)
test.must_match('relocate/src/subdir/foo/bar/baz.dirname',
os.path.join('foo', 'bar'))
test.must_match('relocate/src/subdir/a/b/c.dirname',
os.path.join('a', 'b'))
# FIXME the xcode and make generators incorrectly convert RULE_INPUT_PATH
# to an absolute path, making the tests below fail!
if test.format != 'xcode' and test.format != 'make':
test.must_match('relocate/src/subdir/foo/bar/baz.path',
os.path.join('foo', 'bar', 'baz.printvars'))
test.must_match('relocate/src/subdir/a/b/c.path',
os.path.join('a', 'b', 'c.printvars'))
test.pass_test()
| sgraham/nope | tools/gyp/test/rules-dirname/gyptest-dirname.py | Python | bsd-3-clause | 1,475 | 0.00339 |
import sys
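# Silence stdout while importing settings so that only the export line below is printed.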
old_stdout = sys.stdout
sys.stdout = open('/dev/null','w')
import settings
sys.stdout = old_stdout
if __name__ == '__main__':
if sys.argv[1] == '-env':
print 'export DATA_DIR="%s"' % (settings.DATA_DIR)
| oferb/OpenTrains | webserver/opentrain/opentrain/print_settings.py | Python | bsd-3-clause | 230 | 0.021739 |
from .gameserver import Game
from .example import TicTacToe
| andydrop/ludicode | GameServers/__init__.py | Python | gpl-3.0 | 60 | 0 |
# -*- coding: cp1252 -*-
import urllib,urllib2,re,cookielib,string,os
import xbmc, xbmcgui, xbmcaddon, xbmcplugin
from t0mm0.common.net import Net as net
addon_id = 'plugin.video.movie25'
selfAddon = xbmcaddon.Addon(id=addon_id)
datapath = xbmc.translatePath(selfAddon.getAddonInfo('profile'))
elogo = xbmc.translatePath('special://home/addons/plugin.video.movie25/resources/art/bigx.png')
class ResolverError(Exception):
def __init__(self, value, value2):
        self.value = value
        self.value2 = value2
    def __str__(self):
        return repr((self.value, self.value2))
def resolve_url(url, filename = False):
stream_url = False
if(url):
try:
url = url.split('"')[0]
match = re.search('xoxv(.+?)xoxe(.+?)xoxc',url)
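            # Links encoded as xoxv<host>xoxe<media_id>xoxc carry a host name and
            # media id that urlresolver's HostedMediaFile can resolve directly.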
print "host "+url
if(match):
import urlresolver
source = urlresolver.HostedMediaFile(host=match.group(1), media_id=match.group(2))
if source:
stream_url = source.resolve()
elif re.search('billionuploads',url,re.I):
stream_url=resolve_billionuploads(url, filename)
elif re.search('180upload',url,re.I):
stream_url=resolve_180upload(url)
elif re.search('veehd',url,re.I):
stream_url=resolve_veehd(url)
elif re.search('vidto',url,re.I):
stream_url=resolve_vidto(url)
elif re.search('epicshare',url,re.I):
stream_url=resolve_epicshare(url)
elif re.search('lemuploads',url,re.I):
stream_url=resolve_lemupload(url)
elif re.search('mightyupload',url,re.I):
stream_url=resolve_mightyupload(url)
elif re.search('hugefiles',url,re.I):
stream_url=resolve_hugefiles(url)
elif re.search('megarelease',url,re.I):
stream_url=resolve_megarelease(url)
elif re.search('movreel',url,re.I):
stream_url=resolve_movreel(url)
elif re.search('bayfiles',url,re.I):
stream_url=resolve_bayfiles(url)
elif re.search('nowvideo',url,re.I):
stream_url=resolve_nowvideo(url)
elif re.search('novamov',url,re.I):
stream_url=resolve_novamov(url)
elif re.search('vidspot',url,re.I):
stream_url=resolve_vidspot(url)
elif re.search('videomega',url,re.I):
stream_url=resolve_videomega(url)
elif re.search('youwatch',url,re.I):
stream_url=resolve_youwatch(url)
elif re.search('vk.com',url,re.I):
stream_url=resolve_VK(url)
elif re.search('(?i)(firedrive|putlocker)',url):
stream_url=resolve_firedrive(url)
elif re.search('project-free-upload',url,re.I):
stream_url=resolve_projectfreeupload(url)
elif re.search('yify.tv',url,re.I):
stream_url=resolve_yify(url)
elif re.search('mail.ru',url,re.I):
stream_url=resolve_mailru(url)
elif re.search('youtube',url,re.I):
try:url=url.split('watch?v=')[1]
except:
try:url=url.split('com/v/')[1]
except:url=url.split('com/embed/')[1]
stream_url='plugin://plugin.video.youtube/?action=play_video&videoid=' +url
else:
import urlresolver
print "host "+url
source = urlresolver.HostedMediaFile(url)
if source:
stream_url = source.resolve()
if isinstance(stream_url,urlresolver.UrlResolver.unresolvable):
showUrlResoverError(stream_url)
stream_url = False
else:
stream_url=url
try:
stream_url=stream_url.split('referer')[0]
stream_url=stream_url.replace('|','')
except:
pass
except ResolverError as e:
#logerror(str(e))
#showpopup('[COLOR=FF67cc33]Mash Up URLresolver Error[/COLOR] ' + e.value2,'[B][COLOR red]'+e.value+'[/COLOR][/B]',5000, elogo)
try:
import urlresolver
source = urlresolver.HostedMediaFile(url)
if source:
stream_url = source.resolve()
if isinstance(stream_url,urlresolver.UrlResolver.unresolvable):
showUrlResoverError(stream_url)
stream_url = False
except Exception as e:
logerror(str(e))
showpopup('[COLOR=FF67cc33]Mash Up URLresolver Error[/COLOR]','[B][COLOR red]'+str(e)+'[/COLOR][/B]',5000, elogo)
except Exception as e:
logerror(str(e))
showpopup('[COLOR=FF67cc33]Mash Up URLresolver Error[/COLOR]','[B][COLOR red]'+str(e)+'[/COLOR][/B]',5000, elogo)
else:
logerror("video url not valid")
showpopup('[COLOR=FF67cc33]Mash Up URLresolver Error[/COLOR]','[B][COLOR red]video url not valid[/COLOR][/B]',5000, elogo)
if stream_url and re.search('\.(zip|rar|7zip)$',stream_url,re.I):
logerror("video url found is an archive")
showpopup('[COLOR=FF67cc33]Mash Up URLresolver Error[/COLOR]','[B][COLOR red]video url found is an archive[/COLOR][/B]',5000, elogo)
return False
return stream_url
def showUrlResoverError(unresolvable):
logerror(str(unresolvable.msg))
showpopup('[B]UrlResolver Error[/B]','[COLOR red]'+str(unresolvable.msg)+'[/COLOR]',10000, elogo)
def logerror(log):
xbmc.log(log, xbmc.LOGERROR)
def showpopup(title='', msg='', delay=5000, image=''):
xbmc.executebuiltin('XBMC.Notification("%s","%s",%d,"%s")' % (title, msg, delay, image))
def grab_cloudflare(url):
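    # Old-style Cloudflare challenge bypass: read the 503 page, solve the small
    # JavaScript arithmetic (answer = eval(expression) + len(domain)), wait out the
    # delay, then submit the answer to /cdn-cgi/l/chk_jschl with the same cookies
    # before re-requesting the original URL.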
class NoRedirection(urllib2.HTTPErrorProcessor):
# Stop Urllib2 from bypassing the 503 page.
def http_response(self, request, response):
code, msg, hdrs = response.code, response.msg, response.info()
return response
https_response = http_response
cj = cookielib.CookieJar()
opener = urllib2.build_opener(NoRedirection, urllib2.HTTPCookieProcessor(cj))
opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.57 Safari/537.36')]
response = opener.open(url).read()
jschl=re.compile('name="jschl_vc" value="(.+?)"/>').findall(response)
if jschl:
import time
jschl = jschl[0]
maths=re.compile('value = (.+?);').findall(response)[0].replace('(','').replace(')','')
domain_url = re.compile('(https?://.+?/)').findall(url)[0]
domain = re.compile('https?://(.+?)/').findall(domain_url)[0]
time.sleep(5)
normal = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
normal.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.57 Safari/537.36')]
final= normal.open(domain_url+'cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s'%(jschl,eval(maths)+len(domain))).read()
response = normal.open(url).read()
return response
def millis():
import time as time_
return int(round(time_.time() * 1000))
def load_json(data):
def to_utf8(dct):
rdct = {}
for k, v in dct.items() :
if isinstance(v, (str, unicode)) :
rdct[k] = v.encode('utf8', 'ignore')
else :
rdct[k] = v
return rdct
try :
from lib import simplejson
json_data = simplejson.loads(data, object_hook=to_utf8)
return json_data
except:
try:
import json
json_data = json.loads(data, object_hook=to_utf8)
return json_data
except:
import sys
for line in sys.exc_info():
print "%s" % line
return None
def resolve_firedrive(url):
try:
url=url.replace('putlocker.com','firedrive.com').replace('putlocker.to','firedrive.com')
dialog = xbmcgui.DialogProgress()
dialog.create('Resolving', 'Resolving MashUp Firedrive Link...')
dialog.update(0)
print 'MashUp Firedrive - Requesting GET URL: %s' % url
html = net().http_GET(url).content
dialog.update(50)
if dialog.iscanceled(): return None
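        # Re-post the page's hidden form fields (plus a referer) to reach the page
        # that carries the direct download link.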
post_data = {}
r = re.findall(r'(?i)<input type="hidden" name="(.+?)" value="(.+?)"', html)
for name, value in r:
post_data[name] = value
post_data['referer'] = url
html = net().http_POST(url, post_data).content
embed=re.findall('(?sim)href="([^"]+?)">Download file</a>',html)
if not embed:
embed=re.findall('(?sim)href="(http://dl.firedrive.com[^"]+?)"',html)
if dialog.iscanceled(): return None
if embed:
dialog.update(100)
return embed[0]
else:
logerror('Mash Up: Resolve Firedrive - File Not Found')
xbmc.executebuiltin("XBMC.Notification(File Not Found,Firedrive,2000)")
return False
except Exception, e:
        logerror('**** Firedrive Error occurred: %s' % e)
        showpopup('[B][COLOR white]Firedrive[/COLOR][/B]','[COLOR red]%s[/COLOR]' % e, 5000, elogo)
def resolve_bayfiles(url):
try:
dialog = xbmcgui.DialogProgress()
dialog.create('Resolving', 'Resolving MashUp Bayfiles Link...')
dialog.update(0)
print 'MashUp Bayfiles - Requesting GET URL: %s' % url
html = net().http_GET(url).content
try: vfid = re.compile('var vfid = ([^;]+);').findall(html)[0]
except:pass
try:urlpremium='http://'+ re.compile('<a class="highlighted-btn" href="http://(.+?)">Premium Download</a>').findall(html)[0]
except:urlpremium=[]
if urlpremium:
return urlpremium
else:
try:
delay = re.compile('var delay = ([^;]+);').findall(html)[0]
delay = int(delay)
except: delay = 300
t = millis()
html2 = net().http_GET("http://bayfiles.net/ajax_download?_=%s&action=startTimer&vfid=%s"%(t,vfid)).content
datajson=load_json(html2)
if datajson['set']==True:
token=datajson['token']
url_ajax = 'http://bayfiles.net/ajax_download'
post = "action=getLink&vfid=%s&token=%s" %(vfid,token)
finaldata=net().http_GET(url_ajax + '?' + post).content
patron = 'onclick="javascript:window.location.href = \'(.+?)\''
matches = re.compile(patron,re.DOTALL).findall(finaldata)
return matches[0] #final url mp4
    except:
        html = net().http_GET(url).content
        # The throttle page either says this IP recently downloaded a file or is already downloading.
        match2=re.compile('<div id="content-inner">\n\t\t\t\t<center><strong style="color:#B22B13;">Your IP (.+?) has recently downloaded a file. Upgrade to premium or wait (.+?) min.</strong>').findall(html)
        if match2:
            raise ResolverError('You recently downloaded a file. Upgrade to premium or wait',"Bayfiles")
        raise ResolverError('You are already downloading. Upgrade to premium or wait.',"Bayfiles")
def resolve_mailru(url):
try:
dialog = xbmcgui.DialogProgress()
dialog.create('Resolving', 'Resolving MashUp MailRU Link...')
dialog.update(0)
print 'MashUp MailRU - Requesting GET URL: %s' % url
link = net().http_GET(url).content
match=re.compile('videoSrc = "(.+?)",',re.DOTALL).findall(link)
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj), urllib2.HTTPHandler())
req = urllib2.Request(url)
f = opener.open(req)
html = f.read()
for cookie in cj:
cookie=str(cookie)
rcookie=cookie.replace('<Cookie ','').replace(' for .video.mail.ru/>','')
vlink=match[0]+'&Cookie='+rcookie
return vlink
except Exception, e:
        logerror('**** MailRU Error occurred: %s' % e)
        showpopup('[B][COLOR white]MailRU[/COLOR][/B]','[COLOR red]%s[/COLOR]' % e, 5000, elogo)
def resolve_yify(url):
try:
dialog = xbmcgui.DialogProgress()
dialog.create('Resolving', 'Resolving MashUp Yify Link...')
dialog.update(0)
print 'MashUp Yify - Requesting GET URL: %s' % url
html = net().http_GET(url).content
url = re.compile('showPkPlayer[(]"(.+?)"[)]').findall(html)[0]
url = 'http://yify.tv/reproductor2/pk/pk/plugins/player_p.php?url=https%3A//picasaweb.google.com/' + url
html = net().http_GET(url).content
html = re.compile('{(.+?)}').findall(html)[-1]
stream_url = re.compile('"url":"(.+?)"').findall(html)[0]
return stream_url
except Exception, e:
logerror('**** Yify Error occured: %s' % e)
        showpopup('[B][COLOR white]Yify[/COLOR][/B]','[COLOR red]%s[/COLOR]' % e, 5000, elogo)
def resolve_VK(url):
try:
dialog = xbmcgui.DialogProgress()
dialog.create('Resolving', 'Resolving MashUp VK Link...')
dialog.update(0)
print 'MashUp VK - Requesting GET URL: %s' % url
useragent='Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_0 like Mac OS X; en-us) AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8A293 Safari/6531.22.7'
link2 = net(user_agent=useragent).http_GET(url).content
if re.search('This video has been removed', link2, re.I):
logerror('***** MashUp VK - This video has been removed')
xbmc.executebuiltin("XBMC.Notification(This video has been removed,VK,2000)")
            return False
urllist=[]
quaList=[]
match=re.findall('(?sim)<source src="([^"]+)"',link2)
for url in match:
print url
urllist.append(url)
qua=re.findall('(?sim).(\d+).mp4',url)
quaList.append(str(qua[0]))
dialog2 = xbmcgui.Dialog()
ret = dialog2.select('[COLOR=FF67cc33][B]Select Quality[/COLOR][/B]',quaList)
if ret == -1:
return False
stream_url = urllist[ret]
if match:
return stream_url.replace("\/",'/')
except Exception, e:
logerror('**** VK Error occured: %s' % e)
        showpopup('[B][COLOR white]VK[/COLOR][/B]','[COLOR red]%s[/COLOR]' % e, 5000, elogo)
def resolve_youwatch(url):
try:
dialog = xbmcgui.DialogProgress()
dialog.create('Resolving', 'Resolving MashUp Youwatch Link...')
dialog.update(0)
print 'MashUp Youwatch - Requesting GET URL: %s' % url
if 'embed' not in url:
mediaID = re.findall('http://youwatch.org/([^<]+)', url)[0]
url='http://youwatch.org/embed-'+mediaID+'.html'
else:url=url
html = net().http_GET(url).content
        html=html.replace('|','/')
        stream=re.compile('/mp4/video/(.+?)/(.+?)/(.+?)/setup').findall(html)
        if not stream:
            raise ResolverError('This file is not available on',"Youwatch")
        id,socket,server=stream[-1]
stream_url='http://'+server+'.youwatch.org:'+socket+'/'+id+'/video.mp4?start=0'
return stream_url
except Exception, e:
logerror('**** Youwatch Error occured: %s' % e)
        showpopup('[B][COLOR white]Youwatch[/COLOR][/B]','[COLOR red]%s[/COLOR]' % e, 5000, elogo)
def resolve_projectfreeupload(url):
try:
import jsunpack
dialog = xbmcgui.DialogProgress()
dialog.create('Resolving', 'Resolving MashUp Project Free Link...')
dialog.update(0)
print 'MashUp Project Free - Requesting GET URL: %s' % url
html = net().http_GET(url).content
r = re.findall(r'\"hidden\"\sname=\"?(.+?)\"\svalue=\"?(.+?)\"\>', html, re.I)
post_data = {}
for name, value in r:
post_data[name] = value
post_data['referer'] = url
post_data['method_premium']=''
post_data['method_free']=''
html = net().http_POST(url, post_data).content
embed=re.findall('<IFRAME SRC="(.+?)"',html)
html = net().http_GET(embed[0]).content
r = re.findall(r'(eval\(function\(p,a,c,k,e,d\)\{while.+?)</script>',html,re.M|re.DOTALL)
try:unpack=jsunpack.unpack(r[1])
except:unpack=jsunpack.unpack(r[0])
        stream_url=re.findall('<param name="src"value="(.+?)"/>',unpack)[0]
        if dialog.iscanceled(): return None
        return stream_url
except Exception, e:
logerror('**** Project Free Error occured: %s' % e)
        showpopup('[B][COLOR white]Project Free[/COLOR][/B]','[COLOR red]%s[/COLOR]' % e, 5000, elogo)
def resolve_videomega(url):
try:
dialog = xbmcgui.DialogProgress()
dialog.create('Resolving', 'Resolving MashUp Videomega Link...')
dialog.update(0)
print 'MashUp Videomega - Requesting GET URL: %s' % url
try:
mediaID = re.findall('http://videomega.tv/.?ref=([^<]+)', url)[0]
url='http://videomega.tv/iframe.php?ref='+mediaID
except:url=url
html = net().http_GET(url).content
        encodedurl=re.compile('unescape.+?"(.+?)"').findall(html)
        if not encodedurl:
            raise ResolverError('This file is not available on',"VideoMega")
url2=urllib.unquote(encodedurl[0])
stream_url=re.compile('file: "(.+?)"').findall(url2)[0]
return stream_url
except Exception, e:
logerror('**** Videomega Error occured: %s' % e)
        showpopup('[B][COLOR white]Videomega[/COLOR][/B]','[COLOR red]%s[/COLOR]' % e, 5000, elogo)
def resolve_vidspot(url):
try:
dialog = xbmcgui.DialogProgress()
dialog.create('Resolving', 'Resolving MashUp Vidspot Link...')
dialog.update(0)
print 'MashUp Vidspot - Requesting GET URL: %s' % url
mediaID=re.findall('http://vidspot.net/([^<]+)',url)[0]
url='http://vidspot.net/embed-'+mediaID+'.html'
print url
html = net().http_GET(url).content
r = re.search('"file" : "(.+?)",', html)
if r:
stream_url = urllib.unquote(r.group(1))
return stream_url
except Exception, e:
logerror('**** Vidspot Error occured: %s' % e)
        showpopup('[B][COLOR white]Vidspot[/COLOR][/B]','[COLOR red]%s[/COLOR]' % e, 5000, elogo)
def resolve_novamov(url):
try:
import unwise
dialog = xbmcgui.DialogProgress()
dialog.create('Resolving', 'Resolving MashUp Novamov Link...')
dialog.update(0)
print 'MashUp Novamov - Requesting GET URL: %s' % url
html = net().http_GET(url).content
html = unwise.unwise_process(html)
filekey = unwise.resolve_var(html, "flashvars.filekey")
        media_id=re.findall('.+?/video/([^<]+)',url)[0]
#get stream url from api
api = 'http://www.novamov.com/api/player.api.php?key=%s&file=%s' % (filekey, media_id)
html = net().http_GET(api).content
r = re.search('url=(.+?)&title', html)
if r:
stream_url = urllib.unquote(r.group(1))
else:
r = re.search('file no longer exists',html)
if r:
raise ResolverError('File Not Found or removed',"Novamov")
raise ResolverError('Failed to parse url',"Novamov")
return stream_url
except urllib2.URLError, e:
logerror('Novamov: got http error %d fetching %s' %
            (e.code, url))
return unresolvable(code=3, msg=e)
except Exception, e:
logerror('**** Novamov Error occured: %s' % e)
        showpopup('[B][COLOR white]Novamov[/COLOR][/B]','[COLOR red]%s[/COLOR]' % e, 5000, elogo)
return unresolvable(code=0, msg=e)
def resolve_nowvideo(url):
try:
import unwise
dialog = xbmcgui.DialogProgress()
dialog.create('Resolving', 'Resolving MashUp Nowvideo Link...')
dialog.update(0)
print 'MashUp Nowvideo - Requesting GET URL: %s' % url
html = net().http_GET(url).content
html = unwise.unwise_process(html)
filekey = unwise.resolve_var(html, "flashvars.filekey")
try:media_id=re.findall('.+?/video/([^<]+)',url)[0]
except:media_id=re.findall('http://embed.nowvideo.+?/embed.php.?v=([^<]+)',url)[0]
#get stream url from api
api = 'http://www.nowvideo.sx/api/player.api.php?key=%s&file=%s' % (filekey, media_id)
html = net().http_GET(api).content
r = re.search('url=(.+?)&title', html)
if r:
stream_url = urllib.unquote(r.group(1))
else:
r = re.search('file no longer exists',html)
if r:
raise ResolverError('File Not Found or removed',"Nowvideo")
raise ResolverError('Failed to parse url',"Nowvideo")
return stream_url
except urllib2.URLError, e:
logerror('Nowvideo: got http error %d fetching %s' %
            (e.code, url))
return unresolvable(code=3, msg=e)
except Exception, e:
logerror('**** Nowvideo Error occured: %s' % e)
        showpopup('[B][COLOR white]Nowvideo[/COLOR][/B]','[COLOR red]%s[/COLOR]' % e, 5000, elogo)
return unresolvable(code=0, msg=e)
def resolve_movreel(url):
try:
#Show dialog box so user knows something is happening
dialog = xbmcgui.DialogProgress()
dialog.create('Resolving', 'Resolving MashUp Movreel Link...')
dialog.update(0)
print 'MashUp Movreel - Requesting GET URL: %s' % url
html = net().http_GET(url).content
dialog.update(33)
#Check page for any error msgs
if re.search('This server is in maintenance mode', html):
logerror('***** MashUp Movreel - Site reported maintenance mode')
xbmc.executebuiltin("XBMC.Notification(File is currently unavailable on the host,Movreel in maintenance,2000)")
#Set POST data values
op = re.search('<input type="hidden" name="op" value="(.+?)">', html).group(1)
postid = re.search('<input type="hidden" name="id" value="(.+?)">', html).group(1)
method_free = re.search('<input type="(submit|hidden)" name="method_free" (style=".*?" )*value="(.*?)">', html).group(3)
method_premium = re.search('<input type="(hidden|submit)" name="method_premium" (style=".*?" )*value="(.*?)">', html).group(3)
rand = re.search('<input type="hidden" name="rand" value="(.+?)">', html).group(1)
data = {'op': op, 'id': postid, 'referer': url, 'rand': rand, 'method_premium': method_premium}
print 'MashUp Movreel - Requesting POST URL: %s DATA: %s' % (url, data)
html = net().http_POST(url, data).content
#Only do next post if Free account, skip to last page for download link if Premium
if method_free:
#Check for download limit error msg
if re.search('<p class="err">.+?</p>', html):
logerror('***** Download limit reached')
errortxt = re.search('<p class="err">(.+?)</p>', html).group(1)
xbmc.executebuiltin("XBMC.Notification("+errortxt+",Movreel,2000)")
dialog.update(66)
#Set POST data values
data = {}
r = re.findall(r'type="hidden" name="(.+?)" value="(.+?)">', html)
if r:
for name, value in r:
data[name] = value
else:
logerror('***** MashUp Movreel - Cannot find data values')
xbmc.executebuiltin("XBMC.Notification(Unable to resolve Movreel Link,Movreel,2000)")
print 'MashUp Movreel - Requesting POST URL: %s DATA: %s' % (url, data)
html = net().http_POST(url, data).content
#Get download link
dialog.update(100)
link = re.search('<a href="(.+)">Download Link</a>', html)
if link:
return link.group(1)
else:
xbmc.executebuiltin("XBMC.Notification(Unable to find final link,Movreel,2000)")
except Exception, e:
logerror('**** Mash Up Movreel Error occured: %s' % e)
raise ResolverError(str(e),"Movreel")
finally:
dialog.close()
def resolve_megarelease(url):
try:
#Show dialog box so user knows something is happening
dialog = xbmcgui.DialogProgress()
dialog.create('Resolving', 'Resolving MashUp MegaRelease Link...')
dialog.update(0)
print 'MegaRelease MashUp - Requesting GET URL: %s' % url
html = net().http_GET(url).content
dialog.update(50)
#Check page for any error msgs
if re.search('This server is in maintenance mode', html):
logerror('***** MegaRelease - Site reported maintenance mode')
xbmc.executebuiltin("XBMC.Notification(File is currently unavailable,MegaRelease in maintenance,2000)")
return False
if re.search('<b>File Not Found</b>', html):
logerror('Mash Up: Resolve MegaRelease - File Not Found')
xbmc.executebuiltin("XBMC.Notification(File Not Found,MegaRelease,2000)")
return False
filename = re.search('You have requested <font color="red">(.+?)</font>', html).group(1)
filename = filename.split('/')[-1]
extension = re.search('(\.[^\.]*$)', filename).group(1)
guid = re.search('http://megarelease.org/(.+)$', url).group(1)
vid_embed_url = 'http://megarelease.org/vidembed-%s%s' % (guid, extension)
UserAgent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'
ACCEPT = 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
request = urllib2.Request(vid_embed_url)
request.add_header('User-Agent', UserAgent)
request.add_header('Accept', ACCEPT)
request.add_header('Referer', url)
response = urllib2.urlopen(request)
redirect_url = re.search('(http://.+?)video', response.geturl()).group(1)
download_link = redirect_url + filename
dialog.update(100)
return download_link
except Exception, e:
logerror('**** Mash Up MegaRelease Error occured: %s' % e)
raise ResolverError(str(e),"MegaRelease")
finally:
dialog.close()
def resolve_veehd(url):
name = "veeHD"
cookie_file = os.path.join(datapath, '%s.cookies' % name)
user_agent='Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'
from random import choice
userName = ['mashup1', 'mashup3', 'mashup4', 'mashup5', 'mashup6', 'mashup7']
try:
dialog = xbmcgui.DialogProgress()
dialog.create('Resolving', 'Resolving Mash Up VeeHD Link...')
dialog.update(0)
loginurl = 'http://veehd.com/login'
ref = 'http://veehd.com/'
submit = 'Login'
terms = 'on'
remember_me = 'on'
data = {'ref': ref, 'uname': choice(userName), 'pword': 'xbmcisk00l', 'submit': submit, 'terms': terms, 'remember_me': remember_me}
html = net(user_agent).http_POST(loginurl, data).content
if dialog.iscanceled(): return False
dialog.update(33)
net().save_cookies(cookie_file)
headers = {}
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/537.13+ (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2'}
net().set_cookies(cookie_file)
print 'Mash Up VeeHD - Requesting GET URL: %s' % url
html = net().http_GET(url, headers).content
if dialog.iscanceled(): return False
dialog.update(66)
fragment = re.findall('playeriframe".+?attr.+?src : "(.+?)"', html)
frag = 'http://%s%s'%('veehd.com',fragment[1])
net().set_cookies(cookie_file)
html = net().http_GET(frag, headers).content
r = re.search('"video/divx" src="(.+?)"', html)
if r:
stream_url = r.group(1)
if not r:
print name + '- 1st attempt at finding the stream_url failed probably an Mp4, finding Mp4'
a = re.search('"url":"(.+?)"', html)
if a:
r=urllib.unquote(a.group(1))
if r:
stream_url = r
else:
logerror('***** VeeHD - File Not Found')
xbmc.executebuiltin("XBMC.Notification(File Not Found,VeeHD,2000)")
return False
if not a:
a = re.findall('href="(.+?)">', html)
stream_url = a[1]
if dialog.iscanceled(): return False
dialog.update(100)
return stream_url
except Exception, e:
logerror('**** Mash Up VeeHD Error occured: %s' % e)
raise ResolverError(str(e),"VeeHD")
def resolve_billionuploads(url, filename):
try:
dialog = xbmcgui.DialogProgress()
dialog.create('Resolving', 'Resolving Mash Up BillionUploads Link...')
dialog.update(0)
url = re.sub('(?i)^(.*?\.com/.+?)/.*','\\1',url)
print 'Mash Up BillionUploads - Requesting GET URL: %s' % url
cookie_file = os.path.join(os.path.join(datapath,'Cookies'), 'billionuploads.cookies')
cj = cookielib.LWPCookieJar()
if os.path.exists(cookie_file):
try: cj.load(cookie_file,True)
except: cj.save(cookie_file,True)
else: cj.save(cookie_file,True)
normal = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
headers = [
('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:25.0) Gecko/20100101 Firefox/25.0'),
('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),
('Accept-Language', 'en-US,en;q=0.5'),
('Accept-Encoding', ''),
('DNT', '1'),
('Connection', 'keep-alive'),
('Pragma', 'no-cache'),
('Cache-Control', 'no-cache')
]
normal.addheaders = headers
class NoRedirection(urllib2.HTTPErrorProcessor):
# Stop Urllib2 from bypassing the 503 page.
def http_response(self, request, response):
code, msg, hdrs = response.code, response.msg, response.info()
return response
https_response = http_response
opener = urllib2.build_opener(NoRedirection, urllib2.HTTPCookieProcessor(cj))
opener.addheaders = normal.addheaders
response = opener.open(url).read()
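        # The block below decodes the hex-encoded Incapsula bootstrap script,
        # requests its resource URL to pick up the anti-bot cookies, then
        # re-fetches the page.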
decoded = re.search('(?i)var z="";var b="([^"]+?)"', response)
if decoded:
decoded = decoded.group(1)
z = []
for i in range(len(decoded)/2):
z.append(int(decoded[i*2:i*2+2],16))
decoded = ''.join(map(unichr, z))
incapurl = re.search('(?i)"GET","(/_Incapsula_Resource[^"]+?)"', decoded)
if incapurl:
incapurl = 'http://billionuploads.com'+incapurl.group(1)
opener.open(incapurl)
cj.save(cookie_file,True)
response = opener.open(url).read()
captcha = re.search('(?i)<iframe src="(/_Incapsula_Resource[^"]+?)"', response)
if captcha:
captcha = 'http://billionuploads.com'+captcha.group(1)
opener.addheaders.append(('Referer', url))
response = opener.open(captcha).read()
formurl = 'http://billionuploads.com'+re.search('(?i)<form action="(/_Incapsula_Resource[^"]+?)"', response).group(1)
resource = re.search('(?i)src=" (/_Incapsula_Resource[^"]+?)"', response)
if resource:
import random
resourceurl = 'http://billionuploads.com'+resource.group(1) + str(random.random())
opener.open(resourceurl)
recaptcha = re.search('(?i)<script type="text/javascript" src="(https://www.google.com/recaptcha/api[^"]+?)"', response)
if recaptcha:
response = opener.open(recaptcha.group(1)).read()
challenge = re.search('''(?i)challenge : '([^']+?)',''', response)
if challenge:
challenge = challenge.group(1)
captchaimg = 'https://www.google.com/recaptcha/api/image?c=' + challenge
# site = re.search('''(?i)site : '([^']+?)',''', response).group(1)
# reloadurl = 'https://www.google.com/recaptcha/api/reload?c=' + challenge + '&' + site + '&reason=[object%20MouseEvent]&type=image&lang=en'
img = xbmcgui.ControlImage(550,15,300,57,captchaimg)
wdlg = xbmcgui.WindowDialog()
wdlg.addControl(img)
wdlg.show()
kb = xbmc.Keyboard('', 'Please enter the text in the image', False)
kb.doModal()
capcode = kb.getText()
if (kb.isConfirmed()):
userInput = kb.getText()
if userInput != '': capcode = kb.getText()
elif userInput == '':
logerror('BillionUploads - Image-Text not entered')
xbmc.executebuiltin("XBMC.Notification(Image-Text not entered.,BillionUploads,2000)")
return None
else: return None
wdlg.close()
captchadata = {}
captchadata['recaptcha_challenge_field'] = challenge
captchadata['recaptcha_response_field'] = capcode
opener.addheaders = headers
opener.addheaders.append(('Referer', captcha))
resultcaptcha = opener.open(formurl,urllib.urlencode(captchadata)).info()
opener.addheaders = headers
response = opener.open(url).read()
ga = re.search('(?i)"text/javascript" src="(/ga[^"]+?)"', response)
if ga:
jsurl = 'http://billionuploads.com'+ga.group(1)
p = "p=%7B%22appName%22%3A%22Netscape%22%2C%22platform%22%3A%22Win32%22%2C%22cookies%22%3A1%2C%22syslang%22%3A%22en-US%22"
p += "%2C%22userlang%22%3A%22en-US%22%2C%22cpu%22%3A%22WindowsNT6.1%3BWOW64%22%2C%22productSub%22%3A%2220100101%22%7D"
opener.open(jsurl, p)
response = opener.open(url).read()
# pid = re.search('(?i)PID=([^"]+?)"', response)
# if pid:
# normal.addheaders += [('Cookie','D_UID='+pid.group(1)+';')]
# opener.addheaders = normal.addheaders
if re.search('(?i)url=/distil_r_drop.html', response) and filename:
url += '/' + filename
response = normal.open(url).read()
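        # Solve Cloudflare's "jschl" JavaScript challenge: evaluate the
        # arithmetic from the page, add the domain length, and submit the
        # answer to cdn-cgi/l/chk_jschl before retrying the request.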
jschl=re.compile('name="jschl_vc" value="(.+?)"/>').findall(response)
if jschl:
jschl = jschl[0]
maths=re.compile('value = (.+?);').findall(response)[0].replace('(','').replace(')','')
domain_url = re.compile('(https?://.+?/)').findall(url)[0]
domain = re.compile('https?://(.+?)/').findall(domain_url)[0]
final= normal.open(domain_url+'cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s'%(jschl,eval(maths)+len(domain))).read()
html = normal.open(url).read()
else: html = response
if dialog.iscanceled(): return None
dialog.update(25)
#Check page for any error msgs
if re.search('This server is in maintenance mode', html):
logerror('***** BillionUploads - Site reported maintenance mode')
xbmc.executebuiltin("XBMC.Notification(File is currently unavailable,BillionUploads in maintenance,2000)")
return None
if re.search('File Not Found', html, re.I):
logerror('***** BillionUploads - File Not Found')
xbmc.executebuiltin("XBMC.Notification(File Not Found,BillionUploads,2000)")
return False
data = {}
r = re.findall(r'type="hidden" name="(.+?)" value="(.*?)">', html)
for name, value in r: data[name] = value
if not data:
logerror('Mash Up: Resolve BillionUploads - No Data Found')
xbmc.executebuiltin("XBMC.Notification(No Data Found,BillionUploads,2000)")
return None
if dialog.iscanceled(): return None
captchaimg = re.search('<img src="((?:http://|www\.)?BillionUploads.com/captchas/.+?)"', html)
if captchaimg:
img = xbmcgui.ControlImage(550,15,240,100,captchaimg.group(1))
wdlg = xbmcgui.WindowDialog()
wdlg.addControl(img)
wdlg.show()
kb = xbmc.Keyboard('', 'Please enter the text in the image', False)
kb.doModal()
capcode = kb.getText()
if (kb.isConfirmed()):
userInput = kb.getText()
if userInput != '': capcode = kb.getText()
elif userInput == '':
showpopup('BillionUploads','[B]You must enter the text from the image to access video[/B]',5000, elogo)
return None
else: return None
wdlg.close()
data.update({'code':capcode})
if dialog.iscanceled(): return None
dialog.update(50)
data.update({'submit_btn':''})
enc_input = re.compile('decodeURIComponent\("(.+?)"\)').findall(html)
if enc_input:
dec_input = urllib2.unquote(enc_input[0])
r = re.findall(r'type="hidden" name="(.+?)" value="(.*?)">', dec_input)
for name, value in r:
data[name] = value
extradata = re.compile("append\(\$\(document.createElement\('input'\)\).attr\('type','hidden'\).attr\('name','(.*?)'\).val\((.*?)\)").findall(html)
if extradata:
for attr, val in extradata:
if 'source="self"' in val:
val = re.compile('<textarea[^>]*?source="self"[^>]*?>([^<]*?)<').findall(html)[0]
data[attr] = val.strip("'")
r = re.findall("""'input\[name="([^"]+?)"\]'\)\.remove\(\)""", html)
for name in r: del data[name]
normal.addheaders.append(('Referer', url))
html = normal.open(url, urllib.urlencode(data)).read()
cj.save(cookie_file,True)
if dialog.iscanceled(): return None
dialog.update(75)
def custom_range(start, end, step):
while start <= end:
yield start
start += step
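        # checkwmv appears to be a custom base64-style decoder used to
        # de-obfuscate the hidden download URL (applied twice below).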
def checkwmv(e):
s = ""
i=[]
u=[[65,91],[97,123],[48,58],[43,44],[47,48]]
for z in range(0, len(u)):
for n in range(u[z][0],u[z][1]):
i.append(chr(n))
t = {}
for n in range(0, 64): t[i[n]]=n
for n in custom_range(0, len(e), 72):
a=0
h=e[n:n+72]
c=0
for l in range(0, len(h)):
f = t.get(h[l], 'undefined')
if f == 'undefined': continue
a = (a<<6) + f
c = c + 6
while c >= 8:
c = c - 8
s = s + chr( (a >> c) % 256 )
return s
dll = re.compile('<input type="hidden" id="dl" value="(.+?)">').findall(html)
if dll:
dl = dll[0].split('GvaZu')[1]
dl = checkwmv(dl);
dl = checkwmv(dl);
else:
alt = re.compile('<source src="([^"]+?)"').findall(html)
if alt:
dl = alt[0]
else:
logerror('Mash Up: Resolve BillionUploads - No Video File Found')
xbmc.executebuiltin("XBMC.Notification(No Video File Found,BillionUploads,2000)")
return None
if dialog.iscanceled(): return None
dialog.update(100)
return dl
except Exception, e:
logerror('BillionUploads - Exception occured: %s' % e)
raise ResolverError(str(e),"BillionUploads")
return None
finally:
dialog.close()
def resolve_180upload(url):
try:
dialog = xbmcgui.DialogProgress()
dialog.create('Resolving', 'Resolving Mash Up 180Upload Link...')
dialog.update(0)
puzzle_img = os.path.join(datapath, "180_puzzle.png")
url=url.replace('180upload.nl','180upload.com')
print 'Mash Up 180Upload - Requesting GET URL: %s' % url
html = net().http_GET(url).content
if ">File Not Found" in html:
logerror('Mash Up: Resolve 180Upload - File Not Found')
xbmc.executebuiltin("XBMC.Notification(File Not Found,180Upload,2000)")
return False
if re.search('\.(rar|zip)</b>', html, re.I):
logerror('Mash Up: Resolve 180Upload - No Video File Found')
xbmc.executebuiltin("XBMC.Notification(No Video File Found,180Upload,2000)")
return False
if dialog.iscanceled(): return False
dialog.update(50)
data = {}
r = re.findall(r'type="hidden" name="(.+?)" value="(.+?)">', html)
if r:
for name, value in r:
data[name] = value
else:
raise Exception('Unable to resolve 180Upload Link')
#Check for SolveMedia Captcha image
solvemedia = re.search('<iframe src="(http://api.solvemedia.com.+?)"', html)
if solvemedia:
dialog.close()
html = net().http_GET(solvemedia.group(1)).content
hugekey=re.search('id="adcopy_challenge" value="(.+?)">', html).group(1)
open(puzzle_img, 'wb').write(net().http_GET("http://api.solvemedia.com%s" % re.search('<img src="(.+?)"', html).group(1)).content)
img = xbmcgui.ControlImage(450,15,400,130, puzzle_img)
wdlg = xbmcgui.WindowDialog()
wdlg.addControl(img)
wdlg.show()
kb = xbmc.Keyboard('', 'Type the letters in the image', False)
kb.doModal()
capcode = kb.getText()
if (kb.isConfirmed()):
userInput = kb.getText()
if userInput != '':
solution = kb.getText()
elif userInput == '':
xbmc.executebuiltin("XBMC.Notification(You must enter text in the image to access video,2000)")
return False
else:
return False
wdlg.close()
dialog.create('Resolving', 'Resolving Mash Up 180Upload Link...')
dialog.update(50)
if solution:
data.update({'adcopy_challenge': hugekey,'adcopy_response': solution})
print 'Mash Up 180Upload - Requesting POST URL: %s' % url
html = net().http_POST(url, data).content
if dialog.iscanceled(): return False
dialog.update(100)
link = re.search('id="lnk_download" href="([^"]+)"', html)
if link:
print 'Mash Up 180Upload Link Found: %s' % link.group(1)
return link.group(1)
else:
raise Exception('Unable to resolve 180Upload Link')
except Exception, e:
logerror('**** Mash Up 180Upload Error occured: %s' % e)
raise ResolverError(str(e),"180Upload")
finally:
dialog.close()
def resolve_vidto(url):
user_agent='Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'
from resources.libs import jsunpack
import time
try:
dialog = xbmcgui.DialogProgress()
dialog.create('Resolving', 'Resolving Mash Up Vidto Link...')
dialog.update(0)
html = net(user_agent).http_GET(url).content
if dialog.iscanceled(): return False
dialog.update(11)
logerror('Mash Up: Resolve Vidto - Requesting GET URL: '+url)
r = re.findall(r'<font class="err">File was removed</font>',html,re.I)
if r:
logerror('Mash Up: Resolve Vidto - File Was Removed')
xbmc.executebuiltin("XBMC.Notification(File Not Found,Vidto,2000)")
return False
if not r:
            r = re.findall(r'(eval\(function\(p,a,c,k,e,d\)\{while.+?flvplayer.+?)</script>', html, re.M|re.DOTALL)
if r:
unpacked = jsunpack.unpack(r[0])#this is where it will error, not sure if resources,libs added to os path
try:
r = re.findall(r'label:"360p",file:"(.+?)"}',unpacked)[0]
except:
r = re.findall(r'label:"240p",file:"(.+?)"}',unpacked)[0]
if not r:
r = re.findall('type="hidden" name="(.+?)" value="(.+?)">',html)
post_data = {}
for name, value in r:
post_data[name] = value.encode('utf-8')
post_data['usr_login'] = ''
post_data['referer'] = url
for i in range(7):
time.sleep(1)
if dialog.iscanceled(): return False
                dialog.update(int(22+i*11.3))
html = net(user_agent).http_POST(url,post_data).content
            r = re.findall(r'(eval\(function\(p,a,c,k,e,d\)\{while.+?flvplayer.+?)</script>', html, re.M|re.DOTALL)
if r:
unpacked = jsunpack.unpack(r[0])
try:
r = re.findall(r'label:"360p",file:"(.+?)"}',unpacked)[0]
except:
r = re.findall(r'label:"240p",file:"(.+?)"}',unpacked)[0]
if not r:
r = re.findall(r"var file_link = '(.+?)';",html)[0]
if dialog.iscanceled(): return False
dialog.update(100)
return r
except Exception, e:
logerror('Mash Up: Resolve Vidto Error - '+str(e))
raise ResolverError(str(e),"Vidto")
finally:
dialog.close()
def resolve_epicshare(url):
try:
puzzle_img = os.path.join(datapath, "epicshare_puzzle.png")
#Show dialog box so user knows something is happening
dialog = xbmcgui.DialogProgress()
dialog.create('Resolving', 'Resolving MashUp EpicShare Link...')
dialog.update(0)
print 'EpicShare - MashUp Requesting GET URL: %s' % url
html = net().http_GET(url).content
if dialog.iscanceled(): return False
dialog.update(50)
#Check page for any error msgs
if re.search('This server is in maintenance mode', html):
logerror('***** EpicShare - Site reported maintenance mode')
xbmc.executebuiltin("XBMC.Notification(File is currently unavailable,EpicShare in maintenance,2000)")
return False
if re.search('<b>File Not Found</b>', html):
logerror('***** EpicShare - File not found')
xbmc.executebuiltin("XBMC.Notification(File Not Found,EpicShare,2000)")
return False
data = {}
r = re.findall(r'type="hidden" name="(.+?)" value="(.+?)">', html)
if r:
for name, value in r:
data[name] = value
else:
logerror('***** EpicShare - Cannot find data values')
raise Exception('Unable to resolve EpicShare Link')
#Check for SolveMedia Captcha image
solvemedia = re.search('<iframe src="(http://api.solvemedia.com.+?)"', html)
if solvemedia:
dialog.close()
html = net().http_GET(solvemedia.group(1)).content
hugekey=re.search('id="adcopy_challenge" value="(.+?)">', html).group(1)
open(puzzle_img, 'wb').write(net().http_GET("http://api.solvemedia.com%s" % re.search('<img src="(.+?)"', html).group(1)).content)
img = xbmcgui.ControlImage(450,15,400,130, puzzle_img)
wdlg = xbmcgui.WindowDialog()
wdlg.addControl(img)
wdlg.show()
kb = xbmc.Keyboard('', 'Type the letters in the image', False)
kb.doModal()
capcode = kb.getText()
if (kb.isConfirmed()):
userInput = kb.getText()
if userInput != '':
solution = kb.getText()
elif userInput == '':
Notify('big', 'No text entered', 'You must enter text in the image to access video', '')
return False
else:
return False
wdlg.close()
dialog.create('Resolving', 'Resolving MashUp EpicShare Link...')
dialog.update(50)
if solution:
data.update({'adcopy_challenge': hugekey,'adcopy_response': solution})
print 'EpicShare - MashUp Requesting POST URL: %s' % url
html = net().http_POST(url, data).content
if dialog.iscanceled(): return False
dialog.update(100)
link = re.search('<a id="lnk_download" href=".+?product_download_url=(.+?)">', html)
if link:
print 'MashUp EpicShare Link Found: %s' % link.group(1)
return link.group(1)
else:
logerror('***** EpicShare - Cannot find final link')
raise Exception('Unable to resolve EpicShare Link')
except Exception, e:
logerror('**** EpicShare MashUp Error occured: %s' % e)
raise ResolverError(str(e),"EpicShare")
finally:
dialog.close()
def resolve_lemupload(url):
try:
#Show dialog box so user knows something is happening
dialog = xbmcgui.DialogProgress()
dialog.create('Resolving', 'Resolving MashUp LemUpload Link...')
dialog.update(0)
#
print 'LemUpload - MashUp Requesting GET URL: %s' % url
html = net().http_GET(url).content
if dialog.iscanceled(): return False
dialog.update(50)
#Check page for any error msgs
if re.search('<b>File Not Found</b>', html):
print '***** LemUpload - File Not Found'
xbmc.executebuiltin("XBMC.Notification(File Not Found,LemUpload,2000)")
return False
if re.search('This server is in maintenance mode', html):
print '***** LemUpload - Server is in maintenance mode'
xbmc.executebuiltin("XBMC.Notification(Site In Maintenance,LemUpload,2000)")
return False
filename = re.search('<h2>(.+?)</h2>', html).group(1)
extension = re.search('(\.[^\.]*$)', filename).group(1)
guid = re.search('http://lemuploads.com/(.+)$', url).group(1)
vid_embed_url = 'http://lemuploads.com/vidembed-%s%s' % (guid, extension)
request = urllib2.Request(vid_embed_url)
request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.72 Safari/537.36')
request.add_header('Referer', url)
response = urllib2.urlopen(request)
if dialog.iscanceled(): return False
dialog.update(100)
link = response.geturl()
if link:
redirect_url = re.search('(http://.+?)video', link)
if redirect_url:
link = redirect_url.group(1) + filename
print 'MashUp LemUpload Link Found: %s' % link
return link
else:
logerror('***** LemUpload - Cannot find final link')
raise Exception('Unable to resolve LemUpload Link')
except Exception, e:
logerror('**** LemUpload Error occured: %s' % e)
raise ResolverError(str(e),"LemUpload")
finally:
dialog.close()
def resolve_mightyupload(url):
from resources.libs import jsunpack
try:
dialog = xbmcgui.DialogProgress()
dialog.create('Resolving', 'Resolving MightyUpload Link...')
dialog.update(0)
html = net().http_GET(url).content
if dialog.iscanceled(): return False
dialog.update(50)
logerror('Mash Up: Resolve MightyUpload - Requesting GET URL: '+url)
r = re.findall(r'name="(.+?)" value="?(.+?)"', html, re.I|re.M)
if r:
post_data = {}
for name, value in r:
post_data[name] = value
post_data['referer'] = url
html = net().http_POST(url, post_data).content
if dialog.iscanceled(): return False
dialog.update(100)
r = re.findall(r'<a href=\"(.+?)(?=\">Download the file</a>)', html)
return r[0]
else:
logerror('***** MightyUpload - File not found')
xbmc.executebuiltin("XBMC.Notification(File Not Found,MightyUpload,2000,"+elogo+")")
return False
except Exception, e:
logerror('Mash Up: Resolve MightyUpload Error - '+str(e))
raise ResolverError(str(e),"MightyUpload")
def resolve_hugefiles(url):
from resources.libs import jsunpack
try:
import time
puzzle_img = os.path.join(datapath, "hugefiles_puzzle.png")
dialog = xbmcgui.DialogProgress()
dialog.create('Resolving', 'Resolving MashUp HugeFiles Link...')
dialog.update(0)
html = net().http_GET(url).content
r = re.findall('File Not Found',html)
if r:
xbmc.log('Mash Up: Resolve HugeFiles - File Not Found or Removed', xbmc.LOGERROR)
xbmc.executebuiltin("XBMC.Notification(File Not Found or Removed,HugeFiles,2000)")
return False
data = {}
r = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', html)
for name, value in r:
data[name] = value
data.update({'method_free':'Free Download'})
if data['fname'] and re.search('\.(rar|zip)$', data['fname'], re.I):
dialog.update(100)
logerror('Mash Up: Resolve HugeFiles - No Video File Found')
xbmc.executebuiltin("XBMC.Notification(No Video File Found,HugeFiles,2000)")
return False
if dialog.iscanceled(): return False
dialog.update(33)
#Check for SolveMedia Captcha image
solvemedia = re.search('<iframe src="(http://api.solvemedia.com.+?)"', html)
recaptcha = re.search('<script type="text/javascript" src="(http://www.google.com.+?)">', html)
if solvemedia:
html = net().http_GET(solvemedia.group(1)).content
hugekey=re.search('id="adcopy_challenge" value="(.+?)">', html).group(1)
open(puzzle_img, 'wb').write(net().http_GET("http://api.solvemedia.com%s" % re.search('img src="(.+?)"', html).group(1)).content)
img = xbmcgui.ControlImage(450,15,400,130, puzzle_img)
wdlg = xbmcgui.WindowDialog()
wdlg.addControl(img)
wdlg.show()
xbmc.sleep(3000)
kb = xbmc.Keyboard('', 'Type the letters in the image', False)
kb.doModal()
capcode = kb.getText()
if (kb.isConfirmed()):
userInput = kb.getText()
if userInput != '':
solution = kb.getText()
elif userInput == '':
xbmc.executebuiltin("XBMC.Notification(No text entered, You must enter text in the image to access video,2000)")
return False
else:
return False
wdlg.close()
dialog.update(66)
if solution:
data.update({'adcopy_challenge': hugekey,'adcopy_response': solution})
elif recaptcha:
html = net().http_GET(recaptcha.group(1)).content
part = re.search("challenge \: \\'(.+?)\\'", html)
captchaimg = 'http://www.google.com/recaptcha/api/image?c='+part.group(1)
img = xbmcgui.ControlImage(450,15,400,130,captchaimg)
wdlg = xbmcgui.WindowDialog()
wdlg.addControl(img)
wdlg.show()
time.sleep(3)
kb = xbmc.Keyboard('', 'Type the letters in the image', False)
kb.doModal()
capcode = kb.getText()
if (kb.isConfirmed()):
userInput = kb.getText()
if userInput != '':
solution = kb.getText()
elif userInput == '':
raise Exception ('You must enter text in the image to access video')
else:
raise Exception ('Captcha Error')
wdlg.close()
dialog.update(66)
data.update({'recaptcha_challenge_field':part.group(1),'recaptcha_response_field':solution})
else:
captcha = re.compile("left:(\d+)px;padding-top:\d+px;'>&#(.+?);<").findall(html)
result = sorted(captcha, key=lambda ltr: int(ltr[0]))
solution = ''.join(str(int(num[1])-48) for num in result)
dialog.update(66)
data.update({'code':solution})
html = net().http_POST(url, data).content
if dialog.iscanceled(): return False
if 'reached the download-limit' in html:
logerror('Mash Up: Resolve HugeFiles - Daily Limit Reached, Cannot Get The File\'s Url')
xbmc.executebuiltin("XBMC.Notification(Daily Limit Reached,HugeFiles,2000)")
return False
r = re.findall("software_download_url : '(.+?)',", html, re.DOTALL + re.IGNORECASE)
if r:
dialog.update(100)
return r[0]
if not r:
sPattern = '''<div id="player_code">.*?<script type='text/javascript'>(eval.+?)</script>'''
jpack = re.findall(sPattern, html, re.DOTALL|re.I)
if jpack:
dialog.update(100)
sUnpacked = jsunpack.unpack(jpack[0])
sUnpacked = sUnpacked.replace("\\'","")
r = re.findall('file,(.+?)\)\;s1',sUnpacked)
if not r:
r = re.findall('"src"value="(.+?)"/><embed',sUnpacked)
return r[0]
else:
logerror('***** HugeFiles - Cannot find final link')
raise Exception('Unable to resolve HugeFiles Link')
except Exception, e:
logerror('Mash Up: Resolve HugeFiles Error - '+str(e))
raise ResolverError(str(e),"HugeFiles")
| marduk191/plugin.video.movie25 | resources/libs/resolvers.py | Python | gpl-3.0 | 60,306 | 0.014012 |
"""
This code was originally published by the following individuals for use with
Scilab:
Copyright (C) 2012 - 2013 - Michael Baudin
Copyright (C) 2012 - Maria Christopoulou
Copyright (C) 2010 - 2011 - INRIA - Michael Baudin
Copyright (C) 2009 - Yann Collette
Copyright (C) 2009 - CEA - Jean-Marc Martinez
website: forge.scilab.org/index.php/p/scidoe/sourcetree/master/macros
Much thanks goes to these individuals. It has been converted to Python by
Abraham Lee.
"""
import re
import numpy as np
__all__ = ['np', 'fullfact', 'ff2n', 'fracfact']
def fullfact(levels):
"""
Create a general full-factorial design
Parameters
----------
levels : array-like
An array of integers that indicate the number of levels of each input
design factor.
Returns
-------
mat : 2d-array
The design matrix with coded levels 0 to k-1 for a k-level factor
Example
-------
::
>>> fullfact([2, 4, 3])
array([[ 0., 0., 0.],
[ 1., 0., 0.],
[ 0., 1., 0.],
[ 1., 1., 0.],
[ 0., 2., 0.],
[ 1., 2., 0.],
[ 0., 3., 0.],
[ 1., 3., 0.],
[ 0., 0., 1.],
[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 1., 1.],
[ 0., 2., 1.],
[ 1., 2., 1.],
[ 0., 3., 1.],
[ 1., 3., 1.],
[ 0., 0., 2.],
[ 1., 0., 2.],
[ 0., 1., 2.],
[ 1., 1., 2.],
[ 0., 2., 2.],
[ 1., 2., 2.],
[ 0., 3., 2.],
[ 1., 3., 2.]])
"""
n = len(levels) # number of factors
nb_lines = np.prod(levels) # number of trial conditions
H = np.zeros((nb_lines, n))
level_repeat = 1
range_repeat = np.prod(levels)
for i in range(n):
range_repeat //= levels[i]
lvl = []
for j in range(levels[i]):
lvl += [j]*level_repeat
rng = lvl*range_repeat
level_repeat *= levels[i]
H[:, i] = rng
return H
################################################################################
def ff2n(n):
"""
Create a 2-Level full-factorial design
Parameters
----------
n : int
The number of factors in the design.
Returns
-------
mat : 2d-array
The design matrix with coded levels -1 and 1
Example
-------
::
>>> ff2n(3)
array([[-1., -1., -1.],
[ 1., -1., -1.],
[-1., 1., -1.],
[ 1., 1., -1.],
[-1., -1., 1.],
[ 1., -1., 1.],
[-1., 1., 1.],
[ 1., 1., 1.]])
"""
return 2*fullfact([2]*n) - 1
################################################################################
def fracfact(gen):
"""
Create a 2-level fractional-factorial design with a generator string.
Parameters
----------
gen : str
A string, consisting of lowercase, uppercase letters or operators "-"
and "+", indicating the factors of the experiment
Returns
-------
H : 2d-array
A m-by-n matrix, the fractional factorial design. m is 2^k, where k
is the number of letters in ``gen``, and n is the total number of
entries in ``gen``.
Notes
-----
In ``gen`` we define the main factors of the experiment and the factors
whose levels are the products of the main factors. For example, if
gen = "a b ab"
then "a" and "b" are the main factors, while the 3rd factor is the product
of the first two. If we input uppercase letters in ``gen``, we get the same
result. We can also use the operators "+" and "-" in ``gen``.
For example, if
gen = "a b -ab"
then the 3rd factor is the opposite of the product of "a" and "b".
The output matrix includes the two level full factorial design, built by
the main factors of ``gen``, and the products of the main factors. The
columns of ``H`` follow the sequence of ``gen``.
For example, if
gen = "a b ab c"
then columns H[:, 0], H[:, 1], and H[:, 3] include the two level full
factorial design and H[:, 2] includes the products of the main factors.
Examples
--------
::
>>> fracfact("a b ab")
array([[-1., -1., 1.],
[ 1., -1., -1.],
[-1., 1., -1.],
[ 1., 1., 1.]])
>>> fracfact("A B AB")
array([[-1., -1., 1.],
[ 1., -1., -1.],
[-1., 1., -1.],
[ 1., 1., 1.]])
>>> fracfact("a b -ab c +abc")
array([[-1., -1., -1., -1., -1.],
[ 1., -1., 1., -1., 1.],
[-1., 1., 1., -1., 1.],
[ 1., 1., -1., -1., -1.],
[-1., -1., -1., 1., 1.],
[ 1., -1., 1., 1., -1.],
[-1., 1., 1., 1., -1.],
[ 1., 1., -1., 1., 1.]])
"""
# Recognize letters and combinations
A = [item for item in re.split('\-?\s?\+?', gen) if item] # remove empty strings
C = [len(item) for item in A]
# Indices of single letters (main factors)
I = [i for i, item in enumerate(C) if item==1]
# Indices of letter combinations (we need them to fill out H2 properly).
J = [i for i, item in enumerate(C) if item!=1]
# Check if there are "-" or "+" operators in gen
U = [item for item in gen.split(' ') if item] # remove empty strings
    # R1 marks factors prefixed with "+"; they need no adjustment, since "+"
    # amounts to multiplication by 1.
R1 = _grep(U, '+')
R2 = _grep(U, '-')
# Fill in design with two level factorial design
H1 = ff2n(len(I))
H = np.zeros((H1.shape[0], len(C)))
H[:, I] = H1
# Recognize combinations and fill in the rest of matrix H2 with the proper
# products
for k in J:
# For lowercase letters
xx = np.array([ord(c) for c in A[k]]) - 97
# For uppercase letters
if np.any(xx<0):
xx = np.array([ord(c) for c in A[k]]) - 65
H[:, k] = np.prod(H1[:, xx], axis=1)
# Update design if gen includes "-" operator
if R2:
H[:, R2] *= -1
# Return the fractional factorial design
return H
def _grep(haystack, needle):
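    """Return the indices of the items in ``haystack`` that contain ``needle``.

    If ``haystack`` is not indexable, fall back to a plain membership test and
    return ``[0]`` when ``needle`` is present, otherwise ``[]``.
    """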
try:
haystack[0]
except (TypeError, AttributeError):
return [0] if needle in haystack else []
else:
locs = []
for idx, item in enumerate(haystack):
if needle in item:
locs += [idx]
return locs
| tisimst/pyDOE | pyDOE/doe_factorial.py | Python | bsd-3-clause | 7,200 | 0.007917 |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Convolutional autoencoder example network on MNIST data set.
Usage:
python examples/conv_autoencoder.py
"""
import numpy as np
from neon import logger as neon_logger
from neon.data import ArrayIterator, MNIST
from neon.initializers import Uniform
from neon.layers import Conv, Pooling, GeneralizedCost, Deconv
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Rectlin, SumSquared
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser
# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args()
# Load dataset
dataset = MNIST(path=args.data_dir)
(X_train, y_train), (X_test, y_test), nclass = dataset.load_data()
# Set input and target to X_train
train = ArrayIterator(X_train, lshape=(1, 28, 28))
# Initialize the weights and the learning rule
init_uni = Uniform(low=-0.1, high=0.1)
opt_gdm = GradientDescentMomentum(learning_rate=0.001, momentum_coef=0.9)
# Strided conv autoencoder
bn = False
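# Roughly: two conv+pool stages encode the 28x28 digit into a smaller feature
# map, and three deconv stages decode it back to a 28x28 reconstruction
# (the strides/paddings below are assumed to restore the original size).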
layers = [Conv((4, 4, 8), init=init_uni, activation=Rectlin(), batch_norm=bn),
Pooling(2),
Conv((4, 4, 32), init=init_uni, activation=Rectlin(), batch_norm=bn),
Pooling(2),
Deconv(fshape=(4, 4, 8), init=init_uni,
activation=Rectlin(), batch_norm=bn),
Deconv(fshape=(3, 3, 8), init=init_uni,
activation=Rectlin(), strides=2, batch_norm=bn),
Deconv(fshape=(2, 2, 1), init=init_uni, strides=2, padding=1)]
# Define the cost
cost = GeneralizedCost(costfunc=SumSquared())
model = Model(layers=layers)
# configure callbacks
callbacks = Callbacks(model, **args.callback_args)
# Fit the model
model.fit(train, optimizer=opt_gdm, num_epochs=args.epochs,
cost=cost, callbacks=callbacks)
# Plot the reconstructed digits
try:
from matplotlib import pyplot, cm
fi = 0
nrows = 10
ncols = 12
test = np.zeros((28 * nrows, 28 * ncols))
idxs = [(row, col) for row in range(nrows) for col in range(ncols)]
for row, col in idxs:
im = model.layers.layers[-1].outputs.get()[:, fi].reshape((28, 28))
test[28 * row:28 * (row + 1):, 28 * col:28 * (col + 1)] = im
fi = fi + 1
pyplot.matshow(test, cmap=cm.gray)
pyplot.savefig('Reconstructed.png')
except ImportError:
neon_logger.display(
'matplotlib needs to be manually installed to generate plots')
| Jokeren/neon | examples/conv_autoencoder.py | Python | apache-2.0 | 3,219 | 0 |
#-*- coding: utf-8 -*-
"""
Copyright (c) 2012 University of Oxford
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
_USERS = {
'admin': {'owner': '*', 'first_name': 'Databank', 'last_name': 'Admin', 'role': 'admin', 'description': 'Admin for all silos'}
}
| fcrepo4-archive/RDFDatabank | rdfdatabank/config/users.py | Python | mit | 1,238 | 0.002423 |
from django.conf.urls import url
from django_jinja_ref.superadvanced import views
urlpatterns = [
url(r'^$', views.django, name='django'),
url(r'^jinja$', views.jinja, name='jinja'),
]
| rlr/django_jinja_ref | django_jinja_ref/superadvanced/urls.py | Python | mpl-2.0 | 195 | 0 |
"""
This module is the snapshots command of bowl.
Created on 17 July 2014
@author: Charlie Lewis
"""
import ast
import os
class snapshots(object):
"""
This class is responsible for the snapshots command of the cli.
"""
@classmethod
def main(self, args):
# !! TODO needs to implement login if using that
snapshots = []
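        # The "snapshots" metadata file holds one dict literal per line, e.g.
        # {'snapshot_id': 'abc123', ...} (illustrative values only).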
try:
directory = args.metadata_path
directory = os.path.expanduser(directory)
with open(os.path.join(directory, "snapshots"), 'r') as f:
for line in f:
snapshot = ast.literal_eval(line.rstrip("\n"))
snapshots.append(snapshot['snapshot_id'])
except:
pass
if not args.z:
for snapshot in snapshots:
print snapshot
return snapshots
| cglewis/bowl | bowl/cli_opts/snapshots.py | Python | apache-2.0 | 850 | 0.003529 |
import os, re, csv
# regular expressions for capturing the interesting quantities
noise_pattern = 'noise: \[(.+)\]'
res_pattern = '^([0-9.]+$)'
search_dir = "output"
results_file = '../results.csv'
os.chdir( search_dir )
files = filter( os.path.isfile, os.listdir( '.' ))
#files = [ os.path.join( search_dir, f ) for f in files ] # add path to each file
files.sort( key=lambda x: os.path.getmtime( x ))
results = []
for file in files:
f = open( file )
contents = f.read()
# noise
matches = re.search( noise_pattern, contents, re.DOTALL )
try:
noise = matches.group( 1 )
noise = noise.strip()
noise = noise.split()
except AttributeError:
print "noise error 1: %s" % ( contents )
continue
# rmse
matches = re.search( res_pattern, contents, re.M )
try:
res = matches.group( 1 )
except AttributeError:
print "matches error 2: %s" % ( contents )
continue
results.append( [ res ] + noise )
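# Each CSV row is [result, noise_1, ..., noise_n], ordered by file mtime.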
writer = csv.writer( open( results_file, 'wb' ))
for result in results:
writer.writerow( result ) | jiminliang/msda-denoising | spearmint_variable_noise/output2csv.py | Python | apache-2.0 | 1,028 | 0.066148 |
# Copyright 2013 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import httplib
from oslo_utils import excutils
import six
from neutron.common import log as call_log
from neutron.common import utils
from neutron.i18n import _LE, _LW
from neutron.openstack.common import log as logging
from neutron.plugins.nec.common import constants as nconst
from neutron.plugins.nec.common import exceptions as nexc
LOG = logging.getLogger(__name__)
PROVIDER_OPENFLOW = nconst.ROUTER_PROVIDER_OPENFLOW
@six.add_metaclass(abc.ABCMeta)
class RouterDriverBase(object):
def __init__(self, plugin, ofc_manager):
self.plugin = plugin
self.ofc = ofc_manager
def floating_ip_support(self):
return True
@abc.abstractmethod
def create_router(self, context, tenant_id, router):
pass
@abc.abstractmethod
def update_router(self, context, router_id, old_router, new_router):
pass
@abc.abstractmethod
def delete_router(self, context, router_id, router):
pass
@abc.abstractmethod
def add_interface(self, context, router_id, port):
pass
@abc.abstractmethod
def delete_interface(self, context, router_id, port):
pass
class RouterL3AgentDriver(RouterDriverBase):
need_gw_info = False
@call_log.log
def create_router(self, context, tenant_id, router):
return router
@call_log.log
def update_router(self, context, router_id, old_router, new_router):
return new_router
@call_log.log
def delete_router(self, context, router_id, router):
pass
@call_log.log
def add_interface(self, context, router_id, port):
return self.plugin.activate_port_if_ready(context, port)
@call_log.log
def delete_interface(self, context, router_id, port):
return self.plugin.deactivate_port(context, port)
class RouterOpenFlowDriver(RouterDriverBase):
need_gw_info = True
def floating_ip_support(self):
return self.ofc.driver.router_nat_supported
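    # Gateway info is folded into the static routes as a default route
    # (0.0.0.0/0) pointing at the gateway IP.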
def _process_gw_port(self, gw_info, routes):
if gw_info and gw_info['gateway_ip']:
routes.append({'destination': '0.0.0.0/0',
'nexthop': gw_info['gateway_ip']})
@call_log.log
def create_router(self, context, tenant_id, router):
try:
router_id = router['id']
added_routes = []
self.ofc.ensure_ofc_tenant(context, tenant_id)
self.ofc.create_ofc_router(context, tenant_id, router_id,
router['name'])
self._process_gw_port(router['gw_port'], added_routes)
if added_routes:
self.ofc.update_ofc_router_route(context, router_id,
added_routes, [])
new_status = nconst.ROUTER_STATUS_ACTIVE
self.plugin._update_resource_status(context, "router",
router['id'],
new_status)
router['status'] = new_status
return router
except (nexc.OFCException, nexc.OFCMappingNotFound) as exc:
with excutils.save_and_reraise_exception():
if (isinstance(exc, nexc.OFCException) and
exc.status == httplib.CONFLICT):
raise nexc.RouterOverLimit(provider=PROVIDER_OPENFLOW)
LOG.error(_LE("create_router() failed due to %s"), exc)
new_status = nconst.ROUTER_STATUS_ERROR
                self.plugin._update_resource_status(context, "router",
                                                    router['id'],
                                                    new_status)
@call_log.log
def update_router(self, context, router_id, old_router, new_router):
old_routes = old_router['routes'][:]
new_routes = new_router['routes'][:]
self._process_gw_port(old_router['gw_port'], old_routes)
self._process_gw_port(new_router['gw_port'], new_routes)
added, removed = utils.diff_list_of_dict(old_routes, new_routes)
if added or removed:
try:
# NOTE(amotoki): PFC supports one-by-one route update at now.
# It means there may be a case where some route is updated but
# some not. To allow the next call of failures to sync routes
# with Neutron side, we pass the whole new routes here.
# PFC should support atomic route update in the future.
self.ofc.update_ofc_router_route(context, router_id,
new_routes)
new_status = nconst.ROUTER_STATUS_ACTIVE
self.plugin._update_resource_status(
context, "router", router_id, new_status)
new_router['status'] = new_status
except (nexc.OFCException, nexc.OFCMappingNotFound) as exc:
with excutils.save_and_reraise_exception():
LOG.error(_LE("_update_ofc_routes() failed due to %s"),
exc)
new_status = nconst.ROUTER_STATUS_ERROR
self.plugin._update_resource_status(
context, "router", router_id, new_status)
return new_router
@call_log.log
def delete_router(self, context, router_id, router):
if not self.ofc.exists_ofc_router(context, router_id):
return
try:
self.ofc.delete_ofc_router(context, router_id, router)
except (nexc.OFCException, nexc.OFCMappingNotFound) as exc:
with excutils.save_and_reraise_exception():
LOG.error(_LE("delete_router() failed due to %s"), exc)
self.plugin._update_resource_status(
context, "router", router_id, nconst.ROUTER_STATUS_ERROR)
@call_log.log
def add_interface(self, context, router_id, port):
port_id = port['id']
# port['fixed_ips'] may be empty if ext_net has no subnet.
# Such port is invalid for a router port and we don't create a port
# on OFC. The port is removed in l3_db._create_router_gw_port.
if not port['fixed_ips']:
LOG.warning(_LW('RouterOpenFlowDriver.add_interface(): the '
'requested port '
'has no subnet. add_interface() is skipped. '
'router_id=%(id)s, port=%(port)s)'),
{'id': router_id, 'port': port})
return port
fixed_ip = port['fixed_ips'][0]
subnet = self.plugin._get_subnet(context, fixed_ip['subnet_id'])
port_info = {'network_id': port['network_id'],
'ip_address': fixed_ip['ip_address'],
'cidr': subnet['cidr'],
'mac_address': port['mac_address']}
try:
self.ofc.add_ofc_router_interface(context, router_id,
port_id, port_info)
new_status = nconst.ROUTER_STATUS_ACTIVE
self.plugin._update_resource_status(
context, "port", port_id, new_status)
return port
except (nexc.OFCException, nexc.OFCMappingNotFound) as exc:
with excutils.save_and_reraise_exception():
LOG.error(_LE("add_router_interface() failed due to %s"), exc)
new_status = nconst.ROUTER_STATUS_ERROR
self.plugin._update_resource_status(
context, "port", port_id, new_status)
@call_log.log
def delete_interface(self, context, router_id, port):
port_id = port['id']
try:
self.ofc.delete_ofc_router_interface(context, router_id, port_id)
new_status = nconst.ROUTER_STATUS_ACTIVE
self.plugin._update_resource_status(context, "port", port_id,
new_status)
port['status'] = new_status
return port
except (nexc.OFCException, nexc.OFCMappingNotFound) as exc:
with excutils.save_and_reraise_exception():
LOG.error(_LE("delete_router_interface() failed due to %s"),
exc)
new_status = nconst.ROUTER_STATUS_ERROR
self.plugin._update_resource_status(context, "port", port_id,
new_status)
| cloudbase/neutron-virtualbox | neutron/plugins/nec/router_drivers.py | Python | apache-2.0 | 9,085 | 0.00011 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
import json
import uuid
import datetime
from boto3 import Session
from moto.core import BaseBackend, BaseModel
from .exceptions import (
SecretNotFoundException,
SecretHasNoValueException,
InvalidParameterException,
ResourceExistsException,
ResourceNotFoundException,
InvalidRequestException,
ClientError,
)
from .utils import random_password, secret_arn, get_secret_name_from_arn
from .list_secrets.filters import all, tag_key, tag_value, description, name
_filter_functions = {
"all": all,
"name": name,
"description": description,
"tag-key": tag_key,
"tag-value": tag_value,
}
def filter_keys():
return list(_filter_functions.keys())
def _matches(secret, filters):
is_match = True
for f in filters:
# Filter names are pre-validated in the resource layer
filter_function = _filter_functions.get(f["Key"])
is_match = is_match and filter_function(secret, f["Values"])
return is_match
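# Filters are expected in the AWS list-secrets shape, e.g.
# [{"Key": "tag-key", "Values": ["env"]}] (illustrative values).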
class SecretsManager(BaseModel):
def __init__(self, region_name, **kwargs):
self.region = region_name
class FakeSecret:
def __init__(
self,
region_name,
secret_id,
secret_string=None,
secret_binary=None,
description=None,
tags=[],
version_id=None,
version_stages=None,
):
self.secret_id = secret_id
self.name = secret_id
self.arn = secret_arn(region_name, secret_id)
self.secret_string = secret_string
self.secret_binary = secret_binary
self.description = description
self.tags = tags
self.version_id = version_id
self.version_stages = version_stages
self.rotation_enabled = False
self.rotation_lambda_arn = ""
self.auto_rotate_after_days = 0
self.deleted_date = None
def update(self, description=None, tags=[]):
self.description = description
self.tags = tags
def set_versions(self, versions):
self.versions = versions
def set_default_version_id(self, version_id):
self.default_version_id = version_id
def reset_default_version(self, secret_version, version_id):
# remove all old AWSPREVIOUS stages
for old_version in self.versions.values():
if "AWSPREVIOUS" in old_version["version_stages"]:
old_version["version_stages"].remove("AWSPREVIOUS")
# set old AWSCURRENT secret to AWSPREVIOUS
previous_current_version_id = self.default_version_id
self.versions[previous_current_version_id]["version_stages"] = ["AWSPREVIOUS"]
self.versions[version_id] = secret_version
self.default_version_id = version_id
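    # Illustrative sketch (not part of the original class): effect of
    # reset_default_version() on version stages, with hypothetical version ids.
    #
    #     before: versions == {"v1": {"version_stages": ["AWSCURRENT"]}},  default_version_id == "v1"
    #     after : versions == {"v1": {"version_stages": ["AWSPREVIOUS"]},
    #                          "v2": {"version_stages": ["AWSCURRENT"]}},  default_version_id == "v2"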
def delete(self, deleted_date):
self.deleted_date = deleted_date
def restore(self):
self.deleted_date = None
def is_deleted(self):
return self.deleted_date is not None
def to_short_dict(self, include_version_stages=False):
dct = {
"ARN": self.arn,
"Name": self.name,
"VersionId": self.default_version_id,
}
if include_version_stages:
dct["VersionStages"] = self.version_stages
return json.dumps(dct)
def to_dict(self):
version_id_to_stages = self._form_version_ids_to_stages()
return {
"ARN": self.arn,
"Name": self.name,
"Description": self.description or "",
"KmsKeyId": "",
"RotationEnabled": self.rotation_enabled,
"RotationLambdaARN": self.rotation_lambda_arn,
"RotationRules": {"AutomaticallyAfterDays": self.auto_rotate_after_days},
"LastRotatedDate": None,
"LastChangedDate": None,
"LastAccessedDate": None,
"DeletedDate": self.deleted_date,
"Tags": self.tags,
"VersionIdsToStages": version_id_to_stages,
"SecretVersionsToStages": version_id_to_stages,
}
def _form_version_ids_to_stages(self):
version_id_to_stages = {}
for key, value in self.versions.items():
version_id_to_stages[key] = value["version_stages"]
return version_id_to_stages
class SecretsStore(dict):
def __setitem__(self, key, value):
new_key = get_secret_name_from_arn(key)
super(SecretsStore, self).__setitem__(new_key, value)
def __getitem__(self, key):
new_key = get_secret_name_from_arn(key)
return super(SecretsStore, self).__getitem__(new_key)
def __contains__(self, key):
new_key = get_secret_name_from_arn(key)
return dict.__contains__(self, new_key)
def pop(self, key, *args, **kwargs):
new_key = get_secret_name_from_arn(key)
return super(SecretsStore, self).pop(new_key, *args, **kwargs)
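# Illustrative sketch (not part of the original module): SecretsStore normalizes
# keys through get_secret_name_from_arn(), so looking a secret up by full ARN or
# by plain name should hit the same entry. The ARN below is hypothetical and the
# behaviour assumes get_secret_name_from_arn() maps both keys to "demo".
#
#     store = SecretsStore()
#     store["demo"] = "some-secret"
#     "arn:aws:secretsmanager:us-east-1:123456789012:secret:demo-AbCdEf" in store  # True (assumed)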
class SecretsManagerBackend(BaseBackend):
def __init__(self, region_name=None, **kwargs):
super(SecretsManagerBackend, self).__init__()
self.region = region_name
self.secrets = SecretsStore()
def reset(self):
region_name = self.region
self.__dict__ = {}
self.__init__(region_name)
def _is_valid_identifier(self, identifier):
return identifier in self.secrets
def _unix_time_secs(self, dt):
epoch = datetime.datetime.utcfromtimestamp(0)
return (dt - epoch).total_seconds()
def get_secret_value(self, secret_id, version_id, version_stage):
if not self._is_valid_identifier(secret_id):
raise SecretNotFoundException()
if not version_id and version_stage:
# set version_id to match version_stage
versions_dict = self.secrets[secret_id].versions
for ver_id, ver_val in versions_dict.items():
if version_stage in ver_val["version_stages"]:
version_id = ver_id
break
if not version_id:
raise SecretNotFoundException()
# TODO check this part
if self.secrets[secret_id].is_deleted():
raise InvalidRequestException(
"An error occurred (InvalidRequestException) when calling the GetSecretValue operation: You tried to \
perform the operation on a secret that's currently marked deleted."
)
secret = self.secrets[secret_id]
version_id = version_id or secret.default_version_id
secret_version = secret.versions.get(version_id)
if not secret_version:
raise ResourceNotFoundException(
"An error occurred (ResourceNotFoundException) when calling the GetSecretValue operation: Secrets "
"Manager can't find the specified secret value for VersionId: {}".format(
version_id
)
)
response_data = {
"ARN": secret.arn,
"Name": secret.name,
"VersionId": secret_version["version_id"],
"VersionStages": secret_version["version_stages"],
"CreatedDate": secret_version["createdate"],
}
if "secret_string" in secret_version:
response_data["SecretString"] = secret_version["secret_string"]
if "secret_binary" in secret_version:
response_data["SecretBinary"] = secret_version["secret_binary"]
if (
"secret_string" not in secret_version
and "secret_binary" not in secret_version
):
raise SecretHasNoValueException(version_stage or "AWSCURRENT")
response = json.dumps(response_data)
return response
def update_secret(
self, secret_id, secret_string=None, secret_binary=None, **kwargs
):
# error if secret does not exist
if secret_id not in self.secrets.keys():
raise SecretNotFoundException()
if self.secrets[secret_id].is_deleted():
raise InvalidRequestException(
"An error occurred (InvalidRequestException) when calling the UpdateSecret operation: "
"You can't perform this operation on the secret because it was marked for deletion."
)
secret = self.secrets[secret_id]
tags = secret.tags
description = secret.description
secret = self._add_secret(
secret_id,
secret_string=secret_string,
secret_binary=secret_binary,
description=description,
tags=tags,
)
return secret.to_short_dict()
def create_secret(
self,
name,
secret_string=None,
secret_binary=None,
description=None,
tags=[],
**kwargs
):
# error if secret exists
if name in self.secrets.keys():
raise ResourceExistsException(
"A resource with the ID you requested already exists."
)
secret = self._add_secret(
name,
secret_string=secret_string,
secret_binary=secret_binary,
description=description,
tags=tags,
)
return secret.to_short_dict()
def _add_secret(
self,
secret_id,
secret_string=None,
secret_binary=None,
description=None,
tags=[],
version_id=None,
version_stages=None,
):
if version_stages is None:
version_stages = ["AWSCURRENT"]
if not version_id:
version_id = str(uuid.uuid4())
secret_version = {
"createdate": int(time.time()),
"version_id": version_id,
"version_stages": version_stages,
}
if secret_string is not None:
secret_version["secret_string"] = secret_string
if secret_binary is not None:
secret_version["secret_binary"] = secret_binary
if secret_id in self.secrets:
secret = self.secrets[secret_id]
secret.update(description, tags)
secret.reset_default_version(secret_version, version_id)
else:
secret = FakeSecret(
region_name=self.region,
secret_id=secret_id,
secret_string=secret_string,
secret_binary=secret_binary,
description=description,
tags=tags,
)
secret.set_versions({version_id: secret_version})
secret.set_default_version_id(version_id)
self.secrets[secret_id] = secret
return secret
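    # Note (added for clarity, not part of the original class): _add_secret()
    # either rotates the default version of an existing secret or registers a
    # new FakeSecret; create_secret(), update_secret() and put_secret_value()
    # below all funnel through it.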
def put_secret_value(self, secret_id, secret_string, secret_binary, version_stages):
if not self._is_valid_identifier(secret_id):
raise SecretNotFoundException()
else:
secret = self.secrets[secret_id]
tags = secret.tags
description = secret.description
secret = self._add_secret(
secret_id,
secret_string,
secret_binary,
description=description,
tags=tags,
version_stages=version_stages,
)
return secret.to_short_dict(include_version_stages=True)
def describe_secret(self, secret_id):
if not self._is_valid_identifier(secret_id):
raise SecretNotFoundException()
secret = self.secrets[secret_id]
return json.dumps(secret.to_dict())
def rotate_secret(
self,
secret_id,
client_request_token=None,
rotation_lambda_arn=None,
rotation_rules=None,
):
rotation_days = "AutomaticallyAfterDays"
if not self._is_valid_identifier(secret_id):
raise SecretNotFoundException()
if self.secrets[secret_id].is_deleted():
raise InvalidRequestException(
"An error occurred (InvalidRequestException) when calling the RotateSecret operation: You tried to \
perform the operation on a secret that's currently marked deleted."
)
if client_request_token:
token_length = len(client_request_token)
if token_length < 32 or token_length > 64:
msg = "ClientRequestToken " "must be 32-64 characters long."
raise InvalidParameterException(msg)
if rotation_lambda_arn:
if len(rotation_lambda_arn) > 2048:
msg = "RotationLambdaARN " "must <= 2048 characters long."
raise InvalidParameterException(msg)
if rotation_rules:
if rotation_days in rotation_rules:
rotation_period = rotation_rules[rotation_days]
if rotation_period < 1 or rotation_period > 1000:
                    msg = "RotationRules.AutomaticallyAfterDays must be within 1-1000."
raise InvalidParameterException(msg)
secret = self.secrets[secret_id]
old_secret_version = secret.versions[secret.default_version_id]
new_version_id = client_request_token or str(uuid.uuid4())
self._add_secret(
secret_id,
old_secret_version["secret_string"],
description=secret.description,
tags=secret.tags,
version_id=new_version_id,
version_stages=["AWSCURRENT"],
)
secret.rotation_lambda_arn = rotation_lambda_arn or ""
if rotation_rules:
secret.auto_rotate_after_days = rotation_rules.get(rotation_days, 0)
if secret.auto_rotate_after_days > 0:
secret.rotation_enabled = True
if "AWSCURRENT" in old_secret_version["version_stages"]:
old_secret_version["version_stages"].remove("AWSCURRENT")
return secret.to_short_dict()
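    # Illustrative sketch (not part of the original class): after rotate_secret()
    # the newly added version (client_request_token, or a fresh uuid4) carries the
    # AWSCURRENT stage and the previously current version loses it. For example:
    #
    #     backend.rotate_secret("demo-secret",
    #                           rotation_rules={"AutomaticallyAfterDays": 30})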
def get_random_password(
self,
password_length,
exclude_characters,
exclude_numbers,
exclude_punctuation,
exclude_uppercase,
exclude_lowercase,
include_space,
require_each_included_type,
):
        # password length must be less than or equal to 4096
if password_length > 4096:
raise ClientError(
"ClientError: An error occurred (ValidationException) \
when calling the GetRandomPassword operation: 1 validation error detected: Value '{}' at 'passwordLength' \
failed to satisfy constraint: Member must have value less than or equal to 4096".format(
password_length
)
)
if password_length < 4:
raise InvalidParameterException(
"InvalidParameterException: An error occurred (InvalidParameterException) \
when calling the GetRandomPassword operation: Password length is too short based on the required types."
)
response = json.dumps(
{
"RandomPassword": random_password(
password_length,
exclude_characters,
exclude_numbers,
exclude_punctuation,
exclude_uppercase,
exclude_lowercase,
include_space,
require_each_included_type,
)
}
)
return response
def list_secret_version_ids(self, secret_id):
secret = self.secrets[secret_id]
version_list = []
for version_id, version in secret.versions.items():
version_list.append(
{
"CreatedDate": int(time.time()),
"LastAccessedDate": int(time.time()),
"VersionId": version_id,
"VersionStages": version["version_stages"],
}
)
response = json.dumps(
{
"ARN": secret.secret_id,
"Name": secret.name,
"NextToken": "",
"Versions": version_list,
}
)
return response
def list_secrets(self, filters, max_results, next_token):
# TODO implement pagination and limits
secret_list = []
for secret in self.secrets.values():
if _matches(secret, filters):
secret_list.append(secret.to_dict())
return secret_list, None
def delete_secret(
self, secret_id, recovery_window_in_days, force_delete_without_recovery
):
if not self._is_valid_identifier(secret_id):
raise SecretNotFoundException()
if self.secrets[secret_id].is_deleted():
raise InvalidRequestException(
"An error occurred (InvalidRequestException) when calling the DeleteSecret operation: You tried to \
perform the operation on a secret that's currently marked deleted."
)
if recovery_window_in_days and force_delete_without_recovery:
raise InvalidParameterException(
"An error occurred (InvalidParameterException) when calling the DeleteSecret operation: You can't \
use ForceDeleteWithoutRecovery in conjunction with RecoveryWindowInDays."
)
if recovery_window_in_days and (
recovery_window_in_days < 7 or recovery_window_in_days > 30
):
raise InvalidParameterException(
"An error occurred (InvalidParameterException) when calling the DeleteSecret operation: The \
RecoveryWindowInDays value must be between 7 and 30 days (inclusive)."
)
deletion_date = datetime.datetime.utcnow()
if force_delete_without_recovery:
secret = self.secrets.pop(secret_id, None)
else:
deletion_date += datetime.timedelta(days=recovery_window_in_days or 30)
self.secrets[secret_id].delete(self._unix_time_secs(deletion_date))
secret = self.secrets.get(secret_id, None)
if not secret:
raise SecretNotFoundException()
arn = secret.arn
name = secret.name
return arn, name, self._unix_time_secs(deletion_date)
def restore_secret(self, secret_id):
if not self._is_valid_identifier(secret_id):
raise SecretNotFoundException()
secret = self.secrets[secret_id]
secret.restore()
return secret.arn, secret.name
def tag_resource(self, secret_id, tags):
if secret_id not in self.secrets.keys():
raise SecretNotFoundException()
secret = self.secrets[secret_id]
old_tags = secret.tags
for tag in tags:
old_tags.append(tag)
return secret_id
def untag_resource(self, secret_id, tag_keys):
if secret_id not in self.secrets.keys():
raise SecretNotFoundException()
secret = self.secrets[secret_id]
tags = secret.tags
for tag in tags:
if tag["Key"] in tag_keys:
tags.remove(tag)
return secret_id
@staticmethod
def get_resource_policy(secret_id):
resource_policy = {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Principal": {
"AWS": [
"arn:aws:iam::111122223333:root",
"arn:aws:iam::444455556666:root",
]
},
"Action": ["secretsmanager:GetSecretValue"],
"Resource": "*",
},
}
return json.dumps(
{
"ARN": secret_id,
"Name": secret_id,
"ResourcePolicy": json.dumps(resource_policy),
}
)
secretsmanager_backends = {}
for region in Session().get_available_regions("secretsmanager"):
secretsmanager_backends[region] = SecretsManagerBackend(region_name=region)
for region in Session().get_available_regions(
"secretsmanager", partition_name="aws-us-gov"
):
secretsmanager_backends[region] = SecretsManagerBackend(region_name=region)
for region in Session().get_available_regions(
"secretsmanager", partition_name="aws-cn"
):
secretsmanager_backends[region] = SecretsManagerBackend(region_name=region)
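# --- Hypothetical usage sketch (not part of the original moto module) ---
# Exercises the backend directly, bypassing the responses layer. The region name
# and secret values are assumptions made for illustration only.
if __name__ == "__main__":
    _backend = SecretsManagerBackend(region_name="us-east-1")
    _backend.create_secret(name="demo-secret", secret_string="s3cr3t")
    # get_secret_value() returns a JSON string describing the AWSCURRENT version.
    print(_backend.get_secret_value("demo-secret", version_id=None,
                                    version_stage="AWSCURRENT"))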
| william-richard/moto | moto/secretsmanager/models.py | Python | apache-2.0 | 20,426 | 0.001175 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import random
if sys.version >= '3':
basestring = unicode = str
long = int
from functools import reduce
else:
from itertools import imap as map
import warnings
from pyspark import copy_func, since
from pyspark.rdd import RDD, _load_from_socket, ignore_unicode_prefix
from pyspark.serializers import BatchedSerializer, PickleSerializer, UTF8Deserializer
from pyspark.storagelevel import StorageLevel
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.sql.types import _parse_datatype_json_string
from pyspark.sql.column import Column, _to_seq, _to_list, _to_java_column
from pyspark.sql.readwriter import DataFrameWriter
from pyspark.sql.streaming import DataStreamWriter
from pyspark.sql.types import *
__all__ = ["DataFrame", "DataFrameNaFunctions", "DataFrameStatFunctions"]
class DataFrame(object):
"""A distributed collection of data grouped into named columns.
A :class:`DataFrame` is equivalent to a relational table in Spark SQL,
and can be created using various functions in :class:`SQLContext`::
people = sqlContext.read.parquet("...")
Once created, it can be manipulated using the various domain-specific-language
(DSL) functions defined in: :class:`DataFrame`, :class:`Column`.
To select a column from the data frame, use the apply method::
ageCol = people.age
A more concrete example::
# To create DataFrame using SQLContext
people = sqlContext.read.parquet("...")
department = sqlContext.read.parquet("...")
people.filter(people.age > 30).join(department, people.deptId == department.id) \\
.groupBy(department.name, "gender").agg({"salary": "avg", "age": "max"})
.. versionadded:: 1.3
"""
def __init__(self, jdf, sql_ctx):
self._jdf = jdf
self.sql_ctx = sql_ctx
self._sc = sql_ctx and sql_ctx._sc
self.is_cached = False
self._schema = None # initialized lazily
self._lazy_rdd = None
@property
@since(1.3)
def rdd(self):
"""Returns the content as an :class:`pyspark.RDD` of :class:`Row`.
"""
if self._lazy_rdd is None:
jrdd = self._jdf.javaToPython()
self._lazy_rdd = RDD(jrdd, self.sql_ctx._sc, BatchedSerializer(PickleSerializer()))
return self._lazy_rdd
@property
@since("1.3.1")
def na(self):
"""Returns a :class:`DataFrameNaFunctions` for handling missing values.
"""
return DataFrameNaFunctions(self)
@property
@since(1.4)
def stat(self):
"""Returns a :class:`DataFrameStatFunctions` for statistic functions.
"""
return DataFrameStatFunctions(self)
@ignore_unicode_prefix
@since(1.3)
def toJSON(self, use_unicode=True):
"""Converts a :class:`DataFrame` into a :class:`RDD` of string.
Each row is turned into a JSON document as one element in the returned RDD.
>>> df.toJSON().first()
u'{"age":2,"name":"Alice"}'
"""
rdd = self._jdf.toJSON()
return RDD(rdd.toJavaRDD(), self._sc, UTF8Deserializer(use_unicode))
@since(1.3)
def registerTempTable(self, name):
"""Registers this RDD as a temporary table using the given name.
The lifetime of this temporary table is tied to the :class:`SQLContext`
that was used to create this :class:`DataFrame`.
>>> df.registerTempTable("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
.. note:: Deprecated in 2.0, use createOrReplaceTempView instead.
"""
self._jdf.createOrReplaceTempView(name)
@since(2.0)
def createTempView(self, name):
"""Creates a local temporary view with this DataFrame.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
        Throws :class:`TempTableAlreadyExistsException` if the view name already exists in the
        catalog.
>>> df.createTempView("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createTempView(name)
@since(2.0)
def createOrReplaceTempView(self, name):
"""Creates or replaces a local temporary view with this DataFrame.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
>>> df.createOrReplaceTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceTempView("people")
>>> df3 = spark.sql("select * from people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createOrReplaceTempView(name)
@since(2.1)
def createGlobalTempView(self, name):
"""Creates a global temporary view with this DataFrame.
The lifetime of this temporary view is tied to this Spark application.
        Throws :class:`TempTableAlreadyExistsException` if the view name already exists in the
        catalog.
>>> df.createGlobalTempView("people")
>>> df2 = spark.sql("select * from global_temp.people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createGlobalTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createGlobalTempView(name)
@since(2.2)
def createOrReplaceGlobalTempView(self, name):
"""Creates or replaces a global temporary view using the given name.
The lifetime of this temporary view is tied to this Spark application.
>>> df.createOrReplaceGlobalTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceGlobalTempView("people")
>>> df3 = spark.sql("select * from global_temp.people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createOrReplaceGlobalTempView(name)
@property
@since(1.4)
def write(self):
"""
Interface for saving the content of the non-streaming :class:`DataFrame` out into external
storage.
:return: :class:`DataFrameWriter`
"""
return DataFrameWriter(self)
@property
@since(2.0)
def writeStream(self):
"""
Interface for saving the content of the streaming :class:`DataFrame` out into external
storage.
.. note:: Evolving.
:return: :class:`DataStreamWriter`
"""
return DataStreamWriter(self)
@property
@since(1.3)
def schema(self):
"""Returns the schema of this :class:`DataFrame` as a :class:`pyspark.sql.types.StructType`.
>>> df.schema
StructType(List(StructField(age,IntegerType,true),StructField(name,StringType,true)))
"""
if self._schema is None:
try:
self._schema = _parse_datatype_json_string(self._jdf.schema().json())
except AttributeError as e:
raise Exception(
"Unable to parse datatype from schema. %s" % e)
return self._schema
@since(1.3)
def printSchema(self):
"""Prints out the schema in the tree format.
>>> df.printSchema()
root
|-- age: integer (nullable = true)
|-- name: string (nullable = true)
<BLANKLINE>
"""
print(self._jdf.schema().treeString())
@since(1.3)
def explain(self, extended=False):
"""Prints the (logical and physical) plans to the console for debugging purpose.
:param extended: boolean, default ``False``. If ``False``, prints only the physical plan.
>>> df.explain()
== Physical Plan ==
Scan ExistingRDD[age#0,name#1]
>>> df.explain(True)
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
"""
if extended:
print(self._jdf.queryExecution().toString())
else:
print(self._jdf.queryExecution().simpleString())
@since(1.3)
def isLocal(self):
"""Returns ``True`` if the :func:`collect` and :func:`take` methods can be run locally
(without any Spark executors).
"""
return self._jdf.isLocal()
@property
@since(2.0)
def isStreaming(self):
"""Returns true if this :class:`Dataset` contains one or more sources that continuously
return data as it arrives. A :class:`Dataset` that reads data from a streaming source
must be executed as a :class:`StreamingQuery` using the :func:`start` method in
:class:`DataStreamWriter`. Methods that return a single answer, (e.g., :func:`count` or
:func:`collect`) will throw an :class:`AnalysisException` when there is a streaming
source present.
.. note:: Evolving
"""
return self._jdf.isStreaming()
@since(1.3)
def show(self, n=20, truncate=True, vertical=False):
"""Prints the first ``n`` rows to the console.
:param n: Number of rows to show.
:param truncate: If set to True, truncate strings longer than 20 chars by default.
If set to a number greater than one, truncates long strings to length ``truncate``
            and aligns cells right.
:param vertical: If set to True, print output rows vertically (one line
per column value).
>>> df
DataFrame[age: int, name: string]
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.show(truncate=3)
+---+----+
|age|name|
+---+----+
| 2| Ali|
| 5| Bob|
+---+----+
>>> df.show(vertical=True)
-RECORD 0-----
age | 2
name | Alice
-RECORD 1-----
age | 5
name | Bob
"""
if isinstance(truncate, bool) and truncate:
print(self._jdf.showString(n, 20, vertical))
else:
print(self._jdf.showString(n, int(truncate), vertical))
def __repr__(self):
return "DataFrame[%s]" % (", ".join("%s: %s" % c for c in self.dtypes))
@since(2.1)
def checkpoint(self, eager=True):
"""Returns a checkpointed version of this Dataset. Checkpointing can be used to truncate the
logical plan of this DataFrame, which is especially useful in iterative algorithms where the
plan may grow exponentially. It will be saved to files inside the checkpoint
directory set with L{SparkContext.setCheckpointDir()}.
:param eager: Whether to checkpoint this DataFrame immediately
.. note:: Experimental
"""
jdf = self._jdf.checkpoint(eager)
return DataFrame(jdf, self.sql_ctx)
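    # Hypothetical usage sketch (not an official doctest): checkpoint() assumes a
    # checkpoint directory has already been configured on the SparkContext, e.g.
    #
    #     sc.setCheckpointDir("/tmp/checkpoints")
    #     df2 = df.checkpoint()   # eagerly materializes df and truncates its lineage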
@since(2.1)
def withWatermark(self, eventTime, delayThreshold):
"""Defines an event time watermark for this :class:`DataFrame`. A watermark tracks a point
in time before which we assume no more late data is going to arrive.
Spark will use this watermark for several purposes:
- To know when a given time window aggregation can be finalized and thus can be emitted
when using output modes that do not allow updates.
- To minimize the amount of state that we need to keep for on-going aggregations.
The current watermark is computed by looking at the `MAX(eventTime)` seen across
all of the partitions in the query minus a user specified `delayThreshold`. Due to the cost
of coordinating this value across partitions, the actual watermark used is only guaranteed
to be at least `delayThreshold` behind the actual event time. In some cases we may still
process records that arrive more than `delayThreshold` late.
:param eventTime: the name of the column that contains the event time of the row.
        :param delayThreshold: the minimum delay to wait for data to arrive late, relative to the
latest record that has been processed in the form of an interval
(e.g. "1 minute" or "5 hours").
.. note:: Evolving
>>> sdf.select('name', sdf.time.cast('timestamp')).withWatermark('time', '10 minutes')
DataFrame[name: string, time: timestamp]
"""
if not eventTime or type(eventTime) is not str:
raise TypeError("eventTime should be provided as a string")
if not delayThreshold or type(delayThreshold) is not str:
raise TypeError("delayThreshold should be provided as a string interval")
jdf = self._jdf.withWatermark(eventTime, delayThreshold)
return DataFrame(jdf, self.sql_ctx)
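    # Hypothetical streaming usage sketch (not an official doctest): `events` is an
    # assumed streaming DataFrame with an event-time column named 'time'.
    #
    #     from pyspark.sql.functions import window
    #     counts = (events.withWatermark("time", "10 minutes")
    #                     .groupBy(window("time", "5 minutes"))
    #                     .count())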
@since(2.2)
def hint(self, name, *parameters):
"""Specifies some hint on the current DataFrame.
:param name: A name of the hint.
:param parameters: Optional parameters.
:return: :class:`DataFrame`
>>> df.join(df2.hint("broadcast"), "name").show()
+----+---+------+
|name|age|height|
+----+---+------+
| Bob| 5| 85|
+----+---+------+
"""
if len(parameters) == 1 and isinstance(parameters[0], list):
parameters = parameters[0]
if not isinstance(name, str):
raise TypeError("name should be provided as str, got {0}".format(type(name)))
for p in parameters:
if not isinstance(p, str):
raise TypeError(
"all parameters should be str, got {0} of type {1}".format(p, type(p)))
jdf = self._jdf.hint(name, self._jseq(parameters))
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def count(self):
"""Returns the number of rows in this :class:`DataFrame`.
>>> df.count()
2
"""
return int(self._jdf.count())
@ignore_unicode_prefix
@since(1.3)
def collect(self):
"""Returns all the records as a list of :class:`Row`.
>>> df.collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
with SCCallSiteSync(self._sc) as css:
port = self._jdf.collectToPython()
return list(_load_from_socket(port, BatchedSerializer(PickleSerializer())))
@ignore_unicode_prefix
@since(2.0)
def toLocalIterator(self):
"""
Returns an iterator that contains all of the rows in this :class:`DataFrame`.
The iterator will consume as much memory as the largest partition in this DataFrame.
>>> list(df.toLocalIterator())
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
with SCCallSiteSync(self._sc) as css:
port = self._jdf.toPythonIterator()
return _load_from_socket(port, BatchedSerializer(PickleSerializer()))
@ignore_unicode_prefix
@since(1.3)
def limit(self, num):
"""Limits the result count to the number specified.
>>> df.limit(1).collect()
[Row(age=2, name=u'Alice')]
>>> df.limit(0).collect()
[]
"""
jdf = self._jdf.limit(num)
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def take(self, num):
"""Returns the first ``num`` rows as a :class:`list` of :class:`Row`.
>>> df.take(2)
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
return self.limit(num).collect()
@since(1.3)
def foreach(self, f):
"""Applies the ``f`` function to all :class:`Row` of this :class:`DataFrame`.
This is a shorthand for ``df.rdd.foreach()``.
>>> def f(person):
... print(person.name)
>>> df.foreach(f)
"""
self.rdd.foreach(f)
@since(1.3)
def foreachPartition(self, f):
"""Applies the ``f`` function to each partition of this :class:`DataFrame`.
        This is a shorthand for ``df.rdd.foreachPartition()``.
>>> def f(people):
... for person in people:
... print(person.name)
>>> df.foreachPartition(f)
"""
self.rdd.foreachPartition(f)
@since(1.3)
def cache(self):
"""Persists the :class:`DataFrame` with the default storage level (C{MEMORY_AND_DISK}).
.. note:: The default storage level has changed to C{MEMORY_AND_DISK} to match Scala in 2.0.
"""
self.is_cached = True
self._jdf.cache()
return self
@since(1.3)
def persist(self, storageLevel=StorageLevel.MEMORY_AND_DISK):
"""Sets the storage level to persist the contents of the :class:`DataFrame` across
operations after the first time it is computed. This can only be used to assign
a new storage level if the :class:`DataFrame` does not have a storage level set yet.
If no storage level is specified defaults to (C{MEMORY_AND_DISK}).
.. note:: The default storage level has changed to C{MEMORY_AND_DISK} to match Scala in 2.0.
"""
self.is_cached = True
javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
self._jdf.persist(javaStorageLevel)
return self
@property
@since(2.1)
def storageLevel(self):
"""Get the :class:`DataFrame`'s current storage level.
>>> df.storageLevel
StorageLevel(False, False, False, False, 1)
>>> df.cache().storageLevel
StorageLevel(True, True, False, True, 1)
>>> df2.persist(StorageLevel.DISK_ONLY_2).storageLevel
StorageLevel(True, False, False, False, 2)
"""
java_storage_level = self._jdf.storageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
@since(1.3)
def unpersist(self, blocking=False):
"""Marks the :class:`DataFrame` as non-persistent, and remove all blocks for it from
memory and disk.
.. note:: `blocking` default has changed to False to match Scala in 2.0.
"""
self.is_cached = False
self._jdf.unpersist(blocking)
return self
@since(1.4)
def coalesce(self, numPartitions):
"""
Returns a new :class:`DataFrame` that has exactly `numPartitions` partitions.
Similar to coalesce defined on an :class:`RDD`, this operation results in a
narrow dependency, e.g. if you go from 1000 partitions to 100 partitions,
there will not be a shuffle, instead each of the 100 new partitions will
claim 10 of the current partitions. If a larger number of partitions is requested,
it will stay at the current number of partitions.
However, if you're doing a drastic coalesce, e.g. to numPartitions = 1,
this may result in your computation taking place on fewer nodes than
you like (e.g. one node in the case of numPartitions = 1). To avoid this,
you can call repartition(). This will add a shuffle step, but means the
current upstream partitions will be executed in parallel (per whatever
the current partitioning is).
>>> df.coalesce(1).rdd.getNumPartitions()
1
"""
return DataFrame(self._jdf.coalesce(numPartitions), self.sql_ctx)
@since(1.3)
def repartition(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting DataFrame is hash partitioned.
``numPartitions`` can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
.. versionchanged:: 1.6
Added optional arguments to specify the partitioning columns. Also made numPartitions
optional if partitioning columns are specified.
>>> df.repartition(10).rdd.getNumPartitions()
10
>>> data = df.union(df).repartition("age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
>>> data = data.repartition(7, "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
| 2|Alice|
| 5| Bob|
+---+-----+
>>> data.rdd.getNumPartitions()
7
>>> data = data.repartition("name", "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
return DataFrame(self._jdf.repartition(numPartitions), self.sql_ctx)
else:
return DataFrame(
self._jdf.repartition(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (basestring, Column)):
cols = (numPartitions, ) + cols
return DataFrame(self._jdf.repartition(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int or Column")
@since(1.3)
def distinct(self):
"""Returns a new :class:`DataFrame` containing the distinct rows in this :class:`DataFrame`.
>>> df.distinct().count()
2
"""
return DataFrame(self._jdf.distinct(), self.sql_ctx)
@since(1.3)
def sample(self, withReplacement, fraction, seed=None):
"""Returns a sampled subset of this :class:`DataFrame`.
.. note:: This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
>>> df.sample(False, 0.5, 42).count()
2
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
seed = seed if seed is not None else random.randint(0, sys.maxsize)
rdd = self._jdf.sample(withReplacement, fraction, long(seed))
return DataFrame(rdd, self.sql_ctx)
@since(1.5)
def sampleBy(self, col, fractions, seed=None):
"""
Returns a stratified sample without replacement based on the
fraction given on each stratum.
:param col: column that defines strata
:param fractions:
sampling fraction for each stratum. If a stratum is not
specified, we treat its fraction as zero.
:param seed: random seed
:return: a new DataFrame that represents the stratified sample
>>> from pyspark.sql.functions import col
>>> dataset = sqlContext.range(0, 100).select((col("id") % 3).alias("key"))
>>> sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0)
>>> sampled.groupBy("key").count().orderBy("key").show()
+---+-----+
|key|count|
+---+-----+
| 0| 5|
| 1| 9|
+---+-----+
"""
if not isinstance(col, str):
raise ValueError("col must be a string, but got %r" % type(col))
if not isinstance(fractions, dict):
raise ValueError("fractions must be a dict but got %r" % type(fractions))
for k, v in fractions.items():
if not isinstance(k, (float, int, long, basestring)):
raise ValueError("key must be float, int, long, or string, but got %r" % type(k))
fractions[k] = float(v)
seed = seed if seed is not None else random.randint(0, sys.maxsize)
return DataFrame(self._jdf.stat().sampleBy(col, self._jmap(fractions), seed), self.sql_ctx)
@since(1.4)
def randomSplit(self, weights, seed=None):
"""Randomly splits this :class:`DataFrame` with the provided weights.
:param weights: list of doubles as weights with which to split the DataFrame. Weights will
be normalized if they don't sum up to 1.0.
:param seed: The seed for sampling.
>>> splits = df4.randomSplit([1.0, 2.0], 24)
>>> splits[0].count()
1
>>> splits[1].count()
3
"""
for w in weights:
if w < 0.0:
raise ValueError("Weights must be positive. Found weight value: %s" % w)
seed = seed if seed is not None else random.randint(0, sys.maxsize)
rdd_array = self._jdf.randomSplit(_to_list(self.sql_ctx._sc, weights), long(seed))
return [DataFrame(rdd, self.sql_ctx) for rdd in rdd_array]
@property
@since(1.3)
def dtypes(self):
"""Returns all column names and their data types as a list.
>>> df.dtypes
[('age', 'int'), ('name', 'string')]
"""
return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields]
@property
@since(1.3)
def columns(self):
"""Returns all column names as a list.
>>> df.columns
['age', 'name']
"""
return [f.name for f in self.schema.fields]
@ignore_unicode_prefix
@since(1.3)
def alias(self, alias):
"""Returns a new :class:`DataFrame` with an alias set.
>>> from pyspark.sql.functions import *
>>> df_as1 = df.alias("df_as1")
>>> df_as2 = df.alias("df_as2")
>>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner')
>>> joined_df.select("df_as1.name", "df_as2.name", "df_as2.age").collect()
[Row(name=u'Bob', name=u'Bob', age=5), Row(name=u'Alice', name=u'Alice', age=2)]
"""
assert isinstance(alias, basestring), "alias should be a string"
return DataFrame(getattr(self._jdf, "as")(alias), self.sql_ctx)
@ignore_unicode_prefix
@since(2.1)
def crossJoin(self, other):
"""Returns the cartesian product with another :class:`DataFrame`.
:param other: Right side of the cartesian product.
>>> df.select("age", "name").collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df2.select("name", "height").collect()
[Row(name=u'Tom', height=80), Row(name=u'Bob', height=85)]
>>> df.crossJoin(df2.select("height")).select("age", "name", "height").collect()
[Row(age=2, name=u'Alice', height=80), Row(age=2, name=u'Alice', height=85),
Row(age=5, name=u'Bob', height=80), Row(age=5, name=u'Bob', height=85)]
"""
jdf = self._jdf.crossJoin(other._jdf)
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def join(self, other, on=None, how=None):
"""Joins with another :class:`DataFrame`, using the given join expression.
:param other: Right side of the join
:param on: a string for the join column name, a list of column names,
a join expression (Column), or a list of Columns.
If `on` is a string or a list of strings indicating the name of the join column(s),
the column(s) must exist on both sides, and this performs an equi-join.
:param how: str, default ``inner``. Must be one of: ``inner``, ``cross``, ``outer``,
``full``, ``full_outer``, ``left``, ``left_outer``, ``right``, ``right_outer``,
``left_semi``, and ``left_anti``.
The following performs a full outer join between ``df1`` and ``df2``.
>>> df.join(df2, df.name == df2.name, 'outer').select(df.name, df2.height).collect()
[Row(name=None, height=80), Row(name=u'Bob', height=85), Row(name=u'Alice', height=None)]
>>> df.join(df2, 'name', 'outer').select('name', 'height').collect()
[Row(name=u'Tom', height=80), Row(name=u'Bob', height=85), Row(name=u'Alice', height=None)]
>>> cond = [df.name == df3.name, df.age == df3.age]
>>> df.join(df3, cond, 'outer').select(df.name, df3.age).collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df.join(df2, 'name').select(df.name, df2.height).collect()
[Row(name=u'Bob', height=85)]
>>> df.join(df4, ['name', 'age']).select(df.name, df.age).collect()
[Row(name=u'Bob', age=5)]
"""
if on is not None and not isinstance(on, list):
on = [on]
if on is not None:
if isinstance(on[0], basestring):
on = self._jseq(on)
else:
assert isinstance(on[0], Column), "on should be Column or list of Column"
on = reduce(lambda x, y: x.__and__(y), on)
on = on._jc
if on is None and how is None:
jdf = self._jdf.join(other._jdf)
else:
if how is None:
how = "inner"
assert isinstance(how, basestring), "how should be basestring"
jdf = self._jdf.join(other._jdf, on, how)
return DataFrame(jdf, self.sql_ctx)
@since(1.6)
def sortWithinPartitions(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` with each partition sorted by the specified column(s).
:param cols: list of :class:`Column` or column names to sort by.
:param ascending: boolean or list of boolean (default True).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
>>> df.sortWithinPartitions("age", ascending=False).show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
jdf = self._jdf.sortWithinPartitions(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def sort(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` sorted by the specified column(s).
:param cols: list of :class:`Column` or column names to sort by.
:param ascending: boolean or list of boolean (default True).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
>>> df.sort(df.age.desc()).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.sort("age", ascending=False).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.orderBy(df.age.desc()).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> from pyspark.sql.functions import *
>>> df.sort(asc("age")).collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df.orderBy(desc("age"), "name").collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.orderBy(["age", "name"], ascending=[0, 1]).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
"""
jdf = self._jdf.sort(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
orderBy = sort
def _jseq(self, cols, converter=None):
"""Return a JVM Seq of Columns from a list of Column or names"""
return _to_seq(self.sql_ctx._sc, cols, converter)
def _jmap(self, jm):
"""Return a JVM Scala Map from a dict"""
return _to_scala_map(self.sql_ctx._sc, jm)
def _jcols(self, *cols):
"""Return a JVM Seq of Columns from a list of Column or column names
If `cols` has only one list in it, cols[0] will be used as the list.
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
return self._jseq(cols, _to_java_column)
def _sort_cols(self, cols, kwargs):
""" Return a JVM Seq of Columns that describes the sort order
"""
if not cols:
raise ValueError("should sort by at least one column")
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jcols = [_to_java_column(c) for c in cols]
ascending = kwargs.get('ascending', True)
if isinstance(ascending, (bool, int)):
if not ascending:
jcols = [jc.desc() for jc in jcols]
elif isinstance(ascending, list):
jcols = [jc if asc else jc.desc()
for asc, jc in zip(ascending, jcols)]
else:
raise TypeError("ascending can only be boolean or list, but got %s" % type(ascending))
return self._jseq(jcols)
@since("1.3.1")
def describe(self, *cols):
"""Computes statistics for numeric and string columns.
        This includes count, mean, stddev, min, and max. If no columns are
given, this function computes statistics for all numerical or string columns.
.. note:: This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting DataFrame.
>>> df.describe(['age']).show()
+-------+------------------+
|summary| age|
+-------+------------------+
| count| 2|
| mean| 3.5|
| stddev|2.1213203435596424|
| min| 2|
| max| 5|
+-------+------------------+
>>> df.describe().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| max| 5| Bob|
+-------+------------------+-----+
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jdf = self._jdf.describe(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def head(self, n=None):
"""Returns the first ``n`` rows.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
:param n: int, default 1. Number of rows to return.
:return: If n is greater than 1, return a list of :class:`Row`.
If n is 1, return a single Row.
>>> df.head()
Row(age=2, name=u'Alice')
>>> df.head(1)
[Row(age=2, name=u'Alice')]
"""
if n is None:
rs = self.head(1)
return rs[0] if rs else None
return self.take(n)
@ignore_unicode_prefix
@since(1.3)
def first(self):
"""Returns the first row as a :class:`Row`.
>>> df.first()
Row(age=2, name=u'Alice')
"""
return self.head()
@ignore_unicode_prefix
@since(1.3)
def __getitem__(self, item):
"""Returns the column as a :class:`Column`.
>>> df.select(df['age']).collect()
[Row(age=2), Row(age=5)]
>>> df[ ["name", "age"]].collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df[ df.age > 3 ].collect()
[Row(age=5, name=u'Bob')]
>>> df[df[0] > 3].collect()
[Row(age=5, name=u'Bob')]
"""
if isinstance(item, basestring):
jc = self._jdf.apply(item)
return Column(jc)
elif isinstance(item, Column):
return self.filter(item)
elif isinstance(item, (list, tuple)):
return self.select(*item)
elif isinstance(item, int):
jc = self._jdf.apply(self.columns[item])
return Column(jc)
else:
raise TypeError("unexpected item type: %s" % type(item))
@since(1.3)
def __getattr__(self, name):
"""Returns the :class:`Column` denoted by ``name``.
>>> df.select(df.age).collect()
[Row(age=2), Row(age=5)]
"""
if name not in self.columns:
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
jc = self._jdf.apply(name)
return Column(jc)
@ignore_unicode_prefix
@since(1.3)
def select(self, *cols):
"""Projects a set of expressions and returns a new :class:`DataFrame`.
:param cols: list of column names (string) or expressions (:class:`Column`).
If one of the column names is '*', that column is expanded to include all columns
in the current DataFrame.
>>> df.select('*').collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df.select('name', 'age').collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df.select(df.name, (df.age + 10).alias('age')).collect()
[Row(name=u'Alice', age=12), Row(name=u'Bob', age=15)]
"""
jdf = self._jdf.select(self._jcols(*cols))
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def selectExpr(self, *expr):
"""Projects a set of SQL expressions and returns a new :class:`DataFrame`.
This is a variant of :func:`select` that accepts SQL expressions.
>>> df.selectExpr("age * 2", "abs(age)").collect()
[Row((age * 2)=4, abs(age)=2), Row((age * 2)=10, abs(age)=5)]
"""
if len(expr) == 1 and isinstance(expr[0], list):
expr = expr[0]
jdf = self._jdf.selectExpr(self._jseq(expr))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def filter(self, condition):
"""Filters rows using the given condition.
:func:`where` is an alias for :func:`filter`.
:param condition: a :class:`Column` of :class:`types.BooleanType`
or a string of SQL expression.
>>> df.filter(df.age > 3).collect()
[Row(age=5, name=u'Bob')]
>>> df.where(df.age == 2).collect()
[Row(age=2, name=u'Alice')]
>>> df.filter("age > 3").collect()
[Row(age=5, name=u'Bob')]
>>> df.where("age = 2").collect()
[Row(age=2, name=u'Alice')]
"""
if isinstance(condition, basestring):
jdf = self._jdf.filter(condition)
elif isinstance(condition, Column):
jdf = self._jdf.filter(condition._jc)
else:
raise TypeError("condition should be string or Column")
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def groupBy(self, *cols):
"""Groups the :class:`DataFrame` using the specified columns,
so we can run aggregation on them. See :class:`GroupedData`
for all the available aggregate functions.
:func:`groupby` is an alias for :func:`groupBy`.
:param cols: list of columns to group by.
Each element should be a column name (string) or an expression (:class:`Column`).
>>> df.groupBy().avg().collect()
[Row(avg(age)=3.5)]
>>> sorted(df.groupBy('name').agg({'age': 'mean'}).collect())
[Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(df.name).avg().collect())
[Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(['name', df.age]).count().collect())
[Row(name=u'Alice', age=2, count=1), Row(name=u'Bob', age=5, count=1)]
"""
jgd = self._jdf.groupBy(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self.sql_ctx)
@since(1.4)
def rollup(self, *cols):
"""
Create a multi-dimensional rollup for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
>>> df.rollup("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.rollup(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self.sql_ctx)
@since(1.4)
def cube(self, *cols):
"""
Create a multi-dimensional cube for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
>>> df.cube("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
| null| 2| 1|
| null| 5| 1|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.cube(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self.sql_ctx)
@since(1.3)
def agg(self, *exprs):
""" Aggregate on the entire :class:`DataFrame` without groups
(shorthand for ``df.groupBy.agg()``).
>>> df.agg({"age": "max"}).collect()
[Row(max(age)=5)]
>>> from pyspark.sql import functions as F
>>> df.agg(F.min(df.age)).collect()
[Row(min(age)=2)]
"""
return self.groupBy().agg(*exprs)
@since(2.0)
def union(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this and another frame.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by a distinct.
Also as standard in SQL, this function resolves columns by position (not by name).
"""
return DataFrame(self._jdf.union(other._jdf), self.sql_ctx)
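    # Illustrative sketch (not an official doctest): union keeps duplicates and
    # resolves columns by position, so SQL-style UNION needs a distinct() afterwards.
    #
    #     df.union(df).count()              # 4 -- every row appears twice
    #     df.union(df).distinct().count()   # 2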
@since(1.3)
def unionAll(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this and another frame.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by a distinct.
Also as standard in SQL, this function resolves columns by position (not by name).
.. note:: Deprecated in 2.0, use union instead.
"""
return self.union(other)
@since(1.3)
def intersect(self, other):
""" Return a new :class:`DataFrame` containing rows only in
both this frame and another frame.
This is equivalent to `INTERSECT` in SQL.
"""
return DataFrame(self._jdf.intersect(other._jdf), self.sql_ctx)
@since(1.3)
def subtract(self, other):
""" Return a new :class:`DataFrame` containing rows in this frame
but not in another frame.
This is equivalent to `EXCEPT` in SQL.
"""
return DataFrame(getattr(self._jdf, "except")(other._jdf), self.sql_ctx)
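    # Illustrative sketch (not an official doctest), using the `df` (Alice, Bob) and
    # `df2` (Tom, Bob) frames from the doctests above:
    #
    #     df.select("name").intersect(df2.select("name")).collect()   # [Row(name=u'Bob')]
    #     df.select("name").subtract(df2.select("name")).collect()    # [Row(name=u'Alice')]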
@since(1.4)
def dropDuplicates(self, subset=None):
"""Return a new :class:`DataFrame` with duplicate rows removed,
optionally only considering certain columns.
For a static batch :class:`DataFrame`, it just drops duplicate rows. For a streaming
:class:`DataFrame`, it will keep all data across triggers as intermediate state to drop
        duplicate rows. You can use :func:`withWatermark` to limit how late the duplicate data can
        be, and the system will accordingly limit the state. In addition, data older than the
        watermark will be dropped to avoid any possibility of duplicates.
:func:`drop_duplicates` is an alias for :func:`dropDuplicates`.
>>> from pyspark.sql import Row
>>> df = sc.parallelize([ \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=10, height=80)]).toDF()
>>> df.dropDuplicates().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 5| 80|Alice|
| 10| 80|Alice|
+---+------+-----+
>>> df.dropDuplicates(['name', 'height']).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 5| 80|Alice|
+---+------+-----+
"""
if subset is None:
jdf = self._jdf.dropDuplicates()
else:
jdf = self._jdf.dropDuplicates(self._jseq(subset))
return DataFrame(jdf, self.sql_ctx)
@since("1.3.1")
def dropna(self, how='any', thresh=None, subset=None):
"""Returns a new :class:`DataFrame` omitting rows with null values.
:func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other.
:param how: 'any' or 'all'.
If 'any', drop a row if it contains any nulls.
If 'all', drop a row only if all its values are null.
:param thresh: int, default None
If specified, drop rows that have less than `thresh` non-null values.
This overwrites the `how` parameter.
:param subset: optional list of column names to consider.
>>> df4.na.drop().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
+---+------+-----+
"""
if how is not None and how not in ['any', 'all']:
raise ValueError("how ('" + how + "') should be 'any' or 'all'")
if subset is None:
subset = self.columns
elif isinstance(subset, basestring):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
if thresh is None:
thresh = len(subset) if how == 'any' else 1
return DataFrame(self._jdf.na().drop(thresh, self._jseq(subset)), self.sql_ctx)
@since("1.3.1")
def fillna(self, value, subset=None):
"""Replace null values, alias for ``na.fill()``.
:func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other.
:param value: int, long, float, string, bool or dict.
Value to replace null values with.
If the value is a dict, then `subset` is ignored and `value` must be a mapping
from column name (string) to replacement value. The replacement value must be
an int, long, float, boolean, or string.
:param subset: optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
>>> df4.na.fill(50).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
| 5| 50| Bob|
| 50| 50| Tom|
| 50| 50| null|
+---+------+-----+
>>> df5.na.fill(False).show()
+----+-------+-----+
| age| name| spy|
+----+-------+-----+
| 10| Alice|false|
| 5| Bob|false|
|null|Mallory| true|
+----+-------+-----+
>>> df4.na.fill({'age': 50, 'name': 'unknown'}).show()
+---+------+-------+
|age|height| name|
+---+------+-------+
| 10| 80| Alice|
| 5| null| Bob|
| 50| null| Tom|
| 50| null|unknown|
+---+------+-------+
"""
if not isinstance(value, (float, int, long, basestring, bool, dict)):
raise ValueError("value should be a float, int, long, string, bool or dict")
# Note that bool validates isinstance(int), but we don't want to
# convert bools to floats
if not isinstance(value, bool) and isinstance(value, (int, long)):
value = float(value)
if isinstance(value, dict):
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
elif subset is None:
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
else:
if isinstance(subset, basestring):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
return DataFrame(self._jdf.na().fill(value, self._jseq(subset)), self.sql_ctx)
@since(1.4)
def replace(self, to_replace, value=None, subset=None):
"""Returns a new :class:`DataFrame` replacing a value with another value.
:func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are
aliases of each other.
Values to_replace and value should contain either all numerics, all booleans,
or all strings. When replacing, the new value will be cast
to the type of the existing column.
For numeric replacements all values to be replaced should have unique
floating point representation. In case of conflicts (for example with `{42: -1, 42.0: 1}`)
        an arbitrary replacement will be used.
:param to_replace: bool, int, long, float, string, list or dict.
Value to be replaced.
If the value is a dict, then `value` is ignored and `to_replace` must be a
mapping between a value and a replacement.
:param value: int, long, float, string, or list.
The replacement value must be an int, long, float, or string. If `value` is a
list, `value` should be of the same length and type as `to_replace`.
If `value` is a scalar and `to_replace` is a sequence, then `value` is
used as a replacement for each item in `to_replace`.
:param subset: optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
>>> df4.na.replace(10, 20).show()
+----+------+-----+
| age|height| name|
+----+------+-----+
| 20| 80|Alice|
| 5| null| Bob|
|null| null| Tom|
|null| null| null|
+----+------+-----+
>>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80| A|
| 5| null| B|
|null| null| Tom|
|null| null|null|
+----+------+----+
"""
# Helper functions
def all_of(types):
"""Given a type or tuple of types and a sequence of xs
check if each x is instance of type(s)
>>> all_of(bool)([True, False])
True
>>> all_of(basestring)(["a", 1])
False
"""
def all_of_(xs):
return all(isinstance(x, types) for x in xs)
return all_of_
all_of_bool = all_of(bool)
all_of_str = all_of(basestring)
all_of_numeric = all_of((float, int, long))
# Validate input types
valid_types = (bool, float, int, long, basestring, list, tuple)
if not isinstance(to_replace, valid_types + (dict, )):
raise ValueError(
"to_replace should be a float, int, long, string, list, tuple, or dict. "
"Got {0}".format(type(to_replace)))
if not isinstance(value, valid_types) and not isinstance(to_replace, dict):
raise ValueError("If to_replace is not a dict, value should be "
"a float, int, long, string, list, or tuple. "
"Got {0}".format(type(value)))
if isinstance(to_replace, (list, tuple)) and isinstance(value, (list, tuple)):
if len(to_replace) != len(value):
raise ValueError("to_replace and value lists should be of the same length. "
"Got {0} and {1}".format(len(to_replace), len(value)))
if not (subset is None or isinstance(subset, (list, tuple, basestring))):
raise ValueError("subset should be a list or tuple of column names, "
"column name or None. Got {0}".format(type(subset)))
# Reshape input arguments if necessary
if isinstance(to_replace, (float, int, long, basestring)):
to_replace = [to_replace]
if isinstance(value, (float, int, long, basestring)):
value = [value for _ in range(len(to_replace))]
if isinstance(to_replace, dict):
rep_dict = to_replace
if value is not None:
warnings.warn("to_replace is a dict and value is not None. value will be ignored.")
else:
rep_dict = dict(zip(to_replace, value))
if isinstance(subset, basestring):
subset = [subset]
        # Verify we were not passed in mixed type generics.
if not any(all_of_type(rep_dict.keys()) and all_of_type(rep_dict.values())
for all_of_type in [all_of_bool, all_of_str, all_of_numeric]):
raise ValueError("Mixed type replacements are not supported")
if subset is None:
return DataFrame(self._jdf.na().replace('*', rep_dict), self.sql_ctx)
else:
return DataFrame(
self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)), self.sql_ctx)
@since(2.0)
def approxQuantile(self, col, probabilities, relativeError):
"""
Calculates the approximate quantiles of numerical columns of a
DataFrame.
The result of this algorithm has the following deterministic bound:
If the DataFrame has N elements and if we request the quantile at
probability `p` up to error `err`, then the algorithm will return
a sample `x` from the DataFrame so that the *exact* rank of `x` is
close to (p * N). More precisely,
floor((p - err) * N) <= rank(x) <= ceil((p + err) * N).
This method implements a variation of the Greenwald-Khanna
algorithm (with some speed optimizations). The algorithm was first
        presented in [[http://dx.doi.org/10.1145/375663.375670
Space-efficient Online Computation of Quantile Summaries]]
by Greenwald and Khanna.
Note that null values will be ignored in numerical columns before calculation.
For columns only containing null values, an empty list is returned.
:param col: str, list.
Can be a single column name, or a list of names for multiple columns.
:param probabilities: a list of quantile probabilities
Each number must belong to [0, 1].
For example 0 is the minimum, 0.5 is the median, 1 is the maximum.
:param relativeError: The relative target precision to achieve
(>= 0). If set to zero, the exact quantiles are computed, which
could be very expensive. Note that values greater than 1 are
accepted but give the same result as 1.
:return: the approximate quantiles at the given probabilities. If
the input `col` is a string, the output is a list of floats. If the
input `col` is a list or tuple of strings, the output is also a
list, but each element in it is a list of floats, i.e., the output
is a list of list of floats.
.. versionchanged:: 2.2
Added support for multiple columns.
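        An illustrative sketch (results are indicative only; the exact values
        depend on the data and on `relativeError`, hence the skipped doctests).
        A single column name yields a flat list, while a list of names yields
        one list per column:
        >>> df4.approxQuantile("age", [0.25, 0.5, 0.75], 0.1)  # doctest: +SKIP
        [5.0, 5.0, 10.0]
        >>> df4.approxQuantile(["age", "height"], [0.5], 0.1)  # doctest: +SKIP
        [[5.0], [80.0]]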
"""
if not isinstance(col, (str, list, tuple)):
raise ValueError("col should be a string, list or tuple, but got %r" % type(col))
isStr = isinstance(col, str)
if isinstance(col, tuple):
col = list(col)
elif isinstance(col, str):
col = [col]
for c in col:
if not isinstance(c, str):
raise ValueError("columns should be strings, but got %r" % type(c))
col = _to_list(self._sc, col)
if not isinstance(probabilities, (list, tuple)):
raise ValueError("probabilities should be a list or tuple")
if isinstance(probabilities, tuple):
probabilities = list(probabilities)
for p in probabilities:
if not isinstance(p, (float, int, long)) or p < 0 or p > 1:
raise ValueError("probabilities should be numerical (float, int, long) in [0,1].")
probabilities = _to_list(self._sc, probabilities)
if not isinstance(relativeError, (float, int, long)) or relativeError < 0:
raise ValueError("relativeError should be numerical (float, int, long) >= 0.")
relativeError = float(relativeError)
jaq = self._jdf.stat().approxQuantile(col, probabilities, relativeError)
jaq_list = [list(j) for j in jaq]
return jaq_list[0] if isStr else jaq_list
@since(1.4)
def corr(self, col1, col2, method=None):
"""
Calculates the correlation of two columns of a DataFrame as a double value.
Currently only supports the Pearson Correlation Coefficient.
:func:`DataFrame.corr` and :func:`DataFrameStatFunctions.corr` are aliases of each other.
:param col1: The name of the first column
:param col2: The name of the second column
:param method: The correlation method. Currently only supports "pearson"
"""
if not isinstance(col1, str):
raise ValueError("col1 should be a string.")
if not isinstance(col2, str):
raise ValueError("col2 should be a string.")
if not method:
method = "pearson"
if not method == "pearson":
raise ValueError("Currently only the calculation of the Pearson Correlation " +
"coefficient is supported.")
return self._jdf.stat().corr(col1, col2, method)
@since(1.4)
def cov(self, col1, col2):
"""
Calculate the sample covariance for the given columns, specified by their names, as a
double value. :func:`DataFrame.cov` and :func:`DataFrameStatFunctions.cov` are aliases.
:param col1: The name of the first column
:param col2: The name of the second column
"""
if not isinstance(col1, str):
raise ValueError("col1 should be a string.")
if not isinstance(col2, str):
raise ValueError("col2 should be a string.")
return self._jdf.stat().cov(col1, col2)
@since(1.4)
def crosstab(self, col1, col2):
"""
Computes a pair-wise frequency table of the given columns. Also known as a contingency
table. The number of distinct values for each column should be less than 1e4. At most 1e6
non-zero pair frequencies will be returned.
The first column of each row will be the distinct values of `col1` and the column names
will be the distinct values of `col2`. The name of the first column will be `$col1_$col2`.
Pairs that have no occurrences will have zero as their counts.
:func:`DataFrame.crosstab` and :func:`DataFrameStatFunctions.crosstab` are aliases.
:param col1: The name of the first column. Distinct items will make the first item of
each row.
:param col2: The name of the second column. Distinct items will make the column names
of the DataFrame.
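        An illustrative sketch of the naming convention (the order of the
        generated columns is not guaranteed, hence the skipped doctest):
        >>> df4.crosstab('age', 'name').columns  # doctest: +SKIP
        ['age_name', 'Alice', 'Bob', 'Tom', 'null']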
"""
if not isinstance(col1, str):
raise ValueError("col1 should be a string.")
if not isinstance(col2, str):
raise ValueError("col2 should be a string.")
return DataFrame(self._jdf.stat().crosstab(col1, col2), self.sql_ctx)
@since(1.4)
def freqItems(self, cols, support=None):
"""
Finding frequent items for columns, possibly with false positives. Using the
frequent element count algorithm described in
"http://dx.doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou".
:func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.
.. note:: This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting DataFrame.
:param cols: Names of the columns to calculate frequent items for as a list or tuple of
strings.
:param support: The frequency with which to consider an item 'frequent'. Default is 1%.
The support must be greater than 1e-4.
"""
if isinstance(cols, tuple):
cols = list(cols)
if not isinstance(cols, list):
raise ValueError("cols must be a list or tuple of column names as strings.")
if not support:
support = 0.01
return DataFrame(self._jdf.stat().freqItems(_to_seq(self._sc, cols), support), self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def withColumn(self, colName, col):
"""
Returns a new :class:`DataFrame` by adding a column or replacing the
existing column that has the same name.
:param colName: string, name of the new column.
:param col: a :class:`Column` expression for the new column.
>>> df.withColumn('age2', df.age + 2).collect()
[Row(age=2, name=u'Alice', age2=4), Row(age=5, name=u'Bob', age2=7)]
"""
assert isinstance(col, Column), "col should be Column"
return DataFrame(self._jdf.withColumn(colName, col._jc), self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def withColumnRenamed(self, existing, new):
"""Returns a new :class:`DataFrame` by renaming an existing column.
This is a no-op if schema doesn't contain the given column name.
:param existing: string, name of the existing column to rename.
        :param new: string, new name of the column.
>>> df.withColumnRenamed('age', 'age2').collect()
[Row(age2=2, name=u'Alice'), Row(age2=5, name=u'Bob')]
"""
return DataFrame(self._jdf.withColumnRenamed(existing, new), self.sql_ctx)
@since(1.4)
@ignore_unicode_prefix
def drop(self, *cols):
"""Returns a new :class:`DataFrame` that drops the specified column.
This is a no-op if schema doesn't contain the given column name(s).
:param cols: a string name of the column to drop, or a
:class:`Column` to drop, or a list of string name of the columns to drop.
>>> df.drop('age').collect()
[Row(name=u'Alice'), Row(name=u'Bob')]
>>> df.drop(df.age).collect()
[Row(name=u'Alice'), Row(name=u'Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df.name).collect()
[Row(age=5, height=85, name=u'Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df2.name).collect()
[Row(age=5, name=u'Bob', height=85)]
>>> df.join(df2, 'name', 'inner').drop('age', 'height').collect()
[Row(name=u'Bob')]
"""
if len(cols) == 1:
col = cols[0]
if isinstance(col, basestring):
jdf = self._jdf.drop(col)
elif isinstance(col, Column):
jdf = self._jdf.drop(col._jc)
else:
raise TypeError("col should be a string or a Column")
else:
for col in cols:
if not isinstance(col, basestring):
raise TypeError("each col in the param list should be a string")
jdf = self._jdf.drop(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
    def toDF(self, *cols):
        """Returns a new :class:`DataFrame` with the specified column names.
:param cols: list of new column names (string)
>>> df.toDF('f1', 'f2').collect()
[Row(f1=2, f2=u'Alice'), Row(f1=5, f2=u'Bob')]
"""
jdf = self._jdf.toDF(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def toPandas(self):
"""Returns the contents of this :class:`DataFrame` as Pandas ``pandas.DataFrame``.
This is only available if Pandas is installed and available.
.. note:: This method should only be used if the resulting Pandas's DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
>>> df.toPandas() # doctest: +SKIP
age name
0 2 Alice
1 5 Bob
"""
import pandas as pd
dtype = {}
for field in self.schema:
pandas_type = _to_corrected_pandas_type(field.dataType)
if pandas_type is not None:
dtype[field.name] = pandas_type
pdf = pd.DataFrame.from_records(self.collect(), columns=self.columns)
for f, t in dtype.items():
pdf[f] = pdf[f].astype(t, copy=False)
return pdf
##########################################################################################
# Pandas compatibility
##########################################################################################
groupby = copy_func(
groupBy,
sinceversion=1.4,
doc=":func:`groupby` is an alias for :func:`groupBy`.")
drop_duplicates = copy_func(
dropDuplicates,
sinceversion=1.4,
doc=":func:`drop_duplicates` is an alias for :func:`dropDuplicates`.")
where = copy_func(
filter,
sinceversion=1.3,
doc=":func:`where` is an alias for :func:`filter`.")
def _to_scala_map(sc, jm):
"""
Convert a dict into a JVM Map.
"""
return sc._jvm.PythonUtils.toScalaMap(jm)
def _to_corrected_pandas_type(dt):
"""
When converting Spark SQL records to Pandas DataFrame, the inferred data type may be wrong.
    This method gets the corrected data type for Pandas if that type may be inferred incorrectly.
"""
import numpy as np
if type(dt) == ByteType:
return np.int8
elif type(dt) == ShortType:
return np.int16
elif type(dt) == IntegerType:
return np.int32
elif type(dt) == FloatType:
return np.float32
else:
return None
class DataFrameNaFunctions(object):
"""Functionality for working with missing data in :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def drop(self, how='any', thresh=None, subset=None):
return self.df.dropna(how=how, thresh=thresh, subset=subset)
drop.__doc__ = DataFrame.dropna.__doc__
def fill(self, value, subset=None):
return self.df.fillna(value=value, subset=subset)
fill.__doc__ = DataFrame.fillna.__doc__
def replace(self, to_replace, value, subset=None):
return self.df.replace(to_replace, value, subset)
replace.__doc__ = DataFrame.replace.__doc__
class DataFrameStatFunctions(object):
"""Functionality for statistic functions with :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def approxQuantile(self, col, probabilities, relativeError):
return self.df.approxQuantile(col, probabilities, relativeError)
approxQuantile.__doc__ = DataFrame.approxQuantile.__doc__
def corr(self, col1, col2, method=None):
return self.df.corr(col1, col2, method)
corr.__doc__ = DataFrame.corr.__doc__
def cov(self, col1, col2):
return self.df.cov(col1, col2)
cov.__doc__ = DataFrame.cov.__doc__
def crosstab(self, col1, col2):
return self.df.crosstab(col1, col2)
crosstab.__doc__ = DataFrame.crosstab.__doc__
def freqItems(self, cols, support=None):
return self.df.freqItems(cols, support)
freqItems.__doc__ = DataFrame.freqItems.__doc__
def sampleBy(self, col, fractions, seed=None):
return self.df.sampleBy(col, fractions, seed)
sampleBy.__doc__ = DataFrame.sampleBy.__doc__
def _test():
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext, SparkSession
import pyspark.sql.dataframe
from pyspark.sql.functions import from_unixtime
globs = pyspark.sql.dataframe.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['spark'] = SparkSession(sc)
globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')])\
.toDF(StructType([StructField('age', IntegerType()),
StructField('name', StringType())]))
globs['df2'] = sc.parallelize([Row(name='Tom', height=80), Row(name='Bob', height=85)]).toDF()
globs['df3'] = sc.parallelize([Row(name='Alice', age=2),
Row(name='Bob', age=5)]).toDF()
globs['df4'] = sc.parallelize([Row(name='Alice', age=10, height=80),
Row(name='Bob', age=5, height=None),
Row(name='Tom', age=None, height=None),
Row(name=None, age=None, height=None)]).toDF()
globs['df5'] = sc.parallelize([Row(name='Alice', spy=False, age=10),
Row(name='Bob', spy=None, age=5),
Row(name='Mallory', spy=True, age=None)]).toDF()
globs['sdf'] = sc.parallelize([Row(name='Tom', time=1479441846),
Row(name='Bob', time=1479442946)]).toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.dataframe, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| poffuomo/spark | python/pyspark/sql/dataframe.py | Python | apache-2.0 | 72,001 | 0.002736 |
#
# Brian C. Lane <[email protected]>
#
# Copyright 2012 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
import unittest
from tests.baseclass import CommandTest
from pykickstart.constants import KS_REBOOT, KS_SHUTDOWN, KS_WAIT
class FC3_TestCase(CommandTest):
command = "reboot"
def runTest(self):
# pass
cmd = self.assert_parse("reboot")
self.assertEqual(cmd.action, KS_REBOOT)
self.assertEqual(str(cmd), "# Reboot after installation\nreboot\n")
cmd = self.assert_parse("shutdown")
self.assertEqual(cmd.action, KS_SHUTDOWN)
self.assertEqual(str(cmd), "# Shutdown after installation\nshutdown\n")
cmd = self.assert_parse("halt")
# halt changed in F18
if self.__class__.__name__ in ("FC3_TestCase", "FC6_TestCase"):
self.assertEqual(cmd.action, KS_SHUTDOWN)
cmd = self.assert_parse("poweroff")
self.assertEqual(cmd.action, KS_SHUTDOWN)
class FC6_TestCase(FC3_TestCase):
def runTest(self):
FC3_TestCase.runTest(self)
# pass
cmd = self.assert_parse("reboot --eject")
self.assertEqual(cmd.action, KS_REBOOT)
self.assertEqual(cmd.eject, True)
self.assertEqual(str(cmd), "# Reboot after installation\nreboot --eject\n")
class F18_TestCase(FC6_TestCase):
def runTest(self):
FC6_TestCase.runTest(self)
# pass
cmd = self.assert_parse("halt")
self.assertEqual(cmd.action, KS_WAIT)
self.assertEqual(str(cmd), "# Halt after installation\nhalt\n")
cmd = self.assert_parse("halt --eject")
self.assertEqual(cmd.eject, True)
self.assertEqual(str(cmd), "# Halt after installation\nhalt --eject\n")
if __name__ == "__main__":
unittest.main()
| dashea/pykickstart | tests/commands/reboot.py | Python | gpl-2.0 | 2,617 | 0.002293 |
#!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
##---------------------------------------------------------------------------##
##
## Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
## Copyright (C) 2003 Mt. Hood Playing Card Co.
## Copyright (C) 2005-2009 Skomoroh
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
##---------------------------------------------------------------------------##
from pysollib.settings import WIN_SYSTEM
if WIN_SYSTEM == 'win32':
import win32 as gui
elif WIN_SYSTEM == 'aqua':
import aqua as gui
else: # 'x11'
import x11 as gui
init_root_window = gui.init_root_window
TkSettings = gui.TkSettings
| TrevorLowing/PyGames | pysollib/winsystems/__init__.py | Python | gpl-2.0 | 1,307 | 0.011477 |
# -*- coding: utf-8 -*-
from warnings import warn
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
TRANSLATION_FILES = tuple(getattr(settings, 'MODELTRANSLATION_TRANSLATION_FILES', ()))
TRANSLATION_REGISTRY = getattr(settings, 'MODELTRANSLATION_TRANSLATION_REGISTRY', None)
if TRANSLATION_REGISTRY:
TRANSLATION_FILES += (TRANSLATION_REGISTRY,)
warn('The setting MODELTRANSLATION_TRANSLATION_REGISTRY is deprecated, '
'use MODELTRANSLATION_TRANSLATION_FILES instead.', DeprecationWarning)
AVAILABLE_LANGUAGES = [l[0] for l in settings.LANGUAGES]
DEFAULT_LANGUAGE = getattr(settings, 'MODELTRANSLATION_DEFAULT_LANGUAGE', None)
if DEFAULT_LANGUAGE and DEFAULT_LANGUAGE not in AVAILABLE_LANGUAGES:
raise ImproperlyConfigured('MODELTRANSLATION_DEFAULT_LANGUAGE not in LANGUAGES setting.')
elif not DEFAULT_LANGUAGE:
DEFAULT_LANGUAGE = AVAILABLE_LANGUAGES[0]
# Load allowed CUSTOM_FIELDS from django settings
CUSTOM_FIELDS = getattr(settings, 'MODELTRANSLATION_CUSTOM_FIELDS', ())
# Don't change this setting unless you really know what you are doing
ENABLE_REGISTRATIONS = getattr(settings, 'MODELTRANSLATION_ENABLE_REGISTRATIONS', settings.USE_I18N)
# Modeltranslation specific debug setting
DEBUG = getattr(settings, 'MODELTRANSLATION_DEBUG', settings.DEBUG)
AUTO_POPULATE = getattr(settings, 'MODELTRANSLATION_AUTO_POPULATE', False)
# FALLBACK_LANGUAGES should be in either format:
# MODELTRANSLATION_FALLBACK_LANGUAGES = ('en', 'de')
# MODELTRANSLATION_FALLBACK_LANGUAGES = {'default': ('en', 'de'), 'fr': ('de',)}
# By default we fallback to the default language
FALLBACK_LANGUAGES = getattr(settings, 'MODELTRANSLATION_FALLBACK_LANGUAGES', (DEFAULT_LANGUAGE,))
if isinstance(FALLBACK_LANGUAGES, (tuple, list)):
FALLBACK_LANGUAGES = {'default': FALLBACK_LANGUAGES}
if 'default' not in FALLBACK_LANGUAGES:
raise ImproperlyConfigured(
'MODELTRANSLATION_FALLBACK_LANGUAGES does not contain "default" key.')
for key, value in FALLBACK_LANGUAGES.iteritems():
if key != 'default' and key not in AVAILABLE_LANGUAGES:
raise ImproperlyConfigured(
'MODELTRANSLATION_FALLBACK_LANGUAGES: "%s" not in LANGUAGES setting.' % key)
if not isinstance(value, (tuple, list)):
raise ImproperlyConfigured(
'MODELTRANSLATION_FALLBACK_LANGUAGES: value for key "%s" is not list nor tuple.' % key)
for lang in value:
if lang not in AVAILABLE_LANGUAGES:
raise ImproperlyConfigured(
'MODELTRANSLATION_FALLBACK_LANGUAGES: "%s" not in LANGUAGES setting.' % lang)
| acdha/django-modeltranslation | modeltranslation/settings.py | Python | bsd-3-clause | 2,611 | 0.00383 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyBinwalk(PythonPackage):
"""Binwalk is a fast, easy to use tool for analyzing, reverse engineering,
and extracting firmware images."""
homepage = "https://github.com/devttys0/binwalk"
url = "https://pypi.io/packages/source/b/binwalk/binwalk-2.1.0.tar.gz"
version('2.1.0', '054867d9abe6a05f43200cf2591051e6')
depends_on('python')
depends_on('py-setuptools', type='build')
| TheTimmy/spack | var/spack/repos/builtin/packages/py-binwalk/package.py | Python | lgpl-2.1 | 1,674 | 0.000597 |
# Copyright (C) 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import httplib2
import os
from apiclient import discovery
import oauth2client
from oauth2client import client
from oauth2client import tools
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/gmail-python-quickstart.json
SCOPES = 'https://mail.google.com/'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Gmail API Python Quickstart'
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'gmail-python-quickstart.json')
store = oauth2client.file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def main():
get_credentials()
if __name__ == '__main__':
main()
| csecutsc/GmailBot | quickstart.py | Python | mit | 2,344 | 0.00384 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from selenium.webdriver.common.by import By
from pages.base import BasePage
from pages.regions.modal import ModalProtocol
from pages.regions.send_to_device import SendToDevice
class FirefoxWelcomePage5(BasePage):
_URL_TEMPLATE = '/{locale}/firefox/welcome/5/'
_modal_primary_button_locator = (By.CSS_SELECTOR, '.primary-cta .js-modal-link')
_modal_secondary_button_locator = (By.CSS_SELECTOR, '.secondary-cta .js-modal-link')
_lockwise_qr_code_locator = (By.ID, 'lockwise-qr')
@property
def send_to_device(self):
return SendToDevice(self)
@property
def is_lockwise_qr_code_displayed(self):
return self.is_element_displayed(*self._lockwise_qr_code_locator)
@property
def is_primary_modal_button_displayed(self):
return self.is_element_displayed(*self._modal_primary_button_locator)
@property
def is_secondary_modal_button_displayed(self):
return self.is_element_displayed(*self._modal_secondary_button_locator)
def open_modal(self, locator):
modal = ModalProtocol(self)
self.find_element(*locator).click()
self.wait.until(lambda s: modal.is_displayed)
return modal
def click_primary_modal_button(self):
self.scroll_element_into_view(*self._modal_primary_button_locator)
return self.open_modal(self._modal_primary_button_locator)
| MichaelKohler/bedrock | tests/pages/firefox/welcome/page5.py | Python | mpl-2.0 | 1,575 | 0.00127 |
from flask import Flask
app = Flask(__name__)
app.config.from_object("configs.appconfig.DevelopmentConfig")
| oyang/testFalsk | app.py | Python | mit | 109 | 0 |
# Copyright (C) 2002-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: [email protected]
"""Base class for MIME multipart/* type messages."""
__all__ = ['MIMEMultipart']
from bongo.external.email.mime.base import MIMEBase
class MIMEMultipart(MIMEBase):
"""Base class for MIME multipart/* type messages."""
def __init__(self, _subtype='mixed', boundary=None, _subparts=None,
**_params):
"""Creates a multipart/* type message.
By default, creates a multipart/mixed message, with proper
Content-Type and MIME-Version headers.
_subtype is the subtype of the multipart content type, defaulting to
`mixed'.
boundary is the multipart boundary string. By default it is
calculated as needed.
_subparts is a sequence of initial subparts for the payload. It
must be an iterable object, such as a list. You can always
attach new subparts to the message by using the attach() method.
Additional parameters for the Content-Type header are taken from the
keyword arguments (or passed into the _params argument).
"""
MIMEBase.__init__(self, 'multipart', _subtype, **_params)
if _subparts:
for p in _subparts:
self.attach(p)
if boundary:
self.set_boundary(boundary)
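# Minimal usage sketch (illustrative only, not part of the original module).
# It relies only on the classes imported above and on the Message API they
# inherit from, mirroring the standard library email package:
#
#     part = MIMEBase('text', 'plain')
#     part.set_payload('hello')
#     msg = MIMEMultipart('alternative', _subparts=[part])
#     msg['Subject'] = 'greetings'
#     print msg.as_string()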
| bongo-project/bongo | src/libs/python/bongo/external/email/mime/multipart.py | Python | gpl-2.0 | 1,377 | 0.000726 |
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Localization based on UNO from UHF/UKS check files
#
from functools import reduce
import numpy
import scipy.linalg
import h5py
from pyscf import tools,gto,scf,dft
from pyscf.tools import molden
import pmloc
import ulocal
def sqrtm(s):
e, v = numpy.linalg.eigh(s)
return numpy.dot(v*numpy.sqrt(e), v.T.conj())
def lowdin(s):
e, v = numpy.linalg.eigh(s)
return numpy.dot(v/numpy.sqrt(e), v.T.conj())
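# Quick sanity check (an illustrative sketch, not part of the original script):
# sqrtm(s) builds S^{1/2} and lowdin(s) builds S^{-1/2}, so for any symmetric
# positive-definite overlap matrix their product should be the identity.
#
#     a = numpy.random.rand(5, 5)
#     s = numpy.dot(a, a.T) + 5.0*numpy.eye(5)   # made-up SPD test matrix
#     assert numpy.allclose(numpy.dot(sqrtm(s), lowdin(s)), numpy.eye(5))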
def dumpLUNO(fname,thresh=0.01):
chkfile = fname+'.chk'
outfile = fname+'_cmo.molden'
tools.molden.from_chkfile(outfile, chkfile)
#=============================
# Natural orbitals
# Lowdin basis X=S{-1/2}
# psi = chi * C
# = chi' * C'
# = chi*X*(X{-1}C')
#=============================
mol,mf = scf.chkfile.load_scf(chkfile)
mo_coeff = mf["mo_coeff"]
ova=mol.intor_symmetric("cint1e_ovlp_sph")
nb = mo_coeff.shape[1]
# Check overlap
diff = reduce(numpy.dot,(mo_coeff[0].T,ova,mo_coeff[0])) - numpy.identity(nb)
print numpy.linalg.norm(diff)
diff = reduce(numpy.dot,(mo_coeff[1].T,ova,mo_coeff[1])) - numpy.identity(nb)
print numpy.linalg.norm(diff)
# UHF-alpha/beta
ma = mo_coeff[0]
mb = mo_coeff[1]
nalpha = (mol.nelectron+mol.spin)/2
nbeta = (mol.nelectron-mol.spin)/2
# Spin-averaged DM
pTa = numpy.dot(ma[:,:nalpha],ma[:,:nalpha].T)
pTb = numpy.dot(mb[:,:nbeta],mb[:,:nbeta].T)
pT = 0.5*(pTa+pTb)
# Lowdin basis
s12 = sqrtm(ova)
s12inv = lowdin(ova)
pTOAO = reduce(numpy.dot,(s12,pT,s12))
eig,coeff = scipy.linalg.eigh(-pTOAO)
eig = -2.0*eig
eig[eig<0.0]=0.0
eig[abs(eig)<1.e-14]=0.0
ifplot = False #True
if ifplot:
import matplotlib.pyplot as plt
plt.plot(range(nb),eig,'ro')
plt.show()
# Back to AO basis
coeff = numpy.dot(s12inv,coeff)
diff = reduce(numpy.dot,(coeff.T,ova,coeff)) - numpy.identity(nb)
print 'CtSC-I',numpy.linalg.norm(diff)
#
# Averaged Fock
#
enorb = mf["mo_energy"]
fa = reduce(numpy.dot,(ma,numpy.diag(enorb[0]),ma.T))
fb = reduce(numpy.dot,(mb,numpy.diag(enorb[1]),mb.T))
# Non-orthogonal cases: FC=SCE
# Fao = SC*e*C{-1} = S*C*e*Ct*S
fav = 0.5*(fa+fb)
# Expectation value of natural orbitals <i|F|i>
fexpt = reduce(numpy.dot,(coeff.T,ova,fav,ova,coeff))
enorb = numpy.diag(fexpt)
nocc = eig.copy()
#
# Reordering and define active space according to thresh
#
idx = 0
active=[]
for i in range(nb):
if nocc[i]<=2.0-thresh and nocc[i]>=thresh:
active.append(True)
else:
active.append(False)
print '\nNatural orbitals:'
for i in range(nb):
print 'orb:',i,active[i],nocc[i],enorb[i]
active = numpy.array(active)
actIndices = list(numpy.argwhere(active==True).flatten())
cOrbs = coeff[:,:actIndices[0]]
aOrbs = coeff[:,actIndices]
vOrbs = coeff[:,actIndices[-1]+1:]
nb = cOrbs.shape[0]
nc = cOrbs.shape[1]
na = aOrbs.shape[1]
nv = vOrbs.shape[1]
print 'core orbs:',cOrbs.shape
print 'act orbs:',aOrbs.shape
print 'vir orbs:',vOrbs.shape
assert nc+na+nv == nb
# dump UNO
with open(fname+'_uno.molden','w') as thefile:
molden.header(mol,thefile)
molden.orbital_coeff(mol,thefile,coeff)
#=====================
# Population analysis
#=====================
from pyscf import lo
aux = lo.orth_ao(mol,method='meta_lowdin')
#clmo = ulocal.scdm(cOrbs,ova,aux)
#almo = ulocal.scdm(aOrbs,ova,aux)
clmo = cOrbs
almo = aOrbs
ierr,uc = pmloc.loc(mol,clmo)
ierr,ua = pmloc.loc(mol,almo)
clmo = clmo.dot(uc)
almo = almo.dot(ua)
vlmo = ulocal.scdm(vOrbs,ova,aux)
# P-SORT
mo_c,n_c,e_c = ulocal.psort(ova,fav,pT,clmo)
mo_o,n_o,e_o = ulocal.psort(ova,fav,pT,almo)
mo_v,n_v,e_v = ulocal.psort(ova,fav,pT,vlmo)
lmo = numpy.hstack((mo_c,mo_o,mo_v)).copy()
enorb = numpy.hstack([e_c,e_o,e_v])
occ = numpy.hstack([n_c,n_o,n_v])
# CHECK
diff = reduce(numpy.dot,(lmo.T,ova,lmo)) - numpy.identity(nb)
print 'diff=',numpy.linalg.norm(diff)
ulocal.lowdinPop(mol,lmo,ova,enorb,occ)
ulocal.dumpLMO(mol,fname,lmo)
print 'nalpha,nbeta,mol.spin,nb:',\
nalpha,nbeta,mol.spin,nb
return mol,ova,fav,pT,nb,nalpha,nbeta,nc,na,nv,lmo,enorb,occ
def dumpAct(fname,info,actlst,base=1):
actlst2 = [i-base for i in actlst]
mol,ova,fav,pT,nb,nalpha,nbeta,nc,na,nv,lmo,enorb,occ = info
corb = set(range(nc))
aorb = set(range(nc,nc+na))
vorb = set(range(nc+na,nc+na+nv))
print '[dumpAct]'
print ' corb=',corb
print ' aorb=',aorb
print ' vorb=',vorb
sorb = set(actlst2)
rcorb = corb.difference(corb.intersection(sorb))
#assuming act in actlst
#raorb = aorb.difference(aorb.intersection(sorb))
rvorb = vorb.difference(vorb.intersection(sorb))
corb = list(rcorb)
aorb = list(sorb)
vorb = list(rvorb)
print ' corb=',corb
print ' aorb=',aorb
print ' vorb=',vorb
clmo = lmo[:,corb].copy()
almo = lmo[:,aorb].copy()
vlmo = lmo[:,vorb].copy()
ierr,ua = pmloc.loc(mol,almo)
almo = almo.dot(ua)
#>>> DUMP <<<#
# P-SORT
mo_c = clmo
mo_v = vlmo
e_c = enorb[corb].copy()
e_v = enorb[vorb].copy()
n_c = occ[corb].copy()
n_v = occ[vorb].copy()
mo_o,n_o,e_o = ulocal.psort(ova,fav,pT,almo)
lmo2 = numpy.hstack((mo_c,mo_o,mo_v))
enorb = numpy.hstack([e_c,e_o,e_v])
occ = numpy.hstack([n_c,n_o,n_v])
assert len(enorb)==nb
assert len(occ)==nb
# CHECK
diff = reduce(numpy.dot,(lmo2.T,ova,lmo2)) - numpy.identity(nb)
print 'diff=',numpy.linalg.norm(diff)
ulocal.lowdinPop(mol,lmo,ova,enorb,occ)
ulocal.dumpLMO(mol,fname+'_new',lmo2)
print 'nalpha,nbeta,mol.spin,nb:',\
nalpha,nbeta,mol.spin,nb
print 'diff(LMO2-LMO)=',numpy.linalg.norm(lmo2-lmo)
nc = len(e_c)
na = len(e_o)
nv = len(e_v)
assert na == len(actlst)
assert nc+na+nv == nb
print 'nc,na,nv,nb=',nc,na,nv,nb
return lmo2,nc,na,nv
if __name__ == '__main__':
fname = 'hs_bp86'
info = dumpLUNO(fname)
actlst = [117,118,119,120,125,126]+range(127,137)
dumpAct(fname,info,actlst,base=1)
| sunqm/pyscf | examples/local_orb/nlocal.py | Python | apache-2.0 | 6,720 | 0.058333 |
from shuttl.tests import testbase
from shuttl.Models.User import User, UserDataTakenException, NoOrganizationException, ToManyOrganizations
from shuttl.Models.organization import Organization
from shuttl.Models.Reseller import Reseller
class UserTestCase(testbase.BaseTest):
def _setUp(self):
self.reseller = Reseller(name ="test4", url="test2.com")
self.reseller.save()
pass
def test_create(self):
organization = Organization(name="Test", reseller=self.reseller)
organization.save()
organization = Organization.Get(name="Test", vendor=self.reseller)
data = dict(organization=organization, username="Tester", email="[email protected]", password="Things")
user = User.Create(**data)
self.assertRaises(UserDataTakenException, User.Create, **data)
user2 = User.query.get(user.id)
self.assertEqual(user2.username, user.username)
self.assertEqual(user2, user)
self.assertEqual(user2.password, user.password)
self.assertNotEqual(user2.password, "Things")
self.assertFalse(user.isAdmin)
self.assertFalse(user.isFree)
self.assertFalse(user.isActive)
self.assertFalse(user.is_active)
self.assertFalse(user.is_active)
self.assertIsNotNone(user2.organization)
user.organization = None
self.assertRaises(NoOrganizationException, user.save)
pass
def test_password(self):
org = Organization.Create(name="Test", reseller=self.reseller)
usr = User.Create(organization=org, username="Tester", email="[email protected]", password="Bullshit")
oldPW = usr.password
self.assertNotEqual(usr.password, "Bullshit")
self.assertTrue(usr.checkPassword("Bullshit"))
usr.setPassword("Things")
self.assertNotEqual(usr.password, oldPW)
self.assertTrue(usr.checkPassword("Things"))
pass
| shuttl-io/shuttl | shuttl/tests/test_models/test_user.py | Python | mit | 1,929 | 0.00311 |
from __future__ import absolute_import
import collections
from .cache import Cache
class LRUCache(Cache):
"""Least Recently Used (LRU) cache implementation."""
def __init__(self, maxsize, missing=None, getsizeof=None):
Cache.__init__(self, maxsize, missing, getsizeof)
self.__order = collections.OrderedDict()
def __getitem__(self, key, cache_getitem=Cache.__getitem__):
value = cache_getitem(self, key)
self.__update(key)
return value
def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
cache_setitem(self, key, value)
self.__update(key)
def __delitem__(self, key, cache_delitem=Cache.__delitem__):
cache_delitem(self, key)
del self.__order[key]
def popitem(self):
"""Remove and return the `(key, value)` pair least recently used."""
try:
key = next(iter(self.__order))
except StopIteration:
raise KeyError('%s is empty' % self.__class__.__name__)
else:
return (key, self.pop(key))
if hasattr(collections.OrderedDict, 'move_to_end'):
def __update(self, key):
try:
self.__order.move_to_end(key)
except KeyError:
self.__order[key] = None
else:
def __update(self, key):
try:
self.__order[key] = self.__order.pop(key)
except KeyError:
self.__order[key] = None
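# Illustrative usage sketch (not part of the original module): with maxsize=2,
# reading 'a' marks it as most recently used, so inserting a third key evicts
# 'b' rather than 'a'.
#
#     cache = LRUCache(maxsize=2)
#     cache['a'] = 1
#     cache['b'] = 2
#     cache['a']        # touch 'a'; 'b' is now the least recently used
#     cache['c'] = 3    # exceeds maxsize, so 'b' is evicted
#     assert 'a' in cache and 'c' in cache and 'b' not in cache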
| endlessm/chromium-browser | tools/swarming_client/third_party/cachetools/lru.py | Python | bsd-3-clause | 1,483 | 0 |
# Configs for mk-livestatus lookup scripts
HOST = [ 'nagios', 'nagios1' ]
PORT = 6557
| skywalka/splunk-for-nagios | bin/mklivestatus.py | Python | gpl-3.0 | 87 | 0.034483 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import boto.ec2
from boto.ec2.blockdevicemapping import BlockDeviceType
from boto.ec2.blockdevicemapping import BlockDeviceMapping
import time
import copy
import argparse
import sys
import pprint
import os
import yaml
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
CONFIG_PATH = os.path.join(BASE_PATH, '../configs')
def launch_from_config(conn, instance_config_name, config_file_name):
spot_requests_config = get_config(config_file_name)
config = spot_requests_config[instance_config_name]
mapping = create_mapping(config)
print 'Launching %s instances'%(instance_config_name)
print 'Instance parameters:'
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(config)
spot_req = conn.request_spot_instances(
config['price'],
config['ami_id'],
count=config['count'],
type=config['type'],
key_name=config['key_name'],
instance_type=config['instance_type'],
placement_group=config['placement_group'],
security_group_ids=config['security_groups'],
subnet_id=config['subnet_id'],
instance_profile_name=config['instance_profile_name'],
block_device_map=mapping
)
request_ids = [req.id for req in spot_req]
print 'Waiting for fulfillment'
instance_ids = wait_for_fulfillment(conn, request_ids,
copy.deepcopy(request_ids))
if 'tags' in config:
tag_instances(conn, instance_ids, config['tags'])
return instance_ids
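# Example of a single entry in the spot-requests YAML consumed above
# (illustrative only: the key names come from the config lookups in
# launch_from_config/create_mapping, and every value below is made up):
#
#   workers:
#     price: 0.50
#     ami_id: ami-12345678
#     count: 2
#     type: one-time
#     key_name: my-keypair
#     instance_type: c3.xlarge
#     placement_group: null
#     security_groups: [sg-12345678]
#     subnet_id: subnet-12345678
#     instance_profile_name: my-instance-profile
#     mapping:
#       ephemeral0: /dev/sdb
#     tags:
#       Name: spot-worker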
def get_config(config_file_name):
config_file = open(os.path.join(CONFIG_PATH, config_file_name))
config_dict = yaml.load(config_file.read())
return config_dict
def create_mapping(config):
if 'mapping' not in config:
return None
mapping = BlockDeviceMapping()
for ephemeral_name, device_path in config['mapping'].iteritems():
ephemeral = BlockDeviceType()
ephemeral.ephemeral_name = ephemeral_name
mapping[device_path] = ephemeral
return mapping
def wait_for_fulfillment(conn, request_ids, pending_request_ids):
"""Loop through all pending request ids waiting for them to be fulfilled.
If a request is fulfilled, remove it from pending_request_ids.
If there are still pending requests, sleep and check again in 10 seconds.
    Only return once every spot request has been resolved (fulfilled or failed)."""
instance_ids = []
failed_ids = []
time.sleep(10)
pending_statuses = set(['pending-evaluation', 'pending-fulfillment'])
while len(pending_request_ids) > 0:
results = conn.get_all_spot_instance_requests(
request_ids=pending_request_ids)
for result in results:
if result.status.code == 'fulfilled':
pending_request_ids.pop(pending_request_ids.index(result.id))
print '\nspot request %s fulfilled!'%result.id
instance_ids.append(result.instance_id)
elif result.status.code not in pending_statuses:
pending_request_ids.pop(pending_request_ids.index(result.id))
print '\nspot request %s could not be fulfilled. ' \
'Status code: %s'%(result.id, result.status.code)
failed_ids.append(result.id)
if len(pending_request_ids) > 0:
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(10)
if len(failed_ids) > 0:
print 'The following spot requests ' \
'have failed: %s'%(', '.join(failed_ids))
else:
print 'All spot requests fulfilled!'
return instance_ids
def tag_instances(conn, instance_ids, tags):
instances = conn.get_only_instances(instance_ids=instance_ids)
for instance in instances:
for key, value in tags.iteritems():
instance.add_tag(key=key, value=value)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('instance', type=str,
help='Instance config name to launch')
parser.add_argument('-r', '--region', type=str, default='us-east-1',
help='EC2 region name')
parser.add_argument('-c', '--config-file', type=str, default='spot_requests.yml',
help='Spot requests config file name')
args = parser.parse_args()
conn = boto.ec2.connect_to_region(args.region)
config_file_name = args.config_file
instance_config_name = args.instance
launch_from_config(conn, instance_config_name, config_file_name)
if __name__ == '__main__':
main()
| vianasw/spot_launcher | spot_launcher/spot_launcher.py | Python | apache-2.0 | 4,508 | 0.004215 |
# Copyright 2016 Leon Sixt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from diktya.blocks import conv2d_block, resnet
from diktya.func_api_helpers import sequential
from keras.layers import Input
from keras.engine.training import Model
def test_conv2d_block():
x = Input(shape=(1, 8, 8))
y = sequential(
conv2d_block(4)
)(x)
model = Model(x, y)
assert model.get_output_shape_for((None, 1, 8, 8)) == (None, 4, 8, 8)
x = Input(shape=(1, 8, 8))
y = sequential(
conv2d_block(4, pooling='avg')
)(x)
model = Model(x, y)
assert model.get_output_shape_for((None, 1, 8, 8)) == (None, 4, 4, 4)
x = Input(shape=(1, 8, 8))
y = sequential(
conv2d_block(4, up=True)
)(x)
model = Model(x, y)
assert model.get_output_shape_for((None, 1, 8, 8)) == (None, 4, 16, 16)
def test_resnet():
n = 4
x = Input(shape=(1, 8, 8))
y = sequential([
conv2d_block(n),
resnet(n)
])(x)
model = Model(x, y)
assert model.get_output_shape_for((None, 1, 8, 8)) == (None, n, 8, 8)
| BioroboticsLab/diktya | tests/test_blocks.py | Python | apache-2.0 | 1,576 | 0 |
# Copyright 2014-2016 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class RepositoryBaseTest(object):
def setup(self):
self.remote_url = "https://example.com"
self.branch = "master"
self.repo_path = "/home/gigirepo"
| PressLabs/gitfs | tests/repository/base.py | Python | apache-2.0 | 759 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This file is part of the prometeo project.
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
"""
__author__ = 'Emanuele Bertoldi <[email protected]>'
__copyright__ = 'Copyright (c) 2011 Emanuele Bertoldi'
__version__ = '0.0.5'
from django.db import models
from django.db.models import permalink
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from prometeo.core.models import Commentable
class Product(Commentable):
"""Product model.
"""
name = models.CharField(max_length=255, verbose_name=_('name'))
code = models.CharField(max_length=255, verbose_name=_('code'))
ean13 = models.CharField(max_length=13, blank=True, verbose_name=_('EAN13'))
description = models.TextField(blank=True, verbose_name=_('description'))
uom = models.CharField(max_length=20, choices=settings.PRODUCT_UOM_CHOICES, default=settings.PRODUCT_DEFAULT_UOM, verbose_name=_('UOM'))
uos = models.CharField(max_length=20, choices=settings.PRODUCT_UOM_CHOICES, default=settings.PRODUCT_DEFAULT_UOM, verbose_name=_('UOS'))
uom_to_uos = models.FloatField(default=1.0, help_text=_('Conversion rate between UOM and UOS'), verbose_name=_('UOM to UOS'))
weight = models.FloatField(default=1.0, verbose_name=_('unit weight (Kg)'))
is_consumable = models.BooleanField(default=False, verbose_name=_('consumable?'))
is_service = models.BooleanField(default=False, verbose_name=_('service?'))
sales_price = models.FloatField(default=0.0, verbose_name=_('sales price'))
sales_currency = models.CharField(max_length=3, choices=settings.CURRENCIES, default=settings.DEFAULT_CURRENCY, verbose_name=_('sales currency'))
max_sales_discount = models.FloatField(default=0.0, verbose_name=_('max sales discount (%)'))
sales_tax = models.FloatField(default=0.0, verbose_name=_('sales tax (%)'))
suppliers = models.ManyToManyField('partners.Partner', through='products.Supply', null=True, blank=True, verbose_name=_('suppliers'))
categories = models.ManyToManyField('taxonomy.Category', null=True, blank=True, verbose_name=_('categories'))
tags = models.ManyToManyField('taxonomy.Tag', null=True, blank=True, verbose_name=_('tags'))
dashboard = models.OneToOneField('widgets.Region', null=True, verbose_name=_("dashboard"))
stream = models.OneToOneField('notifications.Stream', null=True, verbose_name=_('stream'))
class Meta:
ordering = ('code',)
verbose_name = _('product')
verbose_name_plural = _('products')
def __unicode__(self):
return '#%s: %s' % (self.code, self.name)
@models.permalink
def get_absolute_url(self):
return ('product_detail', (), {"id": self.pk})
@models.permalink
def get_edit_url(self):
return ('product_edit', (), {"id": self.pk})
@models.permalink
def get_delete_url(self):
return ('product_delete', (), {"id": self.pk})
class ProductEntry(models.Model):
"""A set of instances of the same product.
"""
product = models.ForeignKey(Product, verbose_name=_('product'))
quantity = models.FloatField(default=1.0, verbose_name=_('quantity'))
unit_price = models.FloatField(null=True, blank=True, help_text=_("Keep it blank to use the product's default one"), verbose_name=_('unit price'))
tax = models.FloatField(null=True, blank=True, help_text=_("Keep it blank to use the product's default one"), verbose_name=_('tax (%)'))
discount = models.FloatField(null=True, blank=True, help_text=_("Keep it blank to use the product's default one"), verbose_name=_('discount (%)'))
notes = models.TextField(null=True, blank=True, verbose_name=_('notes'))
class Meta:
verbose_name = _('product entry')
verbose_name_plural = _('product entries')
def __unicode__(self):
return '%s (%d %s)' % (self.product, self.quantity, self.product.uos)
def get_absolute_url(self):
return self.product.get_absolute_url()
def clean(self):
product = self.product
if not self.unit_price and product:
self.unit_price = product.sales_price
if not self.tax and product:
self.tax = product.sales_tax
class Supply(models.Model):
    """Relation between a product and one of its suppliers.
"""
product = models.ForeignKey(Product, verbose_name=_('product'))
supplier = models.ForeignKey('partners.Partner', limit_choices_to = {'is_supplier': True}, verbose_name=_('supplier'))
supply_method = models.CharField(max_length=10, choices=settings.PRODUCT_SUPPLY_METHODS, default=settings.PRODUCT_DEFAULT_SUPPLY_METHOD, verbose_name=_('supply method'))
name = models.CharField(max_length=255, null=True, blank=True, help_text=_("Product name used by the supplier"), verbose_name=_('ref. name'))
code = models.CharField(max_length=255, null=True, blank=True, help_text=_("Product code used by the supplier"), verbose_name=_('ref. code'))
purchase_price = models.FloatField(default=0.0, verbose_name=_('purchase price'))
purchase_currency = models.CharField(max_length=3, choices=settings.CURRENCIES, default=settings.DEFAULT_CURRENCY, verbose_name=_('purchase currency'))
max_purchase_discount = models.FloatField(default=0.0, verbose_name=_('max purchase discount (%)'))
purchase_tax = models.FloatField(default=0.0, verbose_name=_('purchase tax (%)'))
lead_time = models.PositiveIntegerField(default=1, verbose_name=_('lead time (days)'))
minimal_quantity = models.FloatField(default=1.0, verbose_name=_('minimal quantity'))
warranty_period = models.PositiveIntegerField(default=settings.PRODUCT_DEFAULT_WARRANTY_PERIOD, verbose_name=_('warranty period (days)'))
end_of_life = models.DateField(null=True, blank=True, verbose_name=_('end of life'))
class Meta:
ordering = ('product', 'supplier')
verbose_name = _('supply')
verbose_name_plural = _('supplies')
unique_together = (('product', 'supplier'),)
def __unicode__(self):
code = self.code or self.product.code
name = self.name or self.product.name
return '%s (%s)' % (self.product, self.supplier)
@models.permalink
def get_absolute_url(self):
return ('product_supply_detail', (), {"product_id": self.product.pk, "id": self.pk})
@models.permalink
def get_edit_url(self):
return ('product_edit_supply', (), {"product_id": self.product.pk, "id": self.pk})
@models.permalink
def get_delete_url(self):
return ('product_delete_supply', (), {"product_id": self.product.pk, "id": self.pk})
def _stream(self):
return [self.product.stream, self.supplier.stream]
stream = property(_stream)
| zuck/prometeo-erp | products/models.py | Python | lgpl-3.0 | 7,390 | 0.005954 |
# ***************************************************************************
# * Copyright (c) 2016 Ofentse Kgoa <[email protected]> *
# * Copyright (c) 2018 Bernd Hahnebach <[email protected]> *
# * Based on the FemElementGeometry1D by Bernd Hahnebach *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "FreeCAD FEM element fluid 1D task panel for the document object"
__author__ = "Ofentse Kgoa, Bernd Hahnebach"
__url__ = "https://www.freecadweb.org"
## @package task_element_fluid1D
# \ingroup FEM
# \brief task panel for element fluid 1D object
from PySide import QtCore
from PySide import QtGui
import FreeCAD
import FreeCADGui
from FreeCAD import Units
from femguiutils import selection_widgets
from femobjects import element_fluid1D
class _TaskPanel:
"""
The TaskPanel for editing References property of ElementFluid1D objects
"""
def __init__(self, obj):
self.obj = obj
# parameter widget
self.parameterWidget = FreeCADGui.PySideUic.loadUi(
FreeCAD.getHomePath() + "Mod/Fem/Resources/ui/ElementFluid1D.ui"
)
QtCore.QObject.connect(
self.parameterWidget.cb_section_type,
QtCore.SIGNAL("activated(int)"),
self.sectiontype_changed
)
QtCore.QObject.connect(
self.parameterWidget.cb_liquid_section_type,
QtCore.SIGNAL("activated(int)"),
self.liquidsectiontype_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_manning_area,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.manning_area_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_manning_radius,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.manning_radius_changed
)
QtCore.QObject.connect(
self.parameterWidget.sb_manning_coefficient,
QtCore.SIGNAL("valueChanged(double)"),
self.manning_coefficient_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_enlarge_area1,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.enlarge_area1_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_enlarge_area2,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.enlarge_area2_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_contract_area1,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.contract_area1_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_contract_area2,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.contract_area2_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_inletpressure,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.inlet_pressure_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_outletpressure,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.outlet_pressure_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_inletflowrate,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.inlet_flowrate_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_outletflowrate,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.outlet_flowrate_changed
)
QtCore.QObject.connect(
self.parameterWidget.gb_inletpressure,
QtCore.SIGNAL("clicked(bool)"),
self.inlet_pressure_active
)
QtCore.QObject.connect(
self.parameterWidget.gb_outletpressure,
QtCore.SIGNAL("clicked(bool)"),
self.outlet_pressure_active
)
QtCore.QObject.connect(
self.parameterWidget.gb_inletflowrate,
QtCore.SIGNAL("clicked(bool)"),
self.inlet_flowrate_active
)
QtCore.QObject.connect(
self.parameterWidget.gb_outletflowrate,
QtCore.SIGNAL("clicked(bool)"),
self.outlet_flowrate_active
)
QtCore.QObject.connect(
self.parameterWidget.if_entrance_pipe_area,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.entrance_pipe_area_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_entrance_area,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.entrance_area_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_diaphragm_pipe_area,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.diaphragm_pipe_area_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_diaphragm_area,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.diaphragm_area_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_bend_pipe_area,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.bend_pipe_area_changed
)
QtCore.QObject.connect(
self.parameterWidget.sb_bradius_pdiameter,
QtCore.SIGNAL("valueChanged(double)"),
self.bradius_pdiameter_changed
)
QtCore.QObject.connect(
self.parameterWidget.sb_bend_angle,
QtCore.SIGNAL("valueChanged(double)"),
self.bend_angle_changed
)
QtCore.QObject.connect(
self.parameterWidget.sb_bend_loss_coefficient,
QtCore.SIGNAL("valueChanged(double)"),
self.bend_loss_coefficient_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_gatevalve_pipe_area,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.gatevalve_pipe_area_changed
)
QtCore.QObject.connect(
self.parameterWidget.sb_gatevalve_closing_coeff,
QtCore.SIGNAL("valueChanged(double)"),
self.gatevalve_closing_coeff_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_colebrooke_pipe_area,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.colebrooke_pipe_area_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_colebrooke_radius,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.colebrooke_radius_changed
)
QtCore.QObject.connect(
self.parameterWidget.if_colebrooke_grain_diameter,
QtCore.SIGNAL("valueChanged(Base::Quantity)"),
self.colebrooke_grain_diameter_changed
)
QtCore.QObject.connect(
self.parameterWidget.sb_colebrooke_form_factor,
QtCore.SIGNAL("valueChanged(double)"),
self.colebrooke_form_factor_changed
)
QtCore.QObject.connect(
self.parameterWidget.tw_pump_characteristics,
QtCore.SIGNAL("cellChanged(int, int)"),
self.pump_characteristics_changed
)
        # Some fluid types are deactivated since they are not implemented in the ccx writer
self.parameterWidget.cb_section_type.addItems(
element_fluid1D.ElementFluid1D.known_fluid_types
)
self.parameterWidget.cb_liquid_section_type.addItems(
element_fluid1D.ElementFluid1D.known_liquid_types
)
self.parameterWidget.cb_gas_section_type.addItems(
element_fluid1D.ElementFluid1D.known_gas_types
)
self.parameterWidget.cb_channel_section_type.addItems(
element_fluid1D.ElementFluid1D.known_channel_types
)
self.get_fluidsection_props()
self.updateParameterWidget()
# geometry selection widget
self.selectionWidget = selection_widgets.GeometryElementsSelection(
obj.References,
["Edge"],
False,
True
)
# form made from param and selection widget
self.form = [self.parameterWidget, self.selectionWidget]
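        # Informal summary of this panel's data flow (everything referenced is
        # defined below): get_fluidsection_props() copies the object's
        # properties into plain attributes, the *_changed() slots update those
        # attributes while the dialog is open, and accept() writes them back
        # via set_fluidsection_props(). The document object is only modified
        # once the dialog is accepted.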
def accept(self):
self.set_fluidsection_props()
self.obj.References = self.selectionWidget.references
self.recompute_and_set_back_all()
return True
def reject(self):
self.recompute_and_set_back_all()
return True
def recompute_and_set_back_all(self):
doc = FreeCADGui.getDocument(self.obj.Document)
doc.Document.recompute()
self.selectionWidget.setback_listobj_visibility()
if self.selectionWidget.sel_server:
FreeCADGui.Selection.removeObserver(self.selectionWidget.sel_server)
doc.resetEdit()
def get_fluidsection_props(self):
self.SectionType = self.obj.SectionType
self.LiquidSectionType = self.obj.LiquidSectionType
self.ManningArea = self.obj.ManningArea
self.ManningRadius = self.obj.ManningRadius
self.ManningCoefficient = self.obj.ManningCoefficient
self.EnlargeArea1 = self.obj.EnlargeArea1
self.EnlargeArea2 = self.obj.EnlargeArea2
self.ContractArea1 = self.obj.ContractArea1
self.ContractArea2 = self.obj.ContractArea2
self.OutletPressure = self.obj.OutletPressure
self.InletPressure = self.obj.InletPressure
self.OutletFlowRate = self.obj.OutletFlowRate
self.InletFlowRate = self.obj.InletFlowRate
self.OutletPressureActive = self.obj.OutletPressureActive
self.InletPressureActive = self.obj.InletPressureActive
self.OutletFlowRateActive = self.obj.OutletFlowRateActive
self.InletFlowRateActive = self.obj.InletFlowRateActive
self.EntrancePipeArea = self.obj.EntrancePipeArea
self.EntranceArea = self.obj.EntranceArea
self.DiaphragmPipeArea = self.obj.DiaphragmPipeArea
self.DiaphragmArea = self.obj.DiaphragmArea
self.BendPipeArea = self.obj.BendPipeArea
self.BendRadiusDiameter = self.obj.BendRadiusDiameter
self.BendAngle = self.obj.BendAngle
self.BendLossCoefficient = self.obj.BendLossCoefficient
self.GateValvePipeArea = self.obj.GateValvePipeArea
self.GateValveClosingCoeff = self.obj.GateValveClosingCoeff
self.ColebrookeArea = self.obj.ColebrookeArea
self.ColebrookeRadius = self.obj.ColebrookeRadius
self.ColebrookeGrainDiameter = self.obj.ColebrookeGrainDiameter
self.ColebrookeFormFactor = self.obj.ColebrookeFormFactor
self.PumpFlowRate = self.obj.PumpFlowRate
self.PumpHeadLoss = self.obj.PumpHeadLoss
def set_fluidsection_props(self):
self.obj.LiquidSectionType = self.LiquidSectionType
self.obj.SectionType = self.SectionType
self.obj.ManningArea = self.ManningArea
self.obj.ManningRadius = self.ManningRadius
self.obj.ManningCoefficient = self.ManningCoefficient
self.obj.EnlargeArea1 = self.EnlargeArea1
self.obj.EnlargeArea2 = self.EnlargeArea2
self.obj.ContractArea1 = self.ContractArea1
self.obj.ContractArea2 = self.ContractArea2
self.obj.OutletPressure = self.OutletPressure
self.obj.InletPressure = self.InletPressure
self.obj.OutletFlowRate = self.OutletFlowRate
self.obj.InletFlowRate = self.InletFlowRate
self.obj.OutletPressureActive = self.OutletPressureActive
self.obj.InletPressureActive = self.InletPressureActive
self.obj.OutletFlowRateActive = self.OutletFlowRateActive
self.obj.InletFlowRateActive = self.InletFlowRateActive
self.obj.EntrancePipeArea = self.EntrancePipeArea
self.obj.EntranceArea = self.EntranceArea
self.obj.DiaphragmPipeArea = self.DiaphragmPipeArea
self.obj.DiaphragmArea = self.DiaphragmArea
self.obj.BendPipeArea = self.BendPipeArea
self.obj.BendRadiusDiameter = self.BendRadiusDiameter
self.obj.BendAngle = self.BendAngle
self.obj.BendLossCoefficient = self.BendLossCoefficient
self.obj.GateValvePipeArea = self.GateValvePipeArea
self.obj.GateValveClosingCoeff = self.GateValveClosingCoeff
self.obj.ColebrookeArea = self.ColebrookeArea
self.obj.ColebrookeRadius = self.ColebrookeRadius
self.obj.ColebrookeGrainDiameter = self.ColebrookeGrainDiameter
self.obj.ColebrookeFormFactor = self.ColebrookeFormFactor
self.obj.PumpFlowRate = self.PumpFlowRate
self.obj.PumpHeadLoss = self.PumpHeadLoss
def updateParameterWidget(self):
"fills the widgets"
index_sectiontype = self.parameterWidget.cb_section_type.findText(self.SectionType)
self.parameterWidget.cb_section_type.setCurrentIndex(index_sectiontype)
self.parameterWidget.sw_section_type.setCurrentIndex(index_sectiontype)
index_liquidsectiontype = self.parameterWidget.cb_liquid_section_type.findText(
self.LiquidSectionType
)
self.parameterWidget.cb_liquid_section_type.setCurrentIndex(index_liquidsectiontype)
self.parameterWidget.sw_liquid_section_type.setCurrentIndex(index_liquidsectiontype)
self.parameterWidget.if_manning_area.setText(self.ManningArea.UserString)
self.parameterWidget.if_manning_radius.setText(self.ManningRadius.UserString)
self.parameterWidget.sb_manning_coefficient.setValue(self.ManningCoefficient)
self.parameterWidget.if_enlarge_area1.setText(self.EnlargeArea1.UserString)
self.parameterWidget.if_enlarge_area2.setText(self.EnlargeArea2.UserString)
self.parameterWidget.if_contract_area1.setText(self.ContractArea1.UserString)
self.parameterWidget.if_contract_area2.setText(self.ContractArea2.UserString)
self.parameterWidget.if_inletpressure.setText(FreeCAD.Units.Quantity(
1000 * self.InletPressure, FreeCAD.Units.Pressure).UserString
)
self.parameterWidget.if_outletpressure.setText(FreeCAD.Units.Quantity(
1000 * self.OutletPressure, FreeCAD.Units.Pressure).UserString
)
self.parameterWidget.if_inletflowrate.setText(str(self.InletFlowRate))
self.parameterWidget.if_outletflowrate.setText(str(self.OutletFlowRate))
self.parameterWidget.gb_inletpressure.setChecked(self.InletPressureActive)
self.parameterWidget.gb_outletpressure.setChecked(self.OutletPressureActive)
self.parameterWidget.gb_inletflowrate.setChecked(self.InletFlowRateActive)
self.parameterWidget.gb_outletflowrate.setChecked(self.OutletFlowRateActive)
self.parameterWidget.if_entrance_pipe_area.setText(self.EntrancePipeArea.UserString)
self.parameterWidget.if_entrance_area.setText(self.EntranceArea.UserString)
self.parameterWidget.if_diaphragm_pipe_area.setText(self.DiaphragmPipeArea.UserString)
self.parameterWidget.if_diaphragm_area.setText(self.DiaphragmArea.UserString)
self.parameterWidget.if_bend_pipe_area.setText(self.BendPipeArea.UserString)
self.parameterWidget.sb_bradius_pdiameter.setValue(self.BendRadiusDiameter)
self.parameterWidget.sb_bend_angle.setValue(self.BendAngle)
self.parameterWidget.sb_bend_loss_coefficient.setValue(self.BendLossCoefficient)
self.parameterWidget.if_gatevalve_pipe_area.setText(self.GateValvePipeArea.UserString)
self.parameterWidget.sb_gatevalve_closing_coeff.setValue(self.GateValveClosingCoeff)
self.parameterWidget.if_colebrooke_pipe_area.setText(self.ColebrookeArea.UserString)
self.parameterWidget.if_colebrooke_radius.setText(self.ColebrookeRadius.UserString)
self.parameterWidget.if_colebrooke_grain_diameter.setText(
self.ColebrookeGrainDiameter.UserString
)
self.parameterWidget.sb_colebrooke_form_factor.setValue(self.ColebrookeFormFactor)
for i in range(len(self.PumpFlowRate)):
self.parameterWidget.tw_pump_characteristics.setItem(
i, 0, QtGui.QTableWidgetItem(str(self.PumpFlowRate[i]))
)
self.parameterWidget.tw_pump_characteristics.setItem(
i, 1, QtGui.QTableWidgetItem(str(self.PumpHeadLoss[i]))
)
def sectiontype_changed(self, index):
if index < 0:
return
self.parameterWidget.cb_section_type.setCurrentIndex(index)
self.parameterWidget.sw_section_type.setCurrentIndex(index)
# parameterWidget returns unicode
self.SectionType = str(self.parameterWidget.cb_section_type.itemText(index))
def liquidsectiontype_changed(self, index):
if index < 0:
return
self.parameterWidget.cb_liquid_section_type.setCurrentIndex(index)
self.parameterWidget.sw_liquid_section_type.setCurrentIndex(index)
# parameterWidget returns unicode
self.LiquidSectionType = str(
self.parameterWidget.cb_liquid_section_type.itemText(index)
)
def manning_area_changed(self, base_quantity_value):
self.ManningArea = base_quantity_value
def manning_radius_changed(self, base_quantity_value):
self.ManningRadius = base_quantity_value
def manning_coefficient_changed(self, base_quantity_value):
self.ManningCoefficient = base_quantity_value
def enlarge_area1_changed(self, base_quantity_value):
self.EnlargeArea1 = base_quantity_value
def enlarge_area2_changed(self, base_quantity_value):
self.EnlargeArea2 = base_quantity_value
def contract_area1_changed(self, base_quantity_value):
self.ContractArea1 = base_quantity_value
def contract_area2_changed(self, base_quantity_value):
self.ContractArea2 = base_quantity_value
def inlet_pressure_changed(self, base_quantity_value):
self.InletPressure = Units.Quantity(base_quantity_value).getValueAs("MPa").Value
def outlet_pressure_changed(self, base_quantity_value):
self.OutletPressure = Units.Quantity(base_quantity_value).getValueAs("MPa").Value
def inlet_flowrate_changed(self, base_quantity_value):
self.InletFlowRate = Units.Quantity(base_quantity_value).getValueAs("kg/s").Value
def outlet_flowrate_changed(self, base_quantity_value):
self.OutletFlowRate = Units.Quantity(base_quantity_value).getValueAs("kg/s").Value
def inlet_pressure_active(self, active):
self.InletPressureActive = active
def outlet_pressure_active(self, active):
self.OutletPressureActive = active
def inlet_flowrate_active(self, active):
self.InletFlowRateActive = active
def outlet_flowrate_active(self, active):
self.OutletFlowRateActive = active
def entrance_pipe_area_changed(self, base_quantity_value):
self.EntrancePipeArea = base_quantity_value
def entrance_area_changed(self, base_quantity_value):
self.EntranceArea = base_quantity_value
def diaphragm_pipe_area_changed(self, base_quantity_value):
self.DiaphragmPipeArea = base_quantity_value
def diaphragm_area_changed(self, base_quantity_value):
self.DiaphragmArea = base_quantity_value
def bend_pipe_area_changed(self, base_quantity_value):
self.BendPipeArea = base_quantity_value
def bradius_pdiameter_changed(self, base_quantity_value):
self.BendRadiusDiameter = base_quantity_value
def bend_angle_changed(self, base_quantity_value):
self.BendAngle = base_quantity_value
def bend_loss_coefficient_changed(self, base_quantity_value):
self.BendLossCoefficient = base_quantity_value
def gatevalve_pipe_area_changed(self, base_quantity_value):
self.GateValvePipeArea = base_quantity_value
def gatevalve_closing_coeff_changed(self, base_quantity_value):
self.GateValveClosingCoeff = base_quantity_value
def colebrooke_pipe_area_changed(self, base_quantity_value):
self.ColebrookeArea = base_quantity_value
def colebrooke_radius_changed(self, base_quantity_value):
self.ColebrookeRadius = base_quantity_value
def colebrooke_grain_diameter_changed(self, base_quantity_value):
self.ColebrookeGrainDiameter = base_quantity_value
def colebrooke_form_factor_changed(self, base_quantity_value):
self.ColebrookeFormFactor = base_quantity_value
def pump_characteristics_changed(self, row, column):
if column == 0:
self.PumpFlowRate[row] = float(
self.parameterWidget.tw_pump_characteristics.item(row, column).text()
)
else:
self.PumpHeadLoss[row] = float(
self.parameterWidget.tw_pump_characteristics.item(row, column).text()
)
| sanguinariojoe/FreeCAD | src/Mod/Fem/femtaskpanels/task_element_fluid1D.py | Python | lgpl-2.1 | 22,417 | 0.001695 |
import discord
from discord.ext import commands
from random import choice as randomchoice
from .utils.dataIO import fileIO
from .utils import checks
import os
defaultQuotes = [
"Thats why I love switch hitting, I like to be in control ~ Jan, from the Hypermine Dragon Fight - 21st May 2016",
"Thank you for wwaking within our server today- That sounds wrong. That does not sound PG at all -Jandoncom 24/5/16",
"EVERYONE RUN! GECKOR IS DRIVING A TRUCK AGAIN /o\ ~ N7DeltaForce 03/06/16",
"Everyone wants a piece of this -Jandoncom 7/6/2016",
"I Want Khip Kho's Heart! ~ Jandoncom 7/6/2016"]
class Quote:
"""Quote System for Red-DiscordBot
Based on the MIRC Quote Script by Zsadist (Hawkee Link: http://hawkee.com/snippet/8378/ )"""
def __init__(self, bot):
self.bot = bot
self.quotes = fileIO("data/quote/quotes.json", "load")
def save_quotes(self):
fileIO("data/quote/quotes.json", 'save', self.quotes)
@commands.group(pass_context=True, invoke_without_command=True)
async def quote(self, ctx):
"""Random Quote to be Drawn"""
await self.bot.say("Quote: " + randomchoice(self.quotes) + " ")
@quote.command()
async def add(self, quote):
"""Adds a Quote to the List"""
if quote in self.quotes:
await self.bot.say("That quote is already in the database!")
else:
self.quotes.append(quote)
self.save_quotes()
await self.bot.say("Quote: " + quote + " has been saved to the database!")
@quote.command()
    @checks.mod_or_permissions(administrator=True)
async def remove(self, quote):
"""Removes a Quote from the list"""
if quote not in self.quotes:
await self.bot.say("That quote is already in the database!")
else:
self.quotes.remove(quote)
self.save_quotes()
await self.bot.say("Quote: " + quote + " has been removed from the database!")
def check_folder():
if not os.path.exists("data/quote"):
print("Creating data/quote")
os.makedirs("data/quote")
def check_files():
fileName = "data/quote/quotes.json"
if not fileIO(fileName, "check"):
print("Creating Empty Quote.json File")
print("Creation Complete! Enjoy your new Quote System ~ Wolfstorm")
fileIO(fileName, "save", defaultQuotes)
def setup(bot):
check_folder()
check_files()
QuoteSystem = Quote(bot)
bot.add_cog(QuoteSystem)
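# Rough usage sketch (illustrative only; the actual command prefix depends on
# the bot's configuration):
#   [p]quote                     -> replies with a random stored quote
#   [p]quote add "Some words"    -> appends the quote and saves quotes.json
#   [p]quote remove "Some words" -> mods/admins only, drops it from the list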
| Wolfst0rm/Wolf-Cogs | quote/quote.py | Python | mit | 2,264 | 0.025177 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
v_in_geonames.py
----------------
Date : March 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'March 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
def processCommand(alg, parameters, context, feedback):
# v.in.geonames needs to use WGS84 projection
alg.commands.append('g.proj -c epsg=4326')
# Launch the algorithm
alg.processCommand(parameters, context, feedback)
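# Illustrative effect of the override above (a sketch, not an exact transcript
# of a GRASS session): the queued command list ends up roughly as
#   g.proj -c epsg=4326
#   v.in.geonames input=... output=...
# i.e. the projection is forced to WGS84 before the geonames import runs.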
| minorua/QGIS | python/plugins/processing/algs/grass7/ext/v_in_geonames.py | Python | gpl-2.0 | 1,246 | 0 |
# -*- coding: utf-8 -*-
import functools
import hashlib
import json
import os
import tempfile
import unittest
import uuid
import zipfile
from contextlib import nested
from datetime import datetime, timedelta
from decimal import Decimal
from django import forms
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core import mail
from django.core.urlresolvers import reverse
from django.db.models.signals import post_delete, post_save
from django.test.utils import override_settings
from django.utils import translation
import elasticsearch
import mock
from mock import patch
from nose.tools import eq_, ok_, raises
import mkt
from lib.utils import static_url
from mkt.constants import apps, MANIFEST_CONTENT_TYPE
from mkt.constants.applications import DEVICE_TYPES
from mkt.constants.iarc_mappings import (DESCS, INTERACTIVES, REVERSE_DESCS,
REVERSE_INTERACTIVES)
from mkt.constants.payments import PROVIDER_BANGO, PROVIDER_REFERENCE
from mkt.constants.regions import RESTOFWORLD
from mkt.developers.models import (AddonPaymentAccount, PaymentAccount,
SolitudeSeller)
from mkt.developers.providers import ALL_PROVIDERS
from mkt.files.models import File
from mkt.files.tests.test_models import UploadTest as BaseUploadTest
from mkt.files.utils import WebAppParser
from mkt.prices.models import AddonPremium, Price, PriceCurrency
from mkt.reviewers.models import EscalationQueue, QUEUE_TARAKO, RereviewQueue
from mkt.site.fixtures import fixture
from mkt.site.helpers import absolutify
from mkt.site.storage_utils import (public_storage, private_storage,
storage_is_remote)
from mkt.site.tests import (DynamicBoolFieldsTestMixin, ESTestCase,
TestCase, WebappTestCase, user_factory)
from mkt.site.utils import app_factory, version_factory
from mkt.submit.tests.test_views import BasePackagedAppTest, BaseWebAppTest
from mkt.translations.models import Translation
from mkt.users.models import UserProfile
from mkt.versions.models import update_status, Version
from mkt.webapps.indexers import WebappIndexer
from mkt.webapps.models import (AddonDeviceType, AddonExcludedRegion,
AddonUpsell, AppFeatures, AppManifest,
BlockedSlug, ContentRating, Geodata,
get_excluded_in, IARCInfo, Installed, Preview,
RatingDescriptors, RatingInteractives,
version_changed, Webapp)
from mkt.webapps.signals import version_changed as version_changed_signal
class TestWebapp(WebappTestCase):
def add_payment_account(self, app, provider_id, user=None):
if not user:
user = user_factory()
payment = PaymentAccount.objects.create(
solitude_seller=SolitudeSeller.objects.create(user=user,
uuid=uuid.uuid4()),
provider=provider_id,
user=user,
seller_uri=uuid.uuid4(),
uri=uuid.uuid4())
return AddonPaymentAccount.objects.create(
addon=app, payment_account=payment, product_uri=uuid.uuid4())
def test_get_icon_url(self):
app = self.get_app()
if storage_is_remote():
path = '%s/%s-%s.png' % (app.get_icon_dir(), app.pk, 32)
expected = '%s?modified=never' % public_storage.url(path)
else:
expected = (static_url('ADDON_ICON_URL')
% (str(app.id)[0:3], app.id, 32, 'never'))
assert app.get_icon_url(32).endswith(expected), (
'Expected %s, got %s' % (expected, app.icon_url))
app.icon_hash = 'abcdef'
assert app.get_icon_url(32).endswith('?modified=abcdef')
app.icon_type = None
assert app.get_icon_url(32).endswith('hub/default-32.png')
def test_get_promo_img_url(self):
app = self.get_app()
eq_(app.get_promo_img_url('640'), '')
eq_(app.get_promo_img_url('1050'), '')
app.promo_img_hash = 'chicken'
ok_('webapp_promo_imgs/337/337141-640.png?modified=chicken' in
app.get_promo_img_url('640'))
ok_('webapp_promo_imgs/337/337141-1050.png?modified=chicken' in
app.get_promo_img_url('1050'))
def test_has_payment_account(self):
app = self.get_app()
assert not app.has_payment_account()
self.add_payment_account(app, PROVIDER_BANGO)
assert app.has_payment_account()
def test_has_multiple_payment_accounts(self):
app = self.get_app()
assert not app.has_multiple_payment_accounts(), 'no accounts'
account = self.add_payment_account(app, PROVIDER_BANGO)
assert not app.has_multiple_payment_accounts(), 'one account'
self.add_payment_account(app, PROVIDER_REFERENCE, user=account.user)
ok_(app.has_multiple_payment_accounts(), 'two accounts')
def test_no_payment_account(self):
app = self.get_app()
assert not app.has_payment_account()
with self.assertRaises(app.PayAccountDoesNotExist):
app.payment_account(PROVIDER_BANGO)
def test_get_payment_account(self):
app = self.get_app()
acct = self.add_payment_account(app, PROVIDER_BANGO)
fetched_acct = app.payment_account(PROVIDER_BANGO)
eq_(acct, fetched_acct)
def test_delete_reason(self):
"""Test deleting with a reason gives the reason in the mail."""
app = self.get_app()
reason = u'trêason'
eq_(len(mail.outbox), 0)
app.delete(msg='bye', reason=reason)
eq_(len(mail.outbox), 1)
assert reason in mail.outbox[0].body
def test_soft_deleted(self):
app = self.get_app()
eq_(len(Webapp.objects.all()), 1)
eq_(len(Webapp.with_deleted.all()), 1)
app.delete('boom shakalakalaka')
eq_(len(Webapp.objects.all()), 0)
eq_(len(Webapp.with_deleted.all()), 1)
# When an app is deleted its slugs and domain should get relinquished.
post_mortem = Webapp.with_deleted.filter(id=app.id)
eq_(post_mortem.count(), 1)
eq_(getattr(post_mortem[0], 'app_domain'), None)
eq_(getattr(post_mortem[0], 'app_slug'), '337141')
def test_soft_deleted_valid(self):
app = self.get_app()
Webapp.objects.create(status=mkt.STATUS_DELETED)
eq_(list(Webapp.objects.valid()), [app])
eq_(list(Webapp.with_deleted.valid()), [app])
def test_delete_incomplete_with_deleted_version(self):
"""Test deleting incomplete add-ons with no public version attached."""
app = self.get_app()
app.current_version.delete()
eq_(Version.objects.count(), 0)
eq_(Version.with_deleted.count(), 1)
app.update(status=0, highest_status=0)
        # We want to be in the worst possible situation: no direct foreign key
        # to the deleted versions, so we call update_version() now that we have
        # an incomplete app.
app.update_version()
eq_(app.latest_version, None)
eq_(app.current_version, None)
app.delete()
# The app should have been soft-deleted.
eq_(len(mail.outbox), 1)
eq_(Webapp.objects.count(), 0)
eq_(Webapp.with_deleted.count(), 1)
def test_get_price(self):
app = self.get_app()
self.make_premium(app)
eq_(app.get_price(region=mkt.regions.USA.id), 1)
def test_get_price_tier(self):
app = self.get_app()
self.make_premium(app)
eq_(str(app.get_tier().price), '1.00')
ok_(app.get_tier_name())
def test_get_price_tier_no_charge(self):
app = self.get_app()
self.make_premium(app, 0)
eq_(str(app.get_tier().price), '0')
ok_(app.get_tier_name())
@mock.patch('mkt.versions.models.Version.is_privileged', True)
def test_app_type_privileged(self):
app = self.get_app()
app.update(is_packaged=True)
eq_(app.app_type, 'privileged')
def test_excluded_in(self):
app = self.get_app()
region = mkt.regions.BRA
AddonExcludedRegion.objects.create(addon=app, region=region.id)
self.assertSetEqual(get_excluded_in(region.id), [app.id])
def test_supported_locale_property(self):
app = self.get_app()
eq_(app.supported_locales,
(u'English (US)', [u'English (US)', u'Espa\xf1ol',
u'Portugu\xeas (do\xa0Brasil)']))
def test_supported_locale_property_empty(self):
app = self.get_app()
app.current_version.update(supported_locales='')
eq_(app.supported_locales, (u'English (US)', []))
def test_supported_locale_property_bad(self):
app = self.get_app()
app.current_version.update(supported_locales='de,xx', _signal=False)
eq_(app.supported_locales, (u'English (US)', [u'Deutsch']))
def test_supported_locale_app_non_public(self):
"""
Test supported locales falls back to latest_version when not public.
"""
app = self.get_app()
app.update(status=mkt.STATUS_PENDING)
app.latest_version.files.update(status=mkt.STATUS_PENDING)
app.update_version()
eq_(app.supported_locales,
(u'English (US)',
[u'English (US)', u'Espa\xf1ol', u'Portugu\xeas (do\xa0Brasil)']))
def test_guess_is_offline_when_appcache_path(self):
app = self.get_app()
# If there's no appcache_path defined, ain't an offline-capable app.
am = AppManifest.objects.get(version=app.current_version)
eq_(app.guess_is_offline(), False)
# If there's an appcache_path defined, this is an offline-capable app.
manifest = json.loads(am.manifest)
manifest['appcache_path'] = '/manifest.appcache'
am.update(manifest=json.dumps(manifest))
# reload isn't enough, it doesn't clear cached_property.
app = self.get_app()
eq_(app.guess_is_offline(), True)
def test_guess_is_offline_no_manifest(self):
app = Webapp()
eq_(app.guess_is_offline(), False)
@mock.patch('mkt.webapps.models.cache.get')
def test_is_offline_when_packaged(self, mock_get):
mock_get.return_value = ''
eq_(Webapp(is_packaged=True).guess_is_offline(), True)
eq_(Webapp(is_packaged=False).guess_is_offline(), False)
def test_guess_is_offline_no_version(self):
app = Webapp()
with mock.patch.object(Webapp, 'latest_version', None):
eq_(app.guess_is_offline(), False)
def test_guess_is_offline_no_files(self):
app = Webapp()
version = mock.MagicMock(all_files=[])
with mock.patch.object(Webapp, 'latest_version', version):
eq_(app.guess_is_offline(), False)
@mock.patch('mkt.webapps.models.Webapp.has_payment_account')
def test_payments_complete(self, pay_mock):
# Default to complete if it's not needed.
pay_mock.return_value = False
app = self.get_app()
assert app.payments_complete()
self.make_premium(app)
assert not app.payments_complete()
pay_mock.return_value = True
assert app.payments_complete()
def test_get_region_ids_no_exclusions(self):
# This returns IDs for the *included* regions.
eq_(self.get_app().get_region_ids(), mkt.regions.REGION_IDS)
def test_get_regions_no_exclusions(self):
# This returns the class definitions for the *included* regions.
eq_(sorted(self.get_app().get_regions()),
sorted(mkt.regions.REGIONS_CHOICES_ID_DICT.values()))
def test_get_regions_sort(self):
eq_(self.get_app().get_regions(),
sorted(mkt.regions.REGIONS_CHOICES_ID_DICT.values(),
key=lambda x: x.slug))
eq_(self.get_app().get_regions(sort_by='name'),
sorted(mkt.regions.REGIONS_CHOICES_ID_DICT.values(),
key=lambda x: x.name))
eq_(self.get_app().get_regions(sort_by='id'),
sorted(mkt.regions.REGIONS_CHOICES_ID_DICT.values(),
key=lambda x: x.id))
def test_in_tarako_queue_pending_in_queue(self):
app = self.get_app()
app.update(status=mkt.STATUS_PENDING)
app.additionalreview_set.create(queue=QUEUE_TARAKO)
ok_(app.in_tarako_queue())
def test_in_tarako_queue_approved_in_queue(self):
app = self.get_app()
app.update(status=mkt.STATUS_APPROVED)
app.additionalreview_set.create(queue=QUEUE_TARAKO)
ok_(app.in_tarako_queue())
def test_in_tarako_queue_pending_not_in_queue(self):
app = self.get_app()
app.update(status=mkt.STATUS_PENDING)
ok_(not app.in_tarako_queue())
def test_in_tarako_queue_approved_not_in_queue(self):
app = self.get_app()
app.update(status=mkt.STATUS_APPROVED)
ok_(not app.in_tarako_queue())
def test_in_china_queue_pending_not_in_queue(self):
app = self.get_app()
app.geodata.update(region_cn_nominated=datetime.now(),
region_cn_status=mkt.STATUS_PENDING)
app.update(status=mkt.STATUS_PENDING)
ok_(not app.in_china_queue()) # Need to be approved in general first.
def test_in_china_queue_approved_in_queue(self):
app = self.get_app()
app.geodata.update(region_cn_nominated=datetime.now(),
region_cn_status=mkt.STATUS_PENDING)
app.update(status=mkt.STATUS_APPROVED)
ok_(app.in_china_queue())
def test_in_china_queue_approved_in_china_not_in_queue(self):
app = self.get_app()
app.geodata.update(region_cn_nominated=datetime.now(),
region_cn_status=mkt.STATUS_APPROVED)
app.update(status=mkt.STATUS_APPROVED)
ok_(not app.in_china_queue())
def test_file_size(self):
app = self.get_app()
ok_(app.file_size)
f = app.current_version.all_files[0]
f.update(size=12345)
eq_(app.file_size, 12345)
app.update(_current_version=None)
f = app.latest_version.all_files[0]
f.update(size=54321)
eq_(app.file_size, 54321)
class TestCleanSlug(TestCase):
def test_clean_slug_new_object(self):
# Make sure there's at least an addon with the "webapp" slug,
# subsequent ones should be "webapp-1", "webapp-2", etc.
a = Webapp.objects.create()
eq_(a.app_slug, 'webapp')
# Start with a first clash. This should give us "webapp-1".
# We're not saving yet, we're testing the slug creation without an id.
b = Webapp()
b.clean_slug()
eq_(b.app_slug, 'webapp-1')
# Now save the instance to the database for future clashes.
b.save()
# Test on another object without an id.
c = Webapp()
c.clean_slug()
eq_(c.app_slug, 'webapp-2')
# Even if an addon is deleted, don't clash with its slug.
c.status = mkt.STATUS_DELETED
# Now save the instance to the database for future clashes.
c.save()
# And yet another object without an id. Make sure we're not trying to
# assign the 'webapp-2' slug from the deleted addon.
d = Webapp()
d.clean_slug()
eq_(d.app_slug, 'webapp-3')
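        # Informal recap of the collision handling exercised above:
        #   'webapp'   -> taken           -> next candidate 'webapp-1'
        #   'webapp-1' -> taken           -> next candidate 'webapp-2'
        #   'webapp-2' -> taken (deleted) -> next candidate 'webapp-3'
        # Deleted apps still reserve their slug, hence 'webapp-3' for d.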
def test_clean_slug_with_id(self):
# Create an addon and save it to have an id.
a = Webapp.objects.create()
# Start over: don't use the name nor the id to generate the slug.
a.app_slug = a.name = ""
a.clean_slug()
# Slugs created from an id are of the form "id~", eg "123~" to avoid
# clashing with URLs.
eq_(a.app_slug, "%s~" % a.id)
# And again, this time make it clash.
b = Webapp.objects.create()
# Set a's slug to be what should be created for b from its id.
a.app_slug = "%s~" % b.id
a.save()
# Now start over for b.
b.app_slug = b.name = ""
b.clean_slug()
eq_(b.app_slug, "%s~-1" % b.id)
def test_clean_slug_with_name(self):
# Make sure there's at least an addon with the "fooname" slug,
# subsequent ones should be "fooname-1", "fooname-2" ...
a = Webapp.objects.create(name="fooname")
eq_(a.app_slug, "fooname")
b = Webapp(name="fooname")
b.clean_slug()
eq_(b.app_slug, "fooname-1")
def test_clean_slug_with_slug(self):
# Make sure there's at least an addon with the "fooslug" slug,
# subsequent ones should be "fooslug-1", "fooslug-2" ...
a = Webapp.objects.create(name="fooslug")
eq_(a.app_slug, "fooslug")
b = Webapp(name="fooslug")
b.clean_slug()
eq_(b.app_slug, "fooslug-1")
def test_clean_slug_blocked_slug(self):
blocked_slug = 'fooblocked'
BlockedSlug.objects.create(name=blocked_slug)
a = Webapp(app_slug=blocked_slug)
a.clean_slug()
# Blocked slugs (like "activate" or IDs) have a "~" appended to
# avoid clashing with URLs.
eq_(a.app_slug, "%s~" % blocked_slug)
# Now save the instance to the database for future clashes.
a.save()
b = Webapp(app_slug=blocked_slug)
b.clean_slug()
eq_(b.app_slug, "%s~-1" % blocked_slug)
def test_clean_slug_blocked_slug_long_slug(self):
long_slug = "this_is_a_very_long_slug_that_is_longer_than_thirty_chars"
BlockedSlug.objects.create(name=long_slug[:30])
# If there's no clashing slug, just append a "~".
a = Webapp.objects.create(app_slug=long_slug[:30])
eq_(a.app_slug, "%s~" % long_slug[:29])
# If there's a clash, use the standard clash resolution.
a = Webapp.objects.create(app_slug=long_slug[:30])
eq_(a.app_slug, "%s-1" % long_slug[:27])
def test_clean_slug_long_slug(self):
long_slug = "this_is_a_very_long_slug_that_is_longer_than_thirty_chars"
# If there's no clashing slug, don't over-shorten it.
a = Webapp.objects.create(app_slug=long_slug)
eq_(a.app_slug, long_slug[:30])
# Now that there is a clash, test the clash resolution.
b = Webapp(app_slug=long_slug)
b.clean_slug()
eq_(b.app_slug, "%s-1" % long_slug[:27])
def test_clean_slug_always_slugify(self):
illegal_chars = "some spaces and !?@"
# Slugify if there's a slug provided.
a = Webapp(app_slug=illegal_chars)
a.clean_slug()
assert a.app_slug.startswith("some-spaces-and"), a.app_slug
# Also slugify if there's no slug provided.
b = Webapp(name=illegal_chars)
b.clean_slug()
assert b.app_slug.startswith("some-spaces-and"), b.app_slug
def test_clean_slug_worst_case_scenario(self):
long_slug = "this_is_a_very_long_slug_that_is_longer_than_thirty_chars"
# Generate 100 addons with this very long slug. We should encounter the
# worst case scenario where all the available clashes have been
# avoided. Check the comment in addons.models.clean_slug, in the "else"
# part of the "for" loop checking for available slugs not yet assigned.
for i in range(100):
Webapp.objects.create(app_slug=long_slug)
with self.assertRaises(RuntimeError): # Fail on the 100th clash.
Webapp.objects.create(app_slug=long_slug)
class TestPreviewModel(mkt.site.tests.TestCase):
def setUp(self):
app = Webapp.objects.create()
self.preview = Preview.objects.create(addon=app, filetype='image/png',
thumbtype='image/png',
caption='my preview')
def test_as_dict(self):
expect = ['caption', 'full', 'thumbnail']
reality = sorted(Preview.objects.all()[0].as_dict().keys())
eq_(expect, reality)
def test_filename(self):
eq_(self.preview.file_extension, 'png')
self.preview.update(filetype='')
eq_(self.preview.file_extension, 'png')
self.preview.update(filetype='video/webm')
eq_(self.preview.file_extension, 'webm')
def test_filename_in_url(self):
self.preview.update(filetype='video/webm')
assert 'png' in self.preview.thumbnail_path
assert 'webm' in self.preview.image_path
class TestRemoveLocale(mkt.site.tests.TestCase):
def test_remove(self):
app = Webapp.objects.create()
app.name = {'en-US': 'woo', 'el': 'yeah'}
app.description = {'en-US': 'woo', 'el': 'yeah', 'ja': 'ola'}
app.save()
app.remove_locale('el')
qs = (Translation.objects.filter(localized_string__isnull=False)
.values_list('locale', flat=True))
eq_(sorted(qs.filter(id=app.name_id)), ['en-US'])
eq_(sorted(qs.filter(id=app.description_id)), ['en-US', 'ja'])
def test_remove_version_locale(self):
app = app_factory()
version = app.latest_version
version.releasenotes = {'fr': 'oui'}
version.save()
app.remove_locale('fr')
qs = (Translation.objects.filter(localized_string__isnull=False)
.values_list('locale', flat=True))
eq_(sorted(qs), [u'en-us'])
class TestUpdateNames(mkt.site.tests.TestCase):
def setUp(self):
self.addon = Webapp.objects.create()
self.addon.name = self.names = {'en-US': 'woo'}
self.addon.save()
def get_name(self, app, locale='en-US'):
return Translation.objects.get(id=app.name_id, locale=locale)
def check_names(self, names):
"""`names` in {locale: name} format."""
for locale, localized_string in names.iteritems():
eq_(self.get_name(self.addon, locale).localized_string,
localized_string)
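        # Illustrative call (mirrors the removal tests below):
        #   check_names({'en-US': 'woo', 'de': None})
        # asserts 'woo' is stored and that the German translation row holds
        # None after the corresponding update_names() call.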
def test_new_name(self):
names = dict(self.names, **{'de': u'frü'})
self.addon.update_names(names)
self.addon.save()
self.check_names(names)
def test_new_names(self):
names = dict(self.names, **{'de': u'frü', 'es': u'eso'})
self.addon.update_names(names)
self.addon.save()
self.check_names(names)
def test_remove_name_missing(self):
names = dict(self.names, **{'de': u'frü', 'es': u'eso'})
self.addon.update_names(names)
self.addon.save()
self.check_names(names)
# Now update without de to remove it.
del names['de']
self.addon.update_names(names)
self.addon.save()
names['de'] = None
self.check_names(names)
def test_remove_name_with_none(self):
names = dict(self.names, **{'de': u'frü', 'es': u'eso'})
self.addon.update_names(names)
self.addon.save()
self.check_names(names)
# Now update without de to remove it.
names['de'] = None
self.addon.update_names(names)
self.addon.save()
self.check_names(names)
def test_add_and_remove(self):
names = dict(self.names, **{'de': u'frü', 'es': u'eso'})
self.addon.update_names(names)
self.addon.save()
self.check_names(names)
# Now add a new locale and remove an existing one.
names['de'] = None
names['fr'] = u'oui'
self.addon.update_names(names)
self.addon.save()
self.check_names(names)
def test_default_locale_change(self):
names = dict(self.names, **{'de': u'frü', 'es': u'eso'})
self.addon.default_locale = 'de'
self.addon.update_names(names)
self.addon.save()
self.check_names(names)
addon = self.addon.reload()
eq_(addon.default_locale, 'de')
def test_default_locale_change_remove_old(self):
names = dict(self.names, **{'de': u'frü', 'es': u'eso', 'en-US': None})
self.addon.default_locale = 'de'
self.addon.update_names(names)
self.addon.save()
self.check_names(names)
eq_(self.addon.reload().default_locale, 'de')
def test_default_locale_removal_not_deleted(self):
names = {'en-US': None}
self.addon.update_names(names)
self.addon.save()
self.check_names(self.names)
class TestAddonWatchDisabled(mkt.site.tests.TestCase):
fixtures = fixture('webapp_337141')
def setUp(self):
self.app = Webapp.objects.get(pk=337141)
@patch('mkt.webapps.models.File.hide_disabled_file')
@patch('mkt.webapps.models.File.unhide_disabled_file')
def test_no_disabled_change(self, unhide, hide):
self.app.save()
assert not unhide.called
assert not hide.called
@patch('mkt.webapps.models.File.hide_disabled_file')
@patch('mkt.webapps.models.File.unhide_disabled_file')
def test_disable_addon(self, unhide, hide):
self.app.update(disabled_by_user=True)
assert not unhide.called
assert hide.called
@patch('mkt.webapps.models.File.hide_disabled_file')
@patch('mkt.webapps.models.File.unhide_disabled_file')
def test_admin_disable_addon(self, unhide, hide):
self.app.update(status=mkt.STATUS_DISABLED)
assert not unhide.called
assert hide.called
@patch('mkt.webapps.models.File.hide_disabled_file')
@patch('mkt.webapps.models.File.unhide_disabled_file')
def test_enable_addon(self, unhide, hide):
self.app.update(status=mkt.STATUS_DISABLED)
unhide.reset_mock()
hide.reset_mock()
self.app.update(status=mkt.STATUS_PUBLIC)
assert unhide.called
assert not hide.called
class TestAddonUpsell(mkt.site.tests.TestCase):
def setUp(self):
self.one = Webapp.objects.create(name='free')
self.two = Webapp.objects.create(name='premium')
self.upsell = AddonUpsell.objects.create(free=self.one,
premium=self.two)
def test_create_upsell(self):
eq_(self.one.upsell.free, self.one)
eq_(self.one.upsell.premium, self.two)
eq_(self.two.upsell, None)
def test_delete(self):
self.upsell = AddonUpsell.objects.create(free=self.two,
premium=self.one)
# Note: delete ignores if status 0.
self.one.update(status=mkt.STATUS_PUBLIC)
self.one.delete()
eq_(AddonUpsell.objects.count(), 0)
class TestAddonPurchase(mkt.site.tests.TestCase):
fixtures = fixture('user_999')
def setUp(self):
self.user = UserProfile.objects.get(pk=999)
self.addon = Webapp.objects.create(premium_type=mkt.ADDON_PREMIUM,
name='premium')
def test_no_premium(self):
        # If you've purchased something, the fact that it's now free
        # doesn't change the fact that you purchased it.
self.addon.addonpurchase_set.create(user=self.user)
self.addon.update(premium_type=mkt.ADDON_FREE)
assert self.addon.has_purchased(self.user)
def test_has_purchased(self):
self.addon.addonpurchase_set.create(user=self.user)
assert self.addon.has_purchased(self.user)
def test_not_purchased(self):
assert not self.addon.has_purchased(self.user)
def test_anonymous(self):
assert not self.addon.has_purchased(None)
assert not self.addon.has_purchased(AnonymousUser)
def test_is_refunded(self):
self.addon.addonpurchase_set.create(user=self.user,
type=mkt.CONTRIB_REFUND)
assert self.addon.is_refunded(self.user)
def test_is_chargeback(self):
self.addon.addonpurchase_set.create(user=self.user,
type=mkt.CONTRIB_CHARGEBACK)
assert self.addon.is_chargeback(self.user)
def test_purchase_state(self):
purchase = self.addon.addonpurchase_set.create(user=self.user)
for state in [mkt.CONTRIB_PURCHASE, mkt.CONTRIB_REFUND,
mkt.CONTRIB_CHARGEBACK]:
purchase.update(type=state)
eq_(state, self.addon.get_purchase_type(self.user))
class TestWebappLight(mkt.site.tests.TestCase):
"""
Tests that don't require saving a Webapp to the database or want an empty
database with no existing apps.
"""
fixtures = fixture('prices')
def test_is_public(self):
app = Webapp(status=mkt.STATUS_UNLISTED)
assert app.is_public(), 'STATUS_UNLISTED app should be is_public()'
app.status = mkt.STATUS_PUBLIC
assert app.is_public(), 'STATUS_PUBLIC app should be is_public()'
# Any non-public status
app.status = mkt.STATUS_PENDING
assert not app.is_public(), (
'STATUS_PENDING app should not be is_public()')
# Public, disabled.
app.status = mkt.STATUS_PUBLIC
app.disabled_by_user = True
assert not app.is_public(), (
'STATUS_PUBLIC, disabled app should not be is_public()')
def test_app_slug_collision(self):
Webapp(app_slug='slug').save()
w2 = Webapp(app_slug='slug')
w2.save()
eq_(w2.app_slug, 'slug-1')
w3 = Webapp(app_slug='slug')
w3.save()
eq_(w3.app_slug, 'slug-2')
def test_app_slug_blocklist(self):
BlockedSlug.objects.create(name='slug')
w = Webapp(app_slug='slug')
w.save()
eq_(w.app_slug, 'slug~')
def test_geodata_upon_app_creation(self):
app = Webapp.objects.create()
assert app.geodata, (
'Geodata was not created with Webapp.')
def test_get_url_path(self):
webapp = Webapp(app_slug='woo')
eq_(webapp.get_url_path(), '/app/woo/')
def test_get_api_url(self):
webapp = Webapp(app_slug='woo', pk=1)
self.assertApiUrlEqual(webapp.get_api_url(), '/apps/app/woo/')
def test_get_api_url_pk(self):
webapp = Webapp(pk=1)
self.assertApiUrlEqual(webapp.get_api_url(pk=True), '/apps/app/1/')
def test_get_stats_url(self):
webapp = Webapp(app_slug='woo')
eq_(webapp.get_stats_url(), '/statistics/app/woo')
def test_get_comm_thread_url(self):
app = Webapp(app_slug='foo')
eq_(app.get_comm_thread_url(), '/comm/app/foo')
def test_get_origin(self):
url = 'http://www.xx.com:4000/randompath/manifest.webapp'
webapp = Webapp(manifest_url=url)
eq_(webapp.origin, 'http://www.xx.com:4000')
def test_get_packaged_origin(self):
webapp = Webapp(app_domain='app://foo.com', is_packaged=True,
manifest_url='')
eq_(webapp.origin, 'app://foo.com')
def test_punicode_domain(self):
webapp = Webapp(app_domain=u'http://www.allizôm.org')
eq_(webapp.punycode_app_domain, 'http://www.xn--allizm-mxa.org')
def test_cannot_be_purchased(self):
eq_(Webapp(premium_type=True).can_be_purchased(), False)
eq_(Webapp(premium_type=False).can_be_purchased(), False)
def test_can_be_purchased(self):
w = Webapp(status=mkt.STATUS_PUBLIC, premium_type=True)
eq_(w.can_be_purchased(), True)
w = Webapp(status=mkt.STATUS_PUBLIC, premium_type=False)
eq_(w.can_be_purchased(), False)
def test_get_previews(self):
w = Webapp.objects.create()
eq_(w.get_promo(), None)
p = Preview.objects.create(addon=w, position=0)
eq_(list(w.get_previews()), [p])
p.update(position=-1)
eq_(list(w.get_previews()), [])
def test_get_promo(self):
w = Webapp.objects.create()
eq_(w.get_promo(), None)
p = Preview.objects.create(addon=w, position=0)
eq_(w.get_promo(), None)
p.update(position=-1)
eq_(w.get_promo(), p)
def test_mark_done_pending(self):
w = Webapp()
eq_(w.status, mkt.STATUS_NULL)
w.mark_done()
eq_(w.status, mkt.WEBAPPS_UNREVIEWED_STATUS)
@mock.patch('mkt.webapps.models.Webapp.get_manifest_json')
def test_no_icon_in_manifest(self, get_manifest_json):
webapp = Webapp()
get_manifest_json.return_value = {}
eq_(webapp.has_icon_in_manifest(), False)
@mock.patch('mkt.webapps.models.Webapp.get_manifest_json')
def test_has_icon_in_manifest(self, get_manifest_json):
webapp = Webapp()
get_manifest_json.return_value = {'icons': {}}
eq_(webapp.has_icon_in_manifest(), True)
def test_no_version(self):
webapp = Webapp()
eq_(webapp.get_manifest_json(), {})
eq_(webapp.current_version, None)
def test_has_premium(self):
webapp = Webapp(premium_type=mkt.ADDON_PREMIUM)
webapp._premium = mock.Mock()
webapp._premium.price = 1
eq_(webapp.has_premium(), True)
webapp._premium.price = 0
eq_(webapp.has_premium(), True)
def test_get_price_no_premium(self):
webapp = Webapp(premium_type=mkt.ADDON_PREMIUM)
webapp.save()
# Needed because get_price accesses excluded, which triggers geodata
# which triggers a save to the db.
eq_(webapp.get_price(), None)
eq_(webapp.get_price_locale(), None)
def test_has_no_premium(self):
webapp = Webapp(premium_type=mkt.ADDON_PREMIUM)
webapp._premium = None
eq_(webapp.has_premium(), False)
def test_not_premium(self):
eq_(Webapp().has_premium(), False)
def test_get_region_ids_with_exclusions(self):
w1 = Webapp.objects.create()
w2 = Webapp.objects.create()
AddonExcludedRegion.objects.create(addon=w1, region=mkt.regions.BRA.id)
AddonExcludedRegion.objects.create(addon=w1, region=mkt.regions.USA.id)
AddonExcludedRegion.objects.create(addon=w2, region=mkt.regions.GBR.id)
w1_regions = list(mkt.regions.REGION_IDS)
w1_regions.remove(mkt.regions.BRA.id)
w1_regions.remove(mkt.regions.USA.id)
w2_regions = list(mkt.regions.REGION_IDS)
w2_regions.remove(mkt.regions.GBR.id)
eq_(sorted(Webapp.objects.get(id=w1.id).get_region_ids()),
sorted(w1_regions))
eq_(sorted(Webapp.objects.get(id=w2.id).get_region_ids()),
sorted(w2_regions))
def test_get_regions_with_exclusions(self):
w1 = Webapp.objects.create()
w2 = Webapp.objects.create()
AddonExcludedRegion.objects.create(addon=w1, region=mkt.regions.BRA.id)
AddonExcludedRegion.objects.create(addon=w1, region=mkt.regions.USA.id)
AddonExcludedRegion.objects.create(addon=w2, region=mkt.regions.GBR.id)
all_regions = mkt.regions.REGIONS_CHOICES_ID_DICT.values()
w1_regions = list(all_regions)
w1_regions.remove(mkt.regions.BRA)
w1_regions.remove(mkt.regions.USA)
w2_regions = list(all_regions)
w2_regions.remove(mkt.regions.GBR)
eq_(sorted(Webapp.objects.get(id=w1.id).get_regions()),
sorted(w1_regions))
eq_(sorted(Webapp.objects.get(id=w2.id).get_regions()),
sorted(w2_regions))
def test_assign_uuid(self):
app = Webapp()
eq_(app.guid, None)
app.save()
assert app.guid is not None, (
'Expected app to have a UUID assigned to guid')
@mock.patch.object(uuid, 'uuid4')
def test_assign_uuid_max_tries(self, mock_uuid4):
guid = 'abcdef12-abcd-abcd-abcd-abcdef123456'
mock_uuid4.return_value = uuid.UUID(guid)
        # Create another webapp and set the guid.
Webapp.objects.create(guid=guid)
# Now `assign_uuid()` should fail.
app = Webapp()
with self.assertRaises(ValueError):
app.save()
def test_is_premium_type_upgrade_check(self):
app = Webapp()
ALL = set(mkt.ADDON_FREES + mkt.ADDON_PREMIUMS)
free_upgrade = ALL - set([mkt.ADDON_FREE])
free_inapp_upgrade = ALL - set([mkt.ADDON_FREE, mkt.ADDON_FREE_INAPP])
# Checking ADDON_FREE changes.
app.premium_type = mkt.ADDON_FREE
for pt in ALL:
eq_(app.is_premium_type_upgrade(pt), pt in free_upgrade)
# Checking ADDON_FREE_INAPP changes.
app.premium_type = mkt.ADDON_FREE_INAPP
for pt in ALL:
eq_(app.is_premium_type_upgrade(pt), pt in free_inapp_upgrade)
# All else is false.
for pt_old in ALL - set([mkt.ADDON_FREE, mkt.ADDON_FREE_INAPP]):
app.premium_type = pt_old
for pt_new in ALL:
eq_(app.is_premium_type_upgrade(pt_new), False)
@raises(ValueError)
def test_parse_domain(self):
Webapp(is_packaged=True).parsed_app_domain
def test_app_type_hosted(self):
eq_(Webapp().app_type, 'hosted')
def test_app_type_packaged(self):
eq_(Webapp(is_packaged=True).app_type, 'packaged')
def test_nomination_new(self):
app = app_factory()
app.update(status=mkt.STATUS_NULL)
app.versions.latest().update(nomination=None)
app.update(status=mkt.STATUS_PENDING)
assert app.versions.latest().nomination
def test_nomination_rejected(self):
app = app_factory()
app.update(status=mkt.STATUS_REJECTED)
app.versions.latest().update(nomination=self.days_ago(1))
app.update(status=mkt.STATUS_PENDING)
self.assertCloseToNow(app.versions.latest().nomination)
def test_nomination_pkg_pending_new_version(self):
# New versions while pending inherit version nomination.
app = app_factory()
app.update(status=mkt.STATUS_PENDING, is_packaged=True)
old_ver = app.versions.latest()
old_ver.update(nomination=self.days_ago(1))
old_ver.all_files[0].update(status=mkt.STATUS_PENDING)
v = Version.objects.create(addon=app, version='1.9')
eq_(v.nomination, old_ver.nomination)
def test_nomination_pkg_public_new_version(self):
# New versions while public get a new version nomination.
app = app_factory()
app.update(is_packaged=True)
old_ver = app.versions.latest()
old_ver.update(nomination=self.days_ago(1))
v = Version.objects.create(addon=app, version='1.9')
self.assertCloseToNow(v.nomination)
def test_nomination_approved(self):
# New versions while public waiting get a new version nomination.
app = app_factory()
app.update(is_packaged=True, status=mkt.STATUS_APPROVED)
old_ver = app.versions.latest()
old_ver.update(nomination=self.days_ago(1))
old_ver.all_files[0].update(status=mkt.STATUS_APPROVED)
v = Version.objects.create(addon=app, version='1.9')
self.assertCloseToNow(v.nomination)
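        # Informal summary of the nomination behaviour covered by the three
        # packaged-app tests above:
        #   app pending  -> a new version inherits the old nomination date
        #   app public   -> a new version gets a fresh nomination date
        #   app approved -> same as public, a fresh nomination date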
def test_excluded_in_iarc(self):
app = app_factory()
geodata = app._geodata
geodata.update(region_br_iarc_exclude=True,
region_de_iarc_exclude=True)
self.assertSetEqual(get_excluded_in(mkt.regions.BRA.id), [app.id])
self.assertSetEqual(get_excluded_in(mkt.regions.DEU.id), [app.id])
def test_excluded_in_iarc_de(self):
app = app_factory()
geodata = app._geodata
geodata.update(region_br_iarc_exclude=False,
region_de_iarc_exclude=True)
self.assertSetEqual(get_excluded_in(mkt.regions.BRA.id), [])
self.assertSetEqual(get_excluded_in(mkt.regions.DEU.id), [app.id])
def test_excluded_in_usk_exclude(self):
app = app_factory()
geodata = app._geodata
geodata.update(region_de_usk_exclude=True)
self.assertSetEqual(get_excluded_in(mkt.regions.BRA.id), [])
self.assertSetEqual(get_excluded_in(mkt.regions.DEU.id), [app.id])
@mock.patch('mkt.webapps.models.Webapp.completion_errors')
def test_completion_errors(self, complete_mock):
app = app_factory()
complete_mock.return_value = {
'details': ['1', '2'],
'payments': 'pc load letter'
}
eq_(app.completion_error_msgs(), ['1', '2', 'pc load letter'])
assert not app.is_fully_complete()
complete_mock.return_value = {}
eq_(app.completion_error_msgs(), [])
assert app.is_fully_complete()
@mock.patch('mkt.webapps.models.Webapp.payments_complete')
@mock.patch('mkt.webapps.models.Webapp.is_rated')
@mock.patch('mkt.webapps.models.Webapp.details_complete')
def test_next_step(self, detail_step, rating_step, pay_step):
for step in (detail_step, rating_step, pay_step):
step.return_value = False
app = app_factory(status=mkt.STATUS_NULL)
self.make_premium(app)
eq_(app.next_step()['url'], app.get_dev_url())
detail_step.return_value = True
eq_(app.next_step()['url'], app.get_dev_url('ratings'))
rating_step.return_value = True
eq_(app.next_step()['url'], app.get_dev_url('payments'))
pay_step.return_value = True
assert not app.next_step()
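        # The submission flow implied by the mocks above, in order:
        #   details incomplete -> next_step() points at the dev edit page
        #   details done       -> ratings page
        #   ratings done       -> payments page
        #   payments done      -> next_step() returns nothing further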
def test_meta_translated_fields(self):
"""Test that we don't load translations for all the translated fields
that live on Addon but we don't need in Webapp."""
useless_fields = ()
useful_fields = ('homepage', 'privacy_policy', 'name', 'description',
'support_email', 'support_url')
self.assertSetEqual(
Webapp._meta.translated_fields,
[Webapp._meta.get_field(f) for f in
useless_fields + useful_fields])
self.assertSetEqual(
Webapp._meta.translated_fields,
[Webapp._meta.get_field(f) for f in useful_fields])
# Build fake data with all fields, and use it to create an app.
data = dict(zip(useless_fields + useful_fields,
useless_fields + useful_fields))
app = app_factory(**data)
for field_name in useless_fields + useful_fields:
field_id_name = app._meta.get_field(field_name).attname
ok_(getattr(app, field_name, None))
ok_(getattr(app, field_id_name, None))
# Reload the app, the useless fields should all have ids but the value
# shouldn't have been loaded.
app = Webapp.objects.get(pk=app.pk)
for field_name in useless_fields:
field_id_name = app._meta.get_field(field_name).attname
ok_(getattr(app, field_name, None) is None)
ok_(getattr(app, field_id_name, None))
# The useful fields should all be ok.
for field_name in useful_fields:
field_id_name = app._meta.get_field(field_name).attname
ok_(getattr(app, field_name, None))
ok_(getattr(app, field_id_name, None))
def test_version_and_file_transformer_with_empty_query(self):
# When we process a query, don't return a list just because
# the query is empty
empty_query = Webapp.objects.filter(app_slug='mahna__mahna')
empty_result = Webapp.version_and_file_transformer(empty_query)
self.assertEqual(empty_result.count(), 0)
class TestWebappContentRatings(TestCase):
def test_rated(self):
assert app_factory(rated=True).is_rated()
assert not app_factory().is_rated()
@mock.patch('mkt.webapps.models.Webapp.details_complete')
@mock.patch('mkt.webapps.models.Webapp.payments_complete')
def test_set_content_ratings(self, pay_mock, detail_mock):
detail_mock.return_value = True
pay_mock.return_value = True
rb = mkt.ratingsbodies
app = app_factory(status=mkt.STATUS_NULL)
app.set_content_ratings({})
assert not app.is_rated()
eq_(app.status, mkt.STATUS_NULL)
# Create.
app.set_content_ratings({
rb.CLASSIND: rb.CLASSIND_L,
rb.PEGI: rb.PEGI_3,
})
eq_(ContentRating.objects.count(), 2)
for expected in [(rb.CLASSIND.id, rb.CLASSIND_L.id),
(rb.PEGI.id, rb.PEGI_3.id)]:
assert ContentRating.objects.filter(
addon=app, ratings_body=expected[0],
rating=expected[1]).exists()
eq_(app.reload().status, mkt.STATUS_PENDING)
# Update.
app.set_content_ratings({
rb.CLASSIND: rb.CLASSIND_10,
rb.PEGI: rb.PEGI_3,
rb.GENERIC: rb.GENERIC_18,
})
for expected in [(rb.CLASSIND.id, rb.CLASSIND_10.id),
(rb.PEGI.id, rb.PEGI_3.id),
(rb.GENERIC.id, rb.GENERIC_18.id)]:
assert ContentRating.objects.filter(
addon=app, ratings_body=expected[0],
rating=expected[1]).exists()
eq_(app.reload().status, mkt.STATUS_PENDING)
def test_app_delete_clears_iarc_data(self):
app = app_factory(rated=True)
# Ensure we have some data to start with.
ok_(IARCInfo.objects.filter(addon=app).exists())
ok_(ContentRating.objects.filter(addon=app).exists())
ok_(RatingDescriptors.objects.filter(addon=app).exists())
ok_(RatingInteractives.objects.filter(addon=app).exists())
# Delete.
app.delete()
msg = 'Related IARC data should be deleted.'
ok_(not IARCInfo.objects.filter(addon=app).exists(), msg)
ok_(not ContentRating.objects.filter(addon=app).exists(), msg)
ok_(not RatingDescriptors.objects.filter(addon=app).exists(), msg)
ok_(not RatingInteractives.objects.filter(addon=app).exists(), msg)
def test_set_content_ratings_usk_refused(self):
app = app_factory()
app.set_content_ratings({
mkt.ratingsbodies.USK: mkt.ratingsbodies.USK_REJECTED
})
ok_(Geodata.objects.get(addon=app).region_de_usk_exclude)
app.set_content_ratings({
mkt.ratingsbodies.USK: mkt.ratingsbodies.USK_12
})
ok_(not Geodata.objects.get(addon=app).region_de_usk_exclude)
def test_set_content_ratings_iarc_games_unexclude(self):
app = app_factory()
app._geodata.update(region_br_iarc_exclude=True,
region_de_iarc_exclude=True)
app.set_content_ratings({
mkt.ratingsbodies.USK: mkt.ratingsbodies.USK_12
})
geodata = Geodata.objects.get(addon=app)
ok_(not geodata.region_br_iarc_exclude)
ok_(not geodata.region_de_iarc_exclude)
def test_set_content_ratings_purge_unexclude(self):
app = app_factory()
app.update(status=mkt.STATUS_DISABLED, iarc_purged=True)
app.set_content_ratings({
mkt.ratingsbodies.USK: mkt.ratingsbodies.USK_12
})
ok_(not app.reload().iarc_purged)
eq_(app.status, mkt.STATUS_PUBLIC)
def test_set_descriptors(self):
app = app_factory()
eq_(RatingDescriptors.objects.count(), 0)
app.set_descriptors([])
descriptors = RatingDescriptors.objects.get(addon=app)
assert not descriptors.has_classind_drugs
assert not descriptors.has_esrb_blood # Blood-deuh!
# Create.
app.set_descriptors([
'has_classind_drugs', 'has_pegi_scary', 'has_generic_drugs'
])
descriptors = RatingDescriptors.objects.get(addon=app)
assert descriptors.has_classind_drugs
assert descriptors.has_pegi_scary
assert descriptors.has_generic_drugs
assert not descriptors.has_esrb_blood
# Update.
app.set_descriptors([
'has_esrb_blood', 'has_classind_drugs'
])
descriptors = RatingDescriptors.objects.get(addon=app)
assert descriptors.has_esrb_blood
assert descriptors.has_classind_drugs
assert not descriptors.has_pegi_scary
assert not descriptors.has_generic_drugs
def test_set_interactives(self):
app = app_factory()
app.set_interactives([])
eq_(RatingInteractives.objects.count(), 1)
app_interactives = RatingInteractives.objects.get(addon=app)
assert not app_interactives.has_shares_info
assert not app_interactives.has_digital_purchases
# Create.
app.set_interactives([
'has_shares_info', 'has_digital_purchases', 'has_UWOTM8'
])
eq_(RatingInteractives.objects.count(), 1)
app_interactives = RatingInteractives.objects.get(addon=app)
assert app_interactives.has_shares_info
assert app_interactives.has_digital_purchases
assert not app_interactives.has_users_interact
# Update.
app.set_interactives([
'has_digital_purchases', 'has_shares_ur_mum'
])
eq_(RatingInteractives.objects.count(), 1)
app_interactives = RatingInteractives.objects.get(addon=app)
assert not app_interactives.has_shares_info
assert app_interactives.has_digital_purchases
@mock.patch('lib.iarc.client.MockClient.call')
@mock.patch('mkt.webapps.models.render_xml')
def test_set_iarc_storefront_data(self, render_mock, storefront_mock):
# Set up ratings/descriptors/interactives.
app = app_factory(name='LOL', app_slug='ha')
app.current_version.reviewed = datetime(2013, 1, 1, 12, 34, 56)
app.current_version._developer_name = 'Lex Luthor'
app.set_iarc_info(submission_id=1234, security_code='sektor')
app.set_descriptors(['has_esrb_blood', 'has_pegi_scary'])
app.set_interactives(['has_users_interact', 'has_shares_info'])
app.content_ratings.create(
ratings_body=mkt.ratingsbodies.ESRB.id,
rating=mkt.ratingsbodies.ESRB_A.id)
app.content_ratings.create(
ratings_body=mkt.ratingsbodies.PEGI.id,
rating=mkt.ratingsbodies.PEGI_3.id)
# Check the client was called.
app.set_iarc_storefront_data()
assert storefront_mock.called
eq_(render_mock.call_count, 2)
eq_(render_mock.call_args_list[0][0][0], 'set_storefront_data.xml')
# Check arguments to the XML template are all correct.
data = render_mock.call_args_list[0][0][1]
eq_(type(data['title']), unicode)
eq_(data['app_url'], app.get_url_path())
eq_(data['submission_id'], 1234)
eq_(data['security_code'], 'sektor')
eq_(data['rating_system'], 'ESRB')
eq_(data['release_date'], app.current_version.reviewed)
eq_(data['title'], 'LOL')
eq_(data['company'], 'Lex Luthor')
eq_(data['rating'], 'Adults Only')
eq_(data['descriptors'], 'Blood')
self.assertSetEqual(data['interactive_elements'].split(', '),
['Shares Info', 'Users Interact'])
data = render_mock.call_args_list[1][0][1]
eq_(type(data['title']), unicode)
eq_(data['submission_id'], 1234)
eq_(data['security_code'], 'sektor')
eq_(data['rating_system'], 'PEGI')
eq_(data['release_date'], app.current_version.reviewed)
eq_(data['title'], 'LOL')
eq_(data['company'], 'Lex Luthor')
eq_(data['rating'], '3+')
eq_(data['descriptors'], 'Fear')
self.assertSetEqual(data['interactive_elements'].split(', '),
['Shares Info', 'Users Interact'])
@mock.patch('lib.iarc.client.MockClient.call')
def test_set_iarc_storefront_data_not_rated_by_iarc(self, storefront_mock):
app_factory().set_iarc_storefront_data()
assert not storefront_mock.called
@mock.patch('mkt.webapps.models.Webapp.current_version', new=None)
@mock.patch('lib.iarc.client.MockClient.call')
def test_set_iarc_storefront_data_no_version(self, storefront_mock):
app = app_factory(rated=True, status=mkt.STATUS_PUBLIC)
ok_(not app.current_version)
app.set_iarc_storefront_data()
assert storefront_mock.called
@mock.patch('lib.iarc.client.MockClient.call')
def test_set_iarc_storefront_data_invalid_status(self, storefront_mock):
app = app_factory()
for status in (mkt.STATUS_NULL, mkt.STATUS_PENDING):
app.update(status=status)
app.set_iarc_storefront_data()
assert not storefront_mock.called
@mock.patch('mkt.webapps.models.render_xml')
@mock.patch('lib.iarc.client.MockClient.call')
def test_set_iarc_storefront_data_disable(self, storefront_mock,
render_mock):
app = app_factory(name='LOL', rated=True)
app.current_version.update(_developer_name='Lex Luthor')
app.set_iarc_info(123, 'abc')
app.set_iarc_storefront_data(disable=True)
data = render_mock.call_args_list[0][0][1]
eq_(data['submission_id'], 123)
eq_(data['security_code'], 'abc')
eq_(data['title'], 'LOL')
eq_(data['release_date'], '')
# Also test that a deleted app has the correct release_date.
app.delete()
app.set_iarc_storefront_data()
data = render_mock.call_args_list[0][0][1]
eq_(data['submission_id'], 123)
eq_(data['security_code'], 'abc')
eq_(data['title'], 'LOL')
eq_(data['release_date'], '')
@override_settings(SECRET_KEY='test')
def test_iarc_token(self):
app = Webapp()
app.id = 1
eq_(app.iarc_token(),
hashlib.sha512(settings.SECRET_KEY + str(app.id)).hexdigest())
@mock.patch('mkt.webapps.models.Webapp.set_iarc_storefront_data')
def test_delete_with_iarc(self, storefront_mock):
app = app_factory(rated=True)
app.delete()
eq_(app.status, mkt.STATUS_DELETED)
assert storefront_mock.called
@mock.patch('mkt.webapps.models.Webapp.details_complete')
@mock.patch('mkt.webapps.models.Webapp.payments_complete')
def test_completion_errors_ignore_ratings(self, mock1, mock2):
app = app_factory()
for mock_ in (mock1, mock2):
mock_.return_value = True
assert app.completion_errors()
assert not app.is_fully_complete()
assert 'content_ratings' not in (
app.completion_errors(ignore_ratings=True))
assert app.is_fully_complete(ignore_ratings=True)
class DeletedAppTests(TestCase):
def test_soft_deleted_no_current_version(self):
webapp = app_factory()
webapp._current_version = None
webapp.save()
webapp.delete()
eq_(webapp.current_version, None)
def test_soft_deleted_no_latest_version(self):
webapp = app_factory()
webapp._latest_version = None
webapp.save()
webapp.delete()
eq_(webapp.latest_version, None)
class TestExclusions(TestCase):
fixtures = fixture('prices')
def setUp(self):
self.app = Webapp.objects.create(premium_type=mkt.ADDON_PREMIUM)
self.app.addonexcludedregion.create(region=mkt.regions.USA.id)
self.geodata = self.app._geodata
def make_tier(self):
self.price = Price.objects.get(pk=1)
AddonPremium.objects.create(addon=self.app, price=self.price)
self.row = PriceCurrency.objects.create(
currency='USD',
dev=True,
paid=True,
price=Decimal('0.99'),
provider=ALL_PROVIDERS[settings.DEFAULT_PAYMENT_PROVIDER].provider,
region=RESTOFWORLD.id,
tier=self.price
)
def test_not_premium(self):
ok_(mkt.regions.USA.id in self.app.get_excluded_region_ids())
def test_not_paid(self):
PriceCurrency.objects.update(paid=False)
# The US is excluded because there are no valid prices.
ok_(mkt.regions.USA.id in self.app.get_excluded_region_ids())
def test_premium(self):
self.make_tier()
ok_(mkt.regions.USA.id in self.app.get_excluded_region_ids())
def test_premium_not_remove_tier(self):
self.make_tier()
(self.price.pricecurrency_set
.filter(region=mkt.regions.POL.id).update(paid=True))
# Poland will not be excluded because we haven't excluded the rest
# of the world.
ok_(mkt.regions.POL.id not in self.app.get_excluded_region_ids())
def test_premium_remove_tier(self):
self.make_tier()
self.app.addonexcludedregion.create(region=mkt.regions.RESTOFWORLD.id)
# If we exclude the rest of the world, then we'll exclude Nicaragua
# which has no price currency.
ok_(mkt.regions.NIC.id in self.app.get_excluded_region_ids())
def test_not_paid_worldwide(self):
self.make_tier()
self.row.update(paid=False)
        # Rest of world has been set to not paid, meaning it's not
        # available right now, so we should exclude Nicaragua.
ok_(mkt.regions.NIC.id in self.app.get_excluded_region_ids())
def test_usk_rating_refused(self):
self.geodata.update(region_de_usk_exclude=True)
ok_(mkt.regions.DEU.id in self.app.get_excluded_region_ids())
def test_game_iarc(self):
self.geodata.update(region_de_iarc_exclude=True,
region_br_iarc_exclude=True)
excluded = self.app.get_excluded_region_ids()
ok_(mkt.regions.BRA.id in excluded)
ok_(mkt.regions.DEU.id in excluded)
class TestPackagedAppManifestUpdates(mkt.site.tests.TestCase):
# Note: More extensive tests for `.update_names` are above.
def setUp(self):
self.webapp = app_factory(is_packaged=True,
default_locale='en-US')
self.webapp.name = {'en-US': 'Packaged App'}
self.webapp.save()
@mock.patch('mkt.webapps.models.Webapp.get_manifest_json')
def test_package_manifest_default_name_change(self, get_manifest_json):
get_manifest_json.return_value = {'name': 'Yo'}
self.trans_eq(self.webapp.name, 'en-US', 'Packaged App')
self.webapp.update_name_from_package_manifest()
self.webapp = Webapp.objects.get(pk=self.webapp.pk)
self.trans_eq(self.webapp.name, 'en-US', 'Yo')
@mock.patch('mkt.webapps.models.Webapp.get_manifest_json')
def test_package_manifest_default_locale_change(self, get_manifest_json):
get_manifest_json.return_value = {'name': 'Yo', 'default_locale': 'fr'}
eq_(self.webapp.default_locale, 'en-US')
self.webapp.update_name_from_package_manifest()
eq_(self.webapp.default_locale, 'fr')
self.trans_eq(self.webapp.name, 'en-US', None)
self.trans_eq(self.webapp.name, 'fr', 'Yo')
@mock.patch('mkt.webapps.models.Webapp.get_manifest_json')
def test_package_manifest_locales_change(self, get_manifest_json):
get_manifest_json.return_value = {'name': 'Yo',
'locales': {'es': {'name': 'es'},
'de': {'name': 'de'}}}
self.webapp.update_supported_locales()
self.webapp.reload()
eq_(self.webapp.current_version.supported_locales, 'de,es')
@mock.patch('mkt.webapps.models.Webapp.get_manifest_json')
def test_package_manifest_locales_change_pending(self, get_manifest_json):
"""Ensure we still work for pending apps."""
get_manifest_json.return_value = {'name': 'Yo',
'locales': {'es': {'name': 'es'},
'de': {'name': 'de'}}}
self.webapp.update(status=mkt.STATUS_PENDING)
self.webapp.update_supported_locales(latest=True)
self.webapp.reload()
eq_(self.webapp.latest_version.supported_locales, 'de,es')
def test_update_name_from_package_manifest_version(self):
evil_manifest = {
'name': u'Evil App Name'
}
good_manifest = {
'name': u'Good App Name',
}
latest_version = version_factory(
addon=self.webapp, version='2.3',
file_kw=dict(status=mkt.STATUS_DISABLED))
current_version = self.webapp.current_version
AppManifest.objects.create(version=current_version,
manifest=json.dumps(good_manifest))
AppManifest.objects.create(version=latest_version,
manifest=json.dumps(evil_manifest))
self.webapp.update_name_from_package_manifest()
eq_(self.webapp.name, u'Good App Name')
class TestWebappVersion(mkt.site.tests.TestCase):
def test_no_version(self):
eq_(Webapp().get_latest_file(), None)
def test_no_file(self):
webapp = Webapp.objects.create(manifest_url='http://foo.com')
webapp._current_version = Version.objects.create(addon=webapp)
eq_(webapp.get_latest_file(), None)
def test_right_file(self):
webapp = Webapp.objects.create(manifest_url='http://foo.com')
version = Version.objects.create(addon=webapp)
old_file = File.objects.create(version=version)
old_file.update(created=datetime.now() - timedelta(days=1))
new_file = File.objects.create(version=version)
webapp._current_version = version
eq_(webapp.get_latest_file().pk, new_file.pk)
class TestWebappManager(TestCase):
def test_by_identifier(self):
w = Webapp.objects.create(app_slug='foo')
eq_(Webapp.objects.by_identifier(w.id), w)
eq_(Webapp.objects.by_identifier(str(w.id)), w)
eq_(Webapp.objects.by_identifier(w.app_slug), w)
with self.assertRaises(Webapp.DoesNotExist):
Webapp.objects.by_identifier('fake')
def test_rated(self):
rated = app_factory(rated=True)
app_factory()
eq_(Webapp.objects.count(), 2)
eq_(list(Webapp.objects.rated()), [rated])
class TestManifest(BaseWebAppTest):
def test_get_manifest_json(self):
webapp = self.post_addon()
assert webapp.latest_version
assert webapp.latest_version.has_files
with open(self.manifest, 'r') as mf:
manifest_json = json.load(mf)
eq_(webapp.get_manifest_json(webapp.latest_version.all_files[0]),
manifest_json)
class TestPackagedModel(mkt.site.tests.TestCase):
@override_settings(SITE_URL='http://hy.fr')
def test_get_package_path(self):
app = app_factory(name=u'Mozillaball ょ', app_slug='test',
is_packaged=False, version_kw={'version': '1.0',
'created': None})
app = app.reload()
f = app.versions.latest().files.latest()
# There should not be a `package_path` for a hosted app.
eq_(app.get_package_path(), None)
# There should be a `package_path` for a packaged app.
app.update(is_packaged=True)
eq_(app.get_package_path(),
'http://hy.fr/downloads/file/%s/%s' % (f.id, f.filename))
# Delete one of the files and ensure that `package_path` is gone.
f.delete()
eq_(app.reload().get_package_path(), None)
@override_settings(SITE_URL='http://hy.fr')
@mock.patch('lib.crypto.packaged.os.unlink', new=mock.Mock)
def test_create_blocklisted_version(self):
app = app_factory(name=u'Mozillaball ょ', app_slug='test',
is_packaged=True, version_kw={'version': '1.0',
'created': None})
app.create_blocklisted_version()
app = app.reload()
v = app.versions.latest()
f = v.files.latest()
eq_(app.status, mkt.STATUS_BLOCKED)
eq_(app.versions.count(), 2)
eq_(v.version, 'blocklisted')
eq_(app._current_version, v)
assert 'blocklisted' in f.filename
eq_(f.status, mkt.STATUS_BLOCKED)
# Check manifest.
url = app.get_manifest_url()
res = self.client.get(url)
eq_(res['Content-type'], MANIFEST_CONTENT_TYPE)
assert 'etag' in res._headers
data = json.loads(res.content)
eq_(data['name'], 'Blocked by Mozilla')
eq_(data['version'], 'blocklisted')
eq_(data['package_path'], 'http://hy.fr/downloads/file/%s/%s' % (
f.id, f.filename))
class TestPackagedManifest(BasePackagedAppTest):
def _get_manifest_json(self):
zf = zipfile.ZipFile(self.package)
data = zf.open('manifest.webapp').read()
zf.close()
return json.loads(data)
def test_get_manifest_json(self):
webapp = self.post_addon()
webapp.update(status=mkt.STATUS_PUBLIC)
file_ = webapp.latest_version.all_files[0]
file_.update(status=mkt.STATUS_PUBLIC)
assert webapp.current_version
assert webapp.current_version.has_files
# Test without file argument.
mf = self._get_manifest_json()
eq_(webapp.get_manifest_json(), mf)
# Test with file argument.
mf = self._get_manifest_json()
eq_(webapp.get_manifest_json(file_), mf)
def test_get_manifest_json_multiple_versions(self):
"""Test `get_manifest_json` gets the right version."""
webapp = self.post_addon()
webapp.update(status=mkt.STATUS_PUBLIC)
latest_version = webapp.latest_version
latest_version.files.update(status=mkt.STATUS_PUBLIC)
version = version_factory(addon=webapp, version='0.5',
created=self.days_ago(1),
file_kw={'status': mkt.STATUS_PENDING})
version.files.update(created=self.days_ago(1))
webapp = Webapp.objects.get(pk=webapp.pk)
eq_(webapp.current_version, latest_version)
assert webapp.current_version.has_files
mf = self._get_manifest_json()
eq_(webapp.get_manifest_json(), mf)
def test_get_manifest_json_multiple_version_disabled(self):
# Post an app, then emulate a reviewer reject and add a new, pending
# version.
webapp = self.post_addon()
webapp.latest_version.files.update(status=mkt.STATUS_DISABLED)
webapp.latest_version.update(created=self.days_ago(1))
webapp.update(status=mkt.STATUS_REJECTED, _current_version=None)
version = version_factory(addon=webapp, version='2.0',
file_kw={'status': mkt.STATUS_PENDING})
mf = self._get_manifest_json()
AppManifest.objects.create(version=version,
manifest=json.dumps(mf))
webapp.update_version()
webapp = webapp.reload()
eq_(webapp.latest_version, version)
self.file = version.all_files[0]
self.setup_files()
eq_(webapp.get_manifest_json(self.file), mf)
def test_cached_manifest_is_cached(self):
webapp = self.post_addon()
# First call does queries and caches results.
webapp.get_cached_manifest()
# Subsequent calls are cached.
with self.assertNumQueries(0):
webapp.get_cached_manifest()
@mock.patch('mkt.webapps.utils.cache')
def test_cached_manifest_no_version_not_cached(self, cache_mock):
webapp = self.post_addon(
data={'packaged': True, 'free_platforms': 'free-firefoxos'})
webapp._current_version = None
eq_(webapp.get_cached_manifest(force=True), '{}')
assert not cache_mock.called
def test_cached_manifest_contents(self):
webapp = self.post_addon(
data={'packaged': True, 'free_platforms': 'free-firefoxos'})
webapp.update(status=mkt.STATUS_PUBLIC)
version = webapp.latest_version
self.file = version.all_files[0]
self.file.update(status=mkt.STATUS_PUBLIC)
self.setup_files()
manifest = self._get_manifest_json()
data = json.loads(webapp.get_cached_manifest(self.file)[0])
eq_(data['name'], webapp.name)
eq_(data['version'], webapp.current_version.version)
eq_(data['size'], self.file.size)
eq_(data['release_notes'], version.releasenotes)
eq_(data['package_path'], absolutify(
os.path.join(reverse('downloads.file', args=[self.file.id]),
self.file.filename)))
eq_(data['developer'], manifest['developer'])
eq_(data['icons'], manifest['icons'])
eq_(data['locales'], manifest['locales'])
def _createPackage(self):
webapp = self.post_addon(
data={'packaged': True, 'free_platforms': 'free-firefoxos'})
webapp.update(status=mkt.STATUS_PUBLIC)
version = webapp.latest_version
file = version.all_files[0]
file.update(status=mkt.STATUS_PUBLIC)
return file
@override_settings(
DEFAULT_FILE_STORAGE='mkt.site.storage_utils.LocalFileStorage')
def test_package_path_local(self):
file = self._createPackage()
res = self.client.get(file.get_url_path('manifest'))
eq_(res.status_code, 200)
eq_(res['content-type'], 'application/zip')
@override_settings(
DEFAULT_FILE_STORAGE='mkt.site.storage_utils.S3BotoPrivateStorage')
def test_package_path_storage(self):
file = self._createPackage()
file.version.addon.get_cached_manifest(force=True)
res = self.client.get(file.get_url_path('manifest'))
self.assert3xx(res, public_storage.url(file.signed_file_path))
def test_packaged_with_BOM(self):
        # Exercise separate code paths for loading the packaged app manifest.
self.file.filename = 'mozBOM.zip'
self.setup_files('mozBOM.zip')
assert WebAppParser().parse(private_storage.open(self.file.file_path))
self.assertTrue(self.app.has_icon_in_manifest())
class TestDomainFromURL(unittest.TestCase):
def test_simple(self):
eq_(Webapp.domain_from_url('http://mozilla.com/'),
'http://mozilla.com')
def test_long_path(self):
eq_(Webapp.domain_from_url('http://mozilla.com/super/rad.webapp'),
'http://mozilla.com')
def test_no_normalize_www(self):
eq_(Webapp.domain_from_url('http://www.mozilla.com/super/rad.webapp'),
'http://www.mozilla.com')
def test_with_port(self):
eq_(Webapp.domain_from_url('http://mozilla.com:9000/'),
'http://mozilla.com:9000')
def test_subdomains(self):
eq_(Webapp.domain_from_url('http://apps.mozilla.com/'),
'http://apps.mozilla.com')
def test_https(self):
eq_(Webapp.domain_from_url('https://mozilla.com/'),
'https://mozilla.com')
def test_normalize_case(self):
eq_(Webapp.domain_from_url('httP://mOzIllA.com/'),
'http://mozilla.com')
@raises(ValueError)
def test_none(self):
Webapp.domain_from_url(None)
@raises(ValueError)
def test_empty(self):
Webapp.domain_from_url('')
def test_empty_or_none(self):
eq_(Webapp.domain_from_url(None, allow_none=True), None)
class TestTransformer(mkt.site.tests.TestCase):
fixtures = fixture('webapp_337141')
def setUp(self):
self.device = DEVICE_TYPES.keys()[0]
def test_versions(self):
webapps = list(Webapp.objects.all())
with self.assertNumQueries(0):
for webapp in webapps:
ok_(isinstance(webapp.latest_version, Version))
ok_(isinstance(webapp.current_version, Version))
def test_previews(self):
p1 = Preview.objects.create(filetype='image/png', addon_id=337141,
position=0)
p2 = Preview.objects.create(filetype='image/png', addon_id=337141,
position=1)
webapps = list(Webapp.objects.all())
with self.assertNumQueries(0):
for webapp in webapps:
eq_(webapp.all_previews, [p1, p2])
def test_prices(self):
self.make_premium(Webapp.objects.get(pk=337141))
webapps = list(Webapp.objects.all())
with self.assertNumQueries(0):
for webapp in webapps:
ok_(unicode(webapp.premium))
eq_(str(webapp.get_tier().price), '1.00')
ok_(webapp.get_tier_name())
def test_prices_free(self):
webapps = list(Webapp.objects.all())
with self.assertNumQueries(0):
for webapp in webapps:
eq_(webapp.premium, None)
eq_(webapp.get_tier(), None)
def test_device_types(self):
AddonDeviceType.objects.create(addon_id=337141,
device_type=self.device)
webapps = list(Webapp.objects.filter(id=337141))
with self.assertNumQueries(0):
for webapp in webapps:
assert webapp._device_types
eq_(webapp.device_types, [DEVICE_TYPES[self.device]])
def test_device_type_cache(self):
webapp = Webapp.objects.get(id=337141)
webapp._device_types = []
with self.assertNumQueries(0):
eq_(webapp.device_types, [])
class TestDetailsComplete(mkt.site.tests.TestCase):
def setUp(self):
self.device = DEVICE_TYPES.keys()[0]
self.webapp = Webapp.objects.create(status=mkt.STATUS_NULL)
def fail(self, value):
assert not self.webapp.details_complete(), value
reasons = self.webapp.details_errors()
assert value in reasons[0], reasons
def test_fail(self):
self.fail('email')
self.webapp.support_email = '[email protected]'
self.webapp.save()
self.fail('name')
self.webapp.name = 'name'
self.webapp.save()
self.fail('device')
self.webapp.addondevicetype_set.create(device_type=self.device)
self.webapp.save()
self.fail('category')
self.webapp.update(categories=['books'])
self.fail('screenshot')
self.webapp.previews.create()
eq_(self.webapp.details_complete(), True)
self.webapp.support_email = ''
self.webapp.save()
eq_(self.webapp.details_complete(), False)
self.fail('support email or URL')
self.webapp.support_url = 'http://test.com/'
self.webapp.save()
eq_(self.webapp.details_complete(), True)
class TestAddonExcludedRegion(mkt.site.tests.WebappTestCase):
def setUp(self):
super(TestAddonExcludedRegion, self).setUp()
self.excluded = self.app.addonexcludedregion
eq_(list(self.excluded.values_list('id', flat=True)), [])
self.er = self.app.addonexcludedregion.create(
region=mkt.regions.GBR.id)
eq_(list(self.excluded.values_list('id', flat=True)), [self.er.id])
def test_exclude_multiple(self):
other = AddonExcludedRegion.objects.create(addon=self.app,
region=mkt.regions.BRA.id)
self.assertSetEqual(self.excluded.values_list('id', flat=True),
[self.er.id, other.id])
def test_remove_excluded(self):
self.er.delete()
eq_(list(self.excluded.values_list('id', flat=True)), [])
def test_get_region(self):
eq_(self.er.get_region(), mkt.regions.GBR)
def test_unicode(self):
eq_(unicode(self.er), '%s: %s' % (self.app, mkt.regions.GBR.slug))
class TestContentRating(mkt.site.tests.WebappTestCase):
def setUp(self):
self.app = self.get_app()
@mock.patch.object(mkt.regions.BRA, 'ratingsbody',
mkt.ratingsbodies.CLASSIND)
@mock.patch.object(mkt.regions.USA, 'ratingsbody', mkt.ratingsbodies.ESRB)
@mock.patch.object(mkt.regions.VEN, 'ratingsbody',
mkt.ratingsbodies.GENERIC)
def test_get_regions_and_slugs(self):
classind_rating = ContentRating.objects.create(
addon=self.app, ratings_body=mkt.ratingsbodies.CLASSIND.id,
rating=0)
regions = classind_rating.get_regions()
assert mkt.regions.BRA in regions
assert mkt.regions.USA not in regions
assert mkt.regions.VEN not in regions
slugs = classind_rating.get_region_slugs()
assert mkt.regions.BRA.slug in slugs
assert mkt.regions.USA.slug not in slugs
assert mkt.regions.VEN.slug not in slugs
@mock.patch.object(mkt.regions.BRA, 'ratingsbody',
mkt.ratingsbodies.CLASSIND)
@mock.patch.object(mkt.regions.DEU, 'ratingsbody', mkt.ratingsbodies.ESRB)
@mock.patch.object(mkt.regions.VEN, 'ratingsbody',
mkt.ratingsbodies.GENERIC)
def test_get_regions_and_slugs_generic_fallback(self):
gen_rating = ContentRating.objects.create(
addon=self.app, ratings_body=mkt.ratingsbodies.GENERIC.id,
rating=0)
regions = gen_rating.get_regions()
assert mkt.regions.BRA not in regions
assert mkt.regions.DEU not in regions
assert mkt.regions.VEN in regions
slugs = gen_rating.get_region_slugs()
assert mkt.regions.BRA.slug not in slugs
assert mkt.regions.DEU.slug not in slugs
assert mkt.regions.VEN.slug not in slugs
        # We have a catch-all 'generic' region for all regions without a ratings body.
assert mkt.regions.GENERIC_RATING_REGION_SLUG in slugs
@mock.patch.object(mkt.ratingsbodies.CLASSIND, 'name', 'CLASSIND')
@mock.patch.object(mkt.ratingsbodies.CLASSIND_10, 'name', '10+')
@mock.patch.object(mkt.ratingsbodies.ESRB_E, 'name', 'Everybody 10+')
@mock.patch.object(mkt.ratingsbodies.ESRB_E, 'label', '10')
def test_get_ratings(self):
# Infer the label from the name.
cr = ContentRating.objects.create(
addon=self.app, ratings_body=mkt.ratingsbodies.CLASSIND.id,
rating=mkt.ratingsbodies.CLASSIND_10.id)
eq_(cr.get_rating().label, '10')
eq_(cr.get_body().label, 'classind')
        # When the label is already set.
eq_(ContentRating.objects.create(
addon=self.app, ratings_body=mkt.ratingsbodies.ESRB.id,
rating=mkt.ratingsbodies.ESRB_E.id).get_rating().label,
'10')
class TestContentRatingsIn(mkt.site.tests.WebappTestCase):
def test_not_in_region(self):
for region in mkt.regions.ALL_REGIONS:
eq_(self.app.content_ratings_in(region=region), [])
for region in mkt.regions.ALL_REGIONS:
AddonExcludedRegion.objects.create(addon=self.app,
region=region.id)
eq_(self.get_app().content_ratings_in(region=region), [])
def test_in_region_and_category(self):
self.make_game()
cat = 'games'
for region in mkt.regions.ALL_REGIONS:
eq_(self.app.listed_in(region=region, category=cat), True)
def test_in_region_and_not_in_category(self):
cat = 'games'
for region in mkt.regions.ALL_REGIONS:
eq_(self.app.content_ratings_in(region=region, category=cat), [])
@mock.patch.object(mkt.regions.COL, 'ratingsbody', None)
@mock.patch.object(mkt.regions.BRA, 'ratingsbody',
mkt.ratingsbodies.CLASSIND)
def test_generic_fallback(self):
# Test region with no rating body returns generic content rating.
crs = ContentRating.objects.create(
addon=self.app, ratings_body=mkt.ratingsbodies.GENERIC.id,
rating=mkt.ratingsbodies.GENERIC_3.id)
eq_(self.app.content_ratings_in(region=mkt.regions.COL), [crs])
# Test region with rating body does not include generic content rating.
assert crs not in self.app.content_ratings_in(region=mkt.regions.BRA)
class TestIARCInfo(mkt.site.tests.WebappTestCase):
def test_no_info(self):
with self.assertRaises(IARCInfo.DoesNotExist):
self.app.iarc_info
def test_info(self):
IARCInfo.objects.create(addon=self.app, submission_id=1,
security_code='s3kr3t')
eq_(self.app.iarc_info.submission_id, 1)
eq_(self.app.iarc_info.security_code, 's3kr3t')
class TestQueue(mkt.site.tests.WebappTestCase):
def test_in_rereview_queue(self):
assert not self.app.in_rereview_queue()
RereviewQueue.objects.create(addon=self.app)
assert self.app.in_rereview_queue()
def test_in_escalation_queue(self):
assert not self.app.in_escalation_queue()
EscalationQueue.objects.create(addon=self.app)
assert self.app.in_escalation_queue()
class TestPackagedSigning(mkt.site.tests.WebappTestCase):
@mock.patch('lib.crypto.packaged.sign')
def test_not_packaged(self, sign):
self.app.update(is_packaged=False)
assert not self.app.sign_if_packaged(self.app.current_version.pk)
assert not sign.called
@mock.patch('lib.crypto.packaged.sign')
def test_packaged(self, sign):
self.app.update(is_packaged=True)
assert self.app.sign_if_packaged(self.app.current_version.pk)
eq_(sign.call_args[0][0], self.app.current_version.pk)
@mock.patch('lib.crypto.packaged.sign')
def test_packaged_reviewer(self, sign):
self.app.update(is_packaged=True)
assert self.app.sign_if_packaged(self.app.current_version.pk,
reviewer=True)
eq_(sign.call_args[0][0], self.app.current_version.pk)
eq_(sign.call_args[1]['reviewer'], True)
class TestUpdateStatus(mkt.site.tests.TestCase):
def setUp(self):
# Disabling signals to simplify these tests. We call update_status()
# manually in them.
version_changed_signal.disconnect(version_changed,
dispatch_uid='version_changed')
post_save.disconnect(update_status, sender=Version,
dispatch_uid='version_update_status')
post_delete.disconnect(update_status, sender=Version,
dispatch_uid='version_update_status')
def tearDown(self):
version_changed_signal.connect(version_changed,
dispatch_uid='version_changed')
post_save.connect(update_status, sender=Version,
dispatch_uid='version_update_status')
post_delete.connect(update_status, sender=Version,
dispatch_uid='version_update_status')
def test_no_versions(self):
app = Webapp.objects.create(status=mkt.STATUS_PUBLIC)
app.update_status()
eq_(app.status, mkt.STATUS_NULL)
def test_version_no_files(self):
app = Webapp.objects.create(status=mkt.STATUS_PUBLIC)
Version(addon=app).save()
app.update_status()
eq_(app.status, mkt.STATUS_NULL)
def test_only_version_deleted(self):
app = app_factory(status=mkt.STATUS_REJECTED)
app.latest_version.delete()
app.update_status()
eq_(app.status, mkt.STATUS_NULL)
def test_other_version_deleted(self):
app = app_factory(status=mkt.STATUS_REJECTED)
version_factory(addon=app)
app.latest_version.delete()
app.update_status()
eq_(app.status, mkt.STATUS_REJECTED)
def test_one_version_pending(self):
app = app_factory(status=mkt.STATUS_REJECTED,
file_kw=dict(status=mkt.STATUS_DISABLED))
version_factory(addon=app,
file_kw=dict(status=mkt.STATUS_PENDING))
with mock.patch('mkt.webapps.models.Webapp.is_fully_complete') as comp:
comp.return_value = True
app.update_status()
eq_(app.status, mkt.STATUS_PENDING)
def test_one_version_pending_not_fully_complete(self):
app = app_factory(status=mkt.STATUS_REJECTED,
file_kw=dict(status=mkt.STATUS_DISABLED))
version_factory(addon=app,
file_kw=dict(status=mkt.STATUS_PENDING))
with mock.patch('mkt.webapps.models.Webapp.is_fully_complete') as comp:
comp.return_value = False
app.update_status()
eq_(app.status, mkt.STATUS_REJECTED) # Didn't change.
def test_one_version_public(self):
app = app_factory(status=mkt.STATUS_PUBLIC)
version_factory(addon=app,
file_kw=dict(status=mkt.STATUS_DISABLED))
app.update_status()
eq_(app.status, mkt.STATUS_PUBLIC)
def test_was_approved_then_new_version(self):
app = app_factory(status=mkt.STATUS_APPROVED)
File.objects.filter(version__addon=app).update(status=app.status)
version_factory(addon=app,
file_kw=dict(status=mkt.STATUS_PENDING))
app.update_status()
eq_(app.status, mkt.STATUS_APPROVED)
def test_blocklisted(self):
app = app_factory(status=mkt.STATUS_BLOCKED)
app.latest_version.delete()
app.update_status()
eq_(app.status, mkt.STATUS_BLOCKED)
class TestInstalled(mkt.site.tests.TestCase):
def setUp(self):
user = UserProfile.objects.create(email='[email protected]')
app = Webapp.objects.create()
self.m = functools.partial(Installed.objects.safer_get_or_create,
user=user, addon=app)
def test_install_type(self):
assert self.m(install_type=apps.INSTALL_TYPE_USER)[1]
assert not self.m(install_type=apps.INSTALL_TYPE_USER)[1]
assert self.m(install_type=apps.INSTALL_TYPE_REVIEWER)[1]
class TestAppFeatures(DynamicBoolFieldsTestMixin, mkt.site.tests.TestCase):
def setUp(self):
super(TestAppFeatures, self).setUp()
self.model = AppFeatures
self.related_name = 'features'
self.BOOL_DICT = mkt.constants.features.APP_FEATURES
self.flags = ('APPS', 'GEOLOCATION', 'PAY', 'SMS')
self.expected = [u'App Management API', u'Geolocation', u'Web Payment',
u'WebSMS']
self.af = AppFeatures.objects.get()
def _get_related_bool_obj(self):
return getattr(self.app.current_version, self.related_name)
def test_signature_parity(self):
# Test flags -> signature -> flags works as expected.
self._flag()
signature = self.app.current_version.features.to_signature()
eq_(signature.count('.'), 2, 'Unexpected signature format')
self.af.set_flags(signature)
self._check(self.af)
def test_bad_data(self):
self.af.set_flags('foo')
self.af.set_flags('<script>')
def test_default_false(self):
obj = self.model(version=self.app.current_version)
eq_(getattr(obj, 'has_%s' % self.flags[0].lower()), False)
class TestRatingDescriptors(mkt.site.tests.TestCase):
def setUp(self):
super(TestRatingDescriptors, self).setUp()
def test_desc_mapping(self):
descs = RatingDescriptors.objects.create(addon=app_factory())
for body, mapping in DESCS.items():
for native, rating_desc_field in mapping.items():
assert hasattr(descs, rating_desc_field), rating_desc_field
def test_reverse_desc_mapping(self):
descs = RatingDescriptors.objects.create(addon=app_factory())
for desc in descs._fields():
eq_(type(REVERSE_DESCS.get(desc)), unicode, desc)
def test_iarc_deserialize(self):
descs = RatingDescriptors.objects.create(
addon=app_factory(), has_esrb_blood=True, has_pegi_scary=True)
self.assertSetEqual(descs.iarc_deserialize().split(', '),
['Blood', 'Fear'])
eq_(descs.iarc_deserialize(body=mkt.ratingsbodies.ESRB), 'Blood')
class TestRatingInteractives(mkt.site.tests.TestCase):
def setUp(self):
super(TestRatingInteractives, self).setUp()
def test_interactives_mapping(self):
interactives = RatingInteractives.objects.create(addon=app_factory())
for native, field in INTERACTIVES.items():
assert hasattr(interactives, field)
def test_reverse_interactives_mapping(self):
interactives = RatingInteractives.objects.create(addon=app_factory())
for interactive_field in interactives._fields():
assert REVERSE_INTERACTIVES.get(interactive_field)
def test_iarc_deserialize(self):
interactives = RatingInteractives.objects.create(
addon=app_factory(), has_users_interact=True, has_shares_info=True)
self.assertSetEqual(
interactives.iarc_deserialize().split(', '),
['Shares Info', 'Users Interact'])
class TestManifestUpload(BaseUploadTest, mkt.site.tests.TestCase):
fixtures = fixture('webapp_337141')
def setUp(self):
super(TestManifestUpload, self).setUp()
self.addCleanup(translation.deactivate)
def manifest(self, name):
return os.path.join(settings.ROOT, 'mkt', 'developers', 'tests',
'addons', name)
@mock.patch('mkt.webapps.models.parse_addon')
def test_manifest_updated_developer_name(self, parse_addon):
parse_addon.return_value = {
'version': '4.0',
'developer_name': u'Méâ'
}
# Note: we need a valid FileUpload instance, but in the end we are not
# using its contents since we are mocking parse_addon().
upload = self.get_upload(abspath=self.manifest('mozball.webapp'))
app = Webapp.objects.get(pk=337141)
app.manifest_updated('', upload)
version = app.current_version.reload()
eq_(version.version, '4.0')
eq_(version.developer_name, u'Méâ')
@mock.patch('mkt.webapps.models.parse_addon')
def test_manifest_updated_long_developer_name(self, parse_addon):
truncated_developer_name = u'é' * 255
long_developer_name = truncated_developer_name + u'ßßßß'
parse_addon.return_value = {
'version': '4.1',
'developer_name': long_developer_name,
}
# Note: we need a valid FileUpload instance, but in the end we are not
# using its contents since we are mocking parse_addon().
upload = self.get_upload(abspath=self.manifest('mozball.webapp'))
app = Webapp.objects.get(pk=337141)
app.manifest_updated('', upload)
version = app.current_version.reload()
eq_(version.version, '4.1')
eq_(version.developer_name, truncated_developer_name)
def test_manifest_url(self):
upload = self.get_upload(abspath=self.manifest('mozball.webapp'))
addon = Webapp.from_upload(upload)
eq_(addon.manifest_url, upload.name)
def test_app_domain(self):
upload = self.get_upload(abspath=self.manifest('mozball.webapp'))
upload.name = 'http://mozilla.com/my/rad/app.webapp' # manifest URL
addon = Webapp.from_upload(upload)
eq_(addon.app_domain, 'http://mozilla.com')
def test_non_english_app(self):
upload = self.get_upload(abspath=self.manifest('non-english.webapp'))
upload.name = 'http://mozilla.com/my/rad/app.webapp' # manifest URL
addon = Webapp.from_upload(upload)
eq_(addon.default_locale, 'it')
eq_(unicode(addon.name), 'ItalianMozBall')
eq_(addon.name.locale, 'it')
def test_webapp_default_locale_override(self):
with nested(tempfile.NamedTemporaryFile('w', suffix='.webapp'),
open(self.manifest('mozball.webapp'))) as (tmp, mf):
mf = json.load(mf)
mf['default_locale'] = 'es'
tmp.write(json.dumps(mf))
tmp.flush()
upload = self.get_upload(abspath=tmp.name)
addon = Webapp.from_upload(upload)
eq_(addon.default_locale, 'es')
def test_webapp_default_locale_unsupported(self):
with nested(tempfile.NamedTemporaryFile('w', suffix='.webapp'),
open(self.manifest('mozball.webapp'))) as (tmp, mf):
mf = json.load(mf)
mf['default_locale'] = 'gb'
tmp.write(json.dumps(mf))
tmp.flush()
upload = self.get_upload(abspath=tmp.name)
addon = Webapp.from_upload(upload)
eq_(addon.default_locale, 'en-US')
def test_browsing_locale_does_not_override(self):
with translation.override('fr'):
# Upload app with en-US as default.
upload = self.get_upload(abspath=self.manifest('mozball.webapp'))
addon = Webapp.from_upload(upload)
eq_(addon.default_locale, 'en-US') # not fr
@raises(forms.ValidationError)
def test_malformed_locales(self):
manifest = self.manifest('malformed-locales.webapp')
upload = self.get_upload(abspath=manifest)
Webapp.from_upload(upload)
class TestGeodata(mkt.site.tests.WebappTestCase):
def setUp(self):
super(TestGeodata, self).setUp()
self.geo = self.app.geodata
def test_app_geodata(self):
assert isinstance(Webapp(id=337141).geodata, Geodata)
def test_unicode(self):
eq_(unicode(self.geo),
u'%s (unrestricted): <Webapp 337141>' % self.geo.id)
self.geo.update(restricted=True)
eq_(unicode(self.geo),
u'%s (restricted): <Webapp 337141>' % self.geo.id)
def test_get_status(self):
status = mkt.STATUS_PENDING
eq_(self.geo.get_status(mkt.regions.CHN), status)
eq_(self.geo.region_cn_status, status)
status = mkt.STATUS_PUBLIC
self.geo.update(region_cn_status=status)
eq_(self.geo.get_status(mkt.regions.CHN), status)
eq_(self.geo.region_cn_status, status)
def test_set_status(self):
status = mkt.STATUS_PUBLIC
# Called with `save=False`.
self.geo.set_status(mkt.regions.CHN, status)
eq_(self.geo.region_cn_status, status)
eq_(self.geo.reload().region_cn_status, mkt.STATUS_PENDING,
'`set_status(..., save=False)` should not save the value')
# Called with `save=True`.
self.geo.set_status(mkt.regions.CHN, status, save=True)
eq_(self.geo.region_cn_status, status)
eq_(self.geo.reload().region_cn_status, status)
def test_banner_regions_names(self):
eq_(self.geo.banner_regions, {})
eq_(self.geo.banner_regions_names(), [])
self.geo.update(
banner_regions=[mkt.regions.GBR.id, mkt.regions.CHN.id])
eq_(self.geo.banner_regions_names(), [u'China', u'United Kingdom'])
@mock.patch.object(settings, 'PRE_GENERATE_APKS', True)
@mock.patch('mkt.webapps.tasks.pre_generate_apk')
class TestPreGenAPKs(mkt.site.tests.WebappTestCase):
def setUp(self):
super(TestPreGenAPKs, self).setUp()
self.manifest_url = 'http://some-app.com/manifest.webapp'
self.app.update(status=mkt.STATUS_PUBLIC,
manifest_url=self.manifest_url)
# Set up the app to support Android.
self.app.addondevicetype_set.create(device_type=mkt.DEVICE_MOBILE.id)
def switch_device(self, device_id):
self.app.addondevicetype_set.all().delete()
self.app.addondevicetype_set.create(device_type=device_id)
def test_approved_apps(self, pre_gen_task):
assert not pre_gen_task.delay.called
self.app.save()
pre_gen_task.delay.assert_called_with(self.app.id)
def test_unapproved_apps(self, pre_gen_task):
self.app.update(status=mkt.STATUS_REJECTED)
assert not pre_gen_task.delay.called, (
'APKs for unapproved apps should not be pre-generated')
def test_disabled(self, pre_gen_task):
with self.settings(PRE_GENERATE_APKS=False):
self.app.save()
assert not pre_gen_task.delay.called, (
'task should not be called if PRE_GENERATE_APKS is False')
def test_ignore_firefox_os_apps(self, pre_gen_task):
self.switch_device(mkt.DEVICE_GAIA.id)
self.app.save()
assert not pre_gen_task.delay.called, (
'task should not be called for Firefox OS apps')
def test_treat_tablet_as_android(self, pre_gen_task):
self.switch_device(mkt.DEVICE_TABLET.id)
self.app.save()
assert pre_gen_task.delay.called, (
'task should be called for tablet apps')
class TestSearchSignals(ESTestCase):
def setUp(self):
super(TestSearchSignals, self).setUp()
self.addCleanup(self.cleanup)
def cleanup(self):
for index in settings.ES_INDEXES.values():
try:
self.es.indices.delete(index=index)
except elasticsearch.NotFoundError:
pass
def test_create(self):
eq_(WebappIndexer.search().count(), 0)
app_factory()
self.refresh('webapp')
eq_(WebappIndexer.search().count(), 1)
def test_update(self):
app = app_factory()
self.refresh('webapp')
eq_(WebappIndexer.search().count(), 1)
prev_name = unicode(app.name)
app.name = 'yolo'
app.save()
self.refresh('webapp')
eq_(WebappIndexer.search().count(), 1)
eq_(WebappIndexer.search().query('term', name=prev_name).count(), 0)
eq_(WebappIndexer.search().query('term', name='yolo').count(), 1)
| tsl143/zamboni | mkt/webapps/tests/test_models.py | Python | bsd-3-clause | 97,405 | 0 |
import logging
import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import useragents
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream
from streamlink.utils import parse_json
log = logging.getLogger(__name__)
class Mitele(Plugin):
_url_re = re.compile(r"https?://(?:www\.)?mitele\.es/directo/(?P<channel>[\w-]+)")
pdata_url = "https://indalo.mediaset.es/mmc-player/api/mmc/v1/{channel}/live/html5.json"
gate_url = "https://gatekeeper.mediaset.es"
error_schema = validate.Schema({
"code": validate.any(validate.text, int),
"message": validate.text,
})
pdata_schema = validate.Schema(validate.transform(parse_json), validate.any(
validate.all(
{
"locations": [{
"gcp": validate.text,
"ogn": validate.any(None, validate.text),
}],
},
validate.get("locations"),
validate.get(0),
),
error_schema,
))
gate_schema = validate.Schema(
validate.transform(parse_json),
validate.any(
{
"mimeType": validate.text,
"stream": validate.url(),
},
error_schema,
)
)
def __init__(self, url):
super(Mitele, self).__init__(url)
self.session.http.headers.update({
"User-Agent": useragents.FIREFOX,
"Referer": self.url
})
@classmethod
def can_handle_url(cls, url):
return cls._url_re.match(url) is not None
def _get_streams(self):
channel = self._url_re.match(self.url).group("channel")
pdata = self.session.http.get(self.pdata_url.format(channel=channel),
acceptable_status=(200, 403, 404),
schema=self.pdata_schema)
log.trace("{0!r}".format(pdata))
if pdata.get("code"):
log.error("{0} - {1}".format(pdata["code"], pdata["message"]))
return
gdata = self.session.http.post(self.gate_url,
acceptable_status=(200, 403, 404),
data=pdata,
schema=self.gate_schema)
log.trace("{0!r}".format(gdata))
if gdata.get("code"):
log.error("{0} - {1}".format(gdata["code"], gdata["message"]))
return
log.debug("Stream: {0} ({1})".format(gdata["stream"], gdata.get("mimeType", "n/a")))
for s in HLSStream.parse_variant_playlist(self.session,
gdata["stream"],
name_fmt="{pixels}_{bitrate}").items():
yield s
__plugin__ = Mitele
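# Illustrative sketch (not part of the original plugin): how the URL matching
# defined in _url_re above behaves; the channel name used here is an assumption.
def _example_can_handle_url():
    assert Mitele.can_handle_url("https://www.mitele.es/directo/telecinco")
    assert not Mitele.can_handle_url("https://www.mitele.es/series/algo")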
| repotvsupertuga/tvsupertuga.repository | script.module.streamlink.base/resources/lib/streamlink/plugins/mitele.py | Python | gpl-2.0 | 2,844 | 0.001758 |
"""Utility methods for flake8."""
import collections
import fnmatch as _fnmatch
import inspect
import io
import logging
import os
import platform
import re
import sys
import tokenize
from typing import Callable, Dict, Generator, List, Optional, Pattern
from typing import Sequence, Set, Tuple, Union
from flake8 import exceptions
from flake8._compat import lru_cache
if False: # `typing.TYPE_CHECKING` was introduced in 3.5.2
from flake8.plugins.manager import Plugin
DIFF_HUNK_REGEXP = re.compile(r"^@@ -\d+(?:,\d+)? \+(\d+)(?:,(\d+))? @@.*$")
COMMA_SEPARATED_LIST_RE = re.compile(r"[,\s]")
LOCAL_PLUGIN_LIST_RE = re.compile(r"[,\t\n\r\f\v]")
string_types = (str, type(u""))
def parse_comma_separated_list(value, regexp=COMMA_SEPARATED_LIST_RE):
# type: (str, Pattern[str]) -> List[str]
"""Parse a comma-separated list.
:param value:
String to be parsed and normalized.
:param regexp:
Compiled regular expression used to split the value when it is a
string.
:type regexp:
_sre.SRE_Pattern
:returns:
List of values with whitespace stripped.
:rtype:
list
"""
assert isinstance(value, string_types), value
separated = regexp.split(value)
item_gen = (item.strip() for item in separated)
return [item for item in item_gen if item]
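# Illustrative usage sketch (not part of the original flake8 module): the values
# below are assumed examples showing the comma/whitespace splitting described in
# the docstring above.
def _example_parse_comma_separated_list():
    # Commas and runs of whitespace both separate items; empty items are dropped.
    assert parse_comma_separated_list("E121, W123  W234,") == ["E121", "W123", "W234"]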
_Token = collections.namedtuple("Token", ("tp", "src"))
_CODE, _FILE, _COLON, _COMMA, _WS = "code", "file", "colon", "comma", "ws"
_EOF = "eof"
_FILE_LIST_TOKEN_TYPES = [
(re.compile(r"[A-Z]+[0-9]*(?=$|\s|,)"), _CODE),
(re.compile(r"[^\s:,]+"), _FILE),
(re.compile(r"\s*:\s*"), _COLON),
(re.compile(r"\s*,\s*"), _COMMA),
(re.compile(r"\s+"), _WS),
]
def _tokenize_files_to_codes_mapping(value):
# type: (str) -> List[_Token]
tokens = []
i = 0
while i < len(value):
for token_re, token_name in _FILE_LIST_TOKEN_TYPES:
match = token_re.match(value, i)
if match:
tokens.append(_Token(token_name, match.group().strip()))
i = match.end()
break
else:
raise AssertionError("unreachable", value, i)
tokens.append(_Token(_EOF, ""))
return tokens
def parse_files_to_codes_mapping(value_): # noqa: C901
# type: (Union[Sequence[str], str]) -> List[Tuple[str, List[str]]]
"""Parse a files-to-codes mapping.
    A files-to-codes mapping is a sequence of values specified as
`filenames list:codes list ...`. Each of the lists may be separated by
either comma or whitespace tokens.
:param value: String to be parsed and normalized.
:type value: str
"""
if not isinstance(value_, string_types):
value = "\n".join(value_)
else:
value = value_
ret = [] # type: List[Tuple[str, List[str]]]
if not value.strip():
return ret
class State:
seen_sep = True
seen_colon = False
filenames = [] # type: List[str]
codes = [] # type: List[str]
def _reset(): # type: () -> None
if State.codes:
for filename in State.filenames:
ret.append((filename, State.codes))
State.seen_sep = True
State.seen_colon = False
State.filenames = []
State.codes = []
def _unexpected_token(): # type: () -> exceptions.ExecutionError
def _indent(s): # type: (str) -> str
return " " + s.strip().replace("\n", "\n ")
return exceptions.ExecutionError(
"Expected `per-file-ignores` to be a mapping from file exclude "
"patterns to ignore codes.\n\n"
"Configured `per-file-ignores` setting:\n\n{}".format(
_indent(value)
)
)
for token in _tokenize_files_to_codes_mapping(value):
# legal in any state: separator sets the sep bit
if token.tp in {_COMMA, _WS}:
State.seen_sep = True
# looking for filenames
elif not State.seen_colon:
if token.tp == _COLON:
State.seen_colon = True
State.seen_sep = True
elif State.seen_sep and token.tp == _FILE:
State.filenames.append(token.src)
State.seen_sep = False
else:
raise _unexpected_token()
# looking for codes
else:
if token.tp == _EOF:
_reset()
elif State.seen_sep and token.tp == _CODE:
State.codes.append(token.src)
State.seen_sep = False
elif State.seen_sep and token.tp == _FILE:
_reset()
State.filenames.append(token.src)
State.seen_sep = False
else:
raise _unexpected_token()
return ret
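# Illustrative usage sketch (not part of the original flake8 module): the
# per-file-ignores style value below is an assumed example of the
# "filenames list:codes list ..." format described in the docstring above.
def _example_parse_files_to_codes_mapping():
    value = "setup.py:E501 tests/*.py:E101,W191"
    expected = [("setup.py", ["E501"]), ("tests/*.py", ["E101", "W191"])]
    assert parse_files_to_codes_mapping(value) == expected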
def normalize_paths(paths, parent=os.curdir):
# type: (Sequence[str], str) -> List[str]
"""Normalize a list of paths relative to a parent directory.
:returns:
The normalized paths.
:rtype:
[str]
"""
assert isinstance(paths, list), paths
return [normalize_path(p, parent) for p in paths]
def normalize_path(path, parent=os.curdir):
# type: (str, str) -> str
"""Normalize a single-path.
:returns:
The normalized path.
:rtype:
str
"""
# NOTE(sigmavirus24): Using os.path.sep and os.path.altsep allow for
# Windows compatibility with both Windows-style paths (c:\\foo\bar) and
# Unix style paths (/foo/bar).
separator = os.path.sep
# NOTE(sigmavirus24): os.path.altsep may be None
alternate_separator = os.path.altsep or ""
if separator in path or (
alternate_separator and alternate_separator in path
):
path = os.path.abspath(os.path.join(parent, path))
return path.rstrip(separator + alternate_separator)
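# Illustrative usage sketch (not part of the original flake8 module): the paths
# are assumptions chosen only to show the two branches of normalize_path.
def _example_normalize_path():
    # A bare name contains no separator, so it is returned untouched.
    assert normalize_path(".git") == ".git"
    # On a POSIX system a relative path containing a separator is anchored to
    # the parent directory and loses its trailing slash, e.g.:
    #   normalize_path("sub/dir/", parent="/project") == "/project/sub/dir"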
def _stdin_get_value_py3(): # type: () -> str
stdin_value = sys.stdin.buffer.read()
fd = io.BytesIO(stdin_value)
try:
coding, _ = tokenize.detect_encoding(fd.readline)
return stdin_value.decode(coding)
except (LookupError, SyntaxError, UnicodeError):
return stdin_value.decode("utf-8")
@lru_cache(maxsize=1)
def stdin_get_value(): # type: () -> str
"""Get and cache it so plugins can use it."""
if sys.version_info < (3,):
return sys.stdin.read()
else:
return _stdin_get_value_py3()
def stdin_get_lines(): # type: () -> List[str]
"""Return lines of stdin split according to file splitting."""
if sys.version_info < (3,):
return list(io.BytesIO(stdin_get_value()))
else:
return list(io.StringIO(stdin_get_value()))
def parse_unified_diff(diff=None):
# type: (Optional[str]) -> Dict[str, Set[int]]
"""Parse the unified diff passed on stdin.
:returns:
dictionary mapping file names to sets of line numbers
:rtype:
dict
"""
# Allow us to not have to patch out stdin_get_value
if diff is None:
diff = stdin_get_value()
number_of_rows = None
current_path = None
parsed_paths = collections.defaultdict(set) # type: Dict[str, Set[int]]
for line in diff.splitlines():
if number_of_rows:
# NOTE(sigmavirus24): Below we use a slice because stdin may be
# bytes instead of text on Python 3.
if line[:1] != "-":
number_of_rows -= 1
# We're in the part of the diff that has lines starting with +, -,
# and ' ' to show context and the changes made. We skip these
# because the information we care about is the filename and the
# range within it.
# When number_of_rows reaches 0, we will once again start
# searching for filenames and ranges.
continue
# NOTE(sigmavirus24): Diffs that we support look roughly like:
# diff a/file.py b/file.py
# ...
# --- a/file.py
# +++ b/file.py
# Below we're looking for that last line. Every diff tool that
# gives us this output may have additional information after
# ``b/file.py`` which it will separate with a \t, e.g.,
# +++ b/file.py\t100644
# Which is an example that has the new file permissions/mode.
# In this case we only care about the file name.
if line[:3] == "+++":
current_path = line[4:].split("\t", 1)[0]
# NOTE(sigmavirus24): This check is for diff output from git.
if current_path[:2] == "b/":
current_path = current_path[2:]
# We don't need to do anything else. We have set up our local
# ``current_path`` variable. We can skip the rest of this loop.
            # The next line we will see will give us the hunk information
# which is in the next section of logic.
continue
hunk_match = DIFF_HUNK_REGEXP.match(line)
# NOTE(sigmavirus24): pep8/pycodestyle check for:
# line[:3] == '@@ '
# But the DIFF_HUNK_REGEXP enforces that the line start with that
# So we can more simply check for a match instead of slicing and
# comparing.
if hunk_match:
(row, number_of_rows) = [
1 if not group else int(group)
for group in hunk_match.groups()
]
assert current_path is not None
parsed_paths[current_path].update(
range(row, row + number_of_rows)
)
# We have now parsed our diff into a dictionary that looks like:
# {'file.py': set(range(10, 16), range(18, 20)), ...}
return parsed_paths
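# Illustrative usage sketch (not part of the original flake8 module): a made-up
# minimal diff exercising the "+++ b/<file>" and "@@" hunk lines the parser
# looks for; real diffs carry additional context lines.
def _example_parse_unified_diff():
    diff = (
        "--- a/example.py\n"
        "+++ b/example.py\n"
        "@@ -1,2 +10,3 @@\n"
        "+added line\n"
        "+added line\n"
        "+added line\n"
    )
    # Lines 10-12 of example.py are reported as changed.
    assert parse_unified_diff(diff) == {"example.py": set(range(10, 13))}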
def is_windows():
# type: () -> bool
"""Determine if we're running on Windows.
:returns:
True if running on Windows, otherwise False
:rtype:
bool
"""
return os.name == "nt"
def is_using_stdin(paths):
# type: (List[str]) -> bool
"""Determine if we're going to read from stdin.
:param list paths:
The paths that we're going to check.
:returns:
True if stdin (-) is in the path, otherwise False
:rtype:
bool
"""
return "-" in paths
def _default_predicate(*args): # type: (*str) -> bool
return False
def filenames_from(arg, predicate=None):
# type: (str, Optional[Callable[[str], bool]]) -> Generator[str, None, None] # noqa: E501
"""Generate filenames from an argument.
:param str arg:
Parameter from the command-line.
:param callable predicate:
Predicate to use to filter out filenames. If the predicate
returns ``True`` we will exclude the filename, otherwise we
will yield it. By default, we include every filename
generated.
:returns:
Generator of paths
"""
if predicate is None:
predicate = _default_predicate
if predicate(arg):
return
if os.path.isdir(arg):
for root, sub_directories, files in os.walk(arg):
if predicate(root):
sub_directories[:] = []
continue
# NOTE(sigmavirus24): os.walk() will skip a directory if you
# remove it from the list of sub-directories.
for directory in sub_directories:
joined = os.path.join(root, directory)
if predicate(joined):
sub_directories.remove(directory)
for filename in files:
joined = os.path.join(root, filename)
if not predicate(joined):
yield joined
else:
yield arg
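# Illustrative usage sketch (not part of the original flake8 module): walking a
# hypothetical "src" tree while skipping anything named "build"; the layout is
# an assumption made purely for demonstration.
def _example_filenames_from():
    def _exclude_build(path):  # predicate: True means "skip this path"
        return os.path.basename(path) == "build"
    return list(filenames_from("src", predicate=_exclude_build))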
def fnmatch(filename, patterns):
# type: (str, Sequence[str]) -> bool
"""Wrap :func:`fnmatch.fnmatch` to add some functionality.
:param str filename:
Name of the file we're trying to match.
:param list patterns:
Patterns we're using to try to match the filename.
    :returns:
        True if patterns is empty or if a pattern matches the filename,
        False otherwise.
"""
if not patterns:
return True
return any(_fnmatch.fnmatch(filename, pattern) for pattern in patterns)
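# Illustrative usage sketch (not part of the original flake8 module): an empty
# pattern list matches everything, otherwise any single matching pattern wins.
def _example_fnmatch():
    assert fnmatch("setup.py", []) is True
    assert fnmatch("setup.py", ["*.py", "*.cfg"]) is True
    assert fnmatch("setup.py", ["*.cfg"]) is False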
def parameters_for(plugin):
# type: (Plugin) -> Dict[str, bool]
"""Return the parameters for the plugin.
This will inspect the plugin and return either the function parameters
if the plugin is a function or the parameters for ``__init__`` after
``self`` if the plugin is a class.
:param plugin:
The internal plugin object.
:type plugin:
flake8.plugins.manager.Plugin
:returns:
A dictionary mapping the parameter name to whether or not it is
required (a.k.a., is positional only/does not have a default).
:rtype:
dict([(str, bool)])
"""
func = plugin.plugin
is_class = not inspect.isfunction(func)
if is_class: # The plugin is a class
func = plugin.plugin.__init__
if sys.version_info < (3, 3):
argspec = inspect.getargspec(func)
start_of_optional_args = len(argspec[0]) - len(argspec[-1] or [])
parameter_names = argspec[0]
parameters = collections.OrderedDict(
[
(name, position < start_of_optional_args)
for position, name in enumerate(parameter_names)
]
)
else:
parameters = collections.OrderedDict(
[
(parameter.name, parameter.default is parameter.empty)
for parameter in inspect.signature(func).parameters.values()
if parameter.kind == parameter.POSITIONAL_OR_KEYWORD
]
)
if is_class:
parameters.pop("self", None)
return parameters
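# Illustrative usage sketch (not part of the original flake8 module): flake8
# normally passes a flake8.plugins.manager.Plugin here; the stand-in object with
# a `.plugin` attribute below is an assumption made purely for demonstration.
def _example_parameters_for():
    def _check(logical_line, max_line_length=79):
        """A toy plugin callable."""
    class _FakePlugin(object):
        def __init__(self, func):
            self.plugin = func
    # `logical_line` has no default, so it is reported as required.
    assert parameters_for(_FakePlugin(_check)) == {
        "logical_line": True,
        "max_line_length": False,
    }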
def matches_filename(path, patterns, log_message, logger):
# type: (str, Sequence[str], str, logging.Logger) -> bool
"""Use fnmatch to discern if a path exists in patterns.
:param str path:
The path to the file under question
:param patterns:
The patterns to match the path against.
:type patterns:
list[str]
:param str log_message:
The message used for logging purposes.
:returns:
True if path matches patterns, False otherwise
:rtype:
bool
"""
if not patterns:
return False
basename = os.path.basename(path)
if basename not in {".", ".."} and fnmatch(basename, patterns):
logger.debug(log_message, {"path": basename, "whether": ""})
return True
absolute_path = os.path.abspath(path)
match = fnmatch(absolute_path, patterns)
logger.debug(
log_message,
{"path": absolute_path, "whether": "" if match else "not "},
)
return match
def get_python_version(): # type: () -> str
"""Find and format the python implementation and version.
:returns:
Implementation name, version, and platform as a string.
:rtype:
str
"""
return "%s %s on %s" % (
platform.python_implementation(),
platform.python_version(),
platform.system(),
)
| TeamSPoon/logicmoo_workspace | packs_web/butterfly/lib/python3.7/site-packages/flake8/utils.py | Python | mit | 15,155 | 0 |
"""test a warning is triggered when using for a lists comprehension variable"""
__revision__ = 'yo'
TEST_LC = [C for C in __revision__ if C.isalpha()]
print C # WARN
C = 4
print C # this one shouldn't trigger any warning
B = [B for B in __revision__ if B.isalpha()]
print B # nor this one
for var1, var2 in TEST_LC:
var1 = var2 + 4
print var1 # WARN
for note in __revision__:
note.something()
for line in __revision__:
for note in line:
A = note.anotherthing()
for x in []:
pass
for x in range(3):
print (lambda : x)() # OK
| dbbhattacharya/kitsune | vendor/packages/pylint/test/input/func_use_for_or_listcomp_var.py | Python | bsd-3-clause | 560 | 0.014286 |
# settings.py
#######################################################
#
# Definition of the different paths:
# - CellProfiler (Software, input, output)
# - Input
# - Output
#
#######################################################
import os
def init():
global pathList
    CPPath = "D:/Logiciel/CellProfiler2.2/CellProfiler.exe"
    inputDataPath = "C:/Data/Granulometry/Data/"
    if inputDataPath[-1] != '/':
        inputDataPath = inputDataPath + '/'
    resultPath = "./../Results/"
    colorDisplayPath = resultPath + "colorDisplay/"
    outputDetPath = resultPath + "outputResults/"
    inputCellProfilerPath = resultPath + "inputCP/"
    outputCellProfilerPath = resultPath + "CPResults/"
if not os.path.isdir(resultPath):
os.mkdir(resultPath)
if not os.path.isdir(colorDisplayPath):
os.mkdir(colorDisplayPath)
if not os.path.isdir(outputDetPath):
os.mkdir(outputDetPath)
if not os.path.isdir(inputCellProfilerPath):
os.mkdir(inputCellProfilerPath)
if not os.path.isdir(outputCellProfilerPath):
os.mkdir(outputCellProfilerPath)
    pathList = [CPPath, inputDataPath, resultPath, outputDetPath,
                inputCellProfilerPath, outputCellProfilerPath]
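# Illustrative usage sketch (an assumption, not part of the original project):
# a caller would import this module, call init() once to create the folders,
# and then read the configured locations back out of pathList.
if __name__ == "__main__":
    init()
    # pathList holds, in order: CellProfiler exe, input data, results,
    # detection output, CellProfiler input and CellProfiler output folders.
    print(pathList)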
| maxBombrun/lipidDroplets | settings.py | Python | bsd-3-clause | 1,178 | 0.034805 |
import os
import re
import sys
import unittest
from coalib import coala_format
from coalib.misc.ContextManagers import prepare_file
from tests.TestUtilities import bear_test_module, execute_coala
class coalaFormatTest(unittest.TestCase):
def setUp(self):
self.old_argv = sys.argv
def tearDown(self):
sys.argv = self.old_argv
def test_line_count(self):
with bear_test_module(), \
prepare_file(["#fixme"], None) as (lines, filename):
retval, output = execute_coala(coala_format.main, "coala-format",
"-c", os.devnull,
"-f", re.escape(filename),
"-b", "LineCountTestBear")
self.assertRegex(output, r'msg:This file has [0-9]+ lines.',
"coala-format output for line count should "
"not be empty")
self.assertEqual(retval, 1,
"coala-format must return exitcode 1 when it "
"yields results")
| CruiseDevice/coala | tests/coalaFormatTest.py | Python | agpl-3.0 | 1,124 | 0 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
**kwargs: Any
) -> HttpRequest:
api_version = "2019-10-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/providers/Microsoft.Resources/operations')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class Operations(object):
"""Operations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.resources.v2019_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
**kwargs: Any
) -> Iterable["_models.OperationListResult"]:
"""Lists all of the available Microsoft.Resources REST API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.resource.resources.v2019_10_01.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.Resources/operations'} # type: ignore
| Azure/azure-sdk-for-python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_10_01/operations/_operations.py | Python | mit | 5,377 | 0.004092 |
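A sketch of how this generated operation group is usually reached from the service client; the credential and subscription setup below are assumptions and not part of the generated file above.
# assumes azure-identity and azure-mgmt-resource are installed and credentials are configured
from azure.identity import DefaultAzureCredential
from azure.mgmt.resource import ResourceManagementClient
client = ResourceManagementClient(DefaultAzureCredential(), "<subscription-id>")
# client.operations is an instance of the Operations class defined above;
# list() returns an ItemPaged that follows next_link pages transparently.
for op in client.operations.list():
    print(op.name)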
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UserFeeds'
db.create_table(u'core_userfeeds', (
('user', self.gf('django.db.models.fields.related.OneToOneField')(related_name='feeds', unique=True, primary_key=True, to=orm['base.User'])),
('imported_items', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='imported_items', unique=True, null=True, to=orm['core.BaseFeed'])),
('sent_items', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='sent_items', unique=True, null=True, to=orm['core.BaseFeed'])),
('received_items', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='received_items', unique=True, null=True, to=orm['core.BaseFeed'])),
('written_items', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='written_items', unique=True, null=True, to=orm['core.BaseFeed'])),
))
db.send_create_signal('core', ['UserFeeds'])
# Adding M2M table for field blogs on 'UserFeeds'
m2m_table_name = db.shorten_name(u'core_userfeeds_blogs')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('userfeeds', models.ForeignKey(orm['core.userfeeds'], null=False)),
('basefeed', models.ForeignKey(orm['core.basefeed'], null=False))
))
db.create_unique(m2m_table_name, ['userfeeds_id', 'basefeed_id'])
# Adding model 'UserSubscriptions'
db.create_table(u'core_usersubscriptions', (
('user', self.gf('django.db.models.fields.related.OneToOneField')(related_name='subscriptions', unique=True, primary_key=True, to=orm['base.User'])),
('imported_items', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='imported_items', unique=True, null=True, to=orm['core.Subscription'])),
('sent_items', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='sent_items', unique=True, null=True, to=orm['core.Subscription'])),
('received_items', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='received_items', unique=True, null=True, to=orm['core.Subscription'])),
('written_items', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='written_items', unique=True, null=True, to=orm['core.Subscription'])),
))
db.send_create_signal('core', ['UserSubscriptions'])
# Adding M2M table for field blogs on 'UserSubscriptions'
m2m_table_name = db.shorten_name(u'core_usersubscriptions_blogs')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('usersubscriptions', models.ForeignKey(orm['core.usersubscriptions'], null=False)),
('subscription', models.ForeignKey(orm['core.subscription'], null=False))
))
db.create_unique(m2m_table_name, ['usersubscriptions_id', 'subscription_id'])
def backwards(self, orm):
# Deleting model 'UserFeeds'
db.delete_table(u'core_userfeeds')
# Removing M2M table for field blogs on 'UserFeeds'
db.delete_table(db.shorten_name(u'core_userfeeds_blogs'))
# Deleting model 'UserSubscriptions'
db.delete_table(u'core_usersubscriptions')
# Removing M2M table for field blogs on 'UserSubscriptions'
db.delete_table(db.shorten_name(u'core_usersubscriptions_blogs'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'base.user': {
'Meta': {'object_name': 'User'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'}),
'email_announcements': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
'hash_codes': ('jsonfield.fields.JSONField', [], {'default': "{'unsubscribe': 'a2a11d045c484d9cb16448cca4075f1d'}", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'register_data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'sent_emails': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.article': {
'Meta': {'object_name': 'Article', '_ormbases': ['core.BaseItem']},
u'baseitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseItem']", 'unique': 'True', 'primary_key': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'date_published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'is_orphaned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'pages_urls': ('jsonfield.fields.JSONField', [], {'default': "u'[]'", 'blank': 'True'}),
'publishers': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['base.User']", 'symmetrical': 'False', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200'}),
'url_absolute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'url_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'word_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'core.basefeed': {
'Meta': {'object_name': 'BaseFeed'},
'closed_reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
'date_last_fetch': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.BaseFeed']", 'null': 'True', 'blank': 'True'}),
'errors': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'fetch_interval': ('django.db.models.fields.IntegerField', [], {'default': '43200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_good': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_internal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.BaseItem']", 'symmetrical': 'False', 'blank': 'True'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.Language']", 'symmetrical': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'options': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.basefeed_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_nt': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'thumbnail_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True', 'blank': 'True'})
},
'core.baseitem': {
'Meta': {'object_name': 'BaseItem'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'default_rating': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.BaseItem']"}),
'excerpt': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'origin': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.baseitem_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'sources': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'sources_rel_+'", 'blank': 'True', 'to': "orm['core.BaseItem']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.SimpleTag']", 'symmetrical': 'False', 'blank': 'True'}),
'text_direction': ('django.db.models.fields.CharField', [], {'default': "u'ltr'", 'max_length': '3'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True', 'blank': 'True'})
},
'core.combinedfeed': {
'Meta': {'object_name': 'CombinedFeed'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']"})
},
'core.combinedfeedrule': {
'Meta': {'ordering': "('position',)", 'object_name': 'CombinedFeedRule'},
'check_error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'clone_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MailFeedRule']", 'null': 'True', 'blank': 'True'}),
'combinedfeed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.CombinedFeed']"}),
'feeds': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.BaseFeed']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'core.corepermissions': {
'Meta': {'object_name': 'CorePermissions'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'core.folder': {
'Meta': {'unique_together': "(('name', 'user', 'parent'),)", 'object_name': 'Folder'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.Folder']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']"})
},
'core.helpcontent': {
'Meta': {'ordering': "['ordering', 'id']", 'object_name': 'HelpContent'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_en': ('django.db.models.fields.TextField', [], {}),
'content_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'name_nt': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'core.historyentry': {
'Meta': {'object_name': 'HistoryEntry'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.historyentry_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']"})
},
'core.language': {
'Meta': {'object_name': 'Language'},
'dj_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '16'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Language']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.Language']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'core.mailaccount': {
'Meta': {'unique_together': "(('user', 'hostname', 'username'),)", 'object_name': 'MailAccount'},
'conn_error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_last_conn': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2007, 1, 1, 0, 0)'}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_usable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'port': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'use_ssl': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'core.mailfeed': {
'Meta': {'object_name': 'MailFeed', '_ormbases': ['core.BaseFeed']},
'account': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.MailAccount']", 'null': 'True', 'blank': 'True'}),
u'basefeed_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseFeed']", 'unique': 'True', 'primary_key': 'True'}),
'finish_action': ('django.db.models.fields.CharField', [], {'default': "u'markread'", 'max_length': '10'}),
'match_action': ('django.db.models.fields.CharField', [], {'default': "u'scrape'", 'max_length': '10'}),
'rules_operation': ('django.db.models.fields.CharField', [], {'default': "u'any'", 'max_length': '10'}),
'scrape_blacklist': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'scrape_whitelist': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'})
},
'core.mailfeedrule': {
'Meta': {'ordering': "('group', 'position')", 'object_name': 'MailFeedRule'},
'check_error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'clone_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MailFeedRule']", 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'group_operation': ('django.db.models.fields.CharField', [], {'default': "u'any'", 'max_length': '10', 'null': 'True', 'blank': 'True'}),
'header_field': ('django.db.models.fields.CharField', [], {'default': "u'any'", 'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mailfeed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MailFeed']"}),
'match_case': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'match_type': ('django.db.models.fields.CharField', [], {'default': "u'contains'", 'max_length': '10'}),
'match_value': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '1024'}),
'other_header': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'core.read': {
'Meta': {'unique_together': "(('user', 'item'),)", 'object_name': 'Read'},
'bookmark_type': ('django.db.models.fields.CharField', [], {'default': "u'U'", 'max_length': '2'}),
'check_set_subscriptions_131004_done': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'date_analysis': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_archived': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_auto_read': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_bookmarked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'date_fact': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_fun': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_knowhow': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_knowledge': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_number': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_prospective': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_quote': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_read': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_rules': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_starred': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_analysis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_auto_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_bookmarked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_fact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_fun': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_good': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_knowhow': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_knowledge': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_number': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_prospective': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_quote': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_rules': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_starred': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.BaseItem']"}),
'knowledge_type': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'rating': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'senders': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'senders'", 'symmetrical': 'False', 'to': u"orm['base.User']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.SimpleTag']", 'symmetrical': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']"})
},
'core.rssatomfeed': {
'Meta': {'object_name': 'RssAtomFeed', '_ormbases': ['core.BaseFeed']},
u'basefeed_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseFeed']", 'unique': 'True', 'primary_key': 'True'}),
'last_etag': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'last_modified': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.SimpleTag']", 'symmetrical': 'False', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200'}),
'website': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.WebSite']", 'null': 'True', 'blank': 'True'})
},
'core.simpletag': {
'Meta': {'unique_together': "(('name', 'language'),)", 'object_name': 'SimpleTag'},
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.SimpleTag']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Language']", 'null': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'origin_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'origin_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.SimpleTag']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'core.subscription': {
'Meta': {'unique_together': "(('feed', 'user'),)", 'object_name': 'Subscription'},
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.BaseFeed']"}),
'folders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.Folder']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.Read']", 'symmetrical': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.SimpleTag']", 'symmetrical': 'False', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'thumbnail_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']"})
},
'core.userfeeds': {
'Meta': {'object_name': 'UserFeeds'},
'blogs': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.BaseFeed']", 'null': 'True', 'blank': 'True'}),
'imported_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'imported_items'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"}),
'received_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'received_items'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"}),
'sent_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'sent_items'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'feeds'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['base.User']"}),
'written_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'written_items'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"})
},
'core.userimport': {
'Meta': {'object_name': 'UserImport', '_ormbases': ['core.HistoryEntry']},
'date_finished': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'historyentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.HistoryEntry']", 'unique': 'True', 'primary_key': 'True'}),
'lines': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'results': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'urls': ('django.db.models.fields.TextField', [], {})
},
'core.usersubscriptions': {
'Meta': {'object_name': 'UserSubscriptions'},
'blogs': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'blogs'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Subscription']"}),
'imported_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'imported_items'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"}),
'received_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'received_items'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"}),
'sent_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'sent_items'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'subscriptions'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['base.User']"}),
'written_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'written_items'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"})
},
'core.website': {
'Meta': {'object_name': 'WebSite'},
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.WebSite']", 'null': 'True', 'blank': 'True'}),
'fetch_limit_nr': ('django.db.models.fields.IntegerField', [], {'default': '16', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'mail_warned': ('jsonfield.fields.JSONField', [], {'default': "u'[]'", 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.WebSite']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_nt': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200', 'blank': 'True'})
}
}
complete_apps = ['core'] | 1flow/1flow | oneflow/core/migrations/0036_auto__add_userfeeds__add_usersubscriptions.py | Python | agpl-3.0 | 37,603 | 0.007872 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mock
import unittest
from airflow import DAG
from airflow import configuration
from airflow.sensors.sql_sensor import SqlSensor
from airflow.utils.timezone import datetime
configuration.load_test_config()
DEFAULT_DATE = datetime(2015, 1, 1)
TEST_DAG_ID = 'unit_test_sql_dag'
class SqlSensorTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE
}
self.dag = DAG(TEST_DAG_ID, default_args=args)
@unittest.skipUnless(
'mysql' in configuration.conf.get('core', 'sql_alchemy_conn'), "this is a mysql test")
def test_sql_sensor_mysql(self):
t = SqlSensor(
task_id='sql_sensor_check',
conn_id='mysql_default',
sql="SELECT count(1) FROM INFORMATION_SCHEMA.TABLES",
dag=self.dag
)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
@unittest.skipUnless(
'postgresql' in configuration.conf.get('core', 'sql_alchemy_conn'), "this is a postgres test")
def test_sql_sensor_postgres(self):
t = SqlSensor(
task_id='sql_sensor_check',
conn_id='postgres_default',
sql="SELECT count(1) FROM INFORMATION_SCHEMA.TABLES",
dag=self.dag
)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
@mock.patch('airflow.sensors.sql_sensor.BaseHook')
def test_sql_sensor_postgres_poke(self, mock_hook):
t = SqlSensor(
task_id='sql_sensor_check',
conn_id='postgres_default',
sql="SELECT 1",
)
mock_get_records = mock_hook.get_connection.return_value.get_hook.return_value.get_records
mock_get_records.return_value = []
self.assertFalse(t.poke(None))
mock_get_records.return_value = [['1']]
self.assertTrue(t.poke(None))
| sid88in/incubator-airflow | tests/sensors/test_sql_sensor.py | Python | apache-2.0 | 2,780 | 0.001799 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-02-25 19:51
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('shop', '0008_notification_recipient'),
]
operations = [
migrations.DeleteModel(
name='Email',
),
]
| divio/django-shop | shop/migrations/0009_delete_email.py | Python | bsd-3-clause | 361 | 0 |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Check DNSSEC trust chain.
# Todo: verify expiration dates
#
# Based on
# http://backreference.org/2010/11/17/dnssec-verification-with-dig/
# https://github.com/rthalley/dnspython/blob/master/tests/test_dnssec.py
import traceback
import sys
import time
import struct
import dns.name
import dns.query
import dns.dnssec
import dns.message
import dns.resolver
import dns.rdatatype
import dns.rdtypes.ANY.NS
import dns.rdtypes.ANY.CNAME
import dns.rdtypes.ANY.DLV
import dns.rdtypes.ANY.DNSKEY
import dns.rdtypes.ANY.DS
import dns.rdtypes.ANY.NSEC
import dns.rdtypes.ANY.NSEC3
import dns.rdtypes.ANY.NSEC3PARAM
import dns.rdtypes.ANY.RRSIG
import dns.rdtypes.ANY.SOA
import dns.rdtypes.ANY.TXT
import dns.rdtypes.IN.A
import dns.rdtypes.IN.AAAA
from dns.exception import DNSException
"""
Pure-Python version of dns.dnssec._validate_rsig
"""
import ecdsa
import rsakey
def python_validate_rrsig(rrset, rrsig, keys, origin=None, now=None):
from dns.dnssec import ValidationFailure, ECDSAP256SHA256, ECDSAP384SHA384
from dns.dnssec import _find_candidate_keys, _make_hash, _is_ecdsa, _is_rsa, _to_rdata, _make_algorithm_id
if isinstance(origin, (str, unicode)):
origin = dns.name.from_text(origin, dns.name.root)
for candidate_key in _find_candidate_keys(keys, rrsig):
if not candidate_key:
raise ValidationFailure, 'unknown key'
# For convenience, allow the rrset to be specified as a (name, rdataset)
# tuple as well as a proper rrset
if isinstance(rrset, tuple):
rrname = rrset[0]
rdataset = rrset[1]
else:
rrname = rrset.name
rdataset = rrset
if now is None:
now = time.time()
if rrsig.expiration < now:
raise ValidationFailure, 'expired'
if rrsig.inception > now:
raise ValidationFailure, 'not yet valid'
hash = _make_hash(rrsig.algorithm)
if _is_rsa(rrsig.algorithm):
keyptr = candidate_key.key
(bytes,) = struct.unpack('!B', keyptr[0:1])
keyptr = keyptr[1:]
if bytes == 0:
(bytes,) = struct.unpack('!H', keyptr[0:2])
keyptr = keyptr[2:]
rsa_e = keyptr[0:bytes]
rsa_n = keyptr[bytes:]
n = ecdsa.util.string_to_number(rsa_n)
e = ecdsa.util.string_to_number(rsa_e)
pubkey = rsakey.RSAKey(n, e)
sig = rrsig.signature
elif _is_ecdsa(rrsig.algorithm):
if rrsig.algorithm == ECDSAP256SHA256:
curve = ecdsa.curves.NIST256p
key_len = 32
digest_len = 32
elif rrsig.algorithm == ECDSAP384SHA384:
curve = ecdsa.curves.NIST384p
key_len = 48
digest_len = 48
else:
# shouldn't happen
raise ValidationFailure, 'unknown ECDSA curve'
keyptr = candidate_key.key
x = ecdsa.util.string_to_number(keyptr[0:key_len])
y = ecdsa.util.string_to_number(keyptr[key_len:key_len * 2])
assert ecdsa.ecdsa.point_is_valid(curve.generator, x, y)
point = ecdsa.ellipticcurve.Point(curve.curve, x, y, curve.order)
verifying_key = ecdsa.keys.VerifyingKey.from_public_point(point, curve)
r = rrsig.signature[:key_len]
s = rrsig.signature[key_len:]
sig = ecdsa.ecdsa.Signature(ecdsa.util.string_to_number(r),
ecdsa.util.string_to_number(s))
else:
raise ValidationFailure, 'unknown algorithm %u' % rrsig.algorithm
hash.update(_to_rdata(rrsig, origin)[:18])
hash.update(rrsig.signer.to_digestable(origin))
if rrsig.labels < len(rrname) - 1:
suffix = rrname.split(rrsig.labels + 1)[1]
rrname = dns.name.from_text('*', suffix)
rrnamebuf = rrname.to_digestable(origin)
rrfixed = struct.pack('!HHI', rdataset.rdtype, rdataset.rdclass,
rrsig.original_ttl)
rrlist = sorted(rdataset);
for rr in rrlist:
hash.update(rrnamebuf)
hash.update(rrfixed)
rrdata = rr.to_digestable(origin)
rrlen = struct.pack('!H', len(rrdata))
hash.update(rrlen)
hash.update(rrdata)
digest = hash.digest()
if _is_rsa(rrsig.algorithm):
digest = _make_algorithm_id(rrsig.algorithm) + digest
if pubkey.verify(bytearray(sig), bytearray(digest)):
return
elif _is_ecdsa(rrsig.algorithm):
diglong = ecdsa.util.string_to_number(digest)
if verifying_key.pubkey.verifies(diglong, sig):
return
else:
raise ValidationFailure, 'unknown algorithm %u' % rrsig.algorithm
raise ValidationFailure, 'verify failure'
# replace validate_rrsig
dns.dnssec._validate_rrsig = python_validate_rrsig
dns.dnssec.validate_rrsig = python_validate_rrsig
dns.dnssec.validate = dns.dnssec._validate
from util import print_error
# hard-coded trust anchors (root KSKs)
trust_anchors = [
# KSK-2017:
dns.rrset.from_text('.', 1 , 'IN', 'DNSKEY', '257 3 8 AwEAAaz/tAm8yTn4Mfeh5eyI96WSVexTBAvkMgJzkKTOiW1vkIbzxeF3+/4RgWOq7HrxRixHlFlExOLAJr5emLvN7SWXgnLh4+B5xQlNVz8Og8kvArMtNROxVQuCaSnIDdD5LKyWbRd2n9WGe2R8PzgCmr3EgVLrjyBxWezF0jLHwVN8efS3rCj/EWgvIWgb9tarpVUDK/b58Da+sqqls3eNbuv7pr+eoZG+SrDK6nWeL3c6H5Apxz7LjVc1uTIdsIXxuOLYA4/ilBmSVIzuDWfdRUfhHdY6+cn8HFRm+2hM8AnXGXws9555KrUB5qihylGa8subX2Nn6UwNR1AkUTV74bU='),
# KSK-2010:
dns.rrset.from_text('.', 15202, 'IN', 'DNSKEY', '257 3 8 AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjF FVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoX bfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaD X6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpz W5hOA2hzCTMjJPJ8LbqF6dsV6DoBQzgul0sGIcGOYl7OyQdXfZ57relS Qageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulq QxA+Uk1ihz0='),
]
def check_query(ns, sub, _type, keys):
q = dns.message.make_query(sub, _type, want_dnssec=True)
response = dns.query.tcp(q, ns, timeout=5)
assert response.rcode() == 0, 'No answer'
answer = response.answer
assert len(answer) != 0, ('No DNS record found', sub, _type)
assert len(answer) != 1, ('No DNSSEC record found', sub, _type)
if answer[0].rdtype == dns.rdatatype.RRSIG:
rrsig, rrset = answer
elif answer[1].rdtype == dns.rdatatype.RRSIG:
rrset, rrsig = answer
else:
raise BaseException('No signature set in record')
if keys is None:
keys = {dns.name.from_text(sub):rrset}
dns.dnssec.validate(rrset, rrsig, keys)
return rrset
def get_and_validate(ns, url, _type):
# get trusted root key
root_rrset = None
for dnskey_rr in trust_anchors:
try:
# Check if there is a valid signature for the root dnskey
root_rrset = check_query(ns, '', dns.rdatatype.DNSKEY, {dns.name.root: dnskey_rr})
break
except dns.dnssec.ValidationFailure:
# It's OK as long as one key validates
continue
if not root_rrset:
raise dns.dnssec.ValidationFailure('None of the trust anchors found in DNS')
keys = {dns.name.root: root_rrset}
# top-down verification
parts = url.split('.')
for i in range(len(parts), 0, -1):
sub = '.'.join(parts[i-1:])
name = dns.name.from_text(sub)
# If server is authoritative, don't fetch DNSKEY
query = dns.message.make_query(sub, dns.rdatatype.NS)
response = dns.query.udp(query, ns, 3)
assert response.rcode() == dns.rcode.NOERROR, "query error"
rrset = response.authority[0] if len(response.authority) > 0 else response.answer[0]
rr = rrset[0]
if rr.rdtype == dns.rdatatype.SOA:
continue
# get DNSKEY (self-signed)
rrset = check_query(ns, sub, dns.rdatatype.DNSKEY, None)
# get DS (signed by parent)
ds_rrset = check_query(ns, sub, dns.rdatatype.DS, keys)
# verify that a signed DS validates DNSKEY
for ds in ds_rrset:
for dnskey in rrset:
htype = 'SHA256' if ds.digest_type == 2 else 'SHA1'
good_ds = dns.dnssec.make_ds(name, dnskey, htype)
if ds == good_ds:
break
else:
continue
break
else:
raise BaseException("DS does not match DNSKEY")
# set key for next iteration
keys = {name: rrset}
# get TXT record (signed by zone)
rrset = check_query(ns, url, _type, keys)
return rrset
def query(url, rtype):
# 8.8.8.8 is Google's public DNS server
nameservers = ['8.8.8.8']
ns = nameservers[0]
try:
out = get_and_validate(ns, url, rtype)
validated = True
except BaseException as e:
#traceback.print_exc(file=sys.stderr)
print_error("DNSSEC error:", str(e))
resolver = dns.resolver.get_default_resolver()
out = resolver.query(url, rtype)
validated = False
return out, validated
| fireduck64/electrum | lib/dnssec.py | Python | mit | 10,409 | 0.00221 |
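A usage sketch for the module above: query() returns the answer rrset plus a flag indicating whether the full DNSSEC chain validated, falling back to a plain resolver lookup when validation fails. The caller below is hypothetical and, like the module itself, assumes Python 2 and Electrum's lib package (for the util and rsakey imports).
import dns.rdatatype
import dnssec  # the module shown above
answer, validated = dnssec.query('getmonero.org', dns.rdatatype.TXT)
for rr in answer:
    print rr.to_text()
print 'DNSSEC validated:', validated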
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2010 - 2014 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import os
import sys
import shutil
import openerp
from openerp import api
from openerp.osv import orm, fields
from openerp.addons.runbot.runbot import mkdirs
_logger = logging.getLogger(__name__)
MAGIC_PID_RUN_NEXT_JOB = -2
def custom_build(func):
"""Decorator for functions which should be overwritten only if
is_custom_build is enabled in repo.
"""
def custom_func(self, cr, uid, ids, context=None):
args = [
('id', 'in', ids),
('branch_id.repo_id.is_custom_build', '=', True)
]
custom_ids = self.search(cr, uid, args, context=context)
regular_ids = list(set(ids) - set(custom_ids))
ret = None
if regular_ids:
regular_func = getattr(super(runbot_build, self), func.func_name)
ret = regular_func(cr, uid, regular_ids, context=context)
if custom_ids:
assert ret is None
ret = func(self, cr, uid, custom_ids, context=context)
return ret
return custom_func
class runbot_build(orm.Model):
_inherit = "runbot.build"
_columns = {
'prebuilt': fields.boolean("Prebuilt"),
}
def job_00_init(self, cr, uid, build, lock_path, log_path):
res = super(runbot_build, self).job_00_init(
cr, uid, build, lock_path, log_path
)
if build.branch_id.repo_id.is_custom_build:
build.pre_build(lock_path, log_path)
build.prebuilt = True
return res
def job_10_test_base(self, cr, uid, build, lock_path, log_path):
if build.branch_id.repo_id.skip_test_jobs:
_logger.info('skipping job_10_test_base')
return MAGIC_PID_RUN_NEXT_JOB
else:
return super(runbot_build, self).job_10_test_base(
cr, uid, build, lock_path, log_path
)
def job_20_test_all(self, cr, uid, build, lock_path, log_path):
if build.branch_id.repo_id.skip_test_jobs:
_logger.info('skipping job_20_test_all')
with open(log_path, 'w') as f:
f.write('consider tests as passed: '
'.modules.loading: Modules loaded.')
return MAGIC_PID_RUN_NEXT_JOB
else:
return super(runbot_build, self).job_20_test_all(
cr, uid, build, lock_path, log_path
)
def sub_cmd(self, build, cmd):
if not cmd:
return []
if isinstance(cmd, basestring):
cmd = cmd.split()
internal_vals = {
'custom_build_dir': build.repo_id.custom_build_dir or '',
'custom_server_path': build.repo_id.custom_server_path,
'other_repo_path': build.repo_id.other_repo_id.path or '',
'build_dest': build.dest,
}
return [i % internal_vals for i in cmd]
def pre_build(self, cr, uid, ids, lock_path, log_path, context=None):
"""Run pre-build command if there is one
Substitute path variables after splitting command to avoid problems
with spaces in internal variables.
Run command in build path to avoid relative path issues.
"""
pushd = os.getcwd()
try:
for build in self.browse(cr, uid, ids, context=context):
if build.prebuilt:
continue
cmd = self.sub_cmd(build, build.repo_id.custom_pre_build_cmd)
if not cmd:
continue
os.chdir(build.path())
self.spawn(cmd, lock_path, log_path)
finally:
os.chdir(pushd)
@custom_build
def checkout(self, cr, uid, ids, context=None):
"""Checkout in custom build directories if they are specified
Do same as superclass except for git_export path.
"""
for build in self.browse(cr, uid, ids, context=context):
if build.prebuilt:
continue
# starts from scratch
if os.path.isdir(build.path()):
shutil.rmtree(build.path())
# runbot log path
mkdirs([build.path("logs")])
# checkout branch
build_path = build.path()
custom_build_dir = build.repo_id.custom_build_dir
if custom_build_dir:
mkdirs([build.path(custom_build_dir)])
build_path = os.path.join(build_path, custom_build_dir)
build.repo_id.git_export(build.name, build_path)
@custom_build
def cmd(self, cr, uid, ids, context=None):
"""Get server start script from build config
Overwrite superclass completely
Specify database user in the case of custom config, to allow viewing
after db has been created by Odoo (using current user).
Disable multiworker
"""
build = self.browse(cr, uid, ids[0], context=context)
server_path = build.path(build.repo_id.custom_server_path)
mods = build.repo_id.modules or "base"
params = self.sub_cmd(build, build.repo_id.custom_server_params)
# commandline
cmd = [
sys.executable,
server_path,
"--no-xmlrpcs",
"--xmlrpc-port=%d" % build.port,
"--db_user=%s" % openerp.tools.config['db_user'],
"--workers=0",
] + params
return cmd, mods
@api.cr_uid_ids_context
def server(self, cr, uid, ids, *l, **kw):
for build in self.browse(cr, uid, ids, context=None):
if build.repo_id.is_custom_build:
custom_odoo_path = build.repo_id.custom_odoo_path
if custom_odoo_path and\
os.path.exists(build.path(custom_odoo_path)):
return build.path(custom_odoo_path, *l)
return super(runbot_build, self).server(cr, uid, ids, *l, **kw)
| open-synergy/runbot-addons | runbot_build_instructions/runbot_build.py | Python | agpl-3.0 | 6,892 | 0.000145 |
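To illustrate the split-then-substitute behaviour that sub_cmd and the pre_build docstring above describe, a standalone sketch (the command template and values are made up):
internal_vals = {
    'custom_build_dir': 'parts/my project',   # value with a space that naive splitting would break
    'custom_server_path': 'odoo/odoo-bin',
    'other_repo_path': '',
    'build_dest': '12345-custom',
}
cmd = "python %(custom_server_path)s --addons-path=%(custom_build_dir)s"
# the template is split first, then each token is substituted,
# so spaces inside substituted values survive intact
print([part % internal_vals for part in cmd.split()])
# -> ['python', 'odoo/odoo-bin', '--addons-path=parts/my project']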
from __future__ import absolute_import
from Screens.Screen import Screen
from Components.ConfigList import ConfigListScreen
from Components.config import ConfigSubsection, ConfigSelection, getConfigListEntry
from Components.SystemInfo import SystemInfo
from Components.Task import job_manager
from Screens.InfoBarGenerics import InfoBarNotifications
import Screens.Standby
import Tools.Notifications
from boxbranding import getMachineBrand, getMachineName
class JobView(InfoBarNotifications, Screen, ConfigListScreen):
def __init__(self, session, job, parent=None, cancelable = True, backgroundable = True, afterEventChangeable = True , afterEvent="nothing"):
from Components.Sources.StaticText import StaticText
from Components.Sources.Progress import Progress
from Components.Sources.Boolean import Boolean
from Components.ActionMap import ActionMap
Screen.__init__(self, session, parent)
Screen.setTitle(self, _("Job View"))
InfoBarNotifications.__init__(self)
ConfigListScreen.__init__(self, [])
self.parent = parent
self.job = job
if afterEvent:
self.job.afterEvent = afterEvent
self["job_name"] = StaticText(job.name)
self["job_progress"] = Progress()
self["job_task"] = StaticText()
self["summary_job_name"] = StaticText(job.name)
self["summary_job_progress"] = Progress()
self["summary_job_task"] = StaticText()
self["job_status"] = StaticText()
self["finished"] = Boolean()
self["cancelable"] = Boolean(cancelable)
self["backgroundable"] = Boolean(backgroundable)
self["key_blue"] = StaticText(_("Background"))
self.onShow.append(self.windowShow)
self.onHide.append(self.windowHide)
self["setupActions"] = ActionMap(["ColorActions", "SetupActions"],
{
"green": self.ok,
"red": self.abort,
"blue": self.background,
"cancel": self.abort,
"ok": self.ok,
}, -2)
self.settings = ConfigSubsection()
if SystemInfo["DeepstandbySupport"]:
shutdownString = _("go to deep standby")
else:
shutdownString = _("shut down")
self.settings.afterEvent = ConfigSelection(choices = [("nothing", _("do nothing")), ("close", _("Close")), ("standby", _("go to standby")), ("deepstandby", shutdownString)], default = self.job.afterEvent or "nothing")
self.job.afterEvent = self.settings.afterEvent.value
self.afterEventChangeable = afterEventChangeable
self.setupList()
self.state_changed()
def setupList(self):
if self.afterEventChangeable:
self["config"].setList( [ getConfigListEntry(_("After event"), self.settings.afterEvent) ])
else:
self["config"].hide()
self.job.afterEvent = self.settings.afterEvent.value
def keyLeft(self):
ConfigListScreen.keyLeft(self)
self.setupList()
def keyRight(self):
ConfigListScreen.keyRight(self)
self.setupList()
def windowShow(self):
job_manager.visible = True
self.job.state_changed.append(self.state_changed)
def windowHide(self):
job_manager.visible = False
if len(self.job.state_changed) > 0:
self.job.state_changed.remove(self.state_changed)
def state_changed(self):
j = self.job
self["job_progress"].range = j.end
self["summary_job_progress"].range = j.end
self["job_progress"].value = j.progress
self["summary_job_progress"].value = j.progress
#print "JobView::state_changed:", j.end, j.progress
self["job_status"].text = j.getStatustext()
if j.status == j.IN_PROGRESS:
self["job_task"].text = j.tasks[j.current_task].name
self["summary_job_task"].text = j.tasks[j.current_task].name
else:
self["job_task"].text = ""
self["summary_job_task"].text = j.getStatustext()
if j.status in (j.FINISHED, j.FAILED):
self.performAfterEvent()
self["backgroundable"].boolean = False
if j.status == j.FINISHED:
self["finished"].boolean = True
self["cancelable"].boolean = False
elif j.status == j.FAILED:
self["cancelable"].boolean = True
def background(self):
if self["backgroundable"].boolean:
self.close(True)
def ok(self):
if self.job.status in (self.job.FINISHED, self.job.FAILED):
self.close(False)
else:
self.background()
def abort(self):
if self.job.status == self.job.NOT_STARTED:
job_manager.active_jobs.remove(self.job)
self.close(False)
elif self.job.status == self.job.IN_PROGRESS and self["cancelable"].boolean == True:
self.job.cancel()
else:
self.close(False)
def performAfterEvent(self):
self["config"].hide()
if self.settings.afterEvent.value == "nothing":
return
elif self.settings.afterEvent.value == "close" and self.job.status == self.job.FINISHED:
self.close(False)
from Screens.MessageBox import MessageBox
if self.settings.afterEvent.value == "deepstandby":
if not Screens.Standby.inTryQuitMainloop:
Tools.Notifications.AddNotificationWithCallback(self.sendTryQuitMainloopNotification, MessageBox, _("A sleep timer wants to shut down\nyour %s %s. Shutdown now?") % (getMachineBrand(), getMachineName()), timeout = 20)
elif self.settings.afterEvent.value == "standby":
if not Screens.Standby.inStandby:
Tools.Notifications.AddNotificationWithCallback(self.sendStandbyNotification, MessageBox, _("A sleep timer wants to set your\n%s %s to standby. Do that now?") % (getMachineBrand(), getMachineName()), timeout = 20)
def checkNotifications(self):
InfoBarNotifications.checkNotifications(self)
if not Tools.Notifications.notifications:
if self.settings.afterEvent.value == "close" and self.job.status == self.job.FAILED:
self.close(False)
def sendStandbyNotification(self, answer):
if answer:
Tools.Notifications.AddNotification(Screens.Standby.Standby)
def sendTryQuitMainloopNotification(self, answer):
if answer:
Tools.Notifications.AddNotification(Screens.Standby.TryQuitMainloop, 1)
| atvcaptain/enigma2 | lib/python/Screens/TaskView.py | Python | gpl-2.0 | 5,728 | 0.027235 |
import unittest
import unittest.mock as mock
import dice
import dice_config as dcfg
import dice_exceptions as dexc
class DiceInputVerificationTest(unittest.TestCase):
def test_dice_roll_input_wod(self):
examples = {'!r 5':[5, 10, None, 10, 8, 'wod', None],
'!r 2000':[2000, 10, None, 10, 8, 'wod', None],
'!r 2d8':[2, 8, None, None, None, 'wod', None],
'!r 7d6x4':[7, 6, None, 4, None, 'wod', None],
'!r 5000d700x700':[5000, 700, None, 700, None, 'wod', None],
'!r 15d20?20':[15, 20, None, None, 20, 'wod', None],
'!r 39d10x5?8':[39, 10, None, 5, 8, 'wod', None],
'!r 1d4x4?4':[1, 4, None, 4, 4, 'wod', None],
'!r 6d6+':[6, 6, 0, None, None, 'wod', None],
'!r 5d32+5':[5, 32, 5, None, None, 'wod', None],
'!r 17d4-12':[17, 4, -12, None, None, 'wod', None],
'!r 3d12+x12':[3, 12, 0, 12, None, 'wod', None],
'!r 10d20-7?15':[10, 20, -7, None, 15, 'wod', None],
'!r 768d37+33x5?23':[768, 37, 33, 5, 23, 'wod', None]}
for example, value in examples.items():
n, d, m, x, s, mode, cmd_msg = dice.dice_input_verification(example)
self.assertEqual([n, d, m, x, s, mode, cmd_msg], value)
def test_dice_roll_input_simple(self):
examples = {'!r 7':[7, 6, 0, None, None, 'simple', None],
'!r 2000':[2000, 6, 0, None, None, 'simple', None],
'!r 2d8':[2, 8, None, None, None, 'simple', None],
'!r 7d6x4':[7, 6, None, 4, None, 'simple', None],
'!r 8000d899x899':[8000, 899, None, 899, None, 'simple', None],
'!r 15d20?20':[15, 20, None, None, 20, 'simple', None],
'!r 39d10x5?8':[39, 10, None, 5, 8, 'simple', None],
'!r 1d4x4?4':[1, 4, None, 4, 4, 'simple', None],
'!r 6d6+':[6, 6, 0, None, None, 'simple', None],
'!r 5d32+5':[5, 32, 5, None, None, 'simple', None],
'!r 17d4-12':[17, 4, -12, None, None, 'simple', None],
'!r 3d12+x12':[3, 12, 0, 12, None, 'simple', None],
'!r 10d20-7?15':[10, 20, -7, None, 15, 'simple', None],
'!r 768d37+33x5?23':[768, 37, 33, 5, 23, 'simple', None]}
for example, value in examples.items():
n, d, m, x, s, mode, cmd_msg = dice.dice_input_verification(example, 'simple')
self.assertEqual([n, d, m, x, s, mode, cmd_msg], value)
def test_dice_options_help(self):
examples = {'!r help': [None, None, None, None, None, dcfg.mode, 'Find all available commands at:'
'\nhttps://github.com/brmedeiros/dicey9000/blob/master/README.md']}
for example, value in examples.items():
n, d, m, x, s, mode, cmd_msg = dice.dice_input_verification(example, dcfg.mode)
self.assertEqual([n, d, m, x, s, mode, cmd_msg], value)
def test_dice_options_mode(self):
examples = {'!r set wod': [None, None, None, None, None,
'wod', 'Default mode (!r n) set to World of Darksness (WoD)'],
'!r set simple': [None, None, None, None, None,
'simple', 'Default mode (!r n) set to simple (nd6)']}
for dmode in ['wod', 'simple']:
for example, value in examples.items():
n, d, m, x, s, mode, cmd_msg = dice.dice_input_verification(example, dmode)
self.assertEqual([n, d, m, x, s, mode, cmd_msg], value)
def test_dice_input_exception(self):
examples = ['!r ', '!r dmeoamdef', '!r kelf laij', '!r 2 3', '!r 6dz','!r 30dx', '!r 5d7x7?', '!r 9d10?',
'!r -10', '!r -6d8', '!r 6d8x?10', '!r 12d12x18?', '!r set ', '!r set help', '!r set akneoi',
'!r 3d6 help', '!r set 6d8?4 wod', '!r 6d12-', '!r 8d4-45?+', '!r 12d6+8-9', '!r 8d20-923+1x10?15',
'!r 6+','!r 5+2', '!r 7-', '!r 12-3', '!r 20x4', '!r 25?12', '!r 2+7x4?4', '!r 5-12x15?20']
for mode in ['wod', 'simple']:
for example in examples:
self.assertRaises(dexc.RollInputError, dice.dice_input_verification, example, mode)
def test_exploding_dice_exception(self):
examples = ['!r 5d8x9', '!r 12d60x100', '!r 1d6x9?4', '!r 78d5+x43', '!r 6d12-10x15', '!r 8d20+1x22?20']
for mode in ['wod', 'simple']:
for example in examples:
self.assertRaises(dexc.ExplodingDiceError, dice.dice_input_verification, example, mode)
def test_exploding_dice_too_small_exception(self):
examples = ['!r 5d8x1', '!r 8d6x2', '!r 3d70x1?10', '!r 10d2x2?2', '!r 78d5+x2', '!r 6d12-10x1',
'!r 8d20+1x2?20']
for mode in ['wod', 'simple']:
for example in examples:
self.assertRaises(dexc.ExplodingDiceTooSmallError, dice.dice_input_verification, example, mode)
def test_success_condition_exception(self):
examples = ['!r 2d8?9', '!r 2d15?55', '!r 65d10x6?11', '!r 32d5x5?100', '!r 78d5+?6', '!r 6d12-10?45',
'!r 8d20+1x18?200']
for mode in ['wod', 'simple']:
for example in examples:
self.assertRaises(dexc.SuccessConditionError, dice.dice_input_verification, example, mode)
def test_dice_type_exception(self):
examples = ['!r 2d0', '!r 50d0?55', '!r 6d0x6?11', '!r 32d0x5?100', '!r 78d0+?6', '!r 6d0-10?45',
'!r 8d0+1x18?200']
for mode in ['wod', 'simple']:
for example in examples:
self.assertRaises(dexc.DiceTypeError, dice.dice_input_verification, example, mode)
class DiceRollTest(unittest.TestCase):
@mock.patch('random.randint')
def test_roll_dice(self, random_call):
results = [1, 4, 6, 6, 2, 3, 5]
random_call.side_effect = results
target = dice.DiceRoll(7, 6, None, None, None)
target.roll_dice()
self.assertEqual(7, target.number_of_dice)
self.assertEqual(7, len(target.results))
for i, result in enumerate(results):
self.assertEqual(result, target.results[i])
self.assertEqual(str(result), target.formated_results[i])
@mock.patch('random.randint')
def test_total(self, random_call):
results = [1, 10, 5, 4, 10]
random_call.side_effect = results
examples = [0, 5, -10, 22, -50]
for example in examples:
target = dice.DiceRoll(5, 10, example, None, None)
target.roll_dice()
self.assertEqual(example, target.roll_modifier)
self.assertEqual(sum(results) + example, target.total)
@mock.patch('random.randint')
def test_explode(self, random_call):
results = [1, 12, 5, 4, 7, 6]
random_call.side_effect = results
target = dice.DiceRoll(6, 12, None, 12, None)
target.roll_dice()
self.assertEqual(12, target.explode_value)
self.assertEqual(len(results)+1, len(target.results))
| brmedeiros/dicey9000 | tests.py | Python | mit | 7,240 | 0.008702 |
"""
Core Linear Algebra Tools
=========================
=============== ==========================================================
Linear algebra basics
==========================================================================
norm Vector or matrix norm
inv Inverse of a square matrix
solve Solve a linear system of equations
det Determinant of a square matrix
slogdet Logarithm of the determinant of a square matrix
lstsq Solve linear least-squares problem
pinv Pseudo-inverse (Moore-Penrose) calculated using a singular
value decomposition
matrix_power Integer power of a square matrix
=============== ==========================================================
=============== ==========================================================
Eigenvalues and decompositions
==========================================================================
eig Eigenvalues and vectors of a square matrix
eigh Eigenvalues and eigenvectors of a Hermitian matrix
eigvals Eigenvalues of a square matrix
eigvalsh Eigenvalues of a Hermitian matrix
qr QR decomposition of a matrix
svd Singular value decomposition of a matrix
cholesky Cholesky decomposition of a matrix
=============== ==========================================================
=============== ==========================================================
Tensor operations
==========================================================================
tensorsolve Solve a linear tensor equation
tensorinv Calculate an inverse of a tensor
=============== ==========================================================
=============== ==========================================================
Exceptions
==========================================================================
LinAlgError Indicates a failed linear algebra operation
=============== ==========================================================
"""
# To get sub-modules
from info import __doc__
from linalg import *
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
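# Minimal usage sketch, added for illustration only (not part of the original module);
# it assumes NumPy is importable and runs only when this file is executed directly.
if __name__ == "__main__":
    import numpy as _np
    _a = _np.array([[3.0, 1.0], [1.0, 2.0]])
    _b = _np.array([9.0, 8.0])
    _x = _np.linalg.solve(_a, _b)          # solve a @ x = b
    assert _np.allclose(_np.dot(_a, _x), _b)
    print(_np.linalg.det(_a))              # determinant, 5.0 for this matrix
    print(_np.linalg.eigvalsh(_a))         # eigenvalues of the symmetric matrix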
| devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/numpy/linalg/__init__.py | Python | agpl-3.0 | 2,178 | 0 |
## @file
# This file defines the common parsing functions used when parsing the
# Inf/Dsc/Makefile files
#
# Copyright (c) 2008 - 2014, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Common.LongFilePathOs as os, re
import Common.EdkLogger as EdkLogger
from Common.DataType import *
from CommonDataClass.DataClass import *
from Common.String import CleanString, GetSplitValueList, ReplaceMacro
import EotGlobalData
from Common.Misc import sdict
from Common.String import GetSplitList
from Common.LongFilePathSupport import OpenLongFilePath as open
## PreProcess() method
#
# Pre process a file
#
# 1. Remove all comments
# 2. Merge multi-line code into one line
#
# @param Filename: Name of the file to be parsed
# @param MergeMultipleLines: Switch for if merge multiple lines
# @param LineNo: Default line no
#
# @return Lines: The file contents after removing comments
#
def PreProcess(Filename, MergeMultipleLines = True, LineNo = -1):
Lines = []
Filename = os.path.normpath(Filename)
if not os.path.isfile(Filename):
EdkLogger.error("Eot", EdkLogger.FILE_NOT_FOUND, ExtraData=Filename)
IsFindBlockComment = False
IsFindBlockCode = False
ReservedLine = ''
ReservedLineLength = 0
for Line in open(Filename, 'r'):
Line = Line.strip()
# Remove comment block
if Line.find(TAB_COMMENT_EDK_START) > -1:
ReservedLine = GetSplitList(Line, TAB_COMMENT_EDK_START, 1)[0]
IsFindBlockComment = True
if Line.find(TAB_COMMENT_EDK_END) > -1:
Line = ReservedLine + GetSplitList(Line, TAB_COMMENT_EDK_END, 1)[1]
ReservedLine = ''
IsFindBlockComment = False
if IsFindBlockComment:
Lines.append('')
continue
# Remove comments at tail and remove spaces again
Line = CleanString(Line)
if Line == '':
Lines.append('')
continue
if MergeMultipleLines:
# Add multiple lines to one line
if IsFindBlockCode and Line[-1] != TAB_SLASH:
ReservedLine = (ReservedLine + TAB_SPACE_SPLIT + Line).strip()
Lines.append(ReservedLine)
                # pad with one empty entry per merged continuation line so the
                # returned list keeps one entry per source line
                for Index in range(0, ReservedLineLength):
Lines.append('')
ReservedLine = ''
ReservedLineLength = 0
IsFindBlockCode = False
continue
if Line[-1] == TAB_SLASH:
ReservedLine = ReservedLine + TAB_SPACE_SPLIT + Line[0:-1].strip()
ReservedLineLength = ReservedLineLength + 1
IsFindBlockCode = True
continue
Lines.append(Line)
return Lines
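# Illustrative sketch (hypothetical input, not taken from a real package): for a file
# whose three lines are
#     DEFINE FOO = \
#     BAR
#     # a comment
# PreProcess() returns ['DEFINE FOO = BAR', '', ''] -- continued lines are merged into
# one entry and every consumed or comment-only line is kept as an empty string, so the
# returned list still holds one entry per source line.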
## AddToGlobalMacro() method
#
# Add a macro to EotGlobalData.gMACRO
#
# @param Name: Name of the macro
# @param Value: Value of the macro
#
def AddToGlobalMacro(Name, Value):
Value = ReplaceMacro(Value, EotGlobalData.gMACRO, True)
EotGlobalData.gMACRO[Name] = Value
## AddToSelfMacro() method
#
# Parse a line of macro definition and add it to a macro set
#
# @param SelfMacro: The self macro set
# @param Line: The line of a macro definition
#
# @return Name: Name of macro
# @return Value: Value of macro
#
def AddToSelfMacro(SelfMacro, Line):
Name, Value = '', ''
List = GetSplitValueList(Line, TAB_EQUAL_SPLIT, 1)
if len(List) == 2:
Name = List[0]
Value = List[1]
Value = ReplaceMacro(Value, EotGlobalData.gMACRO, True)
Value = ReplaceMacro(Value, SelfMacro, True)
SelfMacro[Name] = Value
return (Name, Value)
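# For example, AddToSelfMacro(Macros, 'EFI_SOURCE = $(WORKSPACE)/EdkCompat') would store
# the expanded value under Macros['EFI_SOURCE'] and return the (name, value) pair; the
# macro name and path shown here are hypothetical.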
## GetIncludeListOfFile() method
#
# Get the include path list for a source file
#
# 1. Find the source file belongs to which INF file
# 2. Find the inf's package
# 3. Return the include path list of the package
#
# @param WorkSpace: WORKSPACE path
# @param Filepath: File path
# @param Db: Eot database
#
# @return IncludeList: A list of include directories
#
def GetIncludeListOfFile(WorkSpace, Filepath, Db):
IncludeList = []
Filepath = os.path.normpath(Filepath)
SqlCommand = """
select Value1 from Inf where Model = %s and BelongsToFile in(
select distinct B.BelongsToFile from File as A left join Inf as B
where A.ID = B.BelongsToFile and B.Model = %s and (A.Path || '%s' || B.Value1) = '%s')""" \
% (MODEL_META_DATA_PACKAGE, MODEL_EFI_SOURCE_FILE, '\\', Filepath)
RecordSet = Db.TblFile.Exec(SqlCommand)
for Record in RecordSet:
DecFullPath = os.path.normpath(os.path.join(WorkSpace, Record[0]))
(DecPath, DecName) = os.path.split(DecFullPath)
SqlCommand = """select Value1 from Dec where BelongsToFile =
(select ID from File where FullPath = '%s') and Model = %s""" \
% (DecFullPath, MODEL_EFI_INCLUDE)
NewRecordSet = Db.TblDec.Exec(SqlCommand)
for NewRecord in NewRecordSet:
IncludePath = os.path.normpath(os.path.join(DecPath, NewRecord[0]))
if IncludePath not in IncludeList:
IncludeList.append(IncludePath)
return IncludeList
## GetTableList() method
#
# Search table file and find all small tables
#
# @param FileModelList: Model code for the file list
# @param Table: Table to insert records
# @param Db: Eot database
#
# @return TableList: A list of tables
#
def GetTableList(FileModelList, Table, Db):
TableList = []
SqlCommand = """select ID, FullPath from File where Model in %s""" % str(FileModelList)
RecordSet = Db.TblFile.Exec(SqlCommand)
for Record in RecordSet:
TableName = Table + str(Record[0])
TableList.append([TableName, Record[1]])
return TableList
## GetAllIncludeDirs() method
#
# Find all Include directories
#
# @param Db: Eot database
#
# @return IncludeList: A list of include directories
#
def GetAllIncludeDirs(Db):
IncludeList = []
SqlCommand = """select distinct Value1 from Inf where Model = %s order by Value1""" % MODEL_EFI_INCLUDE
RecordSet = Db.TblInf.Exec(SqlCommand)
for Record in RecordSet:
IncludeList.append(Record[0])
return IncludeList
## GetAllIncludeFiles() method
#
# Find all Include files
#
# @param Db: Eot database
#
# @return IncludeFileList: A list of include files
#
def GetAllIncludeFiles(Db):
IncludeList = GetAllIncludeDirs(Db)
IncludeFileList = []
for Dir in IncludeList:
if os.path.isdir(Dir):
SubDir = os.listdir(Dir)
for Item in SubDir:
if os.path.isfile(Item):
IncludeFileList.append(Item)
return IncludeFileList
## GetAllSourceFiles() method
#
# Find all source files
#
# @param Db: Eot database
#
# @return SourceFileList: A list of source files
#
def GetAllSourceFiles(Db):
SourceFileList = []
SqlCommand = """select distinct Value1 from Inf where Model = %s order by Value1""" % MODEL_EFI_SOURCE_FILE
RecordSet = Db.TblInf.Exec(SqlCommand)
for Record in RecordSet:
SourceFileList.append(Record[0])
return SourceFileList
## GetAllFiles() method
#
# Find all files, both source files and include files
#
# @param Db: Eot database
#
# @return FileList: A list of files
#
def GetAllFiles(Db):
FileList = []
IncludeFileList = GetAllIncludeFiles(Db)
SourceFileList = GetAllSourceFiles(Db)
for Item in IncludeFileList:
if os.path.isfile(Item) and Item not in FileList:
FileList.append(Item)
for Item in SourceFileList:
if os.path.isfile(Item) and Item not in FileList:
FileList.append(Item)
return FileList
## ParseConditionalStatement() method
#
# Parse conditional statement
#
# @param Line: One line to be parsed
# @param Macros: A set of all macro
# @param StatusSet: A set of all status
#
# @retval True: Find keyword of conditional statement
# @retval False: Not find keyword of conditional statement
#
def ParseConditionalStatement(Line, Macros, StatusSet):
NewLine = Line.upper()
if NewLine.find(TAB_IF_EXIST.upper()) > -1:
IfLine = Line[NewLine.find(TAB_IF_EXIST) + len(TAB_IF_EXIST) + 1:].strip()
IfLine = ReplaceMacro(IfLine, EotGlobalData.gMACRO, True)
IfLine = ReplaceMacro(IfLine, Macros, True)
IfLine = IfLine.replace("\"", '')
IfLine = IfLine.replace("(", '')
IfLine = IfLine.replace(")", '')
Status = os.path.exists(os.path.normpath(IfLine))
StatusSet.append([Status])
return True
if NewLine.find(TAB_IF_DEF.upper()) > -1:
IfLine = Line[NewLine.find(TAB_IF_DEF) + len(TAB_IF_DEF) + 1:].strip()
Status = False
if IfLine in Macros or IfLine in EotGlobalData.gMACRO:
Status = True
StatusSet.append([Status])
return True
if NewLine.find(TAB_IF_N_DEF.upper()) > -1:
IfLine = Line[NewLine.find(TAB_IF_N_DEF) + len(TAB_IF_N_DEF) + 1:].strip()
Status = False
if IfLine not in Macros and IfLine not in EotGlobalData.gMACRO:
Status = True
StatusSet.append([Status])
return True
if NewLine.find(TAB_IF.upper()) > -1:
IfLine = Line[NewLine.find(TAB_IF) + len(TAB_IF) + 1:].strip()
Status = ParseConditionalStatementMacros(IfLine, Macros)
StatusSet.append([Status])
return True
if NewLine.find(TAB_ELSE_IF.upper()) > -1:
IfLine = Line[NewLine.find(TAB_ELSE_IF) + len(TAB_ELSE_IF) + 1:].strip()
Status = ParseConditionalStatementMacros(IfLine, Macros)
StatusSet[-1].append(Status)
return True
if NewLine.find(TAB_ELSE.upper()) > -1:
Status = False
for Item in StatusSet[-1]:
Status = Status or Item
StatusSet[-1].append(not Status)
return True
if NewLine.find(TAB_END_IF.upper()) > -1:
StatusSet.pop()
return True
return False
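# Illustrative sketch (hypothetical macro set): with Macros = {'ARCH': 'IA32'}, feeding
# the lines '!if "$(ARCH)" == "IA32"', '!else' and '!endif' through this function drives
# StatusSet from [[True]] to [[True, False]] and finally back to [] when the !endif pops
# the entry; GetConditionalStatementStatus() below reduces that stack to the effective
# True/False used by the callers.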
## ParseConditionalStatement() method
#
# Parse conditional statement with Macros
#
# @param Line: One line to be parsed
# @param Macros: A set of macros
#
# @return Line: New line after replacing macros
#
def ParseConditionalStatementMacros(Line, Macros):
if Line.upper().find('DEFINED(') > -1 or Line.upper().find('EXIST') > -1:
return False
Line = ReplaceMacro(Line, EotGlobalData.gMACRO, True)
Line = ReplaceMacro(Line, Macros, True)
Line = Line.replace("&&", "and")
Line = Line.replace("||", "or")
return eval(Line)
## GetConditionalStatementStatus() method
#
# 1. Assume the latest status as True
# 2. Pop the top status of status set, previous status
# 3. Compare the latest one and the previous one and get new status
#
# @param StatusSet: A set of all status
#
# @return Status: The final status
#
def GetConditionalStatementStatus(StatusSet):
Status = True
for Item in StatusSet:
Status = Status and Item[-1]
return Status
## SearchBelongsToFunction() method
#
# Search all functions belong to the file
#
# @param BelongsToFile: File id
# @param StartLine: Start line of search scope
# @param EndLine: End line of search scope
#
# @return: The found function
#
def SearchBelongsToFunction(BelongsToFile, StartLine, EndLine):
SqlCommand = """select ID, Name from Function where BelongsToFile = %s and StartLine <= %s and EndLine >= %s""" %(BelongsToFile, StartLine, EndLine)
RecordSet = EotGlobalData.gDb.TblFunction.Exec(SqlCommand)
if RecordSet != []:
return RecordSet[0][0], RecordSet[0][1]
else:
return -1, ''
## SearchPpiCallFunction() method
#
# Search all used PPI calling function 'PeiServicesReInstallPpi' and 'PeiServicesInstallPpi'
# Store the result to database
#
# @param Identifier: Table id
# @param SourceFileID: Source file id
# @param SourceFileFullPath: Source file full path
# @param ItemMode: Mode of the item
#
def SearchPpiCallFunction(Identifier, SourceFileID, SourceFileFullPath, ItemMode):
ItemName, ItemType, GuidName, GuidMacro, GuidValue = '', 'Ppi', '', '', ''
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' and Model = %s)""" \
% (Identifier, 'PeiServicesReInstallPpi', MODEL_IDENTIFIER_FUNCTION_CALLING)
BelongsToFunctionID, BelongsToFunction = -1, ''
Db = EotGlobalData.gDb.TblReport
RecordSet = Db.Exec(SqlCommand)
for Record in RecordSet:
Index = 0
BelongsToFile, StartLine, EndLine = Record[2], Record[3], Record[4]
BelongsToFunctionID, BelongsToFunction = SearchBelongsToFunction(BelongsToFile, StartLine, EndLine)
VariableList = Record[0].split(',')
for Variable in VariableList:
Variable = Variable.strip()
# Get index of the variable
if Variable.find('[') > -1:
Index = int(Variable[Variable.find('[') + 1 : Variable.find(']')])
Variable = Variable[:Variable.find('[')]
# Get variable name
if Variable.startswith('&'):
Variable = Variable[1:]
# Get variable value
SqlCommand = """select Value from %s where (Name like '%%%s%%') and Model = %s""" \
% (Identifier, Variable, MODEL_IDENTIFIER_VARIABLE)
NewRecordSet = Db.Exec(SqlCommand)
if NewRecordSet:
NewRecord = NewRecordSet[0][0]
VariableValueList = NewRecord.split('},')
if len(VariableValueList) > Index:
VariableValue = VariableValueList[Index]
NewVariableValueList = VariableValue.split(',')
if len(NewVariableValueList) > 1:
NewVariableValue = NewVariableValueList[1].strip()
if NewVariableValue.startswith('&'):
Db.Insert(-1, '', '', SourceFileID, SourceFileFullPath, ItemName, ItemType, ItemMode, NewVariableValue[1:], GuidMacro, GuidValue, BelongsToFunction, 0)
continue
else:
                EotGlobalData.gOP_UN_MATCHED.write('%s, %s, %s, %s, %s, %s\n' % (ItemType, ItemMode, SourceFileID, SourceFileFullPath, StartLine, Variable))
ItemName, ItemType, GuidName, GuidMacro, GuidValue = '', 'Ppi', '', '', ''
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Value like '%%%s%%' and Model = %s)""" \
% (Identifier, 'PeiServicesInstallPpi', MODEL_IDENTIFIER_ASSIGNMENT_EXPRESSION)
BelongsToFunctionID, BelongsToFunction = -1, ''
Db = EotGlobalData.gDb.TblReport
RecordSet = Db.Exec(SqlCommand)
SqlCommand = """select Value, Name, BelongsToFile, StartLine, EndLine from %s
where (Name like '%%%s%%' and Model = %s)""" \
% (Identifier, 'PeiServicesInstallPpi', MODEL_IDENTIFIER_FUNCTION_CALLING)
Db = EotGlobalData.gDb.TblReport
RecordSet2 = Db.Exec(SqlCommand)
for Record in RecordSet + RecordSet2:
if Record == []:
continue
Index = 0
BelongsToFile, StartLine, EndLine = Record[2], Record[3], Record[4]
BelongsToFunctionID, BelongsToFunction = SearchBelongsToFunction(BelongsToFile, StartLine, EndLine)
Variable = Record[0].replace('PeiServicesInstallPpi', '').replace('(', '').replace(')', '').replace('&', '').strip()
Variable = Variable[Variable.find(',') + 1:].strip()
# Get index of the variable
if Variable.find('[') > -1:
Index = int(Variable[Variable.find('[') + 1 : Variable.find(']')])
Variable = Variable[:Variable.find('[')]
# Get variable name
if Variable.startswith('&'):
Variable = Variable[1:]
# Get variable value
SqlCommand = """select Value from %s where (Name like '%%%s%%') and Model = %s""" \
% (Identifier, Variable, MODEL_IDENTIFIER_VARIABLE)
NewRecordSet = Db.Exec(SqlCommand)
if NewRecordSet:
NewRecord = NewRecordSet[0][0]
VariableValueList = NewRecord.split('},')
for VariableValue in VariableValueList[Index:]:
NewVariableValueList = VariableValue.split(',')
if len(NewVariableValueList) > 1:
NewVariableValue = NewVariableValueList[1].strip()
if NewVariableValue.startswith('&'):
Db.Insert(-1, '', '', SourceFileID, SourceFileFullPath, ItemName, ItemType, ItemMode, NewVariableValue[1:], GuidMacro, GuidValue, BelongsToFunction, 0)
continue
else:
            EotGlobalData.gOP_UN_MATCHED.write('%s, %s, %s, %s, %s, %s\n' % (ItemType, ItemMode, SourceFileID, SourceFileFullPath, StartLine, Variable))
## SearchPpi() method
#
# Search all used PPI calling function
# Store the result to database
#
# @param SqlCommand: SQL command statement
# @param Table: Table id
# @param SourceFileID: Source file id
# @param SourceFileFullPath: Source file full path
# @param ItemMode: Mode of the item
# @param PpiMode: Mode of PPI
#
def SearchPpi(SqlCommand, Table, SourceFileID, SourceFileFullPath, ItemMode, PpiMode = 1):
ItemName, ItemType, GuidName, GuidMacro, GuidValue = '', 'Ppi', '', '', ''
BelongsToFunctionID, BelongsToFunction = -1, ''
Db = EotGlobalData.gDb.TblReport
RecordSet = Db.Exec(SqlCommand)
for Record in RecordSet:
Parameter = GetPpiParameter(Record[0], PpiMode)
BelongsToFile, StartLine, EndLine = Record[2], Record[3], Record[4]
# Get BelongsToFunction
BelongsToFunctionID, BelongsToFunction = SearchBelongsToFunction(BelongsToFile, StartLine, EndLine)
# Default is Not Found
IsFound = False
# For Consumed Ppi
if ItemMode == 'Consumed':
if Parameter.startswith('g'):
Db.Insert(-1, '', '', SourceFileID, SourceFileFullPath, ItemName, ItemType, ItemMode, Parameter, GuidMacro, GuidValue, BelongsToFunction, 0)
else:
EotGlobalData.gOP_UN_MATCHED.write('%s, %s, %s, %s, %s, %s\n' % (ItemType, ItemMode, SourceFileID, SourceFileFullPath, StartLine, Parameter))
continue
# Direct Parameter.Guid
SqlCommand = """select Value from %s where (Name like '%%%s.Guid%%' or Name like '%%%s->Guid%%') and Model = %s""" % (Table, Parameter, Parameter, MODEL_IDENTIFIER_ASSIGNMENT_EXPRESSION)
NewRecordSet = Db.Exec(SqlCommand)
for NewRecord in NewRecordSet:
GuidName = GetParameterName(NewRecord[0])
Db.Insert(-1, '', '', SourceFileID, SourceFileFullPath, ItemName, ItemType, ItemMode, GuidName, GuidMacro, GuidValue, BelongsToFunction, 0)
IsFound = True
# Defined Parameter
if not IsFound:
Key = Parameter
if Key.rfind(' ') > -1:
Key = Key[Key.rfind(' ') : ].strip().replace('&', '')
Value = FindKeyValue(EotGlobalData.gDb.TblFile, Table, Key)
List = GetSplitValueList(Value.replace('\n', ''), TAB_COMMA_SPLIT)
if len(List) > 1:
GuidName = GetParameterName(List[1])
Db.Insert(-1, '', '', SourceFileID, SourceFileFullPath, ItemName, ItemType, ItemMode, GuidName, GuidMacro, GuidValue, BelongsToFunction, 0)
IsFound = True
# A list Parameter
if not IsFound:
Start = Parameter.find('[')
End = Parameter.find(']')
if Start > -1 and End > -1 and Start < End:
try:
Index = int(Parameter[Start + 1 : End])
Parameter = Parameter[0 : Start]
SqlCommand = """select Value from %s where Name = '%s' and Model = %s""" % (Table, Parameter, MODEL_IDENTIFIER_VARIABLE)
NewRecordSet = Db.Exec(SqlCommand)
for NewRecord in NewRecordSet:
NewParameter = GetSplitValueList(NewRecord[0], '}')[Index]
GuidName = GetPpiParameter(NewParameter[NewParameter.find('{') : ])
Db.Insert(-1, '', '', SourceFileID, SourceFileFullPath, ItemName, ItemType, ItemMode, GuidName, GuidMacro, GuidValue, BelongsToFunction, 0)
IsFound = True
except Exception:
pass
# A External Parameter
if not IsFound:
SqlCommand = """select File.ID from Inf, File
where BelongsToFile = (select BelongsToFile from Inf where Value1 = '%s')
and Inf.Model = %s and Inf.Value1 = File.FullPath and File.Model = %s""" % (SourceFileFullPath, MODEL_EFI_SOURCE_FILE, MODEL_FILE_C)
NewRecordSet = Db.Exec(SqlCommand)
for NewRecord in NewRecordSet:
Table = 'Identifier' + str(NewRecord[0])
SqlCommand = """select Value from %s where Name = '%s' and Modifier = 'EFI_PEI_PPI_DESCRIPTOR' and Model = %s""" % (Table, Parameter, MODEL_IDENTIFIER_VARIABLE)
PpiSet = Db.Exec(SqlCommand)
if PpiSet != []:
GuidName = GetPpiParameter(PpiSet[0][0])
if GuidName != '':
Db.Insert(-1, '', '', SourceFileID, SourceFileFullPath, ItemName, ItemType, ItemMode, GuidName, GuidMacro, GuidValue, BelongsToFunction, 0)
IsFound = True
break
if not IsFound:
EotGlobalData.gOP_UN_MATCHED.write('%s, %s, %s, %s, %s, %s\n' % (ItemType, ItemMode, SourceFileID, SourceFileFullPath, StartLine, Parameter))
## SearchProtocols() method
#
# Search all used PROTOCOL calling function
# Store the result to database
#
# @param SqlCommand: SQL command statement
# @param Table: Table id
# @param SourceFileID: Source file id
# @param SourceFileFullPath: Source file full path
# @param ItemMode: Mode of the item
# @param ProtocolMode: Mode of PROTOCOL
#
def SearchProtocols(SqlCommand, Table, SourceFileID, SourceFileFullPath, ItemMode, ProtocolMode):
ItemName, ItemType, GuidName, GuidMacro, GuidValue = '', 'Protocol', '', '', ''
BelongsToFunctionID, BelongsToFunction = -1, ''
Db = EotGlobalData.gDb.TblReport
RecordSet = Db.Exec(SqlCommand)
for Record in RecordSet:
Parameter = ''
BelongsToFile, StartLine, EndLine = Record[2], Record[3], Record[4]
# Get BelongsToFunction
BelongsToFunctionID, BelongsToFunction = SearchBelongsToFunction(BelongsToFile, StartLine, EndLine)
# Default is Not Found
IsFound = False
if ProtocolMode == 0 or ProtocolMode == 1:
Parameter = GetProtocolParameter(Record[0], ProtocolMode)
if Parameter.startswith('g') or Parameter.endswith('Guid') or Parameter == 'ShellEnvProtocol' or Parameter == 'ShellInterfaceProtocol':
GuidName = GetParameterName(Parameter)
Db.Insert(-1, '', '', SourceFileID, SourceFileFullPath, ItemName, ItemType, ItemMode, GuidName, GuidMacro, GuidValue, BelongsToFunction, 0)
IsFound = True
if ProtocolMode == 2:
Protocols = GetSplitValueList(Record[0], TAB_COMMA_SPLIT)
for Protocol in Protocols:
if Protocol.startswith('&') and Protocol.endswith('Guid'):
GuidName = GetParameterName(Protocol)
Db.Insert(-1, '', '', SourceFileID, SourceFileFullPath, ItemName, ItemType, ItemMode, GuidName, GuidMacro, GuidValue, BelongsToFunction, 0)
IsFound = True
else:
NewValue = FindKeyValue(EotGlobalData.gDb.TblFile, Table, Protocol)
if Protocol != NewValue and NewValue.endswith('Guid'):
GuidName = GetParameterName(NewValue)
Db.Insert(-1, '', '', SourceFileID, SourceFileFullPath, ItemName, ItemType, ItemMode, GuidName, GuidMacro, GuidValue, BelongsToFunction, 0)
IsFound = True
if not IsFound:
if BelongsToFunction in EotGlobalData.gProducedProtocolLibrary or BelongsToFunction in EotGlobalData.gConsumedProtocolLibrary:
EotGlobalData.gOP_UN_MATCHED_IN_LIBRARY_CALLING.write('%s, %s, %s, %s, %s, %s, %s\n' % (ItemType, ItemMode, SourceFileID, SourceFileFullPath, StartLine, Parameter, BelongsToFunction))
else:
EotGlobalData.gOP_UN_MATCHED.write('%s, %s, %s, %s, %s, %s\n' % (ItemType, ItemMode, SourceFileID, SourceFileFullPath, StartLine, Parameter))
## SearchFunctionCalling() method
#
# Search all used PPI/PROTOCOL calling function by library
# Store the result to database
#
# @param SqlCommand: SQL command statement
# @param Table: Table id
# @param SourceFileID: Source file id
# @param SourceFileFullPath: Source file full path
# @param ItemType: Type of the item, PPI or PROTOCOL
# @param ItemMode: Mode of item
#
def SearchFunctionCalling(Table, SourceFileID, SourceFileFullPath, ItemType, ItemMode):
LibraryList = sdict()
Db = EotGlobalData.gDb.TblReport
Parameters, ItemName, GuidName, GuidMacro, GuidValue, BelongsToFunction = [], '', '', '', '', ''
if ItemType == 'Protocol' and ItemMode == 'Produced':
LibraryList = EotGlobalData.gProducedProtocolLibrary
elif ItemType == 'Protocol' and ItemMode == 'Consumed':
LibraryList = EotGlobalData.gConsumedProtocolLibrary
elif ItemType == 'Protocol' and ItemMode == 'Callback':
LibraryList = EotGlobalData.gCallbackProtocolLibrary
elif ItemType == 'Ppi' and ItemMode == 'Produced':
LibraryList = EotGlobalData.gProducedPpiLibrary
elif ItemType == 'Ppi' and ItemMode == 'Consumed':
LibraryList = EotGlobalData.gConsumedPpiLibrary
for Library in LibraryList:
Index = LibraryList[Library]
SqlCommand = """select Value, StartLine from %s
where Name like '%%%s%%' and Model = %s""" \
% (Table, Library, MODEL_IDENTIFIER_FUNCTION_CALLING)
RecordSet = Db.Exec(SqlCommand)
for Record in RecordSet:
IsFound = False
if Index == -1:
ParameterList = GetSplitValueList(Record[0], TAB_COMMA_SPLIT)
for Parameter in ParameterList:
Parameters.append(GetParameterName(Parameter))
else:
Parameters = [GetProtocolParameter(Record[0], Index)]
StartLine = Record[1]
for Parameter in Parameters:
if Parameter.startswith('g') or Parameter.endswith('Guid') or Parameter == 'ShellEnvProtocol' or Parameter == 'ShellInterfaceProtocol':
GuidName = GetParameterName(Parameter)
Db.Insert(-1, '', '', SourceFileID, SourceFileFullPath, ItemName, ItemType, ItemMode, GuidName, GuidMacro, GuidValue, BelongsToFunction, 0)
IsFound = True
if not IsFound:
EotGlobalData.gOP_UN_MATCHED.write('%s, %s, %s, %s, %s, %s\n' % (ItemType, ItemMode, SourceFileID, SourceFileFullPath, StartLine, Parameter))
## FindProtocols() method
#
# Find defined protocols
#
# @param SqlCommand: SQL command statement
# @param Table: Table id
# @param SourceFileID: Source file id
# @param SourceFileFullPath: Source file full path
# @param ItemName: String of protocol definition
# @param ItemType: Type of the item, PPI or PROTOCOL
# @param ItemMode: Mode of item
#
#def FindProtocols(Db, SqlCommand, Table, SourceFileID, SourceFileFullPath, ItemName, ItemType, ItemMode, GuidName, GuidMacro, GuidValue):
# BelongsToFunction = ''
# RecordSet = Db.Exec(SqlCommand)
# for Record in RecordSet:
# IsFound = True
# Parameter = GetProtocolParameter(Record[0])
## GetProtocolParameter() method
#
# Parse string of protocol and find parameters
#
# @param Parameter: Parameter to be parsed
# @param Index: The index of the parameter
#
# @return: call common GetParameter
#
def GetProtocolParameter(Parameter, Index = 1):
return GetParameter(Parameter, Index)
## GetPpiParameter() method
#
# Parse string of ppi and find parameters
#
# @param Parameter: Parameter to be parsed
# @param Index: The index of the parameter
#
# @return: call common GetParameter
#
def GetPpiParameter(Parameter, Index = 1):
return GetParameter(Parameter, Index)
## GetParameter() method
#
# Get a parameter by index
#
# @param Parameter: Parameter to be parsed
# @param Index: The index of the parameter
#
# @return Parameter: The found parameter
#
def GetParameter(Parameter, Index = 1):
ParameterList = GetSplitValueList(Parameter, TAB_COMMA_SPLIT)
if len(ParameterList) > Index:
Parameter = GetParameterName(ParameterList[Index])
return Parameter
return ''
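# For example, GetParameter('{ Flags, &gSomePpiGuid, &SomeInterface }', 1) returns
# 'gSomePpiGuid' (the names are hypothetical); the string is split on commas and the
# requested element is cleaned up by GetParameterName() below.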
## GetParameterName() method
#
# Get a parameter name
#
# @param Parameter: Parameter to be parsed
#
# @return: The name of parameter
#
def GetParameterName(Parameter):
if type(Parameter) == type('') and Parameter.startswith('&'):
return Parameter[1:].replace('{', '').replace('}', '').replace('\r', '').replace('\n', '').strip()
else:
return Parameter.strip()
## FindKeyValue() method
#
# Find key value of a variable
#
# @param Db: Database to be searched
# @param Table: Table to be searched
# @param Key: The keyword
#
# @return Value: The value of the the keyword
#
def FindKeyValue(Db, Table, Key):
SqlCommand = """select Value from %s where Name = '%s' and (Model = %s or Model = %s)""" % (Table, Key, MODEL_IDENTIFIER_VARIABLE, MODEL_IDENTIFIER_ASSIGNMENT_EXPRESSION)
RecordSet = Db.Exec(SqlCommand)
Value = ''
for Record in RecordSet:
if Record[0] != 'NULL':
Value = FindKeyValue(Db, Table, GetParameterName(Record[0]))
if Value != '':
return Value
else:
return Key
## ParseMapFile() method
#
# Parse map files to get a dict of 'ModuleName' : {FunName : FunAddress}
#
# @param Files: A list of map files
#
# @return AllMaps: An object of all map files
#
def ParseMapFile(Files):
AllMaps = {}
CurrentModule = ''
CurrentMaps = {}
for File in Files:
Content = open(File, 'r').readlines()
for Line in Content:
Line = CleanString(Line)
# skip empty line
if Line == '':
continue
if Line.find('(') > -1 and Line.find(')') > -1:
if CurrentModule != '' and CurrentMaps != {}:
AllMaps[CurrentModule] = CurrentMaps
CurrentModule = Line[:Line.find('(')]
CurrentMaps = {}
continue
else:
Name = ''
Address = ''
List = Line.split()
Address = List[0]
if List[1] == 'F' or List[1] == 'FS':
Name = List[2]
else:
Name = List[1]
CurrentMaps[Name] = Address
continue
return AllMaps
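# Illustrative sketch (assumed map-file layout, inferred from the parsing rules above):
# a fragment such as
#     PeiMain(...)
#     0x00000240   F   _PeiCore
#     DxeMain(...)
# leaves AllMaps['PeiMain'] == {'_PeiCore': '0x00000240'}; note that a module's symbols
# are only committed to AllMaps once the next "Module(...)" header line is reached.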
## ConvertGuid
#
# Convert a GUID to a GUID with all upper letters
#
# @param guid: The GUID to be converted
#
# @return newGuid: The GUID with all upper letters.
#
def ConvertGuid(guid):
numList = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
newGuid = ''
if guid.startswith('g'):
guid = guid[1:]
for i in guid:
if i.upper() == i and i not in numList:
newGuid = newGuid + ('_' + i)
else:
newGuid = newGuid + i.upper()
if newGuid.startswith('_'):
newGuid = newGuid[1:]
if newGuid.endswith('_'):
newGuid = newGuid[:-1]
return newGuid
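# For example, ConvertGuid('gEfiPeiPciCfgPpiGuid') returns 'EFI_PEI_PCI_CFG_PPI_GUID':
# the leading 'g' is dropped and each upper-case letter starts a new '_'-separated,
# fully upper-cased word.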
## ConvertGuid2() method
#
# Convert a GUID to a GUID with new string instead of old string
#
# @param guid: The GUID to be converted
# @param old: Old string to be replaced
# @param new: New string to replace the old one
#
# @return newGuid: The GUID after replacement
#
def ConvertGuid2(guid, old, new):
newGuid = ConvertGuid(guid)
newGuid = newGuid.replace(old, new)
return newGuid
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
if __name__ == '__main__':
pass
| intel/ipmctl | BaseTools/Source/Python/Eot/Parser.py | Python | bsd-3-clause | 33,751 | 0.004207 |
# -*- coding: utf-8 -*-
#
# restflow documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 31 07:32:50 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('_themes'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinxcontrib.autohttp.flask',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'undoc-members',
'show-inheritance', ]
#autodoc_member_order = 'groupwise'
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'restflow'
copyright = u'2014, Alexander Weigl, Nicolai Schoch'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = 'alpha'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'kr'
html_theme_path = ['_themes']
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'restflowdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'restflow.tex', u'restflow Documentation',
u'Alexander Weigl, Nicolai Schoch', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'restflow', u'restflow Documentation',
[u'Alexander Weigl, Nicolai Schoch'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'restflow', u'restflow Documentation',
u'Alexander Weigl, Nicolai Schoch', 'restflow', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| CognitionGuidedSurgery/restflow | doc/conf.py | Python | gpl-3.0 | 8,911 | 0.005948 |
###############################################################################
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# Copyright (C) 2010, Pablo Recio Quijano <[email protected]> #
# 2010, Lorenzo Gil Sanchez <[email protected]> #
###############################################################################
from datetime import datetime, timedelta
from optparse import OptionParser
from os import path
from cobaya import version_string
from cobaya.hamster_task import HamsterTask
from cobaya.hamster_db import HamsterDB
from cobaya.config import Config
from cobaya.remote_server import RemoteServer
class CobayaApp(object):
def __init__(self, options):
self.conf = Config()
self.conf.load(options.config_file)
self.log_file = self.conf.get_option('hamster.log_file')
self.ids = []
if path.exists(self.log_file):
f = file(self.log_file, 'r')
self.ids = f.readlines()
else:
f = file(self.log_file, 'w')
f.close()
self.tasks = get_all_tasks(self.conf)
for id in self.tasks:
str_id = ('%d\n' % id)
if str_id in self.ids:
self.tasks[id].remote_sync = True
def generate_unsynced_data(self):
data = []
for id in self.tasks:
if self.tasks[id].remote_sync == False and \
               self.tasks[id].time != 0.0:  # not yet synced, but already finished
data = self.append_and_merge(data, id)
return data
def append_and_merge(self, data, id):
d = self.tasks[id].to_dict()
band = False
for i in range(len(data)):
if data[i]['date'] == d['date'] and \
data[i]['project'] == d['project'] and \
data[i]['ticket'] == d['ticket']:
data[i]['time'] += d['time']
if (d['description'] and not data[i]['description']) or \
(d['description'] and not d['description'] in data[i]['description']):
if data[i]['description']:
data[i]['description'] = '%s ||| %s' % (data[i]['description'], d['description'])
else:
data[i]['description'] = d['description']
band = True
if not band or not len(data):
data.append(d)
return data
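    # Illustrative sketch (hypothetical values): if data already holds an entry for
    # date '2010-06-01', project 'cobaya', ticket 12 with time 1.5 and description
    # 'refactoring', and self.tasks[id] produces the same date/project/ticket with
    # time 0.5 and description 'tests', append_and_merge() keeps a single entry with
    # time 2.0 and description 'refactoring ||| tests' instead of appending a
    # duplicate row.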
def perform_notification(self):
unsynced_data = self.generate_unsynced_data()
server = RemoteServer(self.conf)
responses = server.send_tasks(unsynced_data)
news_id = []
synced_tasks = responses['accepted'] + responses['duplicated']
for task in synced_tasks:
id = task['task_id']
news_id.append("%d\n" % id)
self.tasks[id].remote_sync = True
f = file(self.log_file, 'a')
f.writelines(news_id)
f.close()
def get_all_tasks(conf):
    """Return a dict of recently finished Hamster tasks, keyed by remote task id.
    """
db = HamsterDB(conf)
fact_list = db.all_facts_id
security_days = int(conf.get_option('tasks.security_days'))
today = datetime.today()
tasks = {}
for fact_id in fact_list:
ht = HamsterTask(fact_id, conf, db)
if ht.end_time:
end_time = ht.get_object_dates()[1]
if today - timedelta(security_days) <= end_time:
rt = ht.get_remote_task()
tasks[rt.task_id] = rt
db.close_connection()
print 'Obtained %d tasks' % len(tasks)
return tasks
def main():
parser = OptionParser(usage="usage: %prog [options]",
version="%prog " + version_string)
parser.add_option("-c", "--config", dest="config_file", default=None,
help="configuration file to use")
(options, args) = parser.parse_args()
cob = CobayaApp(options)
cob.perform_notification()
if __name__ == '__main__':
main()
| pablorecio/Cobaya | src/cobaya/app.py | Python | gpl-3.0 | 4,990 | 0.000601 |
#! encoding: utf-8
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
"""
This module is for the 'core' data types.
"""
from __future__ import absolute_import, division, print_function
import six
from six.moves import zip
from six import string_types
import time
import sys
from collections import namedtuple, MutableMapping, defaultdict, deque
import numpy as np
from itertools import tee
import logging
logger = logging.getLogger(__name__)
try:
import src.ctrans as ctrans
except ImportError:
try:
import ctrans
except ImportError:
ctrans = None
md_value = namedtuple("md_value", ['value', 'units'])
_defaults = {
"bins": 100,
'nx': 100,
'ny': 100,
'nz': 100
}
class NotInstalledError(ImportError):
'''
Custom exception that should be subclassed to handle
specific missing libraries
'''
pass
class MD_dict(MutableMapping):
"""
A class to make dealing with the meta-data scheme for DataExchange easier
Examples
--------
Getting and setting data by path is possible
>>> tt = MD_dict()
>>> tt['name'] = 'test'
>>> tt['nested.a'] = 2
>>> tt['nested.b'] = (5, 'm')
>>> tt['nested.a'].value
2
>>> tt['nested.a'].units is None
True
>>> tt['name'].value
'test'
>>> tt['nested.b'].units
'm'
"""
def __init__(self, md_dict=None):
# TODO properly walk the input on upgrade dicts -> MD_dict
if md_dict is None:
md_dict = dict()
self._dict = md_dict
self._split = '.'
def __repr__(self):
return self._dict.__repr__()
# overload __setitem__ so dotted paths work
def __setitem__(self, key, val):
key_split = key.split(self._split)
tmp = self._dict
for k in key_split[:-1]:
try:
tmp = tmp[k]._dict
except:
tmp[k] = type(self)()
tmp = tmp[k]._dict
if isinstance(tmp, md_value):
# TODO make message better
raise KeyError("trying to use a leaf node as a branch")
# if passed in an md_value, set it and return
if isinstance(val, md_value):
tmp[key_split[-1]] = val
return
# catch the case of a bare string
elif isinstance(val, string_types):
# a value with out units
tmp[key_split[-1]] = md_value(val, 'text')
return
# not something easy, try to guess what to do instead
try:
# if the second element is a string or None, cast to named tuple
if isinstance(val[1], string_types) or val[1] is None:
tmp[key_split[-1]] = md_value(*val)
# else, assume whole thing is the value with no units
else:
tmp[key_split[-1]] = md_value(val, None)
# catch any type errors from trying to index into non-indexable things
# or from trying to use iterables longer than 2
except TypeError:
tmp[key_split[-1]] = md_value(val, None)
def __getitem__(self, key):
key_split = key.split(self._split)
tmp = self._dict
for k in key_split[:-1]:
try:
tmp = tmp[k]._dict
except:
tmp[k] = type(self)()
tmp = tmp[k]._dict
if isinstance(tmp, md_value):
# TODO make message better
raise KeyError("trying to use a leaf node as a branch")
return tmp.get(key_split[-1], None)
def __delitem__(self, key):
# pass one delete the entry
# TODO make robust to non-keys
key_split = key.split(self._split)
tmp = self._dict
for k in key_split[:-1]:
# make sure we are grabbing the internal dict
tmp = tmp[k]._dict
del tmp[key_split[-1]]
# TODO pass 2 remove empty branches
def __len__(self):
return len(list(iter(self)))
def __iter__(self):
return _iter_helper([], self._split, self._dict)
def _iter_helper(path_list, split, md_dict):
"""
Recursively walk the tree and return the names of the leaves
"""
for k, v in six.iteritems(md_dict):
if isinstance(v, md_value):
yield split.join(path_list + [k])
else:
for inner_v in _iter_helper(path_list + [k], split, v._dict):
yield inner_v
class verbosedict(dict):
"""
A sub-class of dict which raises more verbose errors if
a key is not found.
"""
def __getitem__(self, key):
try:
v = dict.__getitem__(self, key)
except KeyError:
if len(self) < 25:
new_msg = ("You tried to access the key '{key}' "
"which does not exist. The "
"extant keys are: {valid_keys}").format(
key=key, valid_keys=list(self))
else:
new_msg = ("You tried to access the key '{key}' "
"which does not exist. There "
"are {num} extant keys, which is too many to "
"show you").format(
key=key, num=len(self))
six.reraise(KeyError, KeyError(new_msg), sys.exc_info()[2])
return v
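# A quick illustration of the error message `verbosedict` produces (a hedged
# sketch; the keys used here are arbitrary):
#
#     d = verbosedict(a=1, b=2)
#     d['a']     # -> 1
#     d['c']     # -> KeyError listing the extant keys ['a', 'b']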
class RCParamDict(MutableMapping):
"""A class to make dealing with storing default values easier.
    RC params is a hold-over from the UNIX days where configuration
files are 'rc' files. See
http://en.wikipedia.org/wiki/Configuration_file
Examples
--------
Getting and setting data by path is possible
>>> tt = RCParamDict()
>>> tt['name'] = 'test'
>>> tt['nested.a'] = 2
"""
_delim = '.'
def __init__(self):
# the dict to hold the keys at this level
self._dict = dict()
# the defaultdict (defaults to just accepting it) of
# validator functions
self._validators = defaultdict(lambda: lambda x: True)
# overload __setitem__ so dotted paths work
def __setitem__(self, key, val):
# try to split the key
splt_key = key.split(self._delim, 1)
# if more than one part, recurse
if len(splt_key) > 1:
try:
tmp = self._dict[splt_key[0]]
except KeyError:
tmp = RCParamDict()
self._dict[splt_key[0]] = tmp
if not isinstance(tmp, RCParamDict):
raise KeyError("name space is borked")
tmp[splt_key[1]] = val
else:
            if not self._validators[key](val):
                raise ValueError("value {0!r} fails validation for "
                                 "key {1!r}".format(val, key))
self._dict[key] = val
def __getitem__(self, key):
# try to split the key
splt_key = key.split(self._delim, 1)
if len(splt_key) > 1:
return self._dict[splt_key[0]][splt_key[1]]
else:
return self._dict[key]
def __delitem__(self, key):
splt_key = key.split(self._delim, 1)
if len(splt_key) > 1:
self._dict[splt_key[0]].__delitem__(splt_key[1])
else:
del self._dict[key]
def __len__(self):
return len(list(iter(self)))
def __iter__(self):
return self._iter_helper([])
def _iter_helper(self, path_list):
"""
Recursively walk the tree and return the names of the leaves
"""
for key, val in six.iteritems(self._dict):
if isinstance(val, RCParamDict):
for k in val._iter_helper(path_list + [key, ]):
yield k
else:
yield self._delim.join(path_list + [key, ])
def __repr__(self):
# recursively get the formatted list of strings
str_list = self._repr_helper(0)
# return as a single string
return '\n'.join(str_list)
def _repr_helper(self, tab_level):
# to accumulate the strings into
str_list = []
# list of the elements at this level
elm_list = []
# list of sub-levels
nested_list = []
# loop over the local _dict and sort out which
# keys are nested and which are this level
for key, val in six.iteritems(self._dict):
if isinstance(val, RCParamDict):
nested_list.append(key)
else:
elm_list.append(key)
# sort the keys in both lists
elm_list.sort()
nested_list.sort()
# loop over and format the keys/vals at this level
for elm in elm_list:
str_list.append(" " * tab_level +
"{key}: {val}".format(
key=elm, val=self._dict[elm]))
# deal with the nested groups
for nested in nested_list:
# add the label for the group name
str_list.append(" " * tab_level +
"{key}:".format(key=nested))
# add the strings from _all_ the nested groups
str_list.extend(
self._dict[nested]._repr_helper(tab_level + 1))
return str_list
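# Illustrative sketch of how RCParamDict handles dotted paths (hedged; the key
# names and values below are made up, not defaults used anywhere in this module):
#
#     rc = RCParamDict()
#     rc['plot.line_width'] = 2
#     rc['plot.color'] = 'k'
#     rc['plot.line_width']    # -> 2
#     print(rc)                # nested keys are grouped under 'plot'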
keys_core = {
"pixel_size": {
"description": ("2 element tuple defining the (x y) dimensions of the "
"pixel"),
"type": tuple,
"units": "um",
},
"voxel_size": {
"description": ("3 element tuple defining the (x y z) dimensions of the "
"voxel"),
"type": tuple,
"units": "um"
},
"calibrated_center": {
"description": ("2 element tuple defining the (x y) center of the "
"detector in pixels"),
"type": tuple,
"units": "pixel",
},
"detector_size": {
"description": ("2 element tuple defining no. of pixels(size) in the "
"detector X and Y direction"),
"type": tuple,
"units": "pixel",
},
"detector_tilt_angles": {
"description": "Detector tilt angle",
"type": tuple,
"units": " degrees",
},
"dist_sample": {
"description": "distance from the sample to the detector (mm)",
"type": float,
"units": "mm",
},
"wavelength": {
"description": "wavelength of incident radiation (Angstroms)",
"type": float,
"units": "angstrom",
},
"ub_mat": {
"description": "UB matrix(orientation matrix) 3x3 array",
"type": "ndarray",
},
"energy": {
"description": "scanning energy for data collection",
"type": float,
"units": "keV",
},
"array_dimensions": {
"description": "axial lengths of the array (Pixels)",
"x_dimension": {
"description": "x-axis array length as int",
"type": int,
"units": "pixels"
},
"y_dimension": {
"description": "y-axis array length as int",
"type": int,
"units": "pixels"
},
"z_dimension": {
"description": "z-axis array length as int",
"type": int,
"units": "pixels"
}
},
"bounding_box": {
"description": ("physical extents of the array: useful for " +
"volume alignment, transformation, merge and " +
"spatial comparison of multiple volumes"),
"x_min": {
"description": "minimum spatial coordinate along the x-axis",
"type": float,
"units": "um"
},
"x_max": {
"description": "maximum spatial coordinate along the x-axis",
"type": float,
"units": "um"
},
"y_min": {
"description": "minimum spatial coordinate along the y-axis",
"type": float,
"units": "um"
},
"y_max": {
"description": "maximum spatial coordinate along the y-axis",
"type": float,
"units": "um"
},
"z_min": {
"description": "minimum spatial coordinate along the z-axis",
"type": float,
"units": "um"
},
"z_max": {
"description": "maximum spatial coordinate along the z-axis",
"type": float,
"units": "um"
},
},
}
def subtract_reference_images(imgs, is_reference):
"""
Function to subtract a series of measured images from
background/dark current/reference images. The nearest reference
image in the reverse temporal direction is subtracted from each
measured image.
Parameters
----------
imgs : numpy.ndarray
Array of 2-D images
is_reference : 1-D boolean array
true : image is reference image
false : image is measured image
Returns
-------
img_corr : numpy.ndarray
        len(img_corr) == len(imgs) - np.sum(is_reference)
img_corr is the array of measured images minus the reference
images.
Raises
------
ValueError
Possible causes:
is_reference contains no true values
Raised when the first image in the array is not a reference image.
"""
# an array of 1, 0, 1,.. should work too
if not is_reference[0]:
# use ValueError because the user passed in invalid data
raise ValueError("The first image is not a reference image")
    # grab the first image
    ref_image = imgs[0]
    # just sum the bool array to get count
    ref_count = np.sum(is_reference)
    # accumulate the corrected images as we walk the stack
    corrected_image = deque()
    # zip together (lazy like this is really izip), images and flags
    for img, ref in zip(imgs[1:], is_reference[1:]):
        # if this is a ref image, save it and move on
        if ref:
            ref_image = img
            continue
        # else, do the subtraction
        corrected_image.append(img - ref_image)
# return the output as a list
return list(corrected_image)
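# Example usage (illustrative sketch; the arrays are random stand-ins):
#
#     imgs = np.random.rand(5, 16, 16)                    # 5 frames of 16x16
#     is_reference = [True, False, False, True, False]    # frames 0 and 3 are darks
#     corrected = subtract_reference_images(imgs, is_reference)
#     len(corrected)    # -> 3 (measured frames minus the nearest earlier reference)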
def img_to_relative_xyi(img, cx, cy, pixel_size_x=None, pixel_size_y=None):
"""
Convert the 2D image to a list of x y I coordinates where
x == x_img - detector_center[0] and
y == y_img - detector_center[1]
Parameters
----------
img: `ndarray`
2D image
cx : float
Image center in the x direction
cy : float
Image center in the y direction
pixel_size_x : float, optional
Pixel size in x
pixel_size_y : float, optional
Pixel size in y
Returns
-------
x : `ndarray`
x-coordinate of pixel. shape (N, )
y : `ndarray`
y-coordinate of pixel. shape (N, )
I : `ndarray`
intensity of pixel. shape (N, )
"""
if pixel_size_x is not None and pixel_size_y is not None:
if pixel_size_x <= 0:
raise ValueError('Input parameter pixel_size_x must be greater '
'than 0. Your value was ' +
six.text_type(pixel_size_x))
if pixel_size_y <= 0:
raise ValueError('Input parameter pixel_size_y must be greater '
'than 0. Your value was ' +
six.text_type(pixel_size_y))
elif pixel_size_x is None and pixel_size_y is None:
pixel_size_x = 1
pixel_size_y = 1
else:
raise ValueError('pixel_size_x and pixel_size_y must both be None or '
'greater than zero. You passed in values for '
                         'pixel_size_x of {0} and pixel_size_y of {1}'
''.format(pixel_size_x, pixel_size_y))
# Caswell's incredible terse rewrite
x, y = np.meshgrid(pixel_size_x * (np.arange(img.shape[0]) - cx),
pixel_size_y * (np.arange(img.shape[1]) - cy))
# return x, y and intensity as 1D arrays
return x.ravel(), y.ravel(), img.ravel()
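# Example usage (illustrative sketch with a tiny synthetic image):
#
#     img = np.arange(9.).reshape(3, 3)
#     x, y, i = img_to_relative_xyi(img, cx=1, cy=1)
#     x.shape, y.shape, i.shape    # -> ((9,), (9,), (9,))
#     # x and y are pixel coordinates relative to (cx, cy); i is the intensity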
def bin_1D(x, y, nx=None, min_x=None, max_x=None):
"""
Bin the values in y based on their x-coordinates
Parameters
----------
x : array
position
y : array
intensity
nx : integer, optional
        number of bins to use; defaults to int(max_x - min_x)
min_x : float, optional
Left edge of first bin defaults to minimum value of x
max_x : float, optional
Right edge of last bin defaults to maximum value of x
Returns
-------
edges : array
edges of bins, length nx + 1
val : array
sum of values in each bin, length nx
count : array
The number of counts in each bin, length nx
"""
# handle default values
if min_x is None:
min_x = np.min(x)
if max_x is None:
max_x = np.max(x)
if nx is None:
nx = int(max_x - min_x)
# use a weighted histogram to get the bin sum
bins = np.linspace(start=min_x, stop=max_x, num=nx+1, endpoint=True)
val, _ = np.histogram(a=x, bins=bins, weights=y)
# use an un-weighted histogram to get the counts
count, _ = np.histogram(a=x, bins=bins)
# return the three arrays
return bins, val, count
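# Example usage (illustrative sketch; see bin_edges_to_centers further below
# for the helper used to recover bin centers):
#
#     x = np.linspace(0, 10, 1000)
#     y = np.sin(x)
#     edges, val, count = bin_1D(x, y, nx=10)
#     centers = bin_edges_to_centers(edges)
#     mean_y = val / count          # average intensity per bin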
def radial_grid(center, shape, pixel_size=None):
"""Convert a cartesian grid (x,y) to the radius relative to some center
Parameters
----------
center : tuple
point in image where r=0; may be a float giving subpixel precision.
Order is (rr, cc).
shape : tuple
Image shape which is used to determine the maximum extent of output
pixel coordinates.
Order is (rr, cc).
pixel_size : sequence, optional
The physical size of the pixels.
len(pixel_size) should be the same as len(shape)
defaults to (1,1)
Returns
-------
r : array
The distance of each pixel from `center`
Shape of the return value is equal to the `shape` input parameter
"""
if pixel_size is None:
pixel_size = (1, 1)
X, Y = np.meshgrid(pixel_size[1] * (np.arange(shape[1]) - center[1]),
pixel_size[0] * (np.arange(shape[0]) - center[0]))
return np.sqrt(X*X + Y*Y)
def angle_grid(center, shape, pixel_size=None):
"""
Make a grid of angular positions.
Read note for our conventions here -- there be dragons!
Parameters
----------
center : tuple
point in image where r=0; may be a float giving subpixel precision.
Order is (rr, cc).
shape: tuple
Image shape which is used to determine the maximum extent of output
pixel coordinates. Order is (rr, cc).
Returns
-------
agrid : array
angular position (in radians) of each array element in range [-pi, pi]
Note
----
    :math:`\\theta`, the counter-clockwise angle from the positive x axis,
    with :math:`\\theta \\in [-\\pi, \\pi]`. In array indexing and the conventional
    axes for images (origin in upper left), positive y is downward.
"""
if pixel_size is None:
pixel_size = (1, 1)
# row is y, column is x. "so say we all. amen."
x, y = np.meshgrid(pixel_size[1] * (np.arange(shape[1]) -
center[1]),
pixel_size[0] * (np.arange(shape[0]) -
center[0]))
return np.arctan2(y, x)
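# Example usage (illustrative sketch; the detector geometry here is made up):
#
#     r = radial_grid(center=(5, 5), shape=(11, 11))
#     phi = angle_grid(center=(5, 5), shape=(11, 11))
#     r[5, 5]                  # -> 0.0 at the chosen center
#     phi.min(), phi.max()     # within [-pi, pi]
#     two_theta = radius_to_twotheta(dist_sample=100.0, radius=r)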
def radius_to_twotheta(dist_sample, radius):
"""
Converts radius from the calibrated center to scattering angle
    (:math:`2\\theta`) with known detector to sample distance.
Parameters
----------
dist_sample : float
distance from the sample to the detector (mm)
radius : array
The L2 norm of the distance of each pixel from the calibrated center.
Returns
-------
two_theta : array
An array of :math:`2\\theta` values
"""
return np.arctan(radius / dist_sample)
def wedge_integration(src_data, center, theta_start,
delta_theta, r_inner, delta_r):
"""
Implementation of caking.
Parameters
----------
scr_data : ndarray
The source-data to be integrated
center : ndarray
The center of the ring in pixels
theta_start : float
The angle of the start of the wedge from the
image y-axis in degrees
delta_theta : float
The angular width of the wedge in degrees. Positive
angles go clockwise, negative go counter-clockwise.
r_inner : float
The inner radius in pixel units, Must be non-negative
delta_r : float
The length of the wedge in the radial direction
in pixel units. Must be non-negative
Returns
-------
float
The integrated intensity under the wedge
"""
raise NotImplementedError()
def bin_edges(range_min=None, range_max=None, nbins=None, step=None):
"""
    Generate bin edges. The last value in the returned array is
    the right edge of the last bin; the rest of the values are the
    left edges of each bin.
    If `range_max` is specified, all bin edges will be less than or
    equal to its value.
    If `range_min` is specified, all bin edges will be greater than
    or equal to its value.
    If `nbins` is specified then there will be that number of bins and
    the returned array will have length `nbins + 1` (as the right most
    edge is included).
If `step` is specified then bin width is approximately `step` (It is
not exact due to the nature of floats). The arrays generated by
`np.cumsum(np.ones(nbins) * step)` and `np.arange(nbins) * step` are
not identical. This function uses the second method in all cases
where `step` is specified.
.. warning :: If the set :code:`(range_min, range_max, step)` is
given there is no guarantee that :code:`range_max - range_min`
is an integer multiple of :code:`step`. In this case the left
most bin edge is :code:`range_min` and the right most bin edge
is less than :code:`range_max` and the distance between the
right most edge and :code:`range_max` is not greater than
:code:`step` (this is the same behavior as the built-in
:code:`range()`). It is not recommended to specify bins in this
manner.
Parameters
----------
range_min : float, optional
The minimum value that may be included as a bin edge
range_max : float, optional
The maximum value that may be included as a bin edge
nbins : int, optional
The number of bins, if specified the length of the returned
value will be nbins + 1
step : float, optional
The step between the bins
Returns
-------
edges : np.array
An array of floats for the bin edges. The last value is the
right edge of the last bin.
"""
num_valid_args = sum((range_min is not None, range_max is not None,
step is not None, nbins is not None))
if num_valid_args != 3:
raise ValueError("Exactly three of the arguments must be non-None "
"not {}.".format(num_valid_args))
if range_min is not None and range_max is not None:
if range_max <= range_min:
raise ValueError("The minimum must be less than the maximum")
if nbins is not None:
if nbins <= 0:
raise ValueError("The number of bins must be positive")
# The easy case
if step is None:
return np.linspace(range_min, range_max, nbins + 1, endpoint=True)
# in this case, the user gave use min, max, and step
if nbins is None:
if step > (range_max - range_min):
raise ValueError("The step can not be greater than the difference "
"between min and max")
nbins = int((range_max - range_min)//step)
ret = range_min + np.arange(nbins + 1) * step
# if the last value is greater than the max (should never happen)
if ret[-1] > range_max:
return ret[:-1]
if range_max - ret[-1] > 1e-10 * step:
logger.debug("Inconsistent "
"(range_min, range_max, step) "
"and step does not evenly divide "
"(range_min - range_max). "
"The bins has been truncated.\n"
"min: %f max: %f step: %f gap: %f",
range_min, range_max,
step, range_max - ret[-1])
return ret
# in this case we got range_min, nbins, step
if range_max is None:
return range_min + np.arange(nbins + 1) * step
# in this case we got range_max, nbins, step
if range_min is None:
return range_max - np.arange(nbins + 1)[::-1] * step
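# Example usage showing the supported three-argument combinations (an
# illustrative sketch; all values are arbitrary):
#
#     bin_edges(range_min=0, range_max=10, nbins=5)    # -> [0., 2., 4., 6., 8., 10.]
#     bin_edges(range_min=0, nbins=5, step=2)          # -> [0., 2., 4., 6., 8., 10.]
#     bin_edges(range_max=10, nbins=5, step=2)         # -> [0., 2., 4., 6., 8., 10.]
#     bin_edges(range_min=0, range_max=9.5, step=2)    # right edge truncated at 8.0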
def grid3d(q, img_stack,
nx=None, ny=None, nz=None,
xmin=None, xmax=None, ymin=None,
ymax=None, zmin=None, zmax=None,
binary_mask=None):
"""Grid irregularly spaced data points onto a regular grid via histogramming
This function will process the set of reciprocal space values (q), the
image stack (img_stack) and grid the image data based on the bounds
provided, using defaults if none are provided.
Parameters
----------
q : ndarray
(Qx, Qy, Qz) - HKL values - Nx3 array
img_stack : ndarray
Intensity array of the images
dimensions are: [num_img][num_rows][num_cols]
nx : int, optional
Number of voxels along x
ny : int, optional
Number of voxels along y
nz : int, optional
Number of voxels along z
xmin : float, optional
Minimum value along x. Defaults to smallest x value in q
ymin : float, optional
Minimum value along y. Defaults to smallest y value in q
zmin : float, optional
Minimum value along z. Defaults to smallest z value in q
xmax : float, optional
Maximum value along x. Defaults to largest x value in q
ymax : float, optional
Maximum value along y. Defaults to largest y value in q
zmax : float, optional
Maximum value along z. Defaults to largest z value in q
binary_mask : ndarray, optional
The binary mask provides a mechanism to remove unwanted pixels
from the images.
Binary mask can be two different shapes.
- 1: 2-D with binary_mask.shape == np.asarray(img_stack[0]).shape
- 2: 3-D with binary_mask.shape == np.asarray(img_stack).shape
Returns
-------
mean : ndarray
intensity grid. The values in this grid are the
mean of the values that fill with in the grid.
occupancy : ndarray
The number of data points that fell in the grid.
std_err : ndarray
This is the standard error of the value in the
grid box.
oob : int
Out Of Bounds. Number of data points that are outside of
the gridded region.
bounds : list
tuple of (min, max, step) for x, y, z in order: [x_bounds,
y_bounds, z_bounds]
"""
# validate input
img_stack = np.asarray(img_stack)
# todo determine if we're going to support masked arrays
# todo masked arrays seemed to have been punted to `process_to_q`
# check to see if the binary mask and the image stack are identical shapes
if binary_mask is None or binary_mask.shape == img_stack.shape:
# do a dance :)
pass
elif binary_mask.shape == img_stack[0].shape:
# this is still a valid mask, so make it the same dimensions
# as img_stack.
# should probably change this to use something similar to:
# todo http://stackoverflow.com/questions/5564098/
binary_mask = np.tile(np.ravel(binary_mask), img_stack.shape[0])
else:
raise ValueError("The binary mask must be the same shape as the"
"img_stack ({0}) or a single image in the image "
"stack ({1}). The input binary mask is shaped ({2})"
"".format(img_stack.shape, img_stack[0].shape,
binary_mask.shape))
q = np.atleast_2d(q)
if q.ndim != 2:
raise ValueError("q.ndim must be a 2-D array of shape Nx3 array. "
"You provided an array with {0} dimensions."
"".format(q.ndim))
if q.shape[1] != 3:
raise ValueError("The shape of q must be an Nx3 array, not {0}X{1}"
" which you provided.".format(*q.shape))
# set defaults for qmin, qmax, dq
qmin = np.min(q, axis=0)
qmax = np.max(q, axis=0)
dqn = [_defaults['nx'], _defaults['ny'], _defaults['nz']]
# pad the upper edge by just enough to ensure that all of the
# points are in-bounds with the binning rules: lo <= val < hi
qmax += np.spacing(qmax)
# check for non-default input
for target, input_vals in ((dqn, (nx, ny, nz)),
(qmin, (xmin, ymin, zmin)),
(qmax, (xmax, ymax, zmax))):
for j, in_val in enumerate(input_vals):
if in_val is not None:
target[j] = in_val
# format bounds
bounds = np.array([qmin, qmax, dqn]).T
# creating (Qx, Qy, Qz, I) Nx4 array - HKL values and Intensity
# getting the intensity value for each pixel
q = np.insert(q, 3, np.ravel(img_stack), axis=1)
if binary_mask is not None:
q = q[np.ravel(binary_mask)]
# 3D grid of the data set
# starting time for gridding
t1 = time.time()
# call the c library
mean, occupancy, std_err, oob = ctrans.grid3d(q, qmin, qmax, dqn, norm=1)
# ending time for the gridding
t2 = time.time()
logger.info("Done processed in {0} seconds".format(t2-t1))
# No. of values zero in the grid
empt_nb = (occupancy == 0).sum()
# log some information about the grid at the debug level
if oob:
logger.debug("There are %.2e points outside the grid", oob)
logger.debug("There are %2e bins in the grid", mean.size)
if empt_nb:
logger.debug("There are %.2e values zero in the grid", empt_nb)
return mean, occupancy, std_err, oob, bounds
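# Example usage (illustrative sketch; requires the optional ctrans extension
# and uses random stand-in data sized so that q has one row per pixel):
#
#     q = np.random.uniform(-1, 1, size=(1000, 3))    # Nx3 (Qx, Qy, Qz)
#     img_stack = np.random.rand(10, 10, 10)          # N == img_stack.size
#     mean, occ, std_err, oob, bounds = grid3d(q, img_stack, nx=20, ny=20, nz=20)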
def bin_edges_to_centers(input_edges):
"""
Helper function for turning a array of bin edges into
an array of bin centers
Parameters
----------
input_edges : array-like
N + 1 values which are the left edges of N bins
and the right edge of the last bin
Returns
-------
centers : ndarray
A length N array giving the centers of the bins
"""
input_edges = np.asarray(input_edges)
return (input_edges[:-1] + input_edges[1:]) * 0.5
# https://docs.python.org/2/library/itertools.html#recipes
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def q_to_d(q):
"""
    Helper function to convert :math:`q` to :math:`d`. The point
    of this function is to prevent fat-fingered typos.
    By definition the relationship is:
    .. math ::
        d = \\frac{2 \\pi}{q}
Parameters
----------
q : array
An array of q values
Returns
-------
d : array
An array of d (plane) spacing
"""
return (2 * np.pi) / np.asarray(q)
def d_to_q(d):
"""
Helper function to convert :math:`d` to :math:`q`.
The point of this function is to prevent fat-fingered typos.
By definition the relationship is:
    .. math ::
        q = \\frac{2 \\pi}{d}
Parameters
----------
d : array
An array of d (plane) spacing
Returns
-------
q : array
An array of q values
"""
return (2 * np.pi) / np.asarray(d)
def q_to_twotheta(q, wavelength):
"""
Helper function to convert :math:`q` + :math:`\\lambda` to :math:`2\\theta`.
The point of this function is to prevent fat-fingered typos.
By definition the relationship is:
    .. math ::
        \\sin\\left(\\frac{2\\theta}{2}\\right) = \\frac{\\lambda q}{4 \\pi}
    thus
    .. math ::
        2\\theta = 2 \\arcsin\\left(\\frac{\\lambda q}{4 \\pi}\\right)
Parameters
----------
q : array
An array of :math:`q` values
wavelength : float
Wavelength of the incoming x-rays
Returns
-------
two_theta : array
An array of :math:`2\\theta` values
"""
q = np.asarray(q)
wavelength = float(wavelength)
pre_factor = wavelength / (4 * np.pi)
return 2 * np.arcsin(q * pre_factor)
def twotheta_to_q(two_theta, wavelength):
"""
Helper function to convert :math:`2\\theta` + :math:`\\lambda` to :math:`q`.
The point of this function is to prevent fat-fingered typos.
By definition the relationship is:
    .. math ::
        \\sin\\left(\\frac{2\\theta}{2}\\right) = \\frac{\\lambda q}{4 \\pi}
    thus
    .. math ::
        q = \\frac{4 \\pi \\sin\\left(\\frac{2\\theta}{2}\\right)}{\\lambda}
Parameters
----------
two_theta : array
An array of :math:`2\\theta` values
wavelength : float
Wavelength of the incoming x-rays
Returns
-------
q : array
An array of :math:`q` values
"""
two_theta = np.asarray(two_theta)
wavelength = float(wavelength)
pre_factor = ((4 * np.pi) / wavelength)
return pre_factor * np.sin(two_theta / 2)
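# Example usage tying the q/d/two-theta helpers together (illustrative sketch;
# the wavelength is the Cu K-alpha value, used here only as a familiar number):
#
#     wavelength = 1.54                      # angstroms
#     q = np.array([1.0, 2.0, 4.0])
#     d = q_to_d(q)                          # 2*pi/q
#     tth = q_to_twotheta(q, wavelength)
#     np.allclose(twotheta_to_q(tth, wavelength), q)    # -> True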
def multi_tau_lags(multitau_levels, multitau_channels):
"""
Standard multiple-tau algorithm for finding the lag times (delay
times).
Parameters
----------
multitau_levels : int
number of levels of multiple-taus
multitau_channels : int
number of channels or number of buffers in auto-correlators
normalizations (must be even)
Returns
-------
total_channels : int
total number of channels ( or total number of delay times)
lag_steps : ndarray
delay or lag steps for the multiple tau analysis
Notes
-----
The multi-tau correlation scheme was used for finding the lag times
(delay times).
References: text [1]_
    .. [1] K. Schätzel, M. Drewel and S. Stimac, "Photon correlation
measurements at large lag times: Improving statistical accuracy,"
J. Mod. Opt., vol 35, p 711–718, 1988.
"""
if (multitau_channels % 2 != 0):
raise ValueError("Number of multiple tau channels(buffers)"
" must be even. You provided {0} "
.format(multitau_channels))
# total number of channels ( or total number of delay times)
tot_channels = (multitau_levels + 1)*multitau_channels//2
lag = []
lag_steps = np.arange(0, multitau_channels)
for i in range(2, multitau_levels + 1):
for j in range(0, multitau_channels//2):
lag.append((multitau_channels//2 + j)*(2**(i - 1)))
lag_steps = np.append(lag_steps, np.array(lag))
return tot_channels, lag_steps
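# Example usage (illustrative sketch; small numbers chosen so the output is
# easy to inspect):
#
#     tot_channels, lag_steps = multi_tau_lags(multitau_levels=3,
#                                              multitau_channels=8)
#     tot_channels    # -> 16
#     lag_steps       # -> [0 1 2 3 4 5 6 7 8 10 12 14 16 20 24 28]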
def geometric_series(common_ratio, number_of_images, first_term=1):
"""
This will provide the geometric series for the integration.
Last values of the series has to be less than or equal to number
of images
ex: number_of_images = 100, first_term =1
common_ratio = 2, geometric_series = 1, 2, 4, 8, 16, 32, 64
common_ratio = 3, geometric_series = 1, 3, 9, 27, 81
Parameters
----------
common_ratio : float
common ratio of the series
number_of_images : int
number of images
first_term : float, optional
first term in the series
Return
------
geometric_series : list
time series
Note
----
    .. math ::
        a + ar + ar^2 + ar^3 + ar^4 + ...
    where `a` is the first term in the series and `r` is the common ratio.
"""
geometric_series = [first_term]
while geometric_series[-1]*common_ratio < number_of_images:
geometric_series.append(geometric_series[-1]*common_ratio)
return geometric_series
| celiafish/scikit-xray | skxray/core/utils.py | Python | bsd-3-clause | 38,692 | 0.00031 |
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
# Copyright (C) 2013 Rackspace Hosting Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, String, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import ForeignKey
from sqlalchemy.orm import backref
from sqlalchemy.orm import relationship
from sqlalchemy import types as types
from taskflow.openstack.common import jsonutils
from taskflow.openstack.common import timeutils
from taskflow.openstack.common import uuidutils
BASE = declarative_base()
# TODO(harlowja): remove when oslo.db exists
class TimestampMixin(object):
created_at = Column(DateTime, default=timeutils.utcnow)
updated_at = Column(DateTime, onupdate=timeutils.utcnow)
class Json(types.TypeDecorator, types.MutableType):
impl = types.Text
def process_bind_param(self, value, dialect):
return jsonutils.dumps(value)
def process_result_value(self, value, dialect):
return jsonutils.loads(value)
class ModelBase(TimestampMixin):
"""Base model for all taskflow objects"""
uuid = Column(String, default=uuidutils.generate_uuid,
primary_key=True, nullable=False, unique=True)
name = Column(String, nullable=True)
meta = Column(Json, nullable=True)
class LogBook(BASE, ModelBase):
"""Represents a logbook for a set of flows"""
__tablename__ = 'logbooks'
# Relationships
flowdetails = relationship("FlowDetail",
single_parent=True,
backref=backref("logbooks",
cascade="save-update, delete, "
"merge"))
class FlowDetail(BASE, ModelBase):
__tablename__ = 'flowdetails'
# Member variables
state = Column(String)
# Relationships
parent_uuid = Column(String, ForeignKey('logbooks.uuid'))
taskdetails = relationship("TaskDetail",
single_parent=True,
backref=backref("flowdetails",
cascade="save-update, delete, "
"merge"))
class TaskDetail(BASE, ModelBase):
__tablename__ = 'taskdetails'
# Member variables
state = Column(String)
results = Column(Json)
exception = Column(Json)
stacktrace = Column(Json)
version = Column(String)
# Relationships
parent_uuid = Column(String, ForeignKey('flowdetails.uuid'))
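# A minimal sketch of how these models chain together (hedged: the engine URL,
# names and states below are made up, and uuids are passed explicitly rather
# than relying on the column defaults, which only fire at flush time):
#
#     import sqlalchemy
#     from sqlalchemy.orm import sessionmaker
#
#     engine = sqlalchemy.create_engine('sqlite://')
#     BASE.metadata.create_all(engine)
#     session = sessionmaker(bind=engine)()
#
#     lb = LogBook(uuid='lb-1', name='example-book')
#     fd = FlowDetail(uuid='fd-1', name='flow-1', state='RUNNING', parent_uuid='lb-1')
#     td = TaskDetail(uuid='td-1', name='task-1', state='PENDING', parent_uuid='fd-1')
#     session.add_all([lb, fd, td])
#     session.commit()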
| jessicalucci/TaskManagement | taskflow/persistence/backends/sqlalchemy/models.py | Python | apache-2.0 | 3,197 | 0 |
import warnings
import sys
import argparse
from etk.extractors.cryptographic_hash_extractor import CryptographicHashExtractor
cryptographic_hash_extractor = CryptographicHashExtractor()
def add_arguments(parser):
"""
Parse arguments
Args:
parser (argparse.ArgumentParser)
"""
parser.description = 'Examples:\n' \
'python -m etk cryptographic_hash_extractor /tmp/input.txt\n' \
'cat /tmp/input.txt | python -m etk cryptographic_hash_extractor'
parser.add_argument('input_file', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
def run(args):
"""
Args:
args (argparse.Namespace)
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore')
for line in args.input_file:
extractions = cryptographic_hash_extractor.extract(line)
for e in extractions:
print(e.value)
| usc-isi-i2/etk | etk/cli/cryptographic_hash_extractor.py | Python | mit | 947 | 0.004224 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Description of the CEA FPA Campaign.
:History:
Created on Wed Oct 02 17:26:00 2019
:author: Ruyman Azzollini
"""
# IMPORT STUFF
from collections import OrderedDict
from pdb import set_trace as stop
import numpy as np
import copy
#from vison.pipe import lib as pilib
#from vison.support import context
from vison.support import utils
from vison.fpatests.cea_dec19 import FWD_WARM
from vison.fpatests.cea_dec19 import FPA_BIAS
from vison.fpatests.cea_dec19 import FPA_CHINJ
from vison.fpatests.cea_dec19 import FPA_DARK
# END IMPORT
def generate_test_sequence(toGen, elvis='FPA', FPAdesign='final'):
"""
| Function that generates a number of tests, as instances of their corresponding
task classes.
| Aimed at the TVAC campaign of the FPA at CEA (december 2019).
"""
taskslist = toGen.keys()
test_sequence = OrderedDict()
for taskname in taskslist:
if not toGen[taskname]:
continue
strip_taskname, iteration = utils.remove_iter_tag(taskname, Full=True)
_toGen = OrderedDict()
_toGen[strip_taskname] = True
ans = _generate_test_sequence(_toGen, elvis=elvis, FPAdesign=FPAdesign)
if iteration is not None:
for key in list(ans.keys()):
test_sequence['%s.%i' % (key, iteration)] = copy.deepcopy(ans[key])
else:
for key in list(ans.keys()):
test_sequence[key] = copy.deepcopy(ans[key])
return test_sequence
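# Example usage (illustrative sketch; which tests to enable is up to the
# campaign script driving this module):
#
#     toGen = OrderedDict([('FWD_WARM', True),
#                          ('DARK', True),
#                          ('BIAS_FWD_COLD', True)])
#     tests = generate_test_sequence(toGen, elvis='FPA', FPAdesign='final')
#     list(tests.keys())    # -> ['FWD_WARM', 'DARK', 'BIAS_FWD_COLD']
#
# Iteration tags such as 'DARK.1' are also accepted and preserved in the keys.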
def _generate_test_sequence(toGen, elvis='FPA', FPAdesign='final'):
""" """
#print 'GENERATING TEST SEQUENCE...'
test_sequence = OrderedDict()
_toGen = dict(FWD_WARM=False,
CHINJ=False,
DARK=False,
BIAS_RWDVS_WARM=False,
BIAS_RWDV_WARM=False,
BIAS_RWDVS_COLD=False,
BIAS_RWDV_COLD=False,
BIAS_FWD_COLD=False)
_toGen.update(toGen)
commoninputs = dict(elvis=elvis,
FPAdesign=FPAdesign)
# DARK-CURRENT RAMP
if _toGen['FWD_WARM']:
fwd_warm_inp = dict(
test='FWD_WARM')
fwd_warm_inp.update(commoninputs)
fwd_warm = FWD_WARM.FWD_WARM(inputs=fwd_warm_inp.copy())
test_sequence['FWD_WARM'] = copy.deepcopy(fwd_warm)
if _toGen['CHINJ']:
chinj_inp = dict(
test='CHINJ',
non=30,
noff=50)
chinj_inp.update(commoninputs)
chinj = FPA_CHINJ.CHINJ(inputs=chinj_inp.copy())
test_sequence['CHINJ'] = copy.deepcopy(chinj)
if _toGen['DARK']:
dark_inp = dict(
test='DARK',
exptime=565.)
dark_inp.update(commoninputs)
dark = FPA_DARK.DARK(inputs=dark_inp.copy())
test_sequence['DARK'] = copy.deepcopy(dark)
if _toGen['BIAS_RWDVS_WARM']:
rwdvs_warm_inp = dict(
test='BIAS_RWDVS_WARM',
temperature='WARM',
readmode='RWDVS')
rwdvs_warm_inp.update(commoninputs)
rwdvs_warm = FPA_BIAS.FPA_BIAS(inputs=rwdvs_warm_inp.copy())
test_sequence['BIAS_RWDVS_WARM'] = copy.deepcopy(rwdvs_warm)
if _toGen['BIAS_RWDV_WARM']:
rwdv_warm_inp = dict(
test='BIAS_RWDV_WARM',
temperature='WARM',
readmode='RWDV')
rwdv_warm_inp.update(commoninputs)
rwdv_warm = FPA_BIAS.FPA_BIAS(inputs=rwdv_warm_inp.copy())
test_sequence['BIAS_RWDV_WARM'] = copy.deepcopy(rwdv_warm)
if _toGen['BIAS_RWDVS_COLD']:
rwdvs_cold_inp = dict(
test='BIAS_RWDVS_COLD',
temperature='COLD',
readmode='RWDVS')
rwdvs_cold_inp.update(commoninputs)
rwdvs_cold = FPA_BIAS.FPA_BIAS(inputs=rwdvs_cold_inp.copy())
test_sequence['BIAS_RWDVS_COLD'] = copy.deepcopy(rwdvs_cold)
if _toGen['BIAS_RWDV_COLD']:
rwdv_cold_inp = dict(
test='BIAS_RWDV_COLD',
temperature='COLD',
readmode='RWDV')
rwdv_cold_inp.update(commoninputs)
rwdv_cold = FPA_BIAS.FPA_BIAS(inputs=rwdv_cold_inp.copy())
test_sequence['BIAS_RWDV_COLD'] = copy.deepcopy(rwdv_cold)
if _toGen['BIAS_FWD_COLD']:
fwd_cold_inp = dict(
test='BIAS_FWD_COLD',
temperature='COLD',
readmode='FWD')
fwd_cold_inp.update(commoninputs)
fwd_cold = FPA_BIAS.FPA_BIAS(inputs=fwd_cold_inp.copy())
test_sequence['BIAS_FWD_COLD'] = copy.deepcopy(fwd_cold)
return test_sequence
| ruymanengithub/vison | vison/campaign/CEAFPAcampaign.py | Python | gpl-3.0 | 4,667 | 0.003643 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import heapq, logging, os, re, socket, time, types
from proton import dispatch, generate_uuid, PN_ACCEPTED, SASL, symbol, ulong, Url
from proton import Collector, Connection, Delivery, Described, Endpoint, Event, Link, Terminus, Timeout
from proton import Message, Handler, ProtonException, Transport, TransportException, ConnectionException
from select import select
class OutgoingMessageHandler(Handler):
"""
A utility for simpler and more intuitive handling of delivery
events related to outgoing i.e. sent messages.
"""
def __init__(self, auto_settle=True, delegate=None):
self.auto_settle = auto_settle
self.delegate = delegate
def on_link_flow(self, event):
if event.link.is_sender and event.link.credit:
self.on_sendable(event)
def on_delivery(self, event):
dlv = event.delivery
if dlv.link.is_sender and dlv.updated:
if dlv.remote_state == Delivery.ACCEPTED:
self.on_accepted(event)
elif dlv.remote_state == Delivery.REJECTED:
self.on_rejected(event)
elif dlv.remote_state == Delivery.RELEASED or dlv.remote_state == Delivery.MODIFIED:
self.on_released(event)
if dlv.settled:
self.on_settled(event)
if self.auto_settle:
dlv.settle()
def on_sendable(self, event):
"""
Called when the sender link has credit and messages can
therefore be transferred.
"""
if self.delegate:
dispatch(self.delegate, 'on_sendable', event)
def on_accepted(self, event):
"""
Called when the remote peer accepts an outgoing message.
"""
if self.delegate:
dispatch(self.delegate, 'on_accepted', event)
def on_rejected(self, event):
"""
Called when the remote peer rejects an outgoing message.
"""
if self.delegate:
dispatch(self.delegate, 'on_rejected', event)
def on_released(self, event):
"""
Called when the remote peer releases an outgoing message. Note
        that this may be in response to either the RELEASED or MODIFIED
state as defined by the AMQP specification.
"""
if self.delegate:
dispatch(self.delegate, 'on_released', event)
def on_settled(self, event):
"""
Called when the remote peer has settled the outgoing
        message. This is the point at which it should never be
retransmitted.
"""
if self.delegate:
dispatch(self.delegate, 'on_settled', event)
def recv_msg(delivery):
msg = Message()
msg.decode(delivery.link.recv(delivery.pending))
delivery.link.advance()
return msg
class Reject(ProtonException):
"""
    An exception that indicates a message should be rejected
"""
pass
class Release(ProtonException):
"""
    An exception that indicates a message should be released
"""
pass
class Acking(object):
def accept(self, delivery):
"""
Accepts a received message.
"""
self.settle(delivery, Delivery.ACCEPTED)
def reject(self, delivery):
"""
Rejects a received message that is considered invalid or
unprocessable.
"""
self.settle(delivery, Delivery.REJECTED)
def release(self, delivery, delivered=True):
"""
Releases a received message, making it available at the source
for any (other) interested receiver. The ``delivered``
parameter indicates whether this should be considered a
delivery attempt (and the delivery count updated) or not.
"""
if delivered:
self.settle(delivery, Delivery.MODIFIED)
else:
self.settle(delivery, Delivery.RELEASED)
def settle(self, delivery, state=None):
if state:
delivery.update(state)
delivery.settle()
class IncomingMessageHandler(Handler, Acking):
"""
A utility for simpler and more intuitive handling of delivery
events related to incoming i.e. received messages.
"""
def __init__(self, auto_accept=True, delegate=None):
self.delegate = delegate
self.auto_accept = auto_accept
def on_delivery(self, event):
dlv = event.delivery
if not dlv.link.is_receiver: return
if dlv.readable and not dlv.partial:
event.message = recv_msg(dlv)
if event.link.state & Endpoint.LOCAL_CLOSED:
if self.auto_accept:
dlv.update(Delivery.RELEASED)
dlv.settle()
else:
try:
self.on_message(event)
if self.auto_accept:
dlv.update(Delivery.ACCEPTED)
dlv.settle()
except Reject:
dlv.update(Delivery.REJECTED)
dlv.settle()
except Release:
dlv.update(Delivery.MODIFIED)
dlv.settle()
elif dlv.updated and dlv.settled:
self.on_settled(event)
def on_message(self, event):
"""
Called when a message is received. The message itself can be
obtained as a property on the event. For the purpose of
        referring to this message in further actions (e.g. when
        explicitly accepting it), the ``delivery`` should be used; it is
        also obtainable via a property on the event.
"""
if self.delegate:
dispatch(self.delegate, 'on_message', event)
def on_settled(self, event):
if self.delegate:
dispatch(self.delegate, 'on_settled', event)
class EndpointStateHandler(Handler):
"""
A utility that exposes 'endpoint' events i.e. the open/close for
links, sessions and connections in a more intuitive manner. A
XXX_opened method will be called when both local and remote peers
have opened the link, session or connection. This can be used to
confirm a locally initiated action for example. A XXX_opening
method will be called when the remote peer has requested an open
that was not initiated locally. By default this will simply open
locally, which then triggers the XXX_opened call. The same applies
to close.
"""
def __init__(self, peer_close_is_error=False, delegate=None):
self.delegate = delegate
self.peer_close_is_error = peer_close_is_error
@classmethod
def is_local_open(cls, endpoint):
return endpoint.state & Endpoint.LOCAL_ACTIVE
@classmethod
def is_local_uninitialised(cls, endpoint):
return endpoint.state & Endpoint.LOCAL_UNINIT
@classmethod
def is_local_closed(cls, endpoint):
return endpoint.state & Endpoint.LOCAL_CLOSED
@classmethod
def is_remote_open(cls, endpoint):
return endpoint.state & Endpoint.REMOTE_ACTIVE
@classmethod
def is_remote_closed(cls, endpoint):
return endpoint.state & Endpoint.REMOTE_CLOSED
@classmethod
def print_error(cls, endpoint, endpoint_type):
if endpoint.remote_condition:
logging.error(endpoint.remote_condition.description)
elif cls.is_local_open(endpoint) and cls.is_remote_closed(endpoint):
logging.error("%s closed by peer" % endpoint_type)
def on_link_remote_close(self, event):
if event.link.remote_condition:
self.on_link_error(event)
elif self.is_local_closed(event.link):
self.on_link_closed(event)
else:
self.on_link_closing(event)
event.link.close()
def on_session_remote_close(self, event):
if event.session.remote_condition:
self.on_session_error(event)
elif self.is_local_closed(event.session):
self.on_session_closed(event)
else:
self.on_session_closing(event)
event.session.close()
def on_connection_remote_close(self, event):
if event.connection.remote_condition:
self.on_connection_error(event)
elif self.is_local_closed(event.connection):
self.on_connection_closed(event)
else:
self.on_connection_closing(event)
event.connection.close()
def on_connection_local_open(self, event):
if self.is_remote_open(event.connection):
self.on_connection_opened(event)
def on_connection_remote_open(self, event):
if self.is_local_open(event.connection):
self.on_connection_opened(event)
elif self.is_local_uninitialised(event.connection):
self.on_connection_opening(event)
event.connection.open()
def on_session_local_open(self, event):
if self.is_remote_open(event.session):
self.on_session_opened(event)
def on_session_remote_open(self, event):
if self.is_local_open(event.session):
self.on_session_opened(event)
elif self.is_local_uninitialised(event.session):
self.on_session_opening(event)
event.session.open()
def on_link_local_open(self, event):
if self.is_remote_open(event.link):
self.on_link_opened(event)
def on_link_remote_open(self, event):
if self.is_local_open(event.link):
self.on_link_opened(event)
elif self.is_local_uninitialised(event.link):
self.on_link_opening(event)
event.link.open()
def on_connection_opened(self, event):
if self.delegate:
dispatch(self.delegate, 'on_connection_opened', event)
def on_session_opened(self, event):
if self.delegate:
dispatch(self.delegate, 'on_session_opened', event)
def on_link_opened(self, event):
if self.delegate:
dispatch(self.delegate, 'on_link_opened', event)
def on_connection_opening(self, event):
if self.delegate:
dispatch(self.delegate, 'on_connection_opening', event)
def on_session_opening(self, event):
if self.delegate:
dispatch(self.delegate, 'on_session_opening', event)
def on_link_opening(self, event):
if self.delegate:
dispatch(self.delegate, 'on_link_opening', event)
def on_connection_error(self, event):
if self.delegate:
dispatch(self.delegate, 'on_connection_error', event)
else:
            self.print_error(event.connection, "connection")
def on_session_error(self, event):
if self.delegate:
dispatch(self.delegate, 'on_session_error', event)
else:
            self.print_error(event.session, "session")
event.connection.close()
def on_link_error(self, event):
if self.delegate:
dispatch(self.delegate, 'on_link_error', event)
else:
            self.print_error(event.link, "link")
event.connection.close()
def on_connection_closed(self, event):
if self.delegate:
dispatch(self.delegate, 'on_connection_closed', event)
def on_session_closed(self, event):
if self.delegate:
dispatch(self.delegate, 'on_session_closed', event)
def on_link_closed(self, event):
if self.delegate:
dispatch(self.delegate, 'on_link_closed', event)
def on_connection_closing(self, event):
if self.delegate:
dispatch(self.delegate, 'on_connection_closing', event)
elif self.peer_close_is_error:
self.on_connection_error(event)
def on_session_closing(self, event):
if self.delegate:
dispatch(self.delegate, 'on_session_closing', event)
elif self.peer_close_is_error:
self.on_session_error(event)
def on_link_closing(self, event):
if self.delegate:
dispatch(self.delegate, 'on_link_closing', event)
elif self.peer_close_is_error:
self.on_link_error(event)
def on_transport_tail_closed(self, event):
self.on_transport_closed(event)
def on_transport_closed(self, event):
if self.delegate and event.connection and self.is_local_open(event.connection):
dispatch(self.delegate, 'on_disconnected', event)
class MessagingHandler(Handler, Acking):
"""
A general purpose handler that makes the proton-c events somewhat
simpler to deal with and/or avoids repetitive tasks for common use
cases.
"""
def __init__(self, prefetch=10, auto_accept=True, auto_settle=True, peer_close_is_error=False):
self.handlers = []
if prefetch:
self.handlers.append(CFlowController(prefetch))
self.handlers.append(EndpointStateHandler(peer_close_is_error, self))
self.handlers.append(IncomingMessageHandler(auto_accept, self))
self.handlers.append(OutgoingMessageHandler(auto_settle, self))
def on_connection_error(self, event):
"""
Called when the peer closes the connection with an error condition.
"""
EndpointStateHandler.print_error(event.connection, "connection")
def on_session_error(self, event):
"""
Called when the peer closes the session with an error condition.
"""
EndpointStateHandler.print_error(event.session, "session")
event.connection.close()
def on_link_error(self, event):
"""
Called when the peer closes the link with an error condition.
"""
EndpointStateHandler.print_error(event.link, "link")
event.connection.close()
def on_reactor_init(self, event):
"""
Called when the event loop - the reactor - starts.
"""
if hasattr(event.reactor, 'subclass'):
setattr(event, event.reactor.subclass.__name__.lower(), event.reactor)
self.on_start(event)
def on_start(self, event):
"""
Called when the event loop starts. (Just an alias for on_reactor_init)
"""
pass
def on_connection_closed(self, event):
"""
Called when the connection is closed.
"""
pass
def on_session_closed(self, event):
"""
Called when the session is closed.
"""
pass
def on_link_closed(self, event):
"""
Called when the link is closed.
"""
pass
def on_connection_closing(self, event):
"""
Called when the peer initiates the closing of the connection.
"""
pass
def on_session_closing(self, event):
"""
Called when the peer initiates the closing of the session.
"""
pass
def on_link_closing(self, event):
"""
Called when the peer initiates the closing of the link.
"""
pass
def on_disconnected(self, event):
"""
Called when the socket is disconnected.
"""
pass
def on_sendable(self, event):
"""
Called when the sender link has credit and messages can
therefore be transferred.
"""
pass
def on_accepted(self, event):
"""
Called when the remote peer accepts an outgoing message.
"""
pass
def on_rejected(self, event):
"""
Called when the remote peer rejects an outgoing message.
"""
pass
def on_released(self, event):
"""
Called when the remote peer releases an outgoing message. Note
        that this may be in response to either the RELEASED or MODIFIED
state as defined by the AMQP specification.
"""
pass
def on_settled(self, event):
"""
Called when the remote peer has settled the outgoing
        message. This is the point at which it should never be
retransmitted.
"""
pass
def on_message(self, event):
"""
Called when a message is received. The message itself can be
obtained as a property on the event. For the purpose of
        referring to this message in further actions (e.g. when
        explicitly accepting it), the ``delivery`` should be used; it is
        also obtainable via a property on the event.
"""
pass
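# A minimal receiver built on MessagingHandler (an illustrative sketch; it
# assumes the reactor Container from proton.reactor and a broker address of
# the usual example form, neither of which is defined in this module):
#
#     from proton.reactor import Container
#
#     class Recv(MessagingHandler):
#         def __init__(self, url):
#             super(Recv, self).__init__()
#             self.url = url
#
#         def on_start(self, event):
#             event.container.create_receiver(self.url)
#
#         def on_message(self, event):
#             print(event.message.body)
#
#     Container(Recv("localhost:5672/examples")).run()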
class TransactionHandler(object):
"""
The interface for transaction handlers, i.e. objects that want to
be notified of state changes related to a transaction.
"""
def on_transaction_declared(self, event):
pass
def on_transaction_committed(self, event):
pass
def on_transaction_aborted(self, event):
pass
def on_transaction_declare_failed(self, event):
pass
def on_transaction_commit_failed(self, event):
pass
class TransactionalClientHandler(MessagingHandler, TransactionHandler):
"""
An extension to the MessagingHandler for applications using
transactions.
"""
def __init__(self, prefetch=10, auto_accept=False, auto_settle=True, peer_close_is_error=False):
super(TransactionalClientHandler, self).__init__(prefetch, auto_accept, auto_settle, peer_close_is_error)
def accept(self, delivery, transaction=None):
if transaction:
transaction.accept(delivery)
else:
super(TransactionalClientHandler, self).accept(delivery)
from proton import WrappedHandler
from cproton import pn_flowcontroller, pn_handshaker, pn_iohandler
class CFlowController(WrappedHandler):
def __init__(self, window=1024):
WrappedHandler.__init__(self, lambda: pn_flowcontroller(window))
class CHandshaker(WrappedHandler):
def __init__(self):
WrappedHandler.__init__(self, pn_handshaker)
class IOHandler(WrappedHandler):
def __init__(self):
WrappedHandler.__init__(self, pn_iohandler)
class PythonIO:
def __init__(self):
self.selectables = []
self.delegate = IOHandler()
def on_unhandled(self, method, event):
event.dispatch(self.delegate)
def on_selectable_init(self, event):
self.selectables.append(event.context)
def on_selectable_updated(self, event):
pass
def on_selectable_final(self, event):
sel = event.context
if sel.is_terminal:
self.selectables.remove(sel)
sel.release()
def on_reactor_quiesced(self, event):
reactor = event.reactor
# check if we are still quiesced, other handlers of
# on_reactor_quiesced could have produced events to process
if not reactor.quiesced: return
reading = []
writing = []
deadline = None
for sel in self.selectables:
if sel.reading:
reading.append(sel)
if sel.writing:
writing.append(sel)
if sel.deadline:
if deadline is None:
deadline = sel.deadline
else:
deadline = min(sel.deadline, deadline)
if deadline is not None:
timeout = deadline - time.time()
else:
timeout = reactor.timeout
if (timeout < 0): timeout = 0
timeout = min(timeout, reactor.timeout)
readable, writable, _ = select(reading, writing, [], timeout)
reactor.mark()
now = time.time()
for s in readable:
s.readable()
for s in writable:
s.writable()
for s in self.selectables:
if s.deadline and now > s.deadline:
s.expired()
reactor.yield_()
| wprice/qpid-proton | proton-c/bindings/python/proton/handlers.py | Python | apache-2.0 | 20,366 | 0.002062 |
"""Add customer foreign key
Revision ID: ccd5b0142a76
Revises: 243adac5e3e9
Create Date: 2017-03-14 18:59:50.505319
"""
# revision identifiers, used by Alembic.
revision = 'ccd5b0142a76'
down_revision = '243adac5e3e9'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_foreign_key('hail_customer_id', 'hail', 'customer', ['customer_id', 'added_by'], ['id', 'moteur_id'])
### end Alembic commands ###
def downgrade():
op.drop_constraint('hail_customer_id', 'hail', type_='foreignkey')
| openmaraude/APITaxi | APITaxi_models2/migrations/versions/20170314_18:59:50_ccd5b0142a76_add_customer_foreign_key.py.py | Python | agpl-3.0 | 631 | 0.011094 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
An auto-reloading standalone wiki server, useful for development.
"""
import hatta
import werkzeug
if __name__=="__main__":
config = hatta.WikiConfig()
config.parse_args()
# config.parse_files()
application = hatta.Wiki(config).application
host = config.get('interface', 'localhost')
port = int(config.get('port', 8080))
werkzeug.run_simple(host, port, application, use_reloader=True)
| thuydang/djagazin | docs/9_tmp/hatta-wiki-1.5.3/dev.py | Python | bsd-2-clause | 466 | 0.002146 |
from collections import OrderedDict
#*************************#
# PARAMS #
#*************************#
#Paths
#workspaceDir = "/Users/slundquist/workspace"
#filenames = [("sheng","/Users/slundquist/Desktop/ptLIF.txt")]
#filenames = [("sheng","/Users/slundquist/Desktop/retONtoLif.txt")]
workspaceDir = "/Users/dpaiton/Documents/Work/LANL/workspace" #Dylan Mac
probeFileDir = workspaceDir+"/iHouse/output"
filenames = [("label","path")]
filenames = [
# ("onStelVer1",probeFileDir+"/retONtoStellateVer1.txt"),
# ("onStelVer2",probeFileDir+"/retONtoStellateVer2.txt"),
# ("onStelVer3",probeFileDir+"/retONtoStellateVer3.txt"),
# ("onStelVer4",probeFileDir+"/retONtoStellateVer4.txt"),
# ("onStelHor",probeFileDir+"/retONtoStellateHor.txt"),
# ("onStelDia",probeFileDir+"/retONtoStellateDia.txt"),
# ("offStelVer",probeFileDir+"/retOFFtoStellateVer.txt"),
# ("offStelHor",probeFileDir+"/retOFFtoStellateHor.txt"),
# ("offStelDia",probeFileDir+"/retOFFtoStellateDia.txt"),
("onInterVer1",probeFileDir+"/retONtoInterVer1.txt")]
#filenames = [
# ("ptStellate",probeFileDir+"/ptStellate.txt"),
# ("ptInter",probeFileDir+"/ptInter.txt")]
rootFigOutDir = probeFileDir+"/analysis/probeFigs"
rootFigName = 'pr4Oja'
#Values for range of frames
startTime = 2000000
endTime = 2000100 #End must be under number of lines in file
#Which plots
timePlot = True
weightMap = True #Needs 'weight*' in data dictionary
#Other flags
numTCBins = 2 #number of bins for time course plot
doLegend = False #if True, time graph will have a legend
dispFigs = False #if True, display figures. Otherwise, print them to file.
#Data structure for scale, and data array to store all the data
data = OrderedDict()
#Made time for data
#TIME MUST EXIST AND BE FIRST IN THIS LIST
data['t'] = []
####
####OJA STDP CONN
####
#data['prOjaTr*'] = []
#data['prStdpTr*'] = []
#######
#data['prOjaTr_0_0'] = []
#data['prOjaTr_0_1'] = []
#data['prOjaTr_0_2'] = []
#data['prOjaTr_0_3'] = []
data['prOjaTr_0_4'] = []
#data['prOjaTr_0_5'] = []
#data['prOjaTr_0_6'] = []
#data['prOjaTr_0_7'] = []
#data['prOjaTr_0_8'] = []
#data['prOjaTr_0_9'] = []
#data['prOjaTr_1_0'] = []
#data['prOjaTr_1_1'] = []
#data['prOjaTr_1_2'] = []
#data['prOjaTr_1_3'] = []
#data['prOjaTr_1_4'] = []
#data['prOjaTr_1_5'] = []
#data['prOjaTr_1_6'] = []
#data['prOjaTr_1_18'] = []
#data['prOjaTr_1_19'] = []
#data['prOjaTr_1_20'] = []
#data['prOjaTr_1_21'] = []
#data['prOjaTr_1_22'] = []
#data['prOjaTr_1_23'] = []
#data['prOjaTr_1_24'] = []
#data['prOjaTr_1_25'] = []
#######
#data['poIntTr'] = []
#data['poStdpTr'] = []
#data['poOjaTr'] = []
#######
#data['ampLTD'] = []
#######
#data['weight_0_0'] = []
#data['weight_0_1'] = []
#data['weight_0_2'] = []
#data['weight_0_3'] = []
#data['weight_0_4'] = []
#data['weight_0_5'] = []
#data['weight_0_6'] = []
#data['weight_0_7'] = []
#data['weight_0_8'] = []
#data['weight_0_9'] = []
#######
#data['weight*'] = []
####
####lif layer
####
#data['V'] = []
#data['Vth'] = []
#data['a'] = []
#set scales for plots. Key must be the same as what is in the data dictionary
scale = {}
#scale['weight_0_0'] = 100
#scale['weight_0_1'] = 100
#scale['weight_0_2'] = 100
#scale['weight_0_3'] = 100
#scale['weight_0_4'] = 100
#scale['weight_0_5'] = 100
#scale['weight_0_6'] = 100
#scale['weight_0_7'] = 100
#scale['weight_0_8'] = 100
#scale['weight_0_9'] = 100
#scale['weight_0_10'] = 100
#scale['weight_0_11'] = 100
#scale['weight_0_12'] = 100
#scale['weight_0_13'] = 100
#scale['weight_0_14'] = 100
#scale['weight_0_15'] = 100
#scale['weight_0_16'] = 100
#scale['weight_0_17'] = 100
#scale['weight_0_18'] = 100
#scale['weight_0_19'] = 100
#scale['weight_0_20'] = 100
#scale['weight_0_21'] = 100
#scale['weight_0_22'] = 100
#scale['weight_0_23'] = 100
#scale['weight_0_24'] = 100
#scale['weight_4_0'] = 100
#scale['weight_4_1'] = 100
#scale['weight_4_2'] = 100
#scale['weight_4_3'] = 100
#scale['weight_4_4'] = 100
#scale['weight_4_5'] = 100
#scale['weight_4_6'] = 100
#scale['weight_4_7'] = 100
#scale['weight_4_8'] = 100
#scale['weight_4_9'] = 100
#scale['weight_4_10'] = 100
#scale['weight_4_11'] = 100
#scale['weight_4_12'] = 100
#scale['weight_4_13'] = 100
#scale['weight_4_14'] = 100
#scale['weight_4_15'] = 100
#scale['weight_4_16'] = 100
#scale['weight_4_17'] = 100
#scale['weight_4_18'] = 100
#scale['weight_4_19'] = 100
#scale['weight_4_20'] = 100
#scale['weight_4_21'] = 100
#scale['weight_4_22'] = 100
#scale['weight_4_23'] = 100
#scale['weight_4_24'] = 100
| dpaiton/OpenPV | pv-core/python/probe_analysis/readProbeParams.py | Python | epl-1.0 | 4,842 | 0.028501 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wagtaildocs', '0003_add_verbose_names'),
('articles', '0075_auto_20151015_2022'),
]
operations = [
migrations.AddField(
model_name='articlepage',
name='video_document',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtaildocs.Document', null=True),
),
]
| CIGIHub/greyjay | greyjay/articles/migrations/0076_articlepage_video_document.py | Python | mit | 598 | 0.001672 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" The Ibis Addons Operations are intended to help facilitate new expressions
when required before they can be pushed upstream to Ibis.
Raw SQL Filters:
The ability to inject RawSQL into a query DNE in Ibis. It must be built out
and applied to each Ibis Data Source directly as each has
extended it's own registry. Eventually this can potentially be pushed to
Ibis as an override, though it would not apply for Pandas and other
non-textual languages.
"""
import ibis
import sqlalchemy
import ibis.expr.api
from ibis_bigquery.compiler import (
reduction as bq_reduction,
BigQueryExprTranslator
)
import ibis.expr.datatypes as dt
from ibis.expr.operations import (
Arg, Comparison, Reduction, ValueOp
)
import ibis.expr.rules as rlz
from ibis.expr.types import (
BinaryValue, IntegerColumn, StringValue
)
from ibis.backends.impala.compiler import ImpalaExprTranslator
from ibis.backends.pandas import client as _pandas_client
from ibis.backends.base_sqlalchemy.alchemy import AlchemyExprTranslator
from third_party.ibis.ibis_oracle.compiler import OracleExprTranslator
from third_party.ibis.ibis_teradata.compiler import TeradataExprTranslator
# from third_party.ibis.ibis_mssql.compiler import MSSQLExprTranslator # TODO figure how to add RAWSQL
# from third_party.ibis.ibis_snowflake.compiler import SnowflakeExprTranslator
# from third_party.ibis.ibis_oracle.compiler import OracleExprTranslator <<<<<< DB2
class BitXor(Reduction):
"""Aggregate bitwise XOR operation."""
arg = Arg(rlz.column(rlz.integer))
where = Arg(rlz.boolean, default=None)
output_type = rlz.scalar_like('arg')
class Hash(ValueOp):
arg = Arg(rlz.any)
how = Arg(rlz.isin({'fnv', 'farm_fingerprint'}))
output_type = rlz.shape_like('arg', dt.int64)
class HashBytes(ValueOp):
arg = Arg(rlz.one_of([rlz.value(dt.string), rlz.value(dt.binary)]))
how = Arg(rlz.isin({'sha256', 'farm_fingerprint'}))
output_type = rlz.shape_like('arg', 'binary')
class RawSQL(Comparison):
pass
def compile_hash(numeric_value, how):
return Hash(numeric_value, how=how).to_expr()
# Note: this second definition (for binary values) overrides the numeric variant
# above; both simply wrap the Hash op.
def compile_hash(binary_value, how):
return Hash(binary_value, how=how).to_expr()
def format_hash_bigquery(translator, expr):
op = expr.op()
arg, how = op.args
arg_formatted = translator.translate(arg)
if how == 'farm_fingerprint':
return f'farm_fingerprint({arg_formatted})'
else:
raise NotImplementedError(how)
def compile_hashbytes(binary_value, how):
return HashBytes(binary_value, how=how).to_expr()
# Note: this second format_hash_bigquery definition shadows the one above; only this
# version is registered with BigQueryExprTranslator below.
def format_hash_bigquery(translator, expr):
arg, how = expr.op().args
compiled_arg = translator.translate(arg)
if how == "farm_fingerprint":
return f"FARM_FINGERPRINT({compiled_arg})"
else:
raise ValueError(f"unexpected value for 'how': {how}")
def format_hashbytes_bigquery(translator, expr):
arg, how = expr.op().args
compiled_arg = translator.translate(arg)
if how == "sha256":
return f"SHA256({compiled_arg})"
elif how == "farm_fingerprint":
return f"FARM_FINGERPRINT({compiled_arg})"
else:
raise ValueError(f"unexpected value for 'how': {how}")
def format_hashbytes_teradata(translator, expr):
arg, how = expr.op().args
compiled_arg = translator.translate(arg)
if how == "sha256":
return f"hash_sha256({compiled_arg})"
elif how == "sha512":
return f"hash_sha512({compiled_arg})"
elif how == "md5":
return f"hash_md5({compiled_arg})"
else:
raise ValueError(f"unexpected value for 'how': {how}")
def compile_raw_sql(table, sql):
op = RawSQL(table[table.columns[0]].cast(dt.string), ibis.literal(sql))
return op.to_expr()
def format_raw_sql(translator, expr):
op = expr.op()
rand_col, raw_sql = op.args
return raw_sql.op().args[0]
def sa_format_raw_sql(translator, expr):
op = expr.op()
rand_col, raw_sql = op.args
return sqlalchemy.text(raw_sql.op().args[0])
_pandas_client._inferable_pandas_dtypes["floating"] = _pandas_client.dt.float64
IntegerColumn.bit_xor = ibis.expr.api._agg_function('bit_xor', BitXor, True)
BinaryValue.hash = compile_hash
StringValue.hash = compile_hash
BinaryValue.hashbytes = compile_hashbytes
StringValue.hashbytes = compile_hashbytes
BigQueryExprTranslator._registry[BitXor] = bq_reduction('BIT_XOR')
BigQueryExprTranslator._registry[Hash] = format_hash_bigquery
BigQueryExprTranslator._registry[HashBytes] = format_hashbytes_bigquery
AlchemyExprTranslator._registry[RawSQL] = format_raw_sql
BigQueryExprTranslator._registry[RawSQL] = format_raw_sql
ImpalaExprTranslator._registry[RawSQL] = format_raw_sql
OracleExprTranslator._registry[RawSQL] = sa_format_raw_sql
TeradataExprTranslator._registry[RawSQL] = format_raw_sql
TeradataExprTranslator._registry[HashBytes] = format_hashbytes_teradata
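# --- Illustrative sketch (not part of the upstream module) --------------------------
# The module docstring describes injecting raw SQL as a filter. One minimal way to use
# the RawSQL op registered above, assuming `table` is an Ibis table expression bound to
# one of the registered backends and `predicate_sql` is a textual boolean predicate:
def _example_raw_sql_filter(table, predicate_sql):
    """Return `table` filtered by a raw SQL predicate (sketch only, unused here)."""
    return table[compile_raw_sql(table, predicate_sql)]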
| GoogleCloudPlatform/professional-services-data-validator | third_party/ibis/ibis_addon/operations.py | Python | apache-2.0 | 5,439 | 0.001839 |
"""This script reponsible put all of send_get_request() function results
into list, gracefull exit any script import it and return analytics
"""
import time
import signal
import sys
from requests_futures.sessions import FuturesSession
tasks = []
session = FuturesSession()
def bg_cb(sess, resp):
"Callback function when requests done"
timestamp = time.time() * 1000
tasks.append({
"timestamp": timestamp,
"status": resp.status_code
})
print("%d - %d - %s" % (timestamp, resp.status_code, resp.request.method))
print(resp.url)
def footer():
"Return result of testing process"
is_find_start = True
count = 0
    start, end = 0, 0  # initialize these in case there is no downtime
error_dict = {}
for task in tasks:
if is_find_start:
if task.get('status') >= 500:
is_find_start = False
start = task.get('timestamp')
else:
                try:
                    error_dict[task.get('status')] += 1
                except KeyError:
                    error_dict[task.get('status')] = 1
if task.get('status') / 100 < 4:
end = task.get('timestamp')
for key in error_dict:
if (int(key) / 100) == 5:
count += error_dict.get(key)
print("Downtime for rolling upgrade process: {} ms" .format(end-start))
print("Number of fail requests (status code >= 500): {}" .format(count))
print(error_dict)
def exit_gracefully(signum, frame):
# Source: Antti Haapala - http://stackoverflow.com/a/18115530
signal.signal(signal.SIGINT, original_sigint)
try:
if raw_input("\nReally quit? (y/n)> ").lower().startswith('y'):
footer()
sys.exit(1)
except KeyboardInterrupt:
print("Ok ok, quitting")
sys.exit(1)
# (The handler is registered at the bottom of the file, after original_sigint is
# captured; registering it here as well would make getsignal() below return this
# handler instead of the default one.)
def send_request(url, method, headers=None, data=None, **kwargs):
if method == 'GET':
return session.get(url, headers=headers,
background_callback=bg_cb, **kwargs)
elif method == 'POST':
return session.post(url, headers=headers,
data=data, background_callback=bg_cb, **kwargs)
elif method == 'PUT':
return session.put(url, headers=headers,
data=data, background_callback=bg_cb, **kwargs)
elif method == 'PATCH':
return session.patch(url, headers=headers,
data=data, background_callback=bg_cb, **kwargs)
elif method == 'DELETE':
return session.delete(url, headers=headers, background_callback=bg_cb, **kwargs)
else:
print("Method does not support: {}" .format(method))
original_sigint = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, exit_gracefully)
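# Usage sketch (the URL and request count below are assumptions for illustration):
# every response is logged by bg_cb(), and pressing Ctrl-C prompts for confirmation
# before footer() prints the downtime summary.
if __name__ == '__main__':
    futures = [send_request('http://localhost:8080/health', 'GET') for _ in range(100)]
    for f in futures:
        f.result()  # block until each request has completed
    footer()        # print downtime / failure statistics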
| daikk115/test-rolling-upgrade-openstack | graceful_exit.py | Python | mit | 2,834 | 0.001059 |
# -*- coding: utf-8 -*-
# Copyright(C) 2017 Vincent A
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from weboob.tools.backend import Module
from weboob.capabilities.weather import CapWeather
from .browser import LameteoagricoleBrowser
__all__ = ['LameteoagricoleModule']
class LameteoagricoleModule(Module, CapWeather):
NAME = 'lameteoagricole'
DESCRIPTION = u'lameteoagricole website'
MAINTAINER = u'Vincent A'
EMAIL = '[email protected]'
LICENSE = 'AGPLv3+'
VERSION = '2.1'
BROWSER = LameteoagricoleBrowser
def iter_city_search(self, pattern):
return self.browser.iter_cities(pattern)
def get_current(self, city_id):
return self.browser.get_current(city_id)
def iter_forecast(self, city_id):
return self.browser.iter_forecast(city_id)
| laurentb/weboob | modules/lameteoagricole/module.py | Python | lgpl-3.0 | 1,524 | 0 |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from froi.algorithm import imtool
class IntersectDialog(QDialog):
"""A dialog for action of intersection."""
def __init__(self, model, parent=None):
super(IntersectDialog, self).__init__(parent)
self._model = model
self._init_gui()
self._create_actions()
def _init_gui(self):
"""Initialize GUI."""
# set dialog title
self.setWindowTitle("Intersect")
# initialize widgets
source_label = QLabel("Source")
self.source_combo = QComboBox()
mask_label = QLabel("Mask")
self.mask_combo = QComboBox()
vol_list = self._model.getItemList()
self.source_combo.addItems(QStringList(vol_list))
row = self._model.currentIndex().row()
self.source_combo.setCurrentIndex(row)
self.mask_combo.addItems(QStringList(vol_list))
out_label = QLabel("Output volume name")
self.out_edit = QLineEdit()
# layout config
grid_layout = QGridLayout()
#grid_layout.addWidget(source_label, 0, 0)
#grid_layout.addWidget(self.source_combo, 0, 1)
grid_layout.addWidget(mask_label, 0, 0)
grid_layout.addWidget(self.mask_combo, 0, 1)
grid_layout.addWidget(out_label, 1, 0)
grid_layout.addWidget(self.out_edit, 1, 1)
# button config
self.run_button = QPushButton("Run")
self.cancel_button = QPushButton("Cancel")
hbox_layout = QHBoxLayout()
hbox_layout.addWidget(self.run_button)
hbox_layout.addWidget(self.cancel_button)
vbox_layout = QVBoxLayout()
vbox_layout.addLayout(grid_layout)
vbox_layout.addLayout(hbox_layout)
self.setLayout(vbox_layout)
def _create_actions(self):
self.source_combo.currentIndexChanged.connect(self._create_output)
self.mask_combo.currentIndexChanged.connect(self._create_output)
self.run_button.clicked.connect(self._run_intersect)
self.cancel_button.clicked.connect(self.done)
def _create_output(self):
source_name = self.source_combo.currentText()
mask_name = self.mask_combo.currentText()
output_name = '_'.join([str(source_name), str(mask_name)])
self.out_edit.setText(output_name)
def _run_intersect(self):
"""Run an intersecting processing."""
vol_name = str(self.out_edit.text())
if not vol_name:
QMessageBox.critical(self, "No output volume name",
"Please specify output volume's name!")
return
source_row = self.source_combo.currentIndex()
mask_row = self.mask_combo.currentIndex()
source_data = self._model.data(self._model.index(source_row),
Qt.UserRole + 4)
mask_data = self._model.data(self._model.index(mask_row),
Qt.UserRole + 4)
new_vol = imtool.intersect(source_data, mask_data)
self._model.addItem(new_vol,
None,
vol_name,
self._model._data[0].get_header(),
0, 100, 255, 'rainbow')
self.done(0)
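# Sketch (assumption): froi.algorithm.imtool.intersect is expected to behave roughly
# like the helper below -- keep source voxels only where the mask is non-zero. This is
# illustrative only and not the plugin's actual implementation.
def _intersect_sketch(source_data, mask_data):
    import numpy as np
    return np.where(mask_data != 0, source_data, 0)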
| sealhuang/FreeROI | froi/gui/component/intersectdialog.py | Python | bsd-3-clause | 3,390 | 0.00177 |
import time
from aquests.athreads import socket_map
from aquests.athreads import trigger
from rs4.cbutil import tuple_cb
from aquests.client.asynconnect import AsynSSLConnect, AsynConnect
from aquests.dbapi.dbconnect import DBConnect
import threading
from aquests.protocols.http import request as http_request
from aquests.protocols.http import request_handler as http_request_handler
from aquests.protocols.http2 import request_handler as http2_request_handler
from aquests.protocols.grpc.request import GRPCRequest
from aquests.protocols.http import response as http_response
from aquests.protocols.ws import request_handler as ws_request_handler
from aquests.protocols.ws import request as ws_request
from . import rcache
from skitai import lifetime
from aquests import asyncore
import sys
import inspect
from skitai import exceptions
from skitai import REQFAIL, UNSENT, TIMEOUT, NETERR, NORMAL
from ...corequest import corequest, response
import sqlite3
try:
import psycopg2
except ImportError:
class PGIntegrityError (Exception):
pass
else:
PGIntegrityError = psycopg2.IntegrityError
DEFAULT_TIMEOUT = 10
WAIT_POLL = False
class OperationError (Exception):
pass
class Result (response, rcache.Result):
def __init__ (self, id, status, response, ident = None):
rcache.Result.__init__ (self, status, ident)
self.node = id
self.__response = response
def __getattr__ (self, attr):
return getattr (self.__response, attr)
def reraise (self):
if self.status_code >= 300:
try:
self.__response.expt
except AttributeError:
                # redirecting to HTTPError
raise exceptions.HTTPError ("%d %s" % (self.status_code, self.reason))
else:
self.__response.raise_for_status ()
return self
def close (self):
self.__response = None
def cache (self, timeout = 60, cache_if = (200,)):
if not timeout:
return
if self.status != NORMAL or self.status_code not in cache_if:
return
rcache.Result.cache (self, timeout)
return self
def fetch (self, cache = None, cache_if = (200,), one = False):
self.reraise ()
self.cache (cache, cache_if)
if one:
if len (self.data) == 0:
raise exceptions.HTTPError ("410 Partial Not Found")
if len (self.data) != 1:
raise exceptions.HTTPError ("409 Conflict")
if isinstance (self.data, dict):
return self.data.popitem () [1]
return self.data [0]
return self.data
def one (self, cache = None, cache_if = (200,)):
try:
return self.fetch (cache, cache_if, True)
except (PGIntegrityError, sqlite3.IntegrityError):
# primary or unique index error
raise exceptions.HTTPError ("409 Conflict")
def commit (self):
self.reraise ()
class Results (response, rcache.Result):
def __init__ (self, results, ident = None):
self.results = results
self.status_code = [rs.status_code for rs in results]
rcache.Result.__init__ (self, [rs.status for rs in self.results], ident)
def __iter__ (self):
return self.results.__iter__ ()
@property
def data (self):
return [r.data for r in self.results]
@property
def text (self):
return [r.text for r in self.results]
def reraise (self):
[r.reraise () for r in self.results]
def cache (self, timeout = 60, cache_if = (200,)):
        if any(rs.status != NORMAL or rs.status_code not in cache_if for rs in self.results):
return
rcache.Result.cache (self, timeout)
return self
def fetch (self, cache = None, cache_if = (200,)):
self.cache (cache, cache_if)
return [r.fetch () for r in self.results]
def one (self, cache = None, cache_if = (200,)):
self.cache (cache, cache_if)
return [r.one () for r in self.results]
class Dispatcher:
def __init__ (self, cv, id, ident = None, filterfunc = None, cachefs = None, callback = None):
self._cv = cv
self.id = id
self.ident = ident
self.filterfunc = filterfunc
self.cachefs = cachefs
self.callback = callback
self.creation_time = time.time ()
self.status = UNSENT
self.result = None
self.handler = None
def get_id (self):
return self.id
def get_status (self):
with self._cv:
return self.status
def request_failed (self):
self.status = REQFAIL
tuple_cb (self, self.callback)
def set_status (self, code, result = None):
with self._cv:
self.status = code
if result:
self.result = result
return code
def get_result (self):
if not self.result:
if self.get_status () == REQFAIL:
self.result = Result (self.id, REQFAIL, http_response.FailedResponse (731, "Request Failed"), self.ident)
else:
self.result = Result (self.id, TIMEOUT, http_response.FailedResponse (730, "Timeout"), self.ident)
return self.result
def do_filter (self):
if self.filterfunc:
self.filterfunc (self.result)
def handle_cache (self, response):
        # a cache hit is treated as a NORMAL result
        self.set_status (NORMAL, Result (self.id, NORMAL, response, self.ident))
def handle_result (self, handler):
if self.get_status () == TIMEOUT:
# timeout, ignore
return
response = handler.response
# DON'T do_filter here, it blocks select loop
if response.code >= 700:
if response.code == 702:
status = TIMEOUT
else:
status = NETERR
else:
status = NORMAL
result = Result (self.id, status, response, self.ident)
cakey = response.request.get_cache_key ()
if self.cachefs and cakey and response.max_age:
self.cachefs.save (
cakey,
response.get_header ("content-type"), response.content,
response.max_age, 0
)
handler.callback = None
handler.response = None
self.set_status (status, result)
tuple_cb (self, self.callback)
class Task (corequest):
DEFAULT_CACHE_TIMEOUT = 42
proto_map = dict (
rpc = http_request.XMLRPCRequest,
xmlrpc = http_request.XMLRPCRequest,
jsonrpc = http_request.JSONRPCRequest,
grpc = GRPCRequest
)
def __init__ (self,
cluster,
uri,
params = None,
reqtype = "get",
headers = None,
auth = None,
meta = None,
use_cache = False,
mapreduce = True,
filter = None,
callback = None,
cache = None,
timeout = 10,
origin = None,
cachefs = None,
logger = None
):
self._uri = uri
self._params = params
self._headers = headers
self._reqtype = reqtype
self._auth = auth
self.set_defaults (cluster, meta, use_cache, mapreduce, filter, callback, cache, timeout, origin, logger, cachefs)
if not self._reqtype.lower ().endswith ("rpc"):
self._build_request ("", self._params)
@classmethod
def add_proto (cls, name, class_):
cls.proto_map [name] = class_
def set_defaults (self, cluster, meta, use_cache, mapreduce, filter, callback, cache, timeout, origin, logger, cachefs = None):
self._cluster = cluster
self._meta = meta or {}
self._use_cache = use_cache
self._mapreduce = mapreduce
self._filter = filter
self._callback = callback
self._cache_timeout = cache
self._timeout = timeout
self._origin = origin
self._cachefs = cachefs
self._logger = logger
self._requests = {}
self._results = []
self._canceled = False
self._init_time = time.time ()
self._cv = None
self._retry = 0
self._numnodes = 0
self._cached_result = None
self._cached_request_args = None
self._request = None
self._ccv = None
if self._cluster:
nodes = self._cluster.get_nodes ()
self._numnodes = len (nodes)
if self._mapreduce:
self._nodes = nodes
else: # anyone of nodes
self._nodes = [None]
def __del__ (self):
self._cv = None
self._results = []
def _get_ident (self):
cluster_name = self._cluster.get_name ()
if cluster_name == "__socketpool__":
_id = "%s/%s" % (self._uri, self._reqtype)
else:
_id = "%s/%s/%s" % (cluster_name, self._uri, self._reqtype)
_id += "/%s/%s" % self._cached_request_args
_id += "%s" % (
self._mapreduce and "/M" or ""
)
return _id
def _add_header (self, n, v):
if self._headers is None:
self._headers = {}
self._headers [n] = v
def _handle_request (self, request, rs, asyncon, handler):
if self._cachefs:
            # IMP: manual address setting
request.set_address (asyncon.address)
cakey = request.get_cache_key ()
if cakey:
cachable = self._cachefs.is_cachable (
request.get_header ("cache-control"),
request.get_header ("cookie") is not None,
request.get_header ("authorization") is not None,
request.get_header ("pragma")
)
if cachable:
hit, compressed, max_age, content_type, content = self._cachefs.get (cakey, undecompressible = 0)
if hit:
header = "HTTP/1.1 200 OK\r\nContent-Type: %s\r\nX-Skitaid-Cache-Lookup: %s" % (
content_type, hit == 1 and "MEM_HIT" or "HIT"
)
response = http_response.Response (request, header)
response.collect_incoming_data (content)
response.done ()
asyncon.set_active (False)
rs.handle_cache (response)
return 0
r = handler (asyncon, request, rs.handle_result)
if asyncon.get_proto () and asyncon.isconnected ():
asyncon.handler.handle_request (r)
else:
r.handle_request ()
return 1
def _build_request (self, method, params):
self._cached_request_args = (method, params) # backup for retry
if self._use_cache and rcache.the_rcache:
self._cached_result = rcache.the_rcache.get (self._get_ident (), self._use_cache)
if self._cached_result is not None:
self._cached_result.meta = self._meta
self._callback and tuple_cb (self._cached_result, self._callback)
return
else:
self._use_cache = False
requests = 0
while self._avails ():
if self._cluster.get_name () != "__socketpool__":
asyncon = self._get_connection (None)
else:
asyncon = self._get_connection (self._uri)
self._auth = self._auth or asyncon.get_auth ()
_reqtype = self._reqtype.lower ()
rs = Dispatcher (
self._cv, asyncon.address,
ident = not self._mapreduce and self._get_ident () or None,
filterfunc = self._filter, cachefs = self._cachefs,
callback = self._collect
)
self._requests [rs] = asyncon
args = (params, self._headers, self._auth, self._logger, self._meta)
try:
if _reqtype in ("ws", "wss"):
handler = ws_request_handler.RequestHandler
request = ws_request.Request (self._uri, *args)
else:
if not self._use_cache:
self._add_header ("Cache-Control", "no-cache")
handler = http_request_handler.RequestHandler
try:
class_ = self.proto_map [_reqtype]
except KeyError:
if _reqtype == "upload":
request = http_request.HTTPMultipartRequest (self._uri, _reqtype, *args)
else:
request = http_request.HTTPRequest (self._uri, _reqtype, *args)
else:
request = class_ (self._uri, method, *args)
requests += self._handle_request (request, rs, asyncon, handler)
except:
self._logger ("Request Creating Failed", "fail")
self._logger.trace ()
rs.request_failed ()
asyncon.set_active (False)
continue
if requests:
            self._request = request # sample for unit tests
trigger.wakeup ()
if _reqtype [-3:] == "rpc":
return self
def _avails (self):
return len (self._nodes)
def _get_connection (self, id = None):
if id is None: id = self._nodes.pop ()
else: self._nodes = []
asyncon = self._cluster.get (id)
self._setup (asyncon)
return asyncon
def _setup (self, asyncon):
asyncon.set_timeout (self._timeout)
if self._cv is None:
self._cv = asyncon._cv
def _cancel (self):
with self._cv:
self._canceled = True
def _count (self):
with self._cv:
return len (self._requests)
#---------------------------------------------------------
def _fail_log (self, status):
if self._origin:
self._logger ("backend status is {}, {} at {} LINE {}: {}".format (
status, self._origin [3], self._origin [1], self._origin [2], self._origin [4][0].strip ()
), "debug")
def _collect (self, rs, failed = False):
with self._cv:
if not failed and self._canceled:
return
try:
asyncon = self._requests.pop (rs)
except KeyError:
return
status = rs.get_status ()
if status == REQFAIL:
with self._cv:
self._results.append (rs)
self._cluster.report (asyncon, True) # not asyncons' Fault
elif status == TIMEOUT:
with self._cv:
self._results.append (rs)
self._cluster.report (asyncon, False) # not asyncons' Fault
elif not self._mapreduce and status == NETERR and self._retry < (self._numnodes - 1):
self._logger ("cluster response error, switch to another...", "fail")
self._cluster.report (asyncon, False) # exception occured
with self._cv:
self._retry += 1
self._canceled = False
self._nodes = [None]
return self.rerequest ()
elif status >= NETERR:
with self._cv:
self._results.append (rs)
if status == NETERR:
self._cluster.report (asyncon, False) # exception occured
else:
self._cluster.report (asyncon, True) # well-functioning
rs.do_filter ()
with self._cv:
requests = self._requests
callback, self._callback = self._callback, None
if not requests:
if callback:
self._do_callback (callback)
elif not failed:
cv = self._ccv is not None and self._ccv or self._cv
with cv:
cv.notify_all ()
def _do_callback (self, callback):
result = self.dispatch (wait = False)
tuple_cb (result, callback)
#-----------------------------------------------------------------
def rerequest (self):
self._build_request (*self._cached_request_args)
def reset_timeout (self, timeout, ccv = None):
with self._cv:
self._timeout = timeout
self._ccv = ccv
asyncons = list (self._requests.values ())
if timeout > 0:
for asyncon in asyncons:
asyncon.set_timeout (timeout)
def set_callback (self, callback, reqid = None, timeout = None):
if reqid is not None:
self._meta ["__reqid"] = reqid
if self._cv:
with self._cv:
requests = self._requests
self._callback = callback
else:
# already finished or will use cache
requests = self._requests
self._callback = callback
if not requests:
return self._do_callback (callback)
timeout and self.reset_timeout (timeout)
# synchronous methods ----------------------------------------------
def _wait (self, timeout = None):
timeout and self.reset_timeout (timeout)
remain = self._timeout - (time.time () - self._init_time)
if remain > 0:
with self._cv:
if self._requests and not self._canceled:
self._cv.wait (remain)
self._canceled = True
requests = list (self._requests.items ())
for rs, asyncon in requests:
rs.set_status (TIMEOUT)
            asyncon.handle_abort () # abort immediately
self._collect (rs, failed = True)
def dispatch (self, cache = None, cache_if = (200,), timeout = None, wait = True, reraise = False):
if self._cached_result is not None:
return self._cached_result
wait and self._wait (timeout)
rss = [rs.get_result () for rs in self._results]
for rs in rss:
if rs.status == NORMAL and rs.status_code < 300:
continue
self._fail_log (rs.status)
reraise and rs.reraise ()
if self._mapreduce:
self._cached_result = Results (rss, ident = self._get_ident ())
else:
self._cached_result = rss [0]
self.cache (cache, cache_if)
return self._cached_result
def dispatch_or_throw (self, cache = None, cache_if = (200,), timeout = None):
return self.dispatch (cache, cache_if, reraise = True, timeout = timeout)
def none_or_dispatch (self, cache = None, cache_if = (200,), timeout = None):
r = self.dispatch (cache, cache_if, reraise = True, timeout = timeout)
if r.data is not None:
return r
def wait (self, timeout = None, reraise = False):
return self.dispatch (reraise = reraise, timeout = timeout)
# direct access to data ----------------------------------------------
def commit (self, timeout = None):
return self.wait (timeout, True)
wait_or_throw = commit
def fetch (self, cache = None, cache_if = (200,), timeout = None):
res = self._cached_result or self.dispatch (timeout = timeout, reraise = True)
return res.fetch (cache or self._cache_timeout, cache_if)
def one (self, cache = None, cache_if = (200,), timeout = None):
try:
res = self._cached_result or self.dispatch (timeout = timeout, reraise = True)
except (PGIntegrityError, sqlite3.IntegrityError):
raise exceptions.HTTPError ("409 Conflict")
return res.one (cache or self._cache_timeout, cache_if)
def then (self, func):
from ..tasks import Future
return Future (self, self._timeout, **self._meta).then (func)
def cache (self, cache = 60, cache_if = (200,)):
cache = cache or self._cache_timeout
if not cache:
return self
if self._cached_result is None:
raise ValueError("call dispatch first")
self._cached_result.cache (cache, cache_if)
return self
getwait = getswait = dispatch # lower ver compat.
getwait_or_throw = getswait_or_throw = dispatch_or_throw # lower ver compat.
# cluster base call ---------------------------------------
class _Method:
def __init__(self, send, name):
self.__send = send
self.__name = name
def __getattr__(self, name):
return _Method(self.__send, "%s.%s" % (self.__name, name))
def __call__(self, *args):
return self.__send(self.__name, args)
class Proxy:
def __init__ (self, __class, *args, **kargs):
self.__class = __class
self.__args = args
self.__kargs = kargs
def __enter__ (self):
return self
def __exit__ (self, type, value, tb):
pass
def __getattr__ (self, name):
return _Method (self.__request, name)
def __request (self, method, params):
cdc = self.__class (*self.__args, **self.__kargs)
cdc._build_request (method, params)
return cdc
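# Illustration (not part of the module): _Method and Proxy build dotted RPC method
# names lazily, so an expression such as
#   Proxy(Task, cluster, "http://rpc.example.com/", None, "jsonrpc").math.add(1, 2)
# resolves to Proxy.__request("math.add", (1, 2)), which constructs a Task and calls
# _build_request("math.add", (1, 2)). The URL and cluster above are assumptions used
# only to show the call shape.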
class TaskCreator:
def __init__ (self, cluster, logger, cachesfs):
self.cluster = cluster
self.logger = logger
self.cachesfs = cachesfs
def __getattr__ (self, name):
return getattr (self.cluster, name)
def Server (self, uri, params = None, reqtype="rpc", headers = None, auth = None, meta = None, use_cache = True, mapreduce = False, filter = None, callback = None, cache = None, timeout = DEFAULT_TIMEOUT, caller = None):
if type (headers) is list:
h = {}
for n, v in headers:
h [n] = v
headers = h
if reqtype.endswith ("rpc"):
return Proxy (Task, self.cluster, uri, params, reqtype, headers, auth, meta, use_cache, mapreduce, filter, callback, cache, timeout, caller, self.cachesfs, self.logger)
else:
return Task (self.cluster, uri, params, reqtype, headers, auth, meta, use_cache, mapreduce, filter, callback, cache, timeout, caller, self.cachesfs, self.logger)
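# Usage sketch (assumptions: `cluster`, `logger` and `cachefs` are objects configured
# elsewhere by skitai; the URI is illustrative). A plain GET dispatched through
# TaskCreator can be resolved synchronously with fetch(), which also honours the
# cache argument handled by Task.cache():
#   creator = TaskCreator(cluster, logger, cachefs)
#   task = creator.Server("http://api.example.com/items", reqtype="get")
#   items = task.fetch(cache=60)   # payload, cached for 60 seconds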
| hansroh/skitai | skitai/corequest/httpbase/task.py | Python | mit | 22,202 | 0.022205 |
# proxy module
from traitsui.qt4.ui_live import *
| enthought/etsproxy | enthought/traits/ui/qt4/ui_live.py | Python | bsd-3-clause | 50 | 0 |
# -*- encoding:utf-8 -*-
# find_result.py is part of advancedfind-gedit.
#
#
# Copyright 2010-2012 swatch
#
# advancedfind-gedit is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from gi.repository import Gtk, Gedit, Gio
import os.path
import urllib
import re
import config_manager
import shutil
import gettext
APP_NAME = 'advancedfind'
CONFIG_DIR = os.path.expanduser('~/.local/share/gedit/plugins/' + APP_NAME + '/config')
#LOCALE_DIR = '/usr/share/locale'
LOCALE_DIR = os.path.join(os.path.dirname(__file__), 'locale')
if not os.path.exists(LOCALE_DIR):
LOCALE_DIR = '/usr/share/locale'
try:
t = gettext.translation(APP_NAME, LOCALE_DIR)
_ = t.gettext
#Gtk.glade.bindtextdomain(APP_NAME, LOCALE_DIR)
except:
pass
#gettext.install(APP_NAME, LOCALE_DIR, unicode=True)
class FindResultView(Gtk.HBox):
def __init__(self, window, result_gui_settings):
Gtk.HBox.__init__(self)
self._window = window
self.result_gui_settings = result_gui_settings
# load color theme of results list
user_formatfile = os.path.join(CONFIG_DIR, 'theme/'+self.result_gui_settings['COLOR_THEME']+'.xml')
if not os.path.exists(user_formatfile):
if not os.path.exists(os.path.dirname(user_formatfile)):
os.makedirs(os.path.dirname(user_formatfile))
shutil.copy2(os.path.dirname(__file__) + "/config/theme/default.xml", os.path.dirname(user_formatfile))
#print os.path.dirname(user_formatfile)
format_file = user_formatfile
#print format_file
self.result_format = config_manager.ConfigManager(format_file).load_configure('result_format')
config_manager.ConfigManager(format_file).to_bool(self.result_format)
# initialize find result treeview
self.findResultTreeview = Gtk.TreeView()
resultsCellRendererText = Gtk.CellRendererText()
if self.result_format['BACKGROUND']:
resultsCellRendererText.set_property('cell-background', self.result_format['BACKGROUND'])
resultsCellRendererText.set_property('font', self.result_format['RESULT_FONT'])
self.findResultTreeview.append_column(Gtk.TreeViewColumn("line", resultsCellRendererText, markup=1))
self.findResultTreeview.append_column(Gtk.TreeViewColumn("content", resultsCellRendererText, markup=2))
#self.findResultTreeview.append_column(Gtk.TreeViewColumn("result_start", Gtk.CellRendererText(), text=4))
#self.findResultTreeview.append_column(Gtk.TreeViewColumn("result_len", Gtk.CellRendererText(), text=5))
self.findResultTreeview.append_column(Gtk.TreeViewColumn("uri", resultsCellRendererText, text=6))
self.findResultTreeview.set_grid_lines(int(self.result_format['GRID_PATTERN'])) # 0: None; 1: Horizontal; 2: Vertical; 3: Both
self.findResultTreeview.set_headers_visible(self.result_format['SHOW_HEADERS'])
try:
column_num = self.findResultTreeview.get_n_columns()
except:
# For older gtk version.
column_num = len(self.findResultTreeview.get_columns())
if self.result_format['SHOW_HEADERS']:
for i in range(0, column_num):
self.findResultTreeview.get_column(i).set_resizable(True)
else:
for i in range(0, column_num):
self.findResultTreeview.get_column(i).set_sizing(1) # 1=autosizing
self.findResultTreeview.set_rules_hint(True)
self.findResultTreemodel = Gtk.TreeStore(int, str, str, object, int, int, str)
self.findResultTreemodel.set_sort_column_id(0, Gtk.SortType.ASCENDING)
self.findResultTreeview.connect("cursor-changed", self.on_findResultTreeview_cursor_changed_action)
self.findResultTreeview.connect("button-press-event", self.on_findResultTreeview_button_press_action)
self.findResultTreeview.set_model(self.findResultTreemodel)
# initialize scrolled window
scrollWindow = Gtk.ScrolledWindow()
scrollWindow.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
scrollWindow.add(self.findResultTreeview)
# put a separator
v_separator1 = Gtk.VSeparator()
# initialize button box
v_box = Gtk.VBox()
v_buttonbox = Gtk.VButtonBox()
v_buttonbox.set_layout(Gtk.ButtonBoxStyle.END)
v_buttonbox.set_spacing(5)
v_buttonbox.set_homogeneous(True)
self.selectNextButton = Gtk.Button(_("Next"))
self.selectNextButton.set_no_show_all(True)
self.selectNextButton.connect("clicked", self.on_selectNextButton_clicked_action)
self.expandAllButton = Gtk.Button(_("Expand All"))
self.expandAllButton.set_no_show_all(True)
self.expandAllButton.connect("clicked", self.on_expandAllButton_clicked_action)
self.collapseAllButton = Gtk.Button(_("Collapse All"))
self.collapseAllButton.set_no_show_all(True)
self.collapseAllButton.connect("clicked", self.on_collapseAllButton_clicked_action)
self.clearHighlightButton = Gtk.Button(_("Clear Highlight"))
self.clearHighlightButton.set_no_show_all(True)
self.clearHighlightButton.connect("clicked", self.on_clearHightlightButton_clicked_action)
self.clearButton = Gtk.Button(_("Clear"))
self.clearButton.set_no_show_all(True)
self.clearButton.connect("clicked", self.on_clearButton_clicked_action)
self.stopButton = Gtk.Button(_("Stop"))
self.stopButton.set_no_show_all(True)
self.stopButton.connect("clicked", self.on_stopButton_clicked_action)
self.stopButton.set_sensitive(False)
v_buttonbox.pack_start(self.selectNextButton, False, False, 5)
v_buttonbox.pack_start(self.expandAllButton, False, False, 5)
v_buttonbox.pack_start(self.collapseAllButton, False, False, 5)
v_buttonbox.pack_start(self.clearHighlightButton, False, False, 5)
v_buttonbox.pack_start(self.clearButton, False, False, 5)
v_buttonbox.pack_start(self.stopButton, False, False, 5)
v_box.pack_end(v_buttonbox, False, False, 5)
#self._status = Gtk.Label()
#self._status.set_text('test')
#self._status.hide()
#v_box.pack_end(self._status, False)
self.pack_start(scrollWindow, True, True, 5)
self.pack_start(v_separator1, False, False, 0)
self.pack_start(v_box, False, False, 5)
self.show_all()
#initialize context menu
self.contextMenu = Gtk.Menu()
self.expandAllItem = Gtk.MenuItem.new_with_label(_('Expand All'))
self.collapseAllItem = Gtk.MenuItem.new_with_label(_('Collapse All'))
self.clearHighlightItem = Gtk.MenuItem.new_with_label(_('Clear Highlight'))
self.clearItem = Gtk.MenuItem.new_with_label(_('Clear'))
self.stopItem = Gtk.MenuItem.new_with_label(_('Stop'))
self.stopItem.set_sensitive(False)
self.markupItem = Gtk.MenuItem.new_with_label(_('Markup'))
self.contextMenu.append(self.expandAllItem)
self.contextMenu.append(self.collapseAllItem)
self.contextMenu.append(self.clearHighlightItem)
self.contextMenu.append(self.clearItem)
self.contextMenu.append(self.stopItem)
self.contextMenu.append(self.markupItem)
self.expandAllItem.connect('activate', self.on_expandAllItem_activate)
self.collapseAllItem.connect('activate', self.on_collapseAllItem_activate)
self.clearHighlightItem.connect('activate', self.on_clearHighlightItem_activate)
self.clearItem.connect('activate', self.on_clearItem_activate)
self.stopItem.connect('activate', self.on_stopItem_activate)
self.markupItem.connect('activate', self.on_markupItem_activate)
self.expandAllItem.show()
self.collapseAllItem.show()
self.clearHighlightItem.show()
self.clearItem.show()
self.stopItem.show()
#self.markupItem.show()
self.contextMenu.append(Gtk.SeparatorMenuItem())
self.showButtonsItem = Gtk.MenuItem.new_with_label(_('Show Buttons'))
self.contextMenu.append(self.showButtonsItem)
self.showButtonsItem.show()
self.showButtonsSubmenu = Gtk.Menu()
self.showNextButtonItem = Gtk.CheckMenuItem.new_with_label(_('Next'))
self.showExpandAllButtonItem = Gtk.CheckMenuItem.new_with_label(_('Expand All'))
self.showCollapseAllButtonItem = Gtk.CheckMenuItem.new_with_label(_('Collapse All'))
self.showClearHighlightButtonItem = Gtk.CheckMenuItem.new_with_label(_('Clear Highlight'))
self.showClearButtonItem = Gtk.CheckMenuItem.new_with_label(_('Clear'))
self.showStopButtonItem = Gtk.CheckMenuItem.new_with_label(_('Stop'))
self.showButtonsSubmenu.append(self.showNextButtonItem)
self.showButtonsSubmenu.append(self.showExpandAllButtonItem)
self.showButtonsSubmenu.append(self.showCollapseAllButtonItem)
self.showButtonsSubmenu.append(self.showClearHighlightButtonItem)
self.showButtonsSubmenu.append(self.showClearButtonItem)
self.showButtonsSubmenu.append(self.showStopButtonItem)
self.showNextButtonItem.connect('activate', self.on_showNextButtonItem_activate)
self.showExpandAllButtonItem.connect('activate', self.on_showExpandAllButtonItem_activate)
self.showCollapseAllButtonItem.connect('activate', self.on_showCollapseAllButtonItem_activate)
self.showClearHighlightButtonItem.connect('activate', self.on_showClearHighlightButtonItem_activate)
self.showClearButtonItem.connect('activate', self.on_showClearButtonItem_activate)
self.showStopButtonItem.connect('activate', self.on_showStopButtonItem_activate)
self.showNextButtonItem.show()
self.showExpandAllButtonItem.show()
self.showCollapseAllButtonItem.show()
self.showClearHighlightButtonItem.show()
self.showClearButtonItem.show()
self.showStopButtonItem.show()
self.showButtonsItem.set_submenu(self.showButtonsSubmenu)
self.show_buttons()
def do_events(self):
while Gtk.events_pending():
Gtk.main_iteration()
def to_xml_text(self, text):
        # & -> &amp;
        # < -> &lt;
        # > -> &gt;
        # ' -> &apos;
        # " -> &quot;
        return text.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace("'", '&apos;').replace('"', '&quot;')
def remove_markup(self, text):
regex = re.compile(r'<.+>([^ <>]+)</.+>')
return regex.sub(r'\1', text)
def on_findResultTreeview_cursor_changed_action(self, object):
if object.get_selection():
model, it = object.get_selection().get_selected()
else:
return
if not it:
return
try:
m = re.search('.+(<.+>)+([0-9]+)(<.+>)+.*', model.get_value(it, 1))
#m = re.search('.+(.+)+([0-9]+)(.+)+.*', model.get_value(it, 1))
line_num = int(m.group(2))
except:
return
result_start = model.get_value(it, 4)
result_len = model.get_value(it, 5)
parent_it = model.iter_parent(it)
if parent_it:
uri = model.get_value(parent_it, 6)
tab = model.get_value(parent_it, 3)
else:
return
# Tab wasn't passed, try to find one
if not tab:
docs = self._window.get_documents()
for doc in docs:
if urllib.unquote(doc.get_uri_for_display()) == uri:
tab = Gedit.Tab.get_from_document(doc)
# Still nothing? Open the file then
if not tab:
m = re.search('[a-zA-Z0-9]+\:\/\/.+', uri)
if m == None:
tab = self._window.create_tab_from_location(Gio.file_new_for_path(uri), None, line_num, 0, False, False)
else:
tab = self._window.create_tab_from_location(Gio.file_new_for_uri(uri), None, line_num, 0, False, False)
self.do_events()
if tab:
self._window.set_active_tab(tab)
doc = tab.get_document()
doc.select_range(doc.get_iter_at_offset(result_start), doc.get_iter_at_offset(result_start + result_len))
view = tab.get_view()
view.scroll_to_cursor()
def on_findResultTreeview_button_press_action(self, object, event):
if event.button == 3:
#right button click
self.contextMenu.popup(None, None, None, None, event.button, event.time)
def on_expandAllItem_activate(self, object):
self.findResultTreeview.expand_all()
def on_collapseAllItem_activate(self, object):
self.findResultTreeview.collapse_all()
def on_clearHighlightItem_activate(self, object):
self.clear_highlight()
def on_clearItem_activate(self, object):
self.clear_find_result()
def on_stopItem_activate(self, object):
self.stopButton.set_sensitive(False)
object.set_sensitive(False)
def on_markupItem_activate(self, object):
model, it = self.findResultTreeview.get_selection().get_selected()
if not it:
return
self.markup_row(model, it)
def markup_row(self, model, it):
if not it:
return
mark_head = '<span background="gray">'
mark_foot = '</span>'
line_str = model.get_value(it, 1)
text_str = model.get_value(it, 2)
if line_str.startswith(mark_head) and line_str.endswith(mark_foot):
model.set_value(it, 1, line_str[len(mark_head):-1*len(mark_foot)])
else:
model.set_value(it, 1, mark_head + line_str + mark_foot)
if text_str.startswith(mark_head) and text_str.endswith(mark_foot):
model.set_value(it, 2, text_str[len(mark_head):-1*len(mark_foot)])
else:
model.set_value(it, 2, mark_head + text_str + mark_foot)
if self.findResultTreemodel.iter_has_child(it):
for i in range(0, self.findResultTreemodel.iter_n_children(it)):
self.markup_row(model, self.findResultTreemodel.iter_nth_child(it, i))
def on_showNextButtonItem_activate(self, object):
if self.showNextButtonItem.get_active() == True:
self.result_gui_settings['NEXT_BUTTON'] = True
self.selectNextButton.show()
else:
self.result_gui_settings['NEXT_BUTTON'] = False
self.selectNextButton.hide()
def on_showExpandAllButtonItem_activate(self, object):
if self.showExpandAllButtonItem.get_active() == True:
self.result_gui_settings['EXPAND_ALL_BUTTON'] = True
self.expandAllButton.show()
else:
self.result_gui_settings['EXPAND_ALL_BUTTON'] = False
self.expandAllButton.hide()
def on_showCollapseAllButtonItem_activate(self, object):
if self.showCollapseAllButtonItem.get_active() == True:
self.result_gui_settings['COLLAPSE_ALL_BUTTON'] = True
self.collapseAllButton.show()
else:
self.result_gui_settings['COLLAPSE_ALL_BUTTON'] = False
self.collapseAllButton.hide()
def on_showClearHighlightButtonItem_activate(self, object):
if self.showClearHighlightButtonItem.get_active() == True:
self.result_gui_settings['CLEAR_HIGHLIGHT_BUTTON'] = True
self.clearHighlightButton.show()
else:
self.result_gui_settings['CLEAR_HIGHLIGHT_BUTTON'] = False
self.clearHighlightButton.hide()
def on_showClearButtonItem_activate(self, object):
if self.showClearButtonItem.get_active() == True:
self.result_gui_settings['CLEAR_BUTTON'] = True
self.clearButton.show()
else:
self.result_gui_settings['CLEAR_BUTTON'] = False
self.clearButton.hide()
def on_showStopButtonItem_activate(self, object):
if self.showStopButtonItem.get_active() == True:
self.result_gui_settings['STOP_BUTTON'] = True
self.stopButton.show()
else:
self.result_gui_settings['STOP_BUTTON'] = False
self.stopButton.hide()
def on_selectNextButton_clicked_action(self, object):
path, column = self.findResultTreeview.get_cursor()
if not path:
return
it = self.findResultTreemodel.get_iter(path)
if self.findResultTreemodel.iter_has_child(it):
self.findResultTreeview.expand_row(path, True)
it1 = self.findResultTreemodel.iter_children(it)
else:
it1 = self.findResultTreemodel.iter_next(it)
if not it1:
it1 = self.findResultTreemodel.iter_parent(it)
if not it1:
return
else:
it2 = self.findResultTreemodel.iter_next(it1)
if not it2:
it2 = self.findResultTreemodel.iter_parent(it1)
if not it2:
return
else:
it3 = self.findResultTreemodel.iter_next(it2)
if not it3:
return
else:
path = self.findResultTreemodel.get_path(it3)
else:
path = self.findResultTreemodel.get_path(it2)
else:
path = self.findResultTreemodel.get_path(it1)
self.findResultTreeview.set_cursor(path, column, False)
def on_clearHightlightButton_clicked_action(self, object):
self.clear_highlight()
def on_expandAllButton_clicked_action(self, object):
self.findResultTreeview.expand_all()
def on_collapseAllButton_clicked_action(self, object):
self.findResultTreeview.collapse_all()
def on_clearButton_clicked_action(self, object):
self.clear_find_result()
def on_stopButton_clicked_action(self, object):
object.set_sensitive(False)
def append_find_pattern(self, pattern, replace_flg = False, replace_text = None):
self.findResultTreeview.collapse_all()
idx = self.findResultTreemodel.iter_n_children(None)
header = '#' + str(idx) + ' - '
if replace_flg == True:
mode = self.result_format['MODE_REPLACE'] %{'HEADER' : header, 'PATTERN' : self.to_xml_text(unicode(pattern, 'utf-8')), 'REPLACE_TEXT' : self.to_xml_text(unicode(replace_text, 'utf-8'))}
#mode = header + ' Replace ' + pattern + ' with ' + replace_text
it = self.findResultTreemodel.append(None, [idx, mode, '', None, 0, 0, ''])
else:
mode = self.result_format['MODE_FIND'] %{'HEADER' : header, 'PATTERN' : self.to_xml_text(unicode(pattern, 'utf-8'))}
#mode = header + ' Search ' + pattern
it = self.findResultTreemodel.append(None, [idx, mode, '', None, 0, 0, ''])
return it
def append_find_result_filename(self, parent_it, filename, tab, uri):
filename_str = self.result_format['FILENAME'] % {'FILENAME' : self.to_xml_text(unicode(filename, 'utf-8'))}
#filename_str = filename
it = self.findResultTreemodel.append(parent_it, [0, filename_str, '', tab, 0, 0, uri])
return it
def append_find_result(self, parent_it, line, text, result_offset_start = 0, result_len = 0, uri = "", line_start_pos = 0, replace_flg = False):
result_line = self.result_format['LINE'] % {'LINE_NUM' : line}
#result_line = 'Line ' + str(line) + ' : '
markup_start = result_offset_start - line_start_pos
markup_end = markup_start + result_len
text_header = self.to_xml_text(text[0:markup_start])
text_marked = self.to_xml_text(text[markup_start:markup_end])
text_footer = self.to_xml_text(text[markup_end:])
if replace_flg == False:
result_text = (text_header + self.result_format['FIND_RESULT_TEXT'] % {'RESULT_TEXT' : text_marked} + text_footer).rstrip()
#result_text = (text_header + text_marked + text_footer).rstrip()
self.findResultTreemodel.append(parent_it, [int(line), result_line, result_text, None, result_offset_start, result_len, uri])
else:
result_text = (text_header + self.result_format['REPLACE_RESULT_TEXT'] % {'RESULT_TEXT' : text_marked} + text_footer).rstrip()
#result_text = (text_header + text_marked + text_footer).rstrip()
self.findResultTreemodel.append(parent_it, [int(line), result_line, result_text, None, result_offset_start, result_len, uri])
def show_find_result(self):
path = Gtk.TreePath.new_from_string(str(self.findResultTreemodel.iter_n_children(None) - 1))
self.findResultTreeview.expand_row(path, True)
pattern_it = self.findResultTreemodel.get_iter(path)
self.findResultTreeview.set_cursor(self.findResultTreemodel.get_path(pattern_it), None, False)
file_cnt = self.findResultTreemodel.iter_n_children(pattern_it)
total_hits = 0
for i in range(0, file_cnt):
it1 = self.findResultTreemodel.iter_nth_child(pattern_it, i)
hits_cnt = self.findResultTreemodel.iter_n_children(it1)
total_hits += hits_cnt
hits_str = self.result_format['HITS_CNT'] % {'HITS_CNT' : str(hits_cnt)}
#hits_str = str(hits_cnt) + ' hits'
self.findResultTreemodel.set_value(it1, 2, hits_str)
total_hits_str = self.result_format['TOTAL_HITS'] % {'TOTAL_HITS': str(total_hits), 'FILES_CNT' : str(file_cnt)}
#total_hits_str = 'Total ' + str(total_hits) + ' hits in ' + str(file_cnt)
self.findResultTreemodel.set_value(pattern_it, 2, total_hits_str)
def clear_highlight(self):
for doc in self._window.get_documents():
start, end = doc.get_bounds()
if doc.get_tag_table().lookup('result_highlight') == None:
tag = doc.create_tag("result_highlight", foreground='yellow', background='red')
doc.remove_tag_by_name('result_highlight', start, end)
def clear_find_result(self):
try:
vadj = self._window.get_active_view().get_vadjustment()
vadj_value = vadj.get_value()
except:
self.findResultTreemodel.clear()
return
self.findResultTreemodel.clear()
vadj.set_value(vadj_value)
def get_show_button_option(self):
return self.result_gui_settings
def show_buttons(self):
if self.result_gui_settings['NEXT_BUTTON'] == True:
self.selectNextButton.show()
self.showNextButtonItem.set_active(True)
if self.result_gui_settings['EXPAND_ALL_BUTTON'] == True:
self.expandAllButton.show()
self.showExpandAllButtonItem.set_active(True)
if self.result_gui_settings['COLLAPSE_ALL_BUTTON'] == True:
self.collapseAllButton.show()
self.showCollapseAllButtonItem.set_active(True)
if self.result_gui_settings['CLEAR_HIGHLIGHT_BUTTON'] == True:
self.clearHighlightButton.show()
self.showClearHighlightButtonItem.set_active(True)
if self.result_gui_settings['CLEAR_BUTTON'] == True:
self.clearButton.show()
self.showClearButtonItem.set_active(True)
if self.result_gui_settings['STOP_BUTTON'] == True:
self.stopButton.show()
self.showStopButtonItem.set_active(True)
def is_busy(self, busy_flg = True):
if busy_flg:
self.clearButton.set_sensitive(False)
self.stopButton.set_sensitive(True)
self.clearItem.set_sensitive(False)
self.stopItem.set_sensitive(True)
else:
self.clearButton.set_sensitive(True)
self.stopButton.set_sensitive(False)
self.clearItem.set_sensitive(True)
self.stopItem.set_sensitive(False)
self.do_events()
if __name__ == "__main__":
    # stand-alone preview; the dict mirrors the result_gui_settings keys read in __init__
    view = FindResultView(None, {'COLOR_THEME': 'default', 'NEXT_BUTTON': True,
        'EXPAND_ALL_BUTTON': True, 'COLLAPSE_ALL_BUTTON': True,
        'CLEAR_HIGHLIGHT_BUTTON': True, 'CLEAR_BUTTON': True, 'STOP_BUTTON': True})
window = Gtk.Window(Gtk.WindowType.TOPLEVEL)
window.add(view)
window.show_all()
Gtk.main()
| the-linux-schools-project/karoshi-client | clientsetup/buildclient/config/usr/lib/gedit/plugins/advancedfind/find_result.py | Python | agpl-3.0 | 21,931 | 0.031143 |
"""
Copyright (c) 2017, 2019 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from atomic_reactor.plugin import PreBuildPlugin
from atomic_reactor.constants import (
INSPECT_CONFIG, PLUGIN_KOJI_PARENT_KEY, BASE_IMAGE_KOJI_BUILD, PARENT_IMAGES_KOJI_BUILDS,
KOJI_BTYPE_IMAGE
)
from atomic_reactor.plugins.pre_reactor_config import (
get_deep_manifest_list_inspection, get_koji_session,
get_skip_koji_check_for_base_image, get_fail_on_digest_mismatch,
get_platform_to_goarch_mapping
)
from atomic_reactor.plugins.pre_check_and_set_rebuild import is_rebuild
from atomic_reactor.util import (
base_image_is_custom, get_manifest_media_type, is_scratch_build,
get_platforms, RegistrySession, RegistryClient
)
from copy import copy
from osbs.utils import Labels
import json
import koji
import time
DEFAULT_POLL_TIMEOUT = 60 * 10 # 10 minutes
DEFAULT_POLL_INTERVAL = 10 # 10 seconds
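# Sketch (not used by the plugin): the polling pattern that wait_for_parent_image_build()
# below follows -- retry a lookup until it returns something truthy or the timeout
# elapses. `lookup` is any zero-argument callable; the helper name is illustrative.
def _poll_until(lookup, poll_interval=DEFAULT_POLL_INTERVAL, poll_timeout=DEFAULT_POLL_TIMEOUT):
    start = time.time()
    while time.time() - start < poll_timeout:
        result = lookup()
        if result:
            return result
        time.sleep(poll_interval)
    return None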
class KojiParentBuildMissing(ValueError):
"""Expected to find a build for the parent image in koji, did not find it within timeout."""
class KojiParentPlugin(PreBuildPlugin):
"""Wait for Koji build of parent images to be available
Uses inspected parent image configs to determine the
nvrs (Name-Version-Release) of the parent images. It uses
this information to check if the corresponding Koji
builds exist. This check is performed periodically until
the Koji builds are all found, or timeout expires.
    This check is required due to a timing issue: the image may already have been
    pushed to the registry but not yet uploaded and tagged in Koji. This plugin
ensures that the layered image is only built with parent
images that are known in Koji.
"""
key = PLUGIN_KOJI_PARENT_KEY
is_allowed_to_fail = False
def __init__(self, tasker, workflow, poll_interval=DEFAULT_POLL_INTERVAL,
poll_timeout=DEFAULT_POLL_TIMEOUT):
"""
:param tasker: ContainerTasker instance
:param workflow: DockerBuildWorkflow instance
:param poll_interval: int, seconds between polling for Koji build
:param poll_timeout: int, max amount of seconds to wait for Koji build
"""
super(KojiParentPlugin, self).__init__(tasker, workflow)
self.koji_session = get_koji_session(self.workflow)
self.poll_interval = poll_interval
self.poll_timeout = poll_timeout
self._base_image_nvr = None
self._base_image_build = None
self._parent_builds = {}
self._poll_start = None
self.platforms = get_platforms(self.workflow)
# RegistryClient instances cached by registry name
self.registry_clients = {}
self._deep_manifest_list_inspection = get_deep_manifest_list_inspection(self.workflow,
fallback=True)
def ignore_isolated_autorebuilds(self):
if not self.workflow.source.config.autorebuild.get('ignore_isolated_builds', False):
self.log.debug("ignoring_isolated_builds isn't configured, won't skip autorebuild")
return
base_koji_build = self.wait_for_parent_image_build(self._base_image_nvr)
is_isolated = base_koji_build['extra']['image'].get('isolated', False)
if is_isolated:
self.log.debug("setting cancel_isolated_autorebuild")
self.workflow.cancel_isolated_autorebuild = True
def run(self):
if is_scratch_build(self.workflow):
self.log.info('scratch build, skipping plugin')
return
if not (self.workflow.builder.dockerfile_images.base_from_scratch or
self.workflow.builder.dockerfile_images.custom_base_image):
self._base_image_nvr = self.detect_parent_image_nvr(
self.workflow.builder.dockerfile_images.base_image,
inspect_data=self.workflow.builder.base_image_inspect,
)
if is_rebuild(self.workflow):
self.ignore_isolated_autorebuilds()
manifest_mismatches = []
for img, local_tag in self.workflow.builder.dockerfile_images.items():
if base_image_is_custom(img.to_str()):
continue
nvr = self.detect_parent_image_nvr(local_tag) if local_tag else None
self._parent_builds[img] = self.wait_for_parent_image_build(nvr) if nvr else None
if nvr == self._base_image_nvr:
self._base_image_build = self._parent_builds[img]
if self._parent_builds[img]:
# we need the possible floating tag
check_img = copy(local_tag)
check_img.tag = img.tag
try:
self.check_manifest_digest(check_img, self._parent_builds[img])
except ValueError as exc:
manifest_mismatches.append(exc)
else:
err_msg = ('Could not get koji build info for parent image {}. '
'Was this image built in OSBS?'.format(img.to_str()))
if get_skip_koji_check_for_base_image(self.workflow, fallback=False):
self.log.warning(err_msg)
else:
self.log.error(err_msg)
raise RuntimeError(err_msg)
if manifest_mismatches:
mismatch_msg = ('Error while comparing parent images manifest digests in koji with '
'related values from registries: %s')
if get_fail_on_digest_mismatch(self.workflow, fallback=True):
self.log.error(mismatch_msg, manifest_mismatches)
raise RuntimeError(mismatch_msg % manifest_mismatches)
self.log.warning(mismatch_msg, manifest_mismatches)
return self.make_result()
def check_manifest_digest(self, image, build_info):
"""Check if the manifest list digest is correct.
Compares the manifest list digest with the value in koji metadata.
Raises a ValueError if the manifest list does not refer to the koji build.
:param image: ImageName, image to inspect
:param build_info: dict, koji build metadata
"""
image_str = image.to_str()
v2_list_type = get_manifest_media_type('v2_list')
v2_type = get_manifest_media_type('v2')
image_digest_data = self.workflow.builder.parent_images_digests[image_str]
if v2_list_type in image_digest_data:
media_type = v2_list_type
elif v2_type in image_digest_data:
media_type = v2_type
else:
# This should not happen - raise just to be safe:
raise RuntimeError('Unexpected parent image digest data for {}. '
'v2 or v2_list expected, got {}'.format(image, image_digest_data))
digest = image_digest_data[media_type]
try:
koji_digest = build_info['extra']['image']['index']['digests'][media_type]
except KeyError as exc:
err_msg = ("Koji build ({}) for parent image '{}' does not have manifest digest data "
"for the expected media type '{}'. This parent image MUST be rebuilt"
.format(build_info['id'], image_str, media_type))
self.log.error(err_msg)
raise ValueError(err_msg) from exc
expected_digest = koji_digest
self.log.info('Verifying manifest digest (%s) for parent %s against its '
'koji reference (%s)', digest, image_str, expected_digest)
if digest != expected_digest:
rebuild_msg = 'This parent image MUST be rebuilt'
mismatch_msg = ('Manifest digest (%s) for parent image %s does not match value in its '
'koji reference (%s). %s')
if not self._deep_manifest_list_inspection:
self.log.error(mismatch_msg, digest, image_str, expected_digest, rebuild_msg)
raise ValueError(mismatch_msg % (digest, image_str, expected_digest, rebuild_msg))
deep_inspection_msg = 'Checking manifest list contents...'
self.log.warning(mismatch_msg, digest, image_str, expected_digest, deep_inspection_msg)
if not self.manifest_list_entries_match(image, build_info['id']):
err_msg = ('Manifest list for parent image %s differs from the manifest list for '
'its koji reference. %s')
self.log.error(err_msg, image_str, rebuild_msg)
raise ValueError(err_msg % (image_str, rebuild_msg))
def manifest_list_entries_match(self, image, build_id):
"""Check whether manifest list entries are in koji.
Compares the digest in each manifest list entry with the koji build
archive for the entry's architecture. Returns True if they all match.
:param image: ImageName, image to inspect
:param build_id: int, koji build ID for the image
:return: bool, True if the manifest list content refers to the koji build archives
"""
if not image.registry:
self.log.warning('Could not fetch manifest list for %s: missing registry ref', image)
return False
v2_type = get_manifest_media_type('v2')
reg_client = self._get_registry_client(image.registry)
manifest_list_response = reg_client.get_manifest_list(image)
if not manifest_list_response:
self.log.warning('Could not fetch manifest list for %s', image)
return False
manifest_list_data = {}
manifest_list = json.loads(manifest_list_response.content)
for manifest in manifest_list['manifests']:
if manifest['mediaType'] != v2_type:
self.log.warning('Unexpected media type in manifest list: %s', manifest)
return False
arch = manifest['platform']['architecture']
v2_digest = manifest['digest']
manifest_list_data[arch] = v2_digest
archives = self.koji_session.listArchives(build_id)
koji_archives_data = {}
for archive in (a for a in archives if a['btype'] == KOJI_BTYPE_IMAGE):
arch = archive['extra']['docker']['config']['architecture']
v2_digest = archive['extra']['docker']['digests'][v2_type]
koji_archives_data[arch] = v2_digest
platform_to_arch_dict = get_platform_to_goarch_mapping(self.workflow)
architectures = [platform_to_arch_dict[platform] for platform in self.platforms]
missing_arches = [a for a in architectures if a not in koji_archives_data]
if missing_arches:
self.log.warning('Architectures "%s" are missing in Koji archives "%s"',
missing_arches, koji_archives_data)
return False
# manifest lists can be manually pushed to the registry to make sure a specific tag
# (e.g., latest) is available for all platforms.
# In such cases these manifest lists may include images from different koji builds.
# We only want to check the digests for the images built in the current parent koji build
err_msg = 'Manifest list digest %s differs from Koji archive digest %s for platform %s'
unmatched_digests = False
for arch in architectures:
if manifest_list_data[arch] != koji_archives_data[arch]:
unmatched_digests = True
self.log.warning(err_msg, manifest_list_data[arch], koji_archives_data[arch], arch)
if unmatched_digests:
return False
self.log.info('Deeper manifest list check verified v2 manifest references match')
return True
def detect_parent_image_nvr(self, image_name, inspect_data=None):
"""
Look for the NVR labels, if any, in the image.
:return NVR string if labels found, otherwise None
"""
if inspect_data is None:
inspect_data = self.workflow.builder.parent_image_inspect(image_name)
labels = Labels(inspect_data[INSPECT_CONFIG].get('Labels', {}))
label_names = [Labels.LABEL_TYPE_COMPONENT, Labels.LABEL_TYPE_VERSION,
Labels.LABEL_TYPE_RELEASE]
label_values = []
for lbl_name in label_names:
try:
_, lbl_value = labels.get_name_and_value(lbl_name)
label_values.append(lbl_value)
except KeyError:
self.log.info("Failed to find label '%s' in parent image '%s'.",
labels.get_name(lbl_name), image_name)
if len(label_values) != len(label_names): # don't have all the necessary labels
self.log.info("Image '%s' NVR missing; not searching for Koji build.", image_name)
return None
return '-'.join(label_values)
def wait_for_parent_image_build(self, nvr):
"""
Given image NVR, wait for the build that produced it to show up in koji.
If it doesn't within the timeout, raise an error.
:return build info dict with 'nvr' and 'id' keys
"""
self.log.info('Waiting for Koji build for parent image %s', nvr)
poll_start = time.time()
while time.time() - poll_start < self.poll_timeout:
build = self.koji_session.getBuild(nvr)
if build:
build_state = koji.BUILD_STATES[build['state']]
self.log.info('Parent image Koji build found with id %s', build.get('id'))
if build_state == 'COMPLETE':
return build
elif build_state != 'BUILDING':
exc_msg = ('Parent image Koji build {} state is {}, not COMPLETE.')
raise KojiParentBuildMissing(exc_msg.format(nvr, build_state))
time.sleep(self.poll_interval)
raise KojiParentBuildMissing('Parent image Koji build NOT found for {}!'.format(nvr))
def make_result(self):
"""Construct the result dict to be preserved in the build metadata."""
result = {}
if self._base_image_build:
result[BASE_IMAGE_KOJI_BUILD] = self._base_image_build
if self._parent_builds:
result[PARENT_IMAGES_KOJI_BUILDS] = self._parent_builds
return result if result else None
def _get_registry_client(self, registry):
"""
Get registry client for specified registry, cached by registry name
"""
client = self.registry_clients.get(registry)
if client is None:
session = RegistrySession.create_from_config(self.workflow, registry=registry)
client = RegistryClient(session)
self.registry_clients[registry] = client
return client
| DBuildService/atomic-reactor | atomic_reactor/plugins/pre_koji_parent.py | Python | bsd-3-clause | 14,943 | 0.003279 |
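A minimal, dependency-free sketch of the per-architecture digest comparison performed by manifest_list_entries_match() above: the registry manifest list and the koji build archives are both reduced to {architecture: v2-digest} maps and compared entry by entry. This is an illustration only, not part of atomic-reactor; the function name and the sample data are invented.

V2_MEDIA_TYPE = 'application/vnd.docker.distribution.manifest.v2+json'

def arch_digests_match(manifest_list, koji_archives, architectures):
    """Return True when every requested architecture has the same v2 digest
    in the registry manifest list and in the koji build archives."""
    registry_digests = {
        entry['platform']['architecture']: entry['digest']
        for entry in manifest_list['manifests']
        if entry['mediaType'] == V2_MEDIA_TYPE
    }
    koji_digests = {
        archive['extra']['docker']['config']['architecture']:
            archive['extra']['docker']['digests'][V2_MEDIA_TYPE]
        for archive in koji_archives
    }
    return all(
        arch in registry_digests and registry_digests[arch] == koji_digests.get(arch)
        for arch in architectures
    )

# Fabricated data: a single x86_64 image whose digests agree in both places.
sample_list = {'manifests': [{'mediaType': V2_MEDIA_TYPE,
                              'platform': {'architecture': 'x86_64'},
                              'digest': 'sha256:aaa'}]}
sample_archives = [{'extra': {'docker': {'config': {'architecture': 'x86_64'},
                                         'digests': {V2_MEDIA_TYPE: 'sha256:aaa'}}}}]
assert arch_digests_match(sample_list, sample_archives, ['x86_64'])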
#!/usr/bin/env python
import os, sys, codecs, re
def usage():
print "Usage info for extract_references.py"
print " extract_references.py ref_sgml ref_prefix"
print
sys.exit()
def main():
if (len(sys.argv) < 3 or sys.argv[1] == "-h"):
usage()
sgml = codecs.open(sys.argv[1], "r", "utf-8")
prefix = sys.argv[2]
doc_pattern = re.compile('.* docid="([^"]*).*"')
seg_pattern = re.compile('.* id="([^"]*)".*')
ref_sets = []
cur_ref_set = []
cur_doc = ""
cur_seg = ""
cur_txt = ""
for line in sgml.readlines():
line_tc = line.strip()
line = line_tc.lower()
if ("<doc " in line):
cur_doc = doc_pattern.search(line).groups()[0]
if ("</refset " in line or
("<doc " in line and cur_doc in map(lambda x: x[0], cur_ref_set))):
ref_sets.append(cur_ref_set)
cur_ref_set = []
if ("<seg " in line):
cur_seg = seg_pattern.search(line).groups()[0]
cur_txt = re.sub("<[^>]*>", "", line_tc)
cur_ref_set.append((cur_doc, cur_seg, cur_txt))
ref_files = []
ref_count = len(ref_sets[0])
for i, ref_set in enumerate(ref_sets):
if (ref_count != len(ref_set)):
print "[ERR] reference lengths do not match: " + str(ref_count) \
+ " vs. " + str(len(ref_set)) + " (ref " + str(i) + ")"
ref_files.append(codecs.open(prefix + "_ref." + str(i), "w", "utf-8"))
for j in range(ref_count):
(cur_doc, cur_seg, cur_txt) = ref_sets[0][j]
for i in range(len(ref_sets)):
if (j >= len(ref_sets[i])):
continue
(doc, seg, txt) = ref_sets[i][j]
if (doc != cur_doc or seg != cur_seg):
print "[ERR] document, segment ids don't match up: "
print "\t" + doc + " vs. " + cur_doc
print "\t" + seg + " vs. " + cur_seg
ref_files[i].write(txt + "\n")
for ref_file in ref_files:
ref_file.close()
if __name__ == "__main__":
main()
| gwenniger/joshua | scripts/toolkit/extract_references.py | Python | lgpl-2.1 | 1,922 | 0.023413 |
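To make the regular expressions in extract_references.py concrete, here is a small stand-alone demonstration on a fabricated SGML fragment (values invented; not part of the Joshua toolkit). It runs under Python 2 or 3 and shows how the docid/seg-id patterns and the tag-stripping substitution behave on single lines of input.

import re

doc_pattern = re.compile('.* docid="([^"]*).*"')
seg_pattern = re.compile('.* id="([^"]*)".*')

doc_line = '<DOC docid="news01" genre="nw">'
seg_line = '<seg id="3"> Hello , world . </seg>'

print(doc_pattern.search(doc_line.lower()).groups()[0])  # news01
print(seg_pattern.search(seg_line.lower()).groups()[0])  # 3
print(re.sub("<[^>]*>", "", seg_line))                   # ' Hello , world . ' (tags removed, spaces kept)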
"""
Base model definitions for validating front-end user access to resources such as pages and
documents. These may be subclassed to accommodate specific models such as Page or Collection,
but the definitions here should remain generic and not depend on the base wagtail.core.models
module or specific models defined there.
"""
from django.conf import settings
from django.contrib.auth.models import Group
from django.db import models
from django.utils.translation import gettext_lazy as _
class BaseViewRestriction(models.Model):
NONE = 'none'
PASSWORD = 'password'
GROUPS = 'groups'
LOGIN = 'login'
RESTRICTION_CHOICES = (
(NONE, _("Public")),
(LOGIN, _("Private, accessible to logged-in users")),
(PASSWORD, _("Private, accessible with the following password")),
(GROUPS, _("Private, accessible to users in specific groups")),
)
restriction_type = models.CharField(
max_length=20, choices=RESTRICTION_CHOICES)
password = models.CharField(verbose_name=_('password'), max_length=255, blank=True)
groups = models.ManyToManyField(Group, verbose_name=_('groups'), blank=True)
def accept_request(self, request):
if self.restriction_type == BaseViewRestriction.PASSWORD:
passed_restrictions = request.session.get(self.passed_view_restrictions_session_key, [])
if self.id not in passed_restrictions:
return False
elif self.restriction_type == BaseViewRestriction.LOGIN:
if not request.user.is_authenticated:
return False
elif self.restriction_type == BaseViewRestriction.GROUPS:
if not request.user.is_superuser:
current_user_groups = request.user.groups.all()
if not any(group in current_user_groups for group in self.groups.all()):
return False
return True
def mark_as_passed(self, request):
"""
Update the session data in the request to mark the user as having passed this
view restriction
"""
has_existing_session = (settings.SESSION_COOKIE_NAME in request.COOKIES)
passed_restrictions = request.session.setdefault(self.passed_view_restrictions_session_key, [])
if self.id not in passed_restrictions:
passed_restrictions.append(self.id)
request.session[self.passed_view_restrictions_session_key] = passed_restrictions
if not has_existing_session:
# if this is a session we've created, set it to expire at the end
# of the browser session
request.session.set_expiry(0)
class Meta:
abstract = True
verbose_name = _('view restriction')
verbose_name_plural = _('view restrictions')
| jnns/wagtail | wagtail/core/models/view_restrictions.py | Python | bsd-3-clause | 2,789 | 0.003944 |
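The GROUPS branch of accept_request() above reduces to a set-intersection test between the user's groups and the restriction's groups, with superusers always passing. A dependency-free sketch of just that rule (the function name and sample group names are invented; the real check runs against Django QuerySets):

def passes_group_restriction(user_is_superuser, user_groups, restriction_groups):
    """Mirror of the GROUPS branch in accept_request(): superusers always pass,
    otherwise at least one of the user's groups must be attached to the restriction."""
    if user_is_superuser:
        return True
    return any(group in user_groups for group in restriction_groups)

assert passes_group_restriction(False, {'editors', 'staff'}, {'staff'})
assert not passes_group_restriction(False, {'editors'}, {'staff'})
assert passes_group_restriction(True, set(), {'staff'})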
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 26 18:03:24 2014
@author: KDB
"""
import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.dates import date2num, num2date, HourLocator, DayLocator, AutoDateLocator, DateFormatter
from matplotlib.colors import LogNorm
def carpet(timeseries, **kwargs):
"""
Draw a carpet plot of a pandas timeseries.
The carpet plot reads like a letter. Every day one line is added to the
bottom of the figure, minute for minute moving from left (morning) to right
(evening).
The color denotes the level of consumption and is scaled logarithmically.
If vmin and vmax are not provided as inputs, the minimum and maximum of the
colorbar represent the minimum and maximum of the (resampled) timeseries.
Parameters
----------
timeseries : pandas.Series
vmin, vmax : If not None, either or both of these values determine the range
of the z axis. If None, the range is given by the minimum and/or maximum
of the (resampled) timeseries.
zlabel, title : If not None, these determine the labels of z axis and/or
title. If None, the name of the timeseries is used if defined.
cmap : matplotlib.cm instance, default coolwarm
"""
# define optional input parameters
cmap = kwargs.pop('cmap', cm.coolwarm)
norm = kwargs.pop('norm', LogNorm())
interpolation = kwargs.pop('interpolation', 'nearest')
cblabel = kwargs.pop('zlabel', timeseries.name if timeseries.name else '')
title = kwargs.pop('title', 'carpet plot: ' + timeseries.name if timeseries.name else '')
# data preparation
if timeseries.dropna().empty:
print('skipped {} - no data'.format(title))
return
ts = timeseries.resample('min', how='mean', label='left', closed='left')
vmin = max(0.1, kwargs.pop('vmin', ts[ts > 0].min()))
vmax = max(vmin, kwargs.pop('vmax', ts.quantile(.999)))
# convert to dataframe with date as index and time as columns by
# first replacing the index by a MultiIndex
# tz_convert('UTC'): workaround for https://github.com/matplotlib/matplotlib/issues/3896
mpldatetimes = date2num(ts.index.tz_convert('UTC').astype(dt.datetime))
ts.index = pd.MultiIndex.from_arrays(
[np.floor(mpldatetimes), 2 + mpldatetimes % 1]) # '2 +': matplotlib bug workaround.
# and then unstacking the second index level to columns
df = ts.unstack()
# data plotting
fig, ax = plt.subplots()
# define the extent of the axes (remark the +- 0.5 for the y axis in order to obtain aligned date ticks)
extent = [df.columns[0], df.columns[-1], df.index[-1] + 0.5, df.index[0] - 0.5]
im = plt.imshow(df, vmin=vmin, vmax=vmax, extent=extent, cmap=cmap, aspect='auto', norm=norm,
interpolation=interpolation, **kwargs)
# figure formatting
# x axis
ax.xaxis_date()
ax.xaxis.set_major_locator(HourLocator(interval=2))
ax.xaxis.set_major_formatter(DateFormatter('%H:%M'))
ax.xaxis.grid(True)
plt.xlabel('UTC Time')
# y axis
ax.yaxis_date()
dmin, dmax = ax.yaxis.get_data_interval()
number_of_days = (num2date(dmax) - num2date(dmin)).days
# AutoDateLocator is not suited in case few data is available
if abs(number_of_days) <= 35:
ax.yaxis.set_major_locator(DayLocator())
else:
ax.yaxis.set_major_locator(AutoDateLocator())
ax.yaxis.set_major_formatter(DateFormatter("%a, %d %b %Y"))
# plot colorbar
cbticks = np.logspace(np.log10(vmin), np.log10(vmax), 11, endpoint=True)
cb = plt.colorbar(format='%.0f', ticks=cbticks)
cb.set_label(cblabel)
# plot title
plt.title(title)
return im
def fanchart(timeseries, **kwargs):
"""
Draw a fan chart of the daily consumption profile.
The fan chart shows the different quantiles of the daily consumption, with
the blue line representing the median, and the black line the average.
By default, the consumption of the whole day is taken, but one can select
the hours of interest, e.g. night time standby consumption.
Parameters
----------
timeseries : pandas.Series
start_hour, end_hour : int or float, optional
Start and end hours of period of interest, default values are 0, 24
As of now, ensure that start_hour < end_hour
ylabel, title : str
If not None, these determine the labels of y axis and/or title.
If None, the name of the timeseries is used if defined.
"""
start_hour = 2. + kwargs.pop('start_hour', 0.) / 24.
end_hour = 2. + kwargs.pop('end_hour', 24.) / 24.
ylabel = kwargs.pop('ylabel', timeseries.name if timeseries.name else '')
    title = kwargs.pop('title', 'fan chart: ' + timeseries.name if timeseries.name else '')
# data preparation
if timeseries.dropna().empty:
print('skipped {} - no data'.format(title))
return
ts = timeseries.resample('min', how='mean', label='left', closed='left')
# convert to dataframe with date as index and time as columns by
# first replacing the index by a MultiIndex
# tz_convert('UTC'): workaround for https://github.com/matplotlib/matplotlib/issues/3896
mpldatetimes = date2num(ts.index.tz_convert('UTC').astype(dt.datetime))
ts.index = pd.MultiIndex.from_arrays(
[np.floor(mpldatetimes), 2 + mpldatetimes % 1]) # '2 +': matplotlib bug workaround.
# and then unstacking the second index level to columns
df = ts.unstack()
df = df.T.truncate(start_hour, end_hour)
num = 20
num_max = 4
df_quant = df.quantile(np.linspace(0., 1., 2 * num + 1))
# data plotting
fig, ax = plt.subplots()
im = plt.plot(df.columns, df_quant.iloc[num], 'b', label='median')
for i in range(1, num):
plt.fill_between(df.columns, df_quant.iloc[num - i], df_quant.iloc[min(num + i, 2 * num - num_max)], color='b',
alpha=0.05)
plt.plot(df.columns, df.mean(), 'k--', label='mean')
plt.legend()
# x axis
ax.xaxis_date()
plt.xlim(df.columns[0], df.columns[-1])
plt.ylabel(ylabel)
# plot title
plt.title(title)
plt.grid(True)
return im
| JrtPec/opengrid | opengrid/library/plotting.py | Python | apache-2.0 | 6,242 | 0.001922 |
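A hypothetical usage sketch for the two plotting helpers above (not part of opengrid): it fabricates a week of minute-level readings with a timezone-aware index, which is what carpet() and fanchart() expect, and draws both figures. The import path is taken from the file location above; all values and labels are invented.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from opengrid.library.plotting import carpet, fanchart

# One week of synthetic minute data with a timezone-aware index.
index = pd.date_range('2014-11-01', periods=7 * 24 * 60, freq='min', tz='Europe/Brussels')
ts = pd.Series(100 + 50 * np.random.rand(len(index)), index=index, name='Electricity [W]')

carpet(ts, vmin=10, vmax=200)           # daily colour map, logarithmic colour scale
fanchart(ts, start_hour=0, end_hour=6)  # quantile bands of night-time consumption
plt.show()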
"""
Oracle database backend for Django.
Requires cx_Oracle: http://cx-oracle.sourceforge.net/
"""
from __future__ import unicode_literals
import datetime
import decimal
import os
import platform
import sys
import warnings
from django.conf import settings
from django.db import utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.base.validation import BaseDatabaseValidation
from django.utils import six, timezone
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.duration import duration_string
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import cached_property
def _setup_environment(environ):
# Cygwin requires some special voodoo to set the environment variables
# properly so that Oracle will see them.
if platform.system().upper().startswith('CYGWIN'):
try:
import ctypes
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading ctypes: %s; "
"the Oracle backend requires ctypes to "
"operate correctly under Cygwin." % e)
kernel32 = ctypes.CDLL('kernel32')
for name, value in environ:
kernel32.SetEnvironmentVariableA(name, value)
else:
os.environ.update(environ)
_setup_environment([
# Oracle takes client-side character set encoding from the environment.
('NLS_LANG', '.UTF8'),
# This prevents unicode from getting mangled by getting encoded into the
# potentially non-unicode database character set.
('ORA_NCHAR_LITERAL_REPLACE', 'TRUE'),
])
try:
import cx_Oracle as Database
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e)
# Some of these import cx_Oracle, so import them after checking if it's installed.
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
from .utils import Oracle_datetime, convert_unicode # isort:skip
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
class _UninitializedOperatorsDescriptor(object):
def __get__(self, instance, owner):
# If connection.operators is looked up before a connection has been
# created, transparently initialize connection.operators to avert an
# AttributeError.
if instance is None:
raise AttributeError("operators not available as class attribute")
# Creating a cursor will initialize the operators.
instance.cursor().close()
return instance.__dict__['operators']
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'oracle'
# This dictionary maps Field objects to their associated Oracle column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
#
# Any format strings starting with "qn_" are quoted before being used in the
# output (the "qn_" prefix is stripped before the lookup is performed.
data_types = {
'AutoField': 'NUMBER(11)',
'BinaryField': 'BLOB',
'BooleanField': 'NUMBER(1)',
'CharField': 'NVARCHAR2(%(max_length)s)',
'CommaSeparatedIntegerField': 'VARCHAR2(%(max_length)s)',
'DateField': 'DATE',
'DateTimeField': 'TIMESTAMP',
'DecimalField': 'NUMBER(%(max_digits)s, %(decimal_places)s)',
'DurationField': 'INTERVAL DAY(9) TO SECOND(6)',
'FileField': 'NVARCHAR2(%(max_length)s)',
'FilePathField': 'NVARCHAR2(%(max_length)s)',
'FloatField': 'DOUBLE PRECISION',
'IntegerField': 'NUMBER(11)',
'BigIntegerField': 'NUMBER(19)',
'IPAddressField': 'VARCHAR2(15)',
'GenericIPAddressField': 'VARCHAR2(39)',
'NullBooleanField': 'NUMBER(1)',
'OneToOneField': 'NUMBER(11)',
'PositiveIntegerField': 'NUMBER(11)',
'PositiveSmallIntegerField': 'NUMBER(11)',
'SlugField': 'NVARCHAR2(%(max_length)s)',
'SmallIntegerField': 'NUMBER(11)',
'TextField': 'NCLOB',
'TimeField': 'TIMESTAMP',
'URLField': 'VARCHAR2(%(max_length)s)',
'UUIDField': 'VARCHAR2(32)',
}
data_type_check_constraints = {
'BooleanField': '%(qn_column)s IN (0,1)',
'NullBooleanField': '(%(qn_column)s IN (0,1)) OR (%(qn_column)s IS NULL)',
'PositiveIntegerField': '%(qn_column)s >= 0',
'PositiveSmallIntegerField': '%(qn_column)s >= 0',
}
operators = _UninitializedOperatorsDescriptor()
_standard_operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'icontains': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'endswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'istartswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'iendswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
}
_likec_operators = _standard_operators.copy()
_likec_operators.update({
'contains': "LIKEC %s ESCAPE '\\'",
'icontains': "LIKEC UPPER(%s) ESCAPE '\\'",
'startswith': "LIKEC %s ESCAPE '\\'",
'endswith': "LIKEC %s ESCAPE '\\'",
'istartswith': "LIKEC UPPER(%s) ESCAPE '\\'",
'iendswith': "LIKEC UPPER(%s) ESCAPE '\\'",
})
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
_pattern_ops = {
'contains': "'%%' || {} || '%%'",
'icontains': "'%%' || UPPER({}) || '%%'",
'startswith': "{} || '%%'",
'istartswith': "UPPER({}) || '%%'",
'endswith': "'%%' || {}",
'iendswith': "'%%' || UPPER({})",
}
_standard_pattern_ops = {k: "LIKE TRANSLATE( " + v + " USING NCHAR_CS)"
" ESCAPE TRANSLATE('\\' USING NCHAR_CS)"
for k, v in _pattern_ops.items()}
_likec_pattern_ops = {k: "LIKEC " + v + " ESCAPE '\\'"
for k, v in _pattern_ops.items()}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
use_returning_into = self.settings_dict["OPTIONS"].get('use_returning_into', True)
self.features.can_return_id_from_insert = use_returning_into
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def _connect_string(self):
settings_dict = self.settings_dict
if not settings_dict['HOST'].strip():
settings_dict['HOST'] = 'localhost'
if settings_dict['PORT'].strip():
dsn = Database.makedsn(settings_dict['HOST'],
int(settings_dict['PORT']),
settings_dict['NAME'])
else:
dsn = settings_dict['NAME']
return "%s/%s@%s" % (settings_dict['USER'],
settings_dict['PASSWORD'], dsn)
def get_connection_params(self):
conn_params = self.settings_dict['OPTIONS'].copy()
if 'use_returning_into' in conn_params:
del conn_params['use_returning_into']
return conn_params
def get_new_connection(self, conn_params):
conn_string = convert_unicode(self._connect_string())
return Database.connect(conn_string, **conn_params)
def init_connection_state(self):
cursor = self.create_cursor()
# Set the territory first. The territory overrides NLS_DATE_FORMAT
# and NLS_TIMESTAMP_FORMAT to the territory default. When all of
        # these are set in a single statement it isn't clear what is supposed
# to happen.
cursor.execute("ALTER SESSION SET NLS_TERRITORY = 'AMERICA'")
# Set Oracle date to ANSI date format. This only needs to execute
# once when we create a new connection. We also set the Territory
# to 'AMERICA' which forces Sunday to evaluate to a '1' in
# TO_CHAR().
cursor.execute(
"ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS'"
" NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'"
+ (" TIME_ZONE = 'UTC'" if settings.USE_TZ else ''))
cursor.close()
if 'operators' not in self.__dict__:
# Ticket #14149: Check whether our LIKE implementation will
# work for this connection or we need to fall back on LIKEC.
# This check is performed only once per DatabaseWrapper
# instance per thread, since subsequent connections will use
# the same settings.
cursor = self.create_cursor()
try:
cursor.execute("SELECT 1 FROM DUAL WHERE DUMMY %s"
% self._standard_operators['contains'],
['X'])
except DatabaseError:
self.operators = self._likec_operators
self.pattern_ops = self._likec_pattern_ops
else:
self.operators = self._standard_operators
self.pattern_ops = self._standard_pattern_ops
cursor.close()
try:
self.connection.stmtcachesize = 20
except AttributeError:
# Django docs specify cx_Oracle version 4.3.1 or higher, but
# stmtcachesize is available only in 4.3.2 and up.
pass
# Ensure all changes are preserved even when AUTOCOMMIT is False.
if not self.get_autocommit():
self.commit()
def create_cursor(self):
return FormatStylePlaceholderCursor(self.connection)
def _commit(self):
if self.connection is not None:
try:
return self.connection.commit()
except Database.DatabaseError as e:
# cx_Oracle 5.0.4 raises a cx_Oracle.DatabaseError exception
# with the following attributes and values:
# code = 2091
# message = 'ORA-02091: transaction rolled back
# 'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS
# _C00102056) violated - parent key not found'
# We convert that particular case to our IntegrityError exception
x = e.args[0]
if hasattr(x, 'code') and hasattr(x, 'message') \
and x.code == 2091 and 'ORA-02291' in x.message:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
# Oracle doesn't support releasing savepoints. But we fake them when query
# logging is enabled to keep query counts consistent with other backends.
def _savepoint_commit(self, sid):
if self.queries_logged:
self.queries_log.append({
'sql': '-- RELEASE SAVEPOINT %s (faked)' % self.ops.quote_name(sid),
'time': '0.000',
})
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit = autocommit
def check_constraints(self, table_names=None):
"""
        To check constraints, we set constraints to immediate. Then, when we're
        done, we must ensure they are returned to deferred.
"""
self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
def is_usable(self):
try:
self.connection.ping()
except Database.Error:
return False
else:
return True
@cached_property
def oracle_full_version(self):
with self.temporary_connection():
return self.connection.version
@cached_property
def oracle_version(self):
try:
return int(self.oracle_full_version.split('.')[0])
except ValueError:
return None
class OracleParam(object):
"""
Wrapper object for formatting parameters for Oracle. If the string
representation of the value is large enough (greater than 4000 characters)
the input size needs to be set as CLOB. Alternatively, if the parameter
has an `input_size` attribute, then the value of the `input_size` attribute
will be used instead. Otherwise, no input size will be set for the
parameter when executing the query.
"""
def __init__(self, param, cursor, strings_only=False):
# With raw SQL queries, datetimes can reach this function
# without being converted by DateTimeField.get_db_prep_value.
if settings.USE_TZ and (isinstance(param, datetime.datetime) and
not isinstance(param, Oracle_datetime)):
if timezone.is_aware(param):
warnings.warn(
"The Oracle database adapter received an aware datetime (%s), "
"probably from cursor.execute(). Update your code to pass a "
"naive datetime in the database connection's time zone (UTC by "
"default).", RemovedInDjango21Warning)
param = param.astimezone(timezone.utc).replace(tzinfo=None)
param = Oracle_datetime.from_datetime(param)
if isinstance(param, datetime.timedelta):
param = duration_string(param)
if ' ' not in param:
param = '0 ' + param
string_size = 0
# Oracle doesn't recognize True and False correctly in Python 3.
# The conversion done below works both in 2 and 3.
if param is True:
param = 1
elif param is False:
param = 0
if hasattr(param, 'bind_parameter'):
self.force_bytes = param.bind_parameter(cursor)
elif isinstance(param, Database.Binary):
self.force_bytes = param
else:
# To transmit to the database, we need Unicode if supported
# To get size right, we must consider bytes.
self.force_bytes = convert_unicode(param, cursor.charset,
strings_only)
if isinstance(self.force_bytes, six.string_types):
# We could optimize by only converting up to 4000 bytes here
string_size = len(force_bytes(param, cursor.charset, strings_only))
if hasattr(param, 'input_size'):
# If parameter has `input_size` attribute, use that.
self.input_size = param.input_size
elif string_size > 4000:
# Mark any string param greater than 4000 characters as a CLOB.
self.input_size = Database.CLOB
else:
self.input_size = None
class VariableWrapper(object):
"""
An adapter class for cursor variables that prevents the wrapped object
from being converted into a string when used to instantiate an OracleParam.
This can be used generally for any other object that should be passed into
Cursor.execute as-is.
"""
def __init__(self, var):
self.var = var
def bind_parameter(self, cursor):
return self.var
def __getattr__(self, key):
return getattr(self.var, key)
def __setattr__(self, key, value):
if key == 'var':
self.__dict__[key] = value
else:
setattr(self.var, key, value)
class FormatStylePlaceholderCursor(object):
"""
Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var"
style. This fixes it -- but note that if you want to use a literal "%s" in
a query, you'll need to use "%%s".
We also do automatic conversion between Unicode on the Python side and
UTF-8 -- for talking to Oracle -- in here.
"""
charset = 'utf-8'
def __init__(self, connection):
self.cursor = connection.cursor()
# Necessary to retrieve decimal values without rounding error.
self.cursor.numbersAsStrings = True
# Default arraysize of 1 is highly sub-optimal.
self.cursor.arraysize = 100
def _format_params(self, params):
try:
return {k: OracleParam(v, self, True) for k, v in params.items()}
except AttributeError:
return tuple(OracleParam(p, self, True) for p in params)
def _guess_input_sizes(self, params_list):
# Try dict handling; if that fails, treat as sequence
if hasattr(params_list[0], 'keys'):
sizes = {}
for params in params_list:
for k, value in params.items():
if value.input_size:
sizes[k] = value.input_size
self.setinputsizes(**sizes)
else:
# It's not a list of dicts; it's a list of sequences
sizes = [None] * len(params_list[0])
for params in params_list:
for i, value in enumerate(params):
if value.input_size:
sizes[i] = value.input_size
self.setinputsizes(*sizes)
def _param_generator(self, params):
# Try dict handling; if that fails, treat as sequence
if hasattr(params, 'items'):
return {k: v.force_bytes for k, v in params.items()}
else:
return [p.force_bytes for p in params]
def _fix_for_params(self, query, params):
        # cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it
        # does want a trailing ';' but not a trailing '/'. However, these
# characters must be included in the original query in case the query
# is being passed to SQL*Plus.
if query.endswith(';') or query.endswith('/'):
query = query[:-1]
if params is None:
params = []
query = convert_unicode(query, self.charset)
elif hasattr(params, 'keys'):
# Handle params as dict
args = {k: ":%s" % k for k in params.keys()}
query = convert_unicode(query % args, self.charset)
else:
# Handle params as sequence
args = [(':arg%d' % i) for i in range(len(params))]
query = convert_unicode(query % tuple(args), self.charset)
return query, self._format_params(params)
def execute(self, query, params=None):
query, params = self._fix_for_params(query, params)
self._guess_input_sizes([params])
try:
return self.cursor.execute(query, self._param_generator(params))
except Database.DatabaseError as e:
# cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def executemany(self, query, params=None):
if not params:
# No params given, nothing to do
return None
# uniform treatment for sequences and iterables
params_iter = iter(params)
query, firstparams = self._fix_for_params(query, next(params_iter))
# we build a list of formatted params; as we're going to traverse it
# more than once, we can't make it lazy by using a generator
formatted = [firstparams] + [self._format_params(p) for p in params_iter]
self._guess_input_sizes(formatted)
try:
return self.cursor.executemany(query,
[self._param_generator(p) for p in formatted])
except Database.DatabaseError as e:
# cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def fetchone(self):
row = self.cursor.fetchone()
if row is None:
return row
return _rowfactory(row, self.cursor)
def fetchmany(self, size=None):
if size is None:
size = self.arraysize
return tuple(_rowfactory(r, self.cursor) for r in self.cursor.fetchmany(size))
def fetchall(self):
return tuple(_rowfactory(r, self.cursor) for r in self.cursor.fetchall())
def close(self):
try:
self.cursor.close()
except Database.InterfaceError:
# already closed
pass
def var(self, *args):
return VariableWrapper(self.cursor.var(*args))
def arrayvar(self, *args):
return VariableWrapper(self.cursor.arrayvar(*args))
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return CursorIterator(self.cursor)
class CursorIterator(six.Iterator):
"""Cursor iterator wrapper that invokes our custom row factory."""
def __init__(self, cursor):
self.cursor = cursor
self.iter = iter(cursor)
def __iter__(self):
return self
def __next__(self):
return _rowfactory(next(self.iter), self.cursor)
def _rowfactory(row, cursor):
# Cast numeric values as the appropriate Python type based upon the
# cursor description, and convert strings to unicode.
casted = []
for value, desc in zip(row, cursor.description):
if value is not None and desc[1] is Database.NUMBER:
precision, scale = desc[4:6]
if scale == -127:
if precision == 0:
# NUMBER column: decimal-precision floating point
# This will normally be an integer from a sequence,
# but it could be a decimal value.
if '.' in value:
value = decimal.Decimal(value)
else:
value = int(value)
else:
# FLOAT column: binary-precision floating point.
# This comes from FloatField columns.
value = float(value)
elif precision > 0:
# NUMBER(p,s) column: decimal-precision fixed point.
# This comes from IntField and DecimalField columns.
if scale == 0:
value = int(value)
else:
value = decimal.Decimal(value)
elif '.' in value:
# No type information. This normally comes from a
# mathematical expression in the SELECT list. Guess int
# or Decimal based on whether it has a decimal point.
value = decimal.Decimal(value)
else:
value = int(value)
elif desc[1] in (Database.STRING, Database.FIXED_CHAR,
Database.LONG_STRING):
value = to_unicode(value)
casted.append(value)
return tuple(casted)
def to_unicode(s):
"""
Convert strings to Unicode objects (and return all other data types
unchanged).
"""
if isinstance(s, six.string_types):
return force_text(s)
return s
| oinopion/django | django/db/backends/oracle/base.py | Python | bsd-3-clause | 24,986 | 0.001641 |
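The core of FormatStylePlaceholderCursor._fix_for_params() above is the rewrite from Django's format-style '%s' placeholders to Oracle ':argN' (or ':name') bind variables. A stand-alone sketch of that transformation, ignoring the Unicode conversion and OracleParam wrapping that the real cursor performs (function name and queries are invented):

def oracle_placeholders(query, params):
    """Rewrite format-style placeholders to Oracle-style named binds."""
    if params is None:
        return query, []
    if hasattr(params, 'keys'):                         # dict params -> :name binds
        args = {k: ':%s' % k for k in params}
        return query % args, params
    args = [':arg%d' % i for i in range(len(params))]   # sequence params -> :argN binds
    return query % tuple(args), list(params)

print(oracle_placeholders('SELECT * FROM t WHERE a = %s AND b = %s', (1, 'x')))
# ('SELECT * FROM t WHERE a = :arg0 AND b = :arg1', [1, 'x'])
print(oracle_placeholders('SELECT * FROM t WHERE a = %(a)s', {'a': 1}))
# ('SELECT * FROM t WHERE a = :a', {'a': 1})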