Can't convert Tkinter inputs into numbers
Question:
__author__ = 'Feuer'

from tkinter import *

root = Tk()
root.geometry("750x400")

def stop():
    exit()

def plus():
    global erg11
    erg11 = z1 + z2

class Start:
    def __init__(self, master):
        frame = Frame(master)
        frame.pack()
        self.Label_1 = Label(frame, text="Bitte wählen Sie die Rechenart aus, die Sie benutzen möchten!")
        self.Label_2 = Label(frame, text="Addition")
        self.Label_3 = Label(frame, text="Subtraktion")
        self.Label_4 = Label(frame, text="Multiplikation")
        self.Label_5 = Label(frame, text="Division")
        self.Label_6 = Label(frame, text="Wurzel")
        self.Label_7 = Label(frame, text="Logarithmus")
        self.Button_1 = Button(frame, text="Go!", command=Add)
        self.Button_2 = Button(frame, text="Go!")
        self.Button_3 = Button(frame, text="Go!")
        self.Button_4 = Button(frame, text="Go!")
        self.Button_5 = Button(frame, text="Go!")
        self.Button_6 = Button(frame, text="Go!")
        self.Button_7 = Button(frame, text="Das Programm beenden!", command=stop)
        self.Label_1.grid(row=0, columnspan=2)
        self.Label_2.grid(row=1, column=0)
        self.Label_3.grid(row=2, column=0)
        self.Label_4.grid(row=3, column=0)
        self.Label_5.grid(row=4, column=0)
        self.Label_6.grid(row=5, column=0)
        self.Label_7.grid(row=6, column=0)
        self.Button_1.grid(row=1, column=1)
        self.Button_2.grid(row=2, column=1)
        self.Button_3.grid(row=3, column=1)
        self.Button_4.grid(row=4, column=1)
        self.Button_5.grid(row=5, column=1)
        self.Button_6.grid(row=6, column=1)
        self.Button_7.grid(row=7, columnspan=2)

class Add:
    def __init__(self):
        newwin = Toplevel()
        newwin.geometry("750x400")
        frame2 = Frame(newwin)
        frame2.pack()
        global erg11
        global z1
        global z2
        erg11 = "Ready"
        self.Label_1 = Label(frame2, text="Additionsverfahren")
        self.Entry_1 = Entry(frame2)
        self.Label_2 = Label(frame2, text="+")
        self.Entry_2 = Entry(frame2)
        self.Label_3 = Label(frame2, text="=")
        self.Button_1 = Button(frame2, text="Zurück", command=newwin.destroy)
        self.Button_2 = Button(frame2, text="Ergebniss berechnen")
        self.Label_Erg1 = Label(frame2, text=erg11)
        self.Button_2.bind("<Button-1>", plus)
        self.Label_1.grid(row=0, columnspan=4)
        self.Entry_1.grid(row=1, column=0)
        self.Label_2.grid(row=1, column=1)
        self.Entry_2.grid(row=1, column=2)
        self.Label_3.grid(row=1, column=3)
        self.Button_2.grid(row=2, columnspan=4)
        self.Button_1.grid(row=3, columnspan=4)
        self.Label_Erg1.grid(row=1, column=4)
app = Start(root)
root.mainloop()
This is the code I am using at the moment. I'm trying to create a little
calculator with a GUI in Python. I can't figure out how to get the variables
(z1 / z2) out of Entry_1 and Entry_2 when someone presses Button_2 (in the
second class). Could anyone suggest some code to fix it?
* * *
Edit:
_I edited the code so that everybody could try to find a solution for my
problem, because my own attempts ended in a stalemate. (Ayres)_
Answer: The contents of your entries are read immediately after they are created,
which leads `get()` to return an empty string that can't be converted.
The `get` method has to be called later, at the moment the values are actually
needed; in your case, inside the callback that runs when the button is pressed.
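A minimal, self-contained sketch of that idea (assuming the widget names from the question; the `event` parameter is needed because `bind` passes an event object):

from tkinter import *

class Add:
    def __init__(self, master):
        frame = Frame(master)
        frame.pack()
        self.Entry_1 = Entry(frame)
        self.Entry_2 = Entry(frame)
        self.Button_2 = Button(frame, text="Ergebniss berechnen")
        self.Label_Erg1 = Label(frame, text="Ready")
        self.Button_2.bind("<Button-1>", self.plus)
        for widget in (self.Entry_1, self.Entry_2, self.Button_2, self.Label_Erg1):
            widget.pack()

    def plus(self, event):
        # read and convert the entry contents only when the button is clicked
        z1 = float(self.Entry_1.get())
        z2 = float(self.Entry_2.get())
        self.Label_Erg1.config(text=str(z1 + z2))

root = Tk()
app = Add(root)
root.mainloop()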
|
Socket issue when using threads
Question: I've been working on a Python game in my spare time, and I've run into a
problem. I'm working with sockets using the basic threads module, and it works
fine when I connect to the server file with one client. But with more than
one, any client that connects after the first freezes up the server and the
first client.
Here is the code for the server
import socket
import random
import thread
from saveState import Save
from grid import Grid
import time

players = 0
save = Save()
grid = Grid()

def ready(c):
    ready = raw_input("Are you ready to play?\n")
    if(ready == "yes" or ready == "y"):
        grid.makeGrid()
        c.send("ready")

def clientThread(conn, players):
    while True:
        print "taking requests"
        request = conn.recv(1024)
        segments = request.split(",,")
        if(segments[0] == "0" and players < 200):
            print "registering player", addr
            serial = random.choice(list(range(999999)))
            conn.send("{}".format(serial))
            save.players[serial] = segments[2:]
            print save.players[serial][9]
            players += 1
        elif(segments[0] == "3"):
            if(segments[2] == "land"):
                conn.send("{},,{},,{},,{}".format(grid.getLandType(int(save.players[serial][9]), int(save.players[serial][10])), grid.getDesc(int(save.players[serial][9]), int(save.players[serial][10])), int(save.players[serial][9]), int(save.players[serial][10])))
        elif(segments[0] == "2"):
            if(segments[2] == "playerX" and int(segments[3]) == -1):
                save.players[serial][9] = int(save.players[int(serial)][9]) - 1
            elif(segments[2] == "playerX"):
                save.players[serial][9] = int(save.players[int(serial)][9]) + 1
            if(segments[2] == "playerY" and int(segments[3]) == -1):
                save.players[serial][10] = int(save.players[int(serial)][10]) - 1
            elif(segments[2] == "playerY"):
                save.players[serial][10] = int(save.players[int(serial)][10]) + 1
        elif(segments[0] == "4"):
            alreadySent = []
            for m in grid.monsters:
                if(m.X == save.players[int[segment[1]]][9] and m.Y == save.players[int[segment[1]]][10] and alreadySent[m] == False):
                    conn.send("{},,{}".format(m.name, True))
                elif(time.clock == 60*60*(12+8)):
                    conn.send("{},,{}".format("You see the sun set on the horizon. Monsters will be more aggressive now.", False))
        else:
            print "sorry, there is an inconsistency in the request or the queue is full."

try:
    # start up socket
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    name = socket.gethostbyname(socket.gethostname())
    print name
    port = input("select port\n")
    s.bind((name, port))
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # listen for any attempts to connect to the api
    # if anyone connects, give them a serial number and add their data to a storage file
    while True:
        s.listen(5)
        c, addr = s.accept()
        thread.start_new_thread(ready, (c,))
        thread.start_new_thread(clientThread, (c, players))
    conn.close
    sock.close
except socket.error:
    print " either the server port is closed or in use. try again"
and the client
import random
from grid import Grid
from player import Player
from descriptions import Descriptions
import socket
import time
import thread

description = Descriptions()

def descisionHandler(s, serial):
    while True:
        s.send("{},,{},,{}".format(3, serial, "land"))
        response = s.recv(1024).split(",,")
        print "you are on a {} tile \n {} \n {} \n {}".format(response[0], response[1], response[2], response[3])
        action = raw_input("What Will You Do?\n")
        try:
            if(action == "west" and player.locX > 0):
                s.send("{},,{},,{},,{}".format(2, serial, "playerX", -1))
                time.sleep(0.5)
            elif(action == "east" and player.locX < 199):
                s.send("{},,{},,{},,{}".format(2, serial, "playerX", 1))
                time.sleep(0.5)
            elif(action == "north" and player.locY > 0):
                s.send("{},,{},,{},,{}".format(2, serial, "playerY", -1))
                time.sleep(0.5)
            elif(action == "south" and player.locY < 199):
                s.send("{},,{},,{},,{}".format(2, serial, "playerY", 1))
                time.sleep(0.5)
            # elif(action == "attack" and monster_data[1]):
            #     print "The {} wakes up! A battle begins!".format(monster_data[0])
            elif(action == "profile"):
                print " You are {} \n {} \n your role is {} \n you have an attack of {} \n a defense of {} \n a speed of {} \n and {} hitpoints \n attacks: {} \n you are located at {} {}".format(player.name,
                    player.backstory, player.role, player.attack, player.defense, player.speed, player.hitpoints, player.attacks, player.locX, player.locY)
            elif(action == "exit"):
                break
        except IndexError:
            pass

def eventHandler(s, serial):
    while True:
        s.send("{},,{}".format(4, serial))
        response = s.recv(1024).split(",,")
        print response[0]
        return bool(response[1])

while True:
    try:
        print "\nWelcome to Overseer! We need a few things before we begin\n"
        name = raw_input("What is your name?\n")
        backstory = raw_input("What is in your past: choose one \n chosen \n magician \n poet\n")
        role = raw_input("what is your class: choose one \n Warrior \n Mage \n Rougue \n Bard\n")
        player = Player(name, description.player_backstory[backstory], role, 5, 5, 5, 10, {"scrap": 10}, random.choice(list(range(200))), random.choice(list(range(200))))
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        host = raw_input("what host are you connecting to?")
        port = input("what port?\n")
        s.connect((host, port))
        print "connection successful."
        time.sleep(5)
        s.send("{},,{},,{},,{},,{},,{},,{},,{},,{},,{},,{},,{},,{}".format(0, 0, name, backstory, role, 5, 5, 5, 5, 10, player.attacks, player.locX, player.locY))
        serial = s.recv(1024)
        print "You're serial number is {}".format(serial)
        while(s.recv(1024) != "ready"):
            pass
        break
    except socket.error:
        print "server is not running or is busy. please try again."

eventThread = thread.start_new_thread(eventHandler, (s, serial))
descisionThread = thread.start_new_thread(descisionHandler, (s, serial))

while 1:
    pass
I did a bit of research and my best guess is that I need to use locks from the
threading module, but I'm not sure. Any suggestions?
Thanks in advance!
Answer: So the issue was the console input, as theSmallNothing said. There wasn't
really a way around this limitation without serious hacking, so I decided to
improvise. My solution was to create a web app with Python instead of using a
console. There were a few advantages to this:
  * the server can handle multiple inputs at a time easily
  * things can happen while input is being entered (the solution to my problem)
  * no files need to be downloaded by the user, as everything can be accessed simply by entering the web address.
While not a perfect solution, sometimes finding an alternative is the next
best thing.
Thanks all for your awesome help!
|
Convert Linux's curl program to a python code
Question: I need to convert the following curl command to an equivalent Python program,
and I also want to know how to store the response of this request, which is a
CSV file, in Python.
curl --data-binary @obama.txt
"<http://www.sentiment140.com/api/bulkClassify?query=obama>"
This is a program used for calculating sentiment score for each tweet in the
text file obama.txt.
ex: obama.txt may contain: \n obama is awesome \n I do not know who is obama
\n obama is eating a potato
(\n = new line)
the --data-binary flag tells curl to preserve the new-line characters in the
sent data set, and the @obama.txt tells curl which filename to read the data
from.
The response will be a CSV with two fields:
  * the polarity. The polarity values are: 0: negative, 2: neutral, 4: positive
  * the text
How can I implement the same functionality in Python? I have very little
experience with Python, so if someone could give the exact code it would be
really appreciated.
Answer: Use the Python [requests](http://docs.python-requests.org/en/latest/) library:
>>> import requests
>>> with open('obama.txt','r') as f:
... text = f.read()
...
>>> r=requests.post('http://www.sentiment140.com/api/bulkClassify?query=obama',data=text)
>>> print r.text
"4","obama is awesome"
"2","I do not know who is obama"
"2","obama is eating a potato"
|
How to call a function from a class in Python; the answer must be in the links but I don't get it
Question: I'm using a function I downloaded somewhere, however it's not a function but a
class and I have no clue how to call it. This is approximately how it looks;
say this is examplefile.py:
class exampleclass:
    somevar = 1

    def func1(self, input):
        return input + somevar

    def func2(self, input):
        return input - somevar

def main():
    hardvar = 2
    examp = exampleclass()
    out1 = examp.func1(hardvar)
    out2 = examp.func2(hardvar)
    print(hardvar, out1, out2)

if __name__ == "__main__":
    main()
I just don't get how to use the functions inside of it. I tried
import examplefile
import exampleclass
from examplefile import exampleclass
some of these do import the class, then I try calling it like this
exampleclass.func1(1)
or
exampinstance= exampleclass
exampinstance.func1(1)
which both get me
TypeError: unbound method ...() must be called with ... instance as first argument (got list instance instead)
and that's what never works. I looked at several questions
([such](http://stackoverflow.com/questions/17437837/error-unbound-method-
dragon-must-be-called-with-enemy-instance-as-first-argu)
[as](http://stackoverflow.com/questions/2417192/python-init-issue-unbound-
method-init-must-be-called-with-bank-instan?rq=1)
[these](http://stackoverflow.com/questions/4473184/unbound-method-f-must-be-
called-with-fibo-instance-as-first-argument-got-cla?rq=1)) here, but I just
don't get how it works. Maybe you guys can see what I'm not getting.
Answer: I'd point you to the following link. It may help:
[How to instantiate a class in
python](http://stackoverflow.com/questions/396856/how-to-instantiate-a-class-
in-python)
However you are nearly there. The code would look something like:
exampinstance = exampleclass()
exampinstance.func1(1)
Note the () at the end of the first line. You need to instantiate the class
properly. Note how the class is instantiated in the main() function of the
file itself.
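For illustration, here is a self-contained sketch of the bound vs. explicit call (using a corrected version of the class, where `somevar` is accessed through `self`):

class exampleclass:
    somevar = 1

    def func1(self, value):
        return value + self.somevar

examp = exampleclass()               # the () creates an instance
print examp.func1(1)                 # bound call: examp is passed as self
print exampleclass.func1(examp, 1)   # the equivalent explicit form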
|
Python: how can I get the timezone-aware date in Django
Question: I am using delorean for datetime calculations in Python/Django.
<http://delorean.readthedocs.org/en/latest/quickstart.html>
This is what I am using:
now = Delorean(timezone=settings.TIME_ZONE).datetime
todayDate = now.date()
But I get this warning:
RuntimeWarning: DateTimeField start_time received a naive datetime (2014-12-09 00:00:00) while time zone support is active.
I want to know how to make it aware.
I also tried this:
`todayDate = timezone.make_aware(now.date(), timezone=settings.TIME_ZONE)`
then I get this:
`AttributeError: 'datetime.date' object has no attribute 'tzinfo'`
Answer: It's not clear whether you're trying to end up with a `date` object or a
`datetime` object, as Python doesn't have the concept of a "timezone aware
date".
To get a `date` object corresponding to the current time in the current time
zone, you'd use:
from django.utils.timezone import localtime, now
localtime(now()).date()
That is: you're getting the current timezone-aware `datetime` in UTC; you're
converting it to the local time zone (i.e. `TIME_ZONE`); and then taking the
date from that.
If you want to get a `datetime` object corresponding to 00:00:00 on the
current date in the current time zone, you'd use:
localtime(now()).replace(hour=0, minute=0, second=0, microsecond=0)
Based on this and [your other
question](http://stackoverflow.com/questions/27308951/how-can-i-remove-the-
timezone-offset-from-datetime-in-python), I think you're getting confused by
the Delorean package. I suggest sticking with Django's and Python's datetime
functionality.
|
Execute multiple statements on a MySQLdb cursor
Question: I want to execute multiple statements on a MySQLdb cursor over a database with
**MyISAM** storage. I followed every other way explained in this
[answer](http://stackoverflow.com/questions/5669878/python-mysqldb-when-to-
close-cursors) and elsewhere, without success.
>>> import MySQLdb as mdb
>>> from contextlib import closing
>>> con = mdb.connect('host', 'user', 'password', 'database')
>>> with closing(con.cursor()) as cur:
>>> cur.callproc('add_date', [1418112000])
>>> cur.callproc('add_date', [1418119000])
>>> #con.commit()
>>> cur.close()
_mysql_exceptions.ProgrammingError: (2014, "Commands out of sync; you can't run this command now")
Since the storage of tables is **MyISAM** I should not need to call commit().
In fact if I call commit() I will encounter an error.
What is the correct way to call multiple statements?
Answer: After calling the procedure, you have to close the cursor and open it again
before using it to execute another statement:
cur.close()
cur = con.cursor()
The cursor can be closed immediately after `fetchall()`. The result set still
remains and can be looped through. You have to clean/close the cursor after
calling stored procecude in order to execute further sql code. This is not an
issue with Python but with MySQL.
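Applied to the code from the question, the sequence would look something like this (a sketch; `fetchall()` consumes any result set the procedure may have produced):

cur = con.cursor()
cur.callproc('add_date', [1418112000])
cur.fetchall()       # drain the procedure's result set
cur.close()          # close the used cursor...

cur = con.cursor()   # ...and open a fresh one for the next call
cur.callproc('add_date', [1418119000])
cur.close()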
|
Check if second script is running or already finished
Question: I need to check if a `scriptA.py` is still running, in `scriptB.py`. Both are
started individually, but `scriptB.py` may only continue if `scriptA.py` is
still running.
I know I could use
import subprocess

process = subprocess.Popen(['pgrep', 'scriptA.py'], stdout=subprocess.PIPE)
process.wait()
if not process.returncode:
    print "Process running"
else:
    print "Process not running"
But scriptA runs in a tmux session, which is called like `tmux new -d -s
scriptA_2014-12-09-10-54-02-697559 'cd /home/user/scriptA; python scriptA.py
-flag; echo $? > /tmp/scriptA_2014-12-09-10-54-02-697559'`.
If I `pgrep scriptA.py`, it doesn't return the `PID`. `pgrep tmux` would work,
but there might be other tmux sessions, so I can't use that.
I could do something like `ps aux | grep scriptA.py | wc -l` and check the line count - but this feels very fragile.
How else could I verify if `scriptA.py` is running?
Answer: I'm now using the `PID`, written to a file when the script starts. The code I
use seems to work for my case:
In `scriptA`, at execution start:
pidf = open("./scriptA_pid.tmp","w")
pidf.write(str(os.getpid()))
pidf.close()
In `scriptB`, at the beginning of the loop that needs to be executed until
`scriptA` is done.
with open("./scriptA_pid.tmp","r") as f:
scriptA_pid = f.read()
chk_sA = subprocess.Popen(['kill -0 '+str(scriptA_pid)+' > /dev/null 2>&1; echo $?'],stdout=subprocess.PIPE,stderr=devnull,shell=True)
chk_sA.wait()
sA_status = chk_sA.stdout.read()
if int(sA_status) == 0:
#ScriptA is still running
pass
else:
#ScriptA is not running
sys.exit(0)
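As an alternative, the same check can be done without spawning a shell at all, using `os.kill` with signal 0, which performs only the existence/permission check (a sketch under that assumption):

import os
import errno

def pid_running(pid):
    """Return True if a process with the given PID exists."""
    try:
        os.kill(pid, 0)  # signal 0 sends nothing, it only checks for errors
    except OSError as e:
        # EPERM means the process exists but belongs to another user
        return e.errno == errno.EPERM
    return True

with open("./scriptA_pid.tmp") as f:
    print pid_running(int(f.read()))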
|
MAXScript Listener cannot run PySide
Question: Please help me!
I'm creating a GUI in Python that runs inside 3ds Max. I heard that I have to
use PySide to make it, and everything was fine until now.
This is my code:
import sys
from PySide import QtGui
from PySide.QtGui import *
from PySide.QtCore import *

class Window(QDialog):
    def __init__(self, parent=None):
        super(Window, self).__init__(parent)
        self.setMinimumHeight(660)
        self.setMinimumWidth(700)
        self.setMaximumHeight(660)
        self.setMaximumWidth(700)
        grid = QtGui.QGridLayout()
        grid.addWidget(self.First(), 0, 0, 2, 0)
        self.setLayout(grid)
        self.setWindowTitle("Library")
        self.resize(700, 660)

    def First(self):
        groupBox = QtGui.QFrame()
        groupBox.setMaximumWidth(230)
        groupBox.setMaximumHeight(700)
        lbRenderer = QtGui.QLabel("Renderer :", self)
        lbFolders = QtGui.QLabel("Folders :", self)
        cbRenderer = QtGui.QComboBox(self)
        cbRenderer.addItem("Vray")
        cbRenderer.addItem("Octane")
        lvFolders = QtGui.QListView(self)
        lvFolders.setMaximumWidth(220)
        lvFolders.setMaximumHeight(500)
        btnAddNewObject = QtGui.QPushButton('Add New Objects', self)
        btnNewSet = QtGui.QPushButton('New Set', self)
        vbox = QtGui.QGridLayout()
        vbox.addWidget(lbRenderer, 0, 0)
        vbox.addWidget(cbRenderer, 0, 1, 1, 3)
        vbox.addWidget(lbFolders, 2, 0, 1, 4)
        vbox.addWidget(lvFolders, 3, 0, 1, 4)
        vbox.setColumnStretch(1, 1)
        vbox.addWidget(btnAddNewObject, 4, 0, 1, 2)
        vbox.addWidget(btnNewSet, 4, 3)
        groupBox.setLayout(vbox)
        return groupBox

app = QApplication.instance()
if app is None:
    app = QApplication(sys.argv)
clock = Window()
clock.show()
app.exec_()
I tried other code similar to mine, and it runs fine from the MAXScript
Listener. But I don't know why, when I try to run this, nothing appears
(neither my GUI nor an alert telling me that my code is not good).
Answer: First of all - you are initializing your script wrong: you call the
'initialize' function, which returns `#Success` (meaning Python initialized
properly), but then you just send in a string (the path to the file), and this
does nothing. What you have to use is:
python.ExecuteFile "C:\\Program Files\\Autodesk\\3ds Max 2015\\scripts\\Python\\yourPythonScript.py"
in maxscript listener\editor.
Autodesk documentation says:
> Autodesk 3ds Max ships with a pre-built version of PySide 1.2 compatible
> with Python 2.7.3. This version includes the following sub-set of modules:
>
>
> QtCore
> QtGui
> QtNetwork
> QtOpenGL
> QtSql
> QtSvg
> QtTest
> QtWebKit
> QtXml
>
They have provided a simple sample script that you can run, save this in a
python file, then execute it properly with the command mentioned in the
beginning.
The code is here:
from PySide import QtGui
import MaxPlus

class _GCProtector(object):
    widgets = []

def make_cylinder():
    obj = MaxPlus.Factory.CreateGeomObject(MaxPlus.ClassIds.Cylinder)
    obj.ParameterBlock.Radius.Value = 10.0
    obj.ParameterBlock.Height.Value = 30.0
    node = MaxPlus.Factory.CreateNode(obj)
    time = MaxPlus.Core.GetCurrentTime()
    MaxPlus.ViewportManager.RedrawViews(time)
    return

app = QtGui.QApplication.instance()
if not app:
    app = QtGui.QApplication([])

def main():
    MaxPlus.FileManager.Reset(True)
    w = QtGui.QWidget()
    w.resize(250, 100)
    w.setWindowTitle('Window')
    _GCProtector.widgets.append(w)
    w.show()
    main_layout = QtGui.QVBoxLayout()
    label = QtGui.QLabel("Click button to create a cylinder in the scene")
    main_layout.addWidget(label)
    cylinder_btn = QtGui.QPushButton("Cylinder")
    main_layout.addWidget(cylinder_btn)
    w.setLayout(main_layout)
    cylinder_btn.clicked.connect(make_cylinder)

if __name__ == '__main__':
    main()
They also mention this which is important:
> Normally one creates a PySide application object in a script using
> QtGui.QApplication(). However, in 3ds Max, there is already a PySide
> application running, so you get a handle for that object like this:
QtGui.QApplication.instance()
Use that as a start script, and port your GUI items into that and it should
get you up and running.
|
multiprocessing broken pipe after a long time
Question: I developed a crawler using the multiprocessing model.
It uses multiprocessing.Queue to store the url-infos that need to be crawled
and the page contents that need to be parsed (and more); multiprocessing.Event
to control the sub-processes; and multiprocessing.Manager.dict to store hashes
of crawled urls; each multiprocessing.Manager.dict instance uses a
multiprocessing.Lock to control access.
All three types of params are shared between all sub-processes and the parent
process, and all the params are organized in a class; I use an instance of the
class to transfer the shared params from the parent process to the sub-
processes. Just like:

MGR = SyncManager()

class Global_Params():
    Queue_URL = multiprocessing.Queue()
    URL_RESULY = MGR.dict()
    URL_RESULY_Mutex = multiprocessing.Lock()
    STOP_EVENT = multiprocessing.Event()

global_params = Global_Params()
In my own timeout mechanism, I use process.terminate to stop a process that
can't stop by itself for a long time!
In my test case, there are 2500+ target sites (some are unserviceable, some
are huge). I crawl site by site through the target-sites file.
At the beginning the crawler worked well, but after a long time (sometimes 8
hours, sometimes 2 hours, sometimes more than 15 hours), after it has crawled
more than 100 (an indeterminate number of) sites, I get the error: "Errno 32
broken pipe"
I have tried the following methods to locate and solve the problem:
  1. locate the site A on which the crawler broke, then use the crawler to crawl site A separately; the crawler worked well. Even when I took a fragment (say 20 sites) of the whole target-sites file that contains site A, the crawler worked well!
  2. add "-X /tmp/pymp-* 240 /tmp" to /etc/cron.daily/tmpwatch
  3. when the break occurred, the file /tmp/pymp-* was still there
  4. use multiprocessing.managers.SyncManager instead of multiprocessing.Manager and ignore most signals except SIGKILL and SIGTERM
  5. for each target site, I clear most of the shared params (Queues, dicts and events); if an error occurred, I create a new instance:
while global_params.Queue_url.qsize() > 0:
    try:
        global_params.Queue_url.get(block=False)
    except Exception, e:
        print_info(str(e))
        print_info("Clear Queue_url error!")
        time.sleep(1)
global_params.Queue_url = Queue()

The following is the traceback info (the print_info function is defined by
myself to print and store debug info):

[Errno 32] Broken pipe
Traceback (most recent call last):
  File "Spider.py", line 613, in <module>
    main(args)
  File "Spider.py", line 565, in main
    spider.start()
  File "Spider.py", line 367, in start
    print_info("STATIC_RESULT size:%d" % len(global_params.STATIC_RESULT))
  File "<string>", line 2, in __len__
  File "/usr/local/python2.7.3/lib/python2.7/multiprocessing/managers.py", line 769, in _callmethod
    kind, result = conn.recv()
EOFError

I can't understand why. Does anyone know the reason?
Answer: I don't know if this fixes your problem, but there is one point to
mention:

global_params.Queue_url.get(block=False)

... throws a Queue.Empty exception if the Queue is empty. It's not worth
recreating the Queue for an empty exception.
The recreation of the queue can lead to race conditions.
From my point of view, you have two possibilities:
  1. get rid of the "queue recreation" code block
  2. switch to another Queue implementation
use:
from Queue import Queue
instead of:
from multiprocessing import Queue
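A minimal sketch of the first option, catching only the expected exception instead of recreating the queue (assuming the multiprocessing queue from the question, whose non-blocking `get` raises `Queue.Empty`):

from Queue import Empty

while True:
    try:
        global_params.Queue_url.get(block=False)
    except Empty:
        break  # the queue is drained; keep the existing instance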
|
pprofile.Profile().print_stats() display in IPython
Question: I tried pprofile to profile Python code line by line.

import pprofile
profiler = pprofile.Profile()

I printed the statistics to the IPython console using

profiler.print_stats()

This works; however, the printed table has a lot of rows and the upper part is
cut away by IPython. Since those are the data I'm interested in, I need to see
them. Is there any basic IPython setting that enlarges the number of rows to
display? Obviously the profiler.print_stats() command can't be modified to
exclude unneeded data from being displayed.
Answer: There are other methods you could call instead of print_stats(), like
> profiler.dump_stats(filename)
which dumps the same output to filename. Or, if you know the name of the file
you are interested in, you could call the method underlying the dump_* methods:
> profiler.annotate(sys.stdout, source_filename)
which dumps an annotated version of just the given filename.
See the module/class documentation for more information:
> help(pprofile)
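A small usage sketch combining these, based on pprofile's documented context-manager usage, which I assume here (`some_function` is a placeholder for the code you want to profile):

import pprofile

profiler = pprofile.Profile()
with profiler:
    some_function()  # placeholder for the code to profile
profiler.dump_stats('profile_results.txt')  # full output, nothing cut off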
|
Is it better practice to pass sometimes complex dicts for parameters instead of parameter objects?
Question: I've been programming Python for a year now, having come from a Java
background, and I've noticed that, at least in my organization, the style for
passing complex parameters to functions is to use dicts or tuples, rather than
instances of a specialized parameter class. For example, we have a method that
takes three dicts, each structured in a particular way, each of which is
itself formatted as tuples. It's complicated for me to build args and to read
the code. Here's an example of a passed arg:
{'[A].X': ((DiscreteMarginalDistribution, ('red', 'blue')), ()),
 '[A].Y': ((DiscreteConditionalDistribution, ('yellow', 'green'), ('red', 'blue')),
           (IdentityAggregator('[A].X'), ))}
My questions are:
1. Is passing dicts/tuples like this a common Python idiom?
2. When, if ever, do you write Python code to use the latter (parameter instances)? E.g., when the nested structure surpasses some complexity threshold.
Thanks in advance!
Answer: * Yes, it is very usual to pass a dictionary to Python functions in order to reduce the number of arguments. Dictionary-style configuration with proper key naming is much more readable than just using tuples.
* I consider it rather uncommon to dynamically construct dedicated instances of a custom config class. I'd stick with dictionaries for that. In case your config dict and the consumer of it go out of sync, you get `KeyError`s, which are pretty good to debug.
**Some recommendations and reasoning:**
If some parts of your application require really really complex configuration,
I consider it a good idea to have a configuration object that properly
represents the current config. However, in my projects I never ended up
passing such objects as function arguments. This smells. In some applications,
I have a _constant_ global configuration object, set up during bootstrap. Such
an object is globally available and treated as "immutable".
Single functions should never be so complex that they require to retrieve a
tremendously complex configuration. This indicates that you should split your
code into several components, each subunit having a rather simple
parameterization.
If the runtime configuration of a function is somewhat more complex than can
easily be handled with normal (keyword) arguments, it is absolutely
common to pass a dictionary, so to say as a "lightweight" configuration
object. A well thought-through selection of key names makes such an approach
very well readable. Of course you can also build up a hierarchy in case one
level is not enough for your use case.
Most importantly, please note that in many cases the best way is to explicitly
define the parameterization of a function via its signature, using the normal
argument specification:
def f(a, b, c, d, e):
    ...
In the calling code, you can then prepare the values for these arguments in a
dictionary:
arguments = {
    'a': 1,
    'b': 2,
    'c': 3,
    'd': 4,
    'e': "x",
}

and then use Python's syntactic sugar for keyword expansion upon function
call:

f(**arguments)
|
Combining nested collection in mongodb with documents written from parallel nodes
Question: I'm in the process of figuring out whether I can use MongoDB to help with our
storage and processing issues. The idea is that computation will be done on
each node in a multi-processed way and written to mongodb with a unique
mongodb **ObjectId**. The data structure is as follows below from a
dictionary:
{a: {b: {c: [100, 200, 300]}}}
a, b and c are integer keys
When the computation has been done and all the records written out to mongo,
the documents must be combined such that the we group by the top level a, then
by b and then c. So two documents might contain (**Example A**):
document1: `{24: {67: {12: [100, 200]}}}`
document2: `{24: {68: {12: [100, 200]}}}`
_then if we combine:_
combined: `{24: {67: {12: [100, 200]}, 68: {12: [100, 200]}}}`
and if we had another couple of documents (**ExampleB**):
document1: `{24: {67: {12: [100, 200]}}}`
document2: `{24: {67: {12: [300, 400]}}}`
combined: `{24: {67: {12: [100, 200, 300, 400]}}}`
What is the best approach to combining these nested structures? I could loop
through each document by hand, say in Python, but is there a smarter approach
to this? I need to keep the underlying data structure.
Answer: What is not smart about doing the aggregation in Python? Consider the
following function:

def aggregate(documents, base_document=None, unique=True):
    # use unique=False to keep all values in the lists, even if repeated
    # like [100, 100, 200, 300]; leave it True otherwise
    for doc in documents:
        if isinstance(doc, list):
            if base_document is None: base_document = []
            for d in doc:
                base_document.append(d)
            if unique == True: base_document = set(base_document)
            base_document = sorted(base_document)
        else:
            if base_document is None: base_document = {}
            for d in doc:
                b = base_document[d] if d in base_document \
                    else [] if isinstance(doc[d], list) else {}
                base_document[d] = aggregate([doc[d]], base_document=b)
    return base_document
Tested with the following set of documents, it produces the aggregation:
documents = [ {20: {55: { 7: [100, 200]}}},
{20: {68: {12: [100, 200]}}},
{20: {68: {12: [500, 200]}}},
{23: {67: {12: [100, 200]}}},
{23: {68: {12: [100, 200]}}},
{24: {67: {12: [300, 400]}}},
{24: {67: {12: [100, 200]}}},
{24: {67: {12: [100, 200]}}},
{24: {67: {12: [300, 500]}}},
{24: {67: {13: [600, 400]}}},
{24: {67: {13: [700, 900]}}},
{24: {68: {12: [100, 200]}}},
{25: {67: {12: [100, 200]}}},
{25: {67: {12: [300, 400]}}}, ]
from pprint import pprint
pprint(aggregate(documents))
'''
{20: {55: {7: [100, 200]}, 68: {12: [100, 200, 500]}},
23: {67: {12: [100, 200]}, 68: {12: [100, 200]}},
24: {67: {12: [100, 200, 300, 400, 500], 13: [400, 600, 700, 900]},
68: {12: [100, 200]}},
25: {67: {12: [100, 200, 300, 400]}}}
'''
|
Enable Cython profiling for whole program?
Question: The Cython docs say "Profiling in Cython is controlled by a compiler
directive. It can be set either for an entire file or on a per function basis
via a Cython decorator."
Is there any easy way to enable Cython profiling for an entire Python program?
That is, is there a way for me to not have to go through and add `# cython:
profile=True` to dozens of files each time I want to turn profiling on and
off?
Answer: I believe you can set the directives globally by passing an option on the
command line to `cython`. It is described in the "Compilation" section of the
documentation under "How to set directives"
(<http://docs.cython.org/src/reference/compilation.html#how-to-set-
directives>).
> One can also pass a directive on the command line by using the -X switch:
>
> $ cython -X boundscheck=True ...
>
> Directives passed on the command line will override directives set in header
> comments.
If you are compiling through `distutils` (`setup.py`) and using the
`cythonize` function, it appears that you can add the option
`compiler_directives`, a dictionary that maps directive names to the
corresponding value. I have not found documentation for this feature, but it
appears to be how the `cython` program invokes the `cythonize` function
(<https://github.com/cython/cython/blob/master/Cython/Build/Cythonize.py#L83>).
For example
from distutils.core import setup
from Cython.Build import cythonize

setup(
    name = "My hello app",
    ext_modules = cythonize("src/*.pyx",
                            compiler_directives={'profile': True})
)
|
Python Dataset package & looping / updating rows --
Question: I am trying to retrieve the contents of my sqlite3 database and updating this
data utilizing a scraper in a for loop.
The presumed flow is as follows:
* Retrieve all rows from the dataset
* For each row, find the URL column and fetch some additional (updated) data
* Once this data has been obtained, upsert (update, add columns if not existent) this data to the row the URL was taken from.
I love the dataset package because of 'upsert', allowing it to dynamically add
whatever columns I may have added to the database if non-existent.
My code produces an error I can't explain, however:

ResourceClosedError: This result object is closed.

How would I go about achieving my goal without running into this? The
following snippet recreates my issue:
import dataset

db = dataset.connect('sqlite:///test.db')
# Add two dummy rows
testrow1 = {'TestID': 1}
testrow2 = {'TestID': 2}
db['test'].upsert(testrow1, ['TestID'])
db['test'].upsert(testrow2, ['TestID'])
print("Inserted testdata before loop")
# This works fine
testdata = db['test'].all()
for row in testdata:
    print row

# This gives me an 'ResourceClosedError: This result object is closed.' error?
i = 1 # 'i' here exemplifies data that I'll add through my scraper.
testdata = db['test'].all()
for row in testdata:
    data = {'TestID': i+1000}
    db['test'].upsert(data, ['TestID'])
    print("Upserted within loop (i = " + str(i) + ")")
    i += 1
Answer: The issue might be you are querying the dataset and accessing the result
object (under 'this works fine") and reading it all in a loop and then
immediately trying to do another loop again with upserts on the same result
object. The error is telling you that the resource has been closed, basically
once you read it the connection is closed automatically (as a feature!). ([see
this answer](http://stackoverflow.com/questions/14375666/sqlalchemy-prevent-
automatic-closing/14388356#14388356) about 'automatic closing' for more on the
why and ways to get around it.)
Given that result resources tend to get closed, try fetching the results again
at the beginning of your upsert loop:
i = 1 # 'i' here exemplifies data that I'll add through my scraper.
testdata = db['test'].all()
for row in testdata:
    data = {'TestID': i}
    db['test'].upsert(data, ['TestID'])
    print("Upserted within loop (i = " + str(i) + ")")
    i += 1
**Edit** : See comment, the above code would change the testdata inside the
loop and thus still gives the same error, so a way to get around this is to
read the data into an array first and then loop through that array to do the
updates. Something like:
i = 1 # 'i' here exemplifies data that I'll add through my scraper.
testdata = [row for row in db['test'].all()]
for row in testdata:
    data = {'TestID': i}
    db['test'].upsert(data, ['TestID'])
    print("Upserted within loop (i = " + str(i) + ")")
    i += 1
|
Python cursor based reading and returning for large data following OOP structure
Question: In my situation, I have a main processing Python script that creates a class
(FileIterator) which will iterate through a large data file line by line.
class FileIterator:
    def read_data(self, input_data):
        with open(input_data, 'r') as input:
            for line in input:
                <perform operation>
What I am trying to do is to replace "perform operation" with a return command
(or substitute) to return the line back to the main script so that I can do
operations on the line outside of the FileIterator.
main_process.py
import FileIterator
import Operations

def perform_operations():
    iterator = FileIterator()
    operator = Operations()
    line = iterator.read_data('largedata.txt')
    operator.do_something(line)
Is there a suitable replacement for read_data() that will still allow me to
read line by line without storing the whole entire file into memory AND be
able to either save the line value into the object attribute self.line or
return it to the calling script?
Please let me know if more details about the design is necessary to reach a
solution.
EDIT: What I'm looking for is to limit FileIterator's responsibility to
reading large files. The script that manages FileIterator should be
responsible for taking each line and feeding these lines to the class
Operations (for simplicity since I will have multiple classes that will need
to act on this line).
Think of this design as an assembly-line structure where the FileIterator's
job is to chop up the file. There are other workers that will take the results
from FileIterator and perform other tasks on them.
EDIT 2: Changing title because I feel it was misleading and people are
upvoting the answer that was basically just a copy paste of my question.
Answer: `file` already supports line-wise iteration.
with open('largedata.txt', 'r') as fp:
    for line in fp:
        operator.do_something(line)
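If you want to keep the `FileIterator` class from the question, replacing `<perform operation>` with a `yield` turns `read_data` into a generator, so the managing script receives one line at a time without the whole file ever being held in memory (a sketch using the names from the question):

class FileIterator:
    def read_data(self, input_data):
        with open(input_data, 'r') as input_file:
            for line in input_file:
                yield line  # hand each line to the caller lazily

def perform_operations():
    iterator = FileIterator()
    operator = Operations()
    for line in iterator.read_data('largedata.txt'):
        operator.do_something(line)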
|
How to "pretty print" a python pandas DatetimeIndex
Question: I am new to pandas and still amazed by what it can do, although sometimes also
by how things are done ;-)
I managed to write a little script which will report on the number of missing
values encountered in a timeseries, either in each month or in each year of
the series. Below is the code which uses some dummy data for demonstration.
If I print the returned result (`print cnty` or `print cntm`), everything
looks fine, except that I would like to format the datetime value of the index
according to the resolution of my data, i.e. I would wish to have `2000 1000
10 15` instead of `2000-12-31 1000 10 15` for the annual output and `2000-01
744 10 15` for the monthly output. Is there an easy way to do this in pandas
or do I have to go through some loops and convert things into "plain" python
before printing it. Note: I do not know in advance how many data columns I
have, so anything with fixed format strings per row wouldn't work for me.
import numpy as np
import pandas as pd
import datetime as dt

def make_data():
    """Make up some bogus data where we know the number of missing values"""
    time = np.array([dt.datetime(2000,1,1)+dt.timedelta(hours=i)
                     for i in range(1000)])
    wd = np.arange(0.,1000.,1.)
    ws = wd*0.2
    wd[[2,3,4,8,9,22,25,33,99,324]] = -99.9 # 10 missing values
    ws[[2,3,4,10,11,12,565,644,645,646,647,648,666,667,669]] = -99.9 # 15 missing values
    data = np.array(zip(time,wd,ws), dtype=[('time', dt.datetime),
                                            ('wd', 'f4'), ('ws', 'f4')])
    return data

def count_miss(data):
    time = data['time']
    dff = pd.DataFrame(data, index=time)
    # two options for setting missing values:
    # 1) replace everything less or equal -99
    for c in dff.columns:
        ser = pd.Series(dff[c])
        ser[ser <= -99.] = np.nan
        dff[c] = ser
    # 2) alternative: if you know the exact value to be replaced
    # you can use the DataFrame replace method:
    ## dff.replace(-99.9, np.nan, inplace=True)
    # add the time variable as data column
    dff['time'] = time
    # count missing values
    # the print expressions will print date labels and the total number of values
    # in the time column plus the number of missing values for all other columns
    # annually:
    cnty = dff.resample('A', how='count', closed='right', label='right')
    for c in cnty.columns:
        if c != 'time':
            cnty[c] = cnty['time']-cnty[c]
    # monthly:
    cntm = dff.resample('M', how='count', closed='right', label='right')
    for c in cntm.columns:
        if c != 'time':
            cntm[c] = cntm['time']-cntm[c]
    return cnty, cntm

if __name__ == "__main__":
    data = make_data()
    cnty, cntm = count_miss(data)
Final note: there is a `format` method on DatetimeIndex, but unfortunately no
explanation of how to use it.
Answer: The `format` method of `DatetimeIndex` performs similarly to the `strftime` of
a `datetime.datetime` object.
What that means is that you can use the format strings found here:
<http://www.tutorialspoint.com/python/time_strftime.htm>
The trick is that you have to pass a function to the `formatter` kwarg of the
`format` method. That looks like this (just an example, somewhat unrelated to
your code):
import pandas
dt = pandas.DatetimeIndex(periods=10, start='2014-02-01', freq='10T')
dt.format(formatter=lambda x: x.strftime('%Y %m %d %H:%M.%S'))
Output:
['2014 02 01 00:00.00',
'2014 02 01 00:10.00',
'2014 02 01 00:20.00',
'2014 02 01 00:30.00',
'2014 02 01 00:40.00',
'2014 02 01 00:50.00',
'2014 02 01 01:00.00',
'2014 02 01 01:10.00',
'2014 02 01 01:20.00',
'2014 02 01 01:30.00']
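Applied to the output from your question, this could look something like the following sketch (`format` returns a list of strings, which can be assigned back as the index before printing):

cnty.index = cnty.index.format(formatter=lambda x: x.strftime('%Y'))
cntm.index = cntm.index.format(formatter=lambda x: x.strftime('%Y-%m'))
print cnty
print cntm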
|
Bad reloc address 0x0 in section.data C extensions for python
Question: I'm trying to write a script to automate a device in Python. The device is
programmed in C, and I'm currently attempting to write a C wrapper so that I
can call those functions from Python later. I'm following
[this](http://csl.name/C-functions-from-Python/) tutorial.
The original C functions are hidden in a .lib file, but the header file with
all the function declarations is provided. Here is a snippet of what it looks
like:
#ifdef VNX_ATTEN_EXPORTS
#define VNX_ATTEN_API __declspec(dllexport)
#else
#define VNX_ATTEN_API __declspec(dllimport)
#endif
VNX_ATTEN_API void fnLDA_SetTestMode(bool testmode);
VNX_ATTEN_API int fnLDA_GetNumDevices();
VNX_ATTEN_API int fnLDA_GetDevInfo(DEVID *ActiveDevices);
VNX_ATTEN_API int fnLDA_GetModelName(DEVID deviceID, char *ModelName);
VNX_ATTEN_API int fnLDA_InitDevice(DEVID deviceID);
VNX_ATTEN_API int fnLDA_CloseDevice(DEVID deviceID);
VNX_ATTEN_API int fnLDA_GetSerialNumber(DEVID deviceID);
VNX_ATTEN_API int fnLDA_GetDeviceStatus(DEVID deviceID);
Here is the C wrapper that I'm attempting to create
#include <stdio.h>
#include <Python.h>

extern "C" {
#include "VNX_atten.h"
}
//#include <stdafx.h>

/*
 * Function to be called from Python
 */
extern "C" {

static PyObject* py_fnLDA_SetTestMode(PyObject* self, PyObject* args)
{
    double x;
    double y = 1;
    PyArg_ParseTuple(args, "d", &x);
    if(x==1)
        fnLDA_SetTestMode(true);
    else
        fnLDA_SetTestMode(false);
    return Py_BuildValue("d", y);
}

/*
 * Another function to be called from Python
 */
static PyObject* py_myOtherFunction(PyObject* self, PyObject* args)
{
    double x, y;
    PyArg_ParseTuple(args, "dd", &x, &y);
    return Py_BuildValue("d", x*y);
}

/*
 * Bind Python function names to our C functions
 */
static PyMethodDef myModule_methods[] = {
    {"fnLDA_SetTestMode", py_fnLDA_SetTestMode, METH_VARARGS},
    {"myOtherFunction", py_myOtherFunction, METH_VARARGS},
    {NULL, NULL}
};

/*
 * Python calls this to let us initialize our module
 */
void initmyModule()
{
    (void) Py_InitModule("myModule", myModule_methods);
}

}
}
The compilation call I'm trying is

g++ -shared -IC:/Python27/include -LC:/Python27/libs myModule.cpp -lpython27 -o myModule.pyd

Based on my searches I found
[this](http://stackoverflow.com/questions/17773269/bad-reloc-address-using-
mingw) question on SO and tried

gcc -shared -IC:/Python27/include -LC:/Python27/libs myModule.c -DVNX_ATTEN_EXPORTS=1 -lpython27 -o myModule.pyd

It hasn't helped.
I'm getting the error "bad reloc address 0x0 in section .data" "collect2.exe:
error: ld returned 1 exit status"
Compilation is on a Windows XP (so 32 bit) platform using MinGW libraries and
Python 2.7.
Let me know if you need any further information and thanks in advance!! PS:
Does this count as cross compiling?
EDIT: ADDED THE ENTIRE ERROR MESSAGE
C:\cygwin\home\VRaghu\Attenuator\LDA SDK\ANSI C SDK\VNX_Atest>g++ -shared -IC:/P
ython27/include -LC:/Python27/libs myModule.cpp -DVNX_ATTEN_EXPORTS=1 -lpython27
-o myModule.pyd
C:\DOCUME~1\VRaghu\LOCALS~1\Temp\ccFkPRnf.o:myModule.cpp:(.text+0x40): undefined
reference to `fnLDA_SetTestMode'
C:\DOCUME~1\VRaghu\LOCALS~1\Temp\ccFkPRnf.o:myModule.cpp:(.text+0x50): undefined
reference to `fnLDA_SetTestMode'
c:/mingw/bin/../lib/gcc/mingw32/4.8.1/../../../../mingw32/bin/ld.exe: C:\DOCUME~
1\VRaghu\LOCALS~1\Temp\ccFkPRnf.o: bad reloc address 0x0 in section `.data'
collect2.exe: error: ld returned 1 exit status
Answer: You have to link the lib file using `g++ ... -llibraryName`.
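For example, if the SDK's library file is `VNX_atten.lib` in the current directory (an assumption; substitute the actual name of the .lib file shipped with your SDK), the link line could look like:

g++ -shared -IC:/Python27/include -LC:/Python27/libs -L. myModule.cpp -lVNX_atten -lpython27 -o myModule.pyd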
|
Frequency analysis of values produced by generator
Question: I'm doing some analysis of images, and I have a generator that gives me all
pixels in an image:

def pixels_g(img):
    w, h = img.shape
    for y in range(0, h):
        for x in range(0, w):
            yield img[y][x]
Its output, if converted to a list, would be something like
[0, 1, 2, 5, 240, 5, ... ]
Now I'd like to get "frequency table" for it:
{
0: 0,
1: 5,
2: 10,
3: 0,
4: 0,
#snip
255: 7
}
I've found some useful options in [this
question](http://stackoverflow.com/questions/2600191/how-can-i-count-the-
occurrences-of-a-list-item-in-python), but they all work with lists, and I
don't think that creating a list from my generator is a good idea - it can
have **millions of elements**.
I'm therefore looking for a way to do this while preserving the benefits of a
generator. I'll process many images and don't want to hog resources too much.
Answer: Use `Counter` from the `collections` module. It works with any iterable, not just lists.
from collections import Counter
pixels = pixels_g(img)
c = Counter(pixels)
print c[4]
If you need to reuse the contents of 'pixels' generator instance after running
it through Counter, use 'tee' from itertools:
from collections import Counter
from itertools import tee
(pixels, hist) = tee(pixels_g(img))
c = Counter(pixels)
# can use hist for something else
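If you need the full 0-255 frequency table with zero counts included, as shown in the question, you can expand the counter over the whole value range (a small sketch; a `Counter` returns 0 for missing keys without storing them):

freq_table = {value: c[value] for value in range(256)}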
|
Python Float Lost with Big Numbers
Question: I am working with some large numbers and have run into a problem with lost
floating-point values.
When multiplying large numbers, the fractional portion seems to go missing,
lost, becomes zero. When using the same code with smaller numbers, this does
not happen.
Trivial example:
import math

f_ProNum(31 * 61)                 ## Correct: DEBUG float: 314.833333
f_ProNum(5915587277 * 3367900313) ## Incorrect: DEBUG float: 3320518040297852928.000000

def f_ProNum(v_Num):
    """Syntax: (int); Returns: (float)"""
    print("DEBUG float: %f") %((v_Num - 2.0)/6.0) # DEBUG
    v_PDNum = (v_Num - 2.0)/6.0
    return v_PDNum
As seen in the second call, the fractional part seems to get lost, or set to
zero. Why is this happening, and how can it be addressed or fixed?
Answer: Larger float values lose precision. To simplify a bit, floats have a fixed
number of "digits" that are used, called the mantissa or significand, so large
enough numbers are essentially going to use all their digits on the part to
the left of the decimal place. For more see:
<https://en.wikipedia.org/wiki/Floating_point#Internal_representation>
To solve your problem, you might try using the `decimal` module, which allows
you to configure the precision you need:
<https://docs.python.org/2/library/decimal.html>
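A small sketch of the `decimal` approach applied to the numbers from the question (the precision of 30 digits is an arbitrary choice):

from decimal import Decimal, getcontext

getcontext().prec = 30  # enough significant digits for this product
v = Decimal(5915587277) * Decimal(3367900313)
print (v - 2) / 6       # the fractional part is preserved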
|
How do I send NLTK plots to files?
Question: I'm using NLTK to create dispersion plots and do a few other things. Trouble
is, I have to manually close the window that creating a dispersion plot opens
to get the code to continue running. How can I send the plot to a file and
keep the script moving? I assume I'll have the same problem with other plots.
I can see from the [NLTK
source](http://www.nltk.org/_modules/nltk/draw/dispersion.html) that
`dispersion_plot` already includes `pylab.show()` so maybe this isn't possible
without writing my own plotting function?
Here's my code that stops at line 2 until I close the Python window that opens
with the plot.
1 # do some analysis
2 disp_plot(days, key_terms)
3 diversity_table(days, "Day")
Here's the `disp_plot` function:
# dispersion plots for search terms
def disp_plot(texts, terms):
    concat_text = ''.join(texts.values())
    tokens = nltk.word_tokenize(concat_text)
    text = nltk.Text(tokens)
    text.dispersion_plot(terms)
Answer: I ran into the same problem and solved it by reassigning pylab.show to my own
function. You might do something like this:

import pylab

counter = 0

def filename_generator():
    global counter
    to_return = 'myfig{0}.png'.format(counter)
    counter += 1
    return to_return

def my_show():
    return pylab.savefig(filename_generator())
and change your disp_plot() to look like
def disp_plot(texts, terms):
    concat_text = ''.join(texts.values())
    tokens = nltk.word_tokenize(concat_text)
    text = nltk.Text(tokens)
    pylab_orig_show = pylab.show
    pylab.show = my_show
    text.dispersion_plot(terms)
    pylab.show = pylab_orig_show
Some would argue about the global, but this is just a quick hack to get the
library to do what you want.
|
Python: generate xlsx in memory and stream file download?
Question: For example, the following code creates the xlsx file first and then streams
it as a download, but I'm wondering if it is possible to send the xlsx data as
it is being created. Imagine that a very large xlsx file needs to be
generated: the user has to wait until it is finished before receiving the
download. What I'd like is to start the xlsx file download in the user's
browser, and then send over the data as it is being generated. It seems
trivial with a .csv file but not so with an xlsx file.
try:
    import cStringIO as StringIO
except ImportError:
    import StringIO

from django.http import HttpResponse
from xlsxwriter.workbook import Workbook

def your_view(request):
    # your view logic here

    # create a workbook in memory
    output = StringIO.StringIO()
    book = Workbook(output)
    sheet = book.add_worksheet('test')
    sheet.write(0, 0, 'Hello, world!')
    book.close()

    # construct response
    output.seek(0)
    response = HttpResponse(output.read(), mimetype="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
    response['Content-Disposition'] = "attachment; filename=test.xlsx"
    return response
Answer: Are you able to write `tempfile`s to disk while generating the XLSX?
If you are able to use `tempfile` you won't be memory bound, which is nice,
but the download will still only start when the XLSX writer is done assembling
the document.
If you can't write `tempfile`s, you'll have to follow this example
<http://xlsxwriter.readthedocs.org/en/latest/example_http_server.html> and
your code is unfortunately completely memory bound.
Streaming CSV is very easy, on the other hand. Here is code we use to stream
any iterator of rows in a CSV response:
import csv
import io

def csv_generator(data_generator):
    csvfile = io.BytesIO()
    csvwriter = csv.writer(csvfile)

    def read_and_flush():
        csvfile.seek(0)
        data = csvfile.read()
        csvfile.seek(0)
        csvfile.truncate()
        return data

    for row in data_generator:
        csvwriter.writerow(row)
        yield read_and_flush()

def csv_stream_response(response, iterator, file_name="xxxx.csv"):
    response.content_type = 'text/csv'
    response.content_disposition = 'attachment;filename="' + file_name + '"'
    response.charset = 'utf8'
    response.content_encoding = 'utf8'
    response.app_iter = csv_generator(iterator)
    return response
|
Python: add a variable to the scope of an imported function
Question: Consider the following situation:
#module.py
def test():
    foo = bar + 'foo'
    print foo

if __name__ == '__main__':
    bar = 'test'
    test()
The main file is:
#main.py
import module
bar = 'test'
module.test()
Running main.py obviously generates a `NameError`. I know that there are
several ways to fix this (for example, redefining the function in main.py),
but my question is: how is it possible to add the bar variable to the
function's scope without modifying the module.py code, and, more generally,
what is considered best practice in such a situation?
Edit: What about the same question using `from module import *`?
Answer: I do not know the best practice, but the following works for me:
import module
module.bar='test'
module.test()
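Regarding the edit: this still holds with `from module import *`, because `test()` resolves `bar` in module's own global namespace, not in the importer's. A sketch:

from module import *
import module

module.bar = 'test'  # must still be set in module's namespace
test()               # the imported test is the same function object as module.test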
|
Merging records from two '.CSV' files in python
Question: I have two '.csv' files in the below format: First File :
Roll_num Class_Name
1 ABC
2 DEF
5 PQR
27 UVW
Second File :
Roll_num Marks Grade
1 75 A
2 60 C
27 68 B
61 45 E
Now I want to add a column in the second file, appending the 'Class_Name'
column from the first file. The data in both files has duplicates in it and is
not sorted.
I have written the following code, which writes the required data from the 2
files into a new file:
import csv

path = "xyz"

file_read = open(path + "ClassName.CSV", "r")
reader_ClassName = csv.reader(file_read)

read_all_data = open(path + "Student.CSV", "r")
reader_Student = csv.reader(read_all_data)

write_all_data = open(path + "Student_Classname.CSV", "w")

for line_Student in reader_Student:
    Roll_Student = line_Student[0]
    for line_ClassName in reader_ClassName:
        Roll_ClassName = line_ClassName[0]
        ClassName = line_ClassName[1]
        if(Roll_ClassName == Roll_Student):
            string = Roll_Student + "," + ClassName + "\n"
            print string
            write_all_data.write(string)
            break
Output Expected :
Roll_num Marks Grade Class_Name
1 75 A ABC
2 60 C DEF
27 68 B UVW
61 45 E LMN
Output our code generates:
Roll_num Marks Grade Class_Name
1 75 A ABC
2 60 C DEF
There is some issue in reading from the third line onwards in the inner 'for'
loop. We have hundreds of thousands of records in both files.
Answer: I suggest avoiding the loop-in-loop by reading the whole ClassName.csv file
and putting it into a dictionary first. I suggest the idea below:

mydict = {}
for each_line in ClassName_csvfile:
    rollnum = get_roll_num()
    classname = get_class_name()
    mydict[rollnum] = classname

for each_line in Student_csv_file:
    rollnum = get_roll_num()
    mark = get_mark()
    grade = get_grade()
    Classname = ''
    if mydict.has_key(rollnum):
        Classname = mydict[rollnum]
    writetofile(rollnum, mark, grade, Classname)
Update: you can use `if rollnum in mydict:` instead of
`mydict.has_key(rollnum)` if you are using `Python 2.3+`. I am using Python
2.7 and both work.
P/s: Sorry for not commenting as it requires me 50 reputations
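For completeness, a runnable sketch of that idea using the csv module (assuming both files are comma-separated with a header row; file names are taken from the question):

import csv

# build a Roll_num -> Class_Name lookup from the first file
with open('ClassName.CSV') as f:
    reader = csv.reader(f)
    next(reader)  # skip the header row
    class_by_roll = dict((row[0], row[1]) for row in reader)

# stream the second file once, appending the class name to each record
with open('Student.CSV') as fin, open('Student_Classname.CSV', 'w') as fout:
    reader = csv.reader(fin)
    writer = csv.writer(fout)
    header = next(reader)
    writer.writerow(header + ['Class_Name'])
    for row in reader:
        writer.writerow(row + [class_by_roll.get(row[0], '')])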
|
Python parsing html for complete links urls
Question: I have to parse an html page looking for links in it. Unfortunately, the
links don't contain the full url (for instance starting with
"<http://www.example.com/aResource.html>"). So my parsing gets only the
relative URL; to get the whole url address I'm using

urlparse.urljoin()

But often this leads to some connection errors, and generally I would prefer a
direct way to extract the complete link urls. Here is my code:
import urlparse
import requests
from lxml import html
from lxml import etree

aFile = requests.get(url)
tree = html.fromstring(aFile.text)
linkList = tree.xpath('//a')

urls = []
for link in linkList:
    urls.append(str(urlparse.urljoin(url, link.get('href'))))
As you can see I'm working with lxml, but I've also tried BeautifulSoup
without success.
Answer: Since the information (URL scheme, host server, port, path - base URL) is
missing in `<a href=""...>`, it needs to be added to the relative URL.
Usually it is correct to use `urlparse.urljoin()` as you are already doing.
HTML does allow specification of a base url for the page using the `<base
href="...">` tag, which must be defined once in `<head>`. If this tag is
present, you should use its href attribute as your base URL for `urljoin()`.
Your code could be revised to this:
import urlparse
import requests
from lxml import html
from lxml import etree

aFile = requests.get(url)
tree = html.fromstring(aFile.text)
linkList = tree.xpath('//a')

urls = []
try:
    base_url = tree.xpath('//base[1]/@href')[0]
except IndexError:
    base_url = url

for link in linkList:
    urls.append(str(urlparse.urljoin(base_url, link.get('href'))))
However, if you are getting connection errors, it would appear that some of
the links are invalid. Either the base URL derived from the page's URL, or
from the `<base href="...">` tag, is correct. Any invalid URLs constructed
with this value must be due to an invalid relative URL (or an invalid `<base>`
tag).
Do you have concrete examples of the URLs used when connection errors are
experienced?
You could also look at
[`mechanize`](http://wwwsearch.sourceforge.net/mechanize/):
import mechanize
br = mechanize.Browser()
resp = br.open(url)
urls = [link.absolute_url for link in br.links()]
|
Cannot insert an image into HTML document
Question: I know this is a very basic question, but it is driving me crazy.
I am trying to insert an image ("logo_footer.png") in an HTML document that I
am working on. It is a Python web app and I am using the TurboGears 2
Framework.
I have tried several different methods, and none of them is working for me:
Method 1:
.logo-footer { border: 1px solid green;
height:140px; width:600px;
background:transparent url('{{url2(tg.url("/images/atenea/logo_footer.png"))}}');
background-position:right center !important;
background-repeat:no-repeat;
background-size:contain;
float:left;
}
Method 2:
<div class="pie">
<div class="logo-footer">
<img height="120" src="{{tg.url('/images/atenea/logo_footer.png')}}">
</div>
<div class="firma-footer">
<div class="dato-empresa">Blabla</div>
</div>
</div>
Of course, the image exists. Am I missing something really basic and obvious
here?
Why can't I even make the image appear using the `<img>` tag?
**UPDATE** :
In the end, it turned out that the problem was with the `wkhtmltopdf` library
that I'm using to convert the HTML to a PDF document. For whatever reason,
which I am still unaware of, the image appears in the HTML version of my
document but not in the PDF version generated by `wkhtmltopdf`.
Answer:
<img height="120" src="images/atenea/logo_footer.png" />
assuming the image is in the `atenea` folder inside the `images` folder,
relative to the current script.
In the CSS one, try
background:transparent url("/images/atenea/logo_footer.png");
taking the curly braces (the template expression) off.
|
In Python, why doesn't 'y = x; y += 1' also increment x?
Question: First create a function for displaying reference count (note that we have to
-1 each time to get the correct value, as the function itself INCREF-s the
argument)
>>> from sys import getrefcount as rc
>>> x=1.1
>>> rc(x)-1
1
Now make another reference to the same `PyObject`:
>>> y=x
>>> rc(x)-1
2
>>> rc(y)-1
2
>>> x is y
True
Now perform an operation on the second handle, `y`:
>>> y+=1
This should be invoking `PyNumber_InPlaceAdd` on the `PyObject` that `y`
points to.
So if this is true I would be expecting `x` to also read `2.1`
>>> x,y
(1.1, 2.1)
>>> x is y
False
>>> rc(x)-1
1
>>> rc(y)-1
1
So my question is, what is Python doing internally to provide the right
behaviour, rather than the behaviour I would expect from looking at
`PyNumber_InPlaceAdd`?
(Note: I am using `1.1`; if I used `1` the initial reference count would be
>300, because `1` must be used all over the place behind-the-scenes in
CPython, and it is clever enough to reuse objects.)
_(This also begs the question: if I have`foo = 20; bar = 19; bar += 1` does
this mean it has to look through all its objects and check whether there
already exists an object with this value, and if so reuse it? A simple test
shows that the answer is no. Which is good news. It would be horribly slow
once the program size gets big. So Python must just optimise for small
integers.)_
Answer: You don't need `getrefcount` for this, you can just use
[`id`](https://docs.python.org/2/library/functions.html#id):
>>> x = 1.1
>>> id(x)
50107888
>>> y = x
>>> id(y)
50107888 # same object
>>> y += 1
>>> id(y)
40186896 # different object
>>> id(x)
50107888 # no change there
`float` objects (along with e.g. `str` and `int`) are _immutable_ in Python,
they cannot be changed in-place. The addition operation therefore _creates a
new object_ , with the new value, and assigns it to the name `y`, effectively:
temp = y + 1
y = temp
* * *
In CPython, integers from `-5` to `256` inclusive are "interned", i.e. stored
for reuse, such that any operation with the result e.g. `1` will give a
reference to _the same object_. This saves memory compared to creating new
objects for these frequently-used values each time they're needed. You're
right that it would be a pain to search all existing objects for a match every
time a new object might be needed, so this is only done over a limited range.
Using a contiguous range also means that the "search" is really just an offset
in an array.
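You can see the interning cutoff directly in the interactive interpreter (a
CPython implementation detail, so don't rely on it):
    >>> a = 256
    >>> b = 256
    >>> a is b  # both names refer to the single interned 256 object
    True
    >>> a = 257
    >>> b = 257
    >>> a is b  # outside the interned range: two distinct objects
    False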
|
Asyncore client in thread makes the whole program crash when sending data immediately
Question: I wrote a simple program in Python, with asyncore and threading. I want to
implement an asynchronous client without blocking anything, like this:
[How to handle asyncore within a class in python, without blocking
anything?](http://stackoverflow.com/questions/14483195/how-to-handle-asyncore-
within-a-class-in-python-without-blocking-anything)
Here is my code:
import socket, threading, time, asyncore
class Client(asyncore.dispatcher):
def __init__(self, host, port):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((host, port))
mysocket = Client("",8888)
onethread = threading.Thread(target=asyncore.loop)
onethread.start()
# time.sleep(5)
mysocket.send("asfas\n")
input("End")
Now an exception will be thrown in send("asfas\n"), because I didn't open any
server.
I think the exception in the send function will call the handle_error function
and won't affect the main program, but most of the time it crashes the whole
program, and sometimes it works! And if I uncomment the time.sleep(5), it will
only crash the thread. Why does it behave like this? Could I write a program
that won't crash the whole program and doesn't use time.sleep()? Thanks! Error
message:
Traceback (most recent call last):
File "thread.py", line 13, in <module>
mysocket.send("asfas\n")
File "/usr/lib/python2.7/asyncore.py", line 374, in send
result = self.socket.send(data)
socket.error: [Errno 111] Connection refused
Answer: First of all, I would suggest not using the old asyncore module but to look
into more modern and more efficient solutions:
[gevent](http://www.gevent.org), or going along the asyncio module (Python
3.4), which has been backported somehow to Python 2.
If you want to use asyncore, then you have to know:
* be careful when using sockets created in one thread (the main thread, in your case) and dispatched by another thread (managed by "onethread", in your case): sockets cannot be shared like this between threads, as they are not thread-safe objects by themselves
* for the same reason, you can't use the global map created by default in the asyncore module; you have to create one map per thread
* when connecting to a server, the connection may not be immediate; you have to wait for it to be established (hence your "sleep 5"). When using asyncore, "handle_write" is called when the socket is ready to send data.
Here is a newer version of your code, hopefully it fixes those issues:
import socket, threading, time, asyncore
class Client(threading.Thread, asyncore.dispatcher):
def __init__(self, host, port):
threading.Thread.__init__(self)
self.daemon = True
self._thread_sockets = dict()
asyncore.dispatcher.__init__(self, map=self._thread_sockets)
self.host = host
self.port = port
self.output_buffer = []
self.start()
def run(self):
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((self.host, self.port))
asyncore.loop(map=self._thread_sockets)
def send(self, data):
self.output_buffer.append(data)
def handle_write(self):
all_data = "".join(self.output_buffer)
bytes_sent = self.socket.send(all_data)
remaining_data = all_data[bytes_sent:]
self.output_buffer = [remaining_data]
mysocket = Client("",8888)
mysocket.send("asfas\n")
If you have only 1 socket per thread (i.e. a dispatcher's map of size 1),
there is no point using asyncore at all. Just use a normal, blocking socket in
your threads; the benefit of async I/O comes with a lot of sockets.
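For completeness, the single-socket blocking version is as simple as this
sketch (host and port taken from the question; "127.0.0.1" is assumed for the
empty host):
    import socket

    # connect, send, and read the reply synchronously
    conn = socket.create_connection(("127.0.0.1", 8888))
    conn.sendall("asfas\n")
    print(conn.recv(10))
    conn.close()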
_EDIT_ : answer has been edited following comments.
|
How to speed up process of loading and reading JSON files in Python?
Question: I am running a script (in multiprocessing mode) that extract some parameters
from a bunch of JSON files but currently it is very slow. Here is the script:
from __future__ import print_function, division
import os
from glob import glob
from os import getpid
from time import time
from sys import stdout
import resource
from multiprocessing import Pool
import subprocess
try:
import simplejson as json
except ImportError:
import json
path = '/data/data//*.A.1'
print("Running with PID: %d" % getpid())
def process_file(file):
start = time()
filename =file.split('/')[-1]
print(file)
with open('/data/data/A.1/%s_DI' %filename, 'w') as w:
with open(file, 'r') as f:
for n, line in enumerate(f):
d = json.loads(line)
try:
domain = d['rrname']
ips = d['rdata']
for i in ips:
print("%s|%s" % (i, domain), file=w)
except:
print (d)
pass
if __name__ == "__main__":
files_list = glob(path)
cores = 12
print("Using %d cores" % cores)
pp = Pool(processes=cores)
pp.imap_unordered(process_file, files_list)
pp.close()
pp.join()
Does any body know how to speed this up?
Answer: First, find out where your bottlenecks are.
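A quick way to check is to profile the processing of one representative file
with the standard library's cProfile (a sketch using the names from your
script):
    import cProfile
    # shows whether json decoding dominates the cumulative runtime
    cProfile.run("process_file(files_list[0])", sort="cumulative")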
If it is on the json decoding/encoding step, try switching to
[`ultrajson`](https://pypi.python.org/pypi/ujson):
> UltraJSON is an ultra fast JSON encoder and decoder written in pure C with
> bindings for Python 2.5+ and 3.
The changes would be as simple as changing the import part:
try:
import ujson as json
except ImportError:
try:
import simplejson as json
except ImportError:
import json
I've also done a simple benchmark at [What is faster - Loading a pickled
dictionary object or Loading a JSON file - to a
dictionary?](http://stackoverflow.com/questions/18517949/what-is-faster-
loading-a-pickled-dictionary-object-or-loading-a-json-file-to), take a look.
|
Python please tell me if my maths answer is right
Question: I wish to add a feature which tells the user when he/she has answered
correctly to the random maths questions that are given.
import random
def answers():
correct_answer_P_ = ((str(difficulty_one + difficulty_one))) #P = PLUS, ADDITION +
correct_answer_M_ = ((str(difficulty_one * difficulty_one))) #M = MULTIPLY *
correct_answer_T_ = ((str(difficulty_one - difficulty_one))) #T = TAKE AWAY, MINUS -
def random_symbols():
symbols = random.choice (["+","-","*"])
return symbols
def difficulty_one():
dif_one = random.randrange (1,10,1)
return dif_one
def questions():
question = (str(difficulty_one())) + random_symbols() + (str(difficulty_one())) + " = "
return question
start = input("press start to begin!: ")
if (start == "start"):
print ("here's ten questions, good luck!")
for questions_num in range(1,11):
print ("Question ",questions_num)
input(questions())
if (random_symbols == "+"):
if (dif_one == correct_answer_P_):
print("correct!")
elif(random_symbols == "-"):
if (dif_one == correct_answer_T_):
print("correct!")
elif(random_symbols == "*"):
if (dif_one == correct_answer_M_):
print("correct!")
else:
print("incorrect!")
I tried this after getting some advice from a friend, who said that I needed
to create variables for each symbol that would be randomly inserted; the
variables should be compared with the user's answer and report that it is
correct. But it skips all the if statements and goes straight to saying it's
incorrect.
Any suggestions? Don't be harsh if I'm doing something dumb, because I've only
just started Python.
Quick note: this code has been cut down from my original to make it simple for
people to see what I'm trying to do.
Answer: There's a much easier way of implementing this. Try this instead (note that
`random` must be imported, the dictionary must be indexed through a list of
its keys, and the variable must not shadow the `operator` module):
    import random
    import operator

    questions = [(1, 2), (4, 2), (8, 1), (10, 100)]  # operands for your questions

    operators = {"+": operator.add,
                 "-": operator.sub,
                 "*": operator.mul}
    # a dictionary mapping each operator's symbol to the corresponding function
    # (operator.add(x, y) is equivalent to x + y)

    for num, operands in enumerate(questions, start=1):
        # iterate through each pair of operands, numbering questions from 1
        symbol = random.choice(list(operators))
        # pick a random symbol from the operators dictionary
        answer = operators[symbol](*operands)
        # operators[symbol] is the function, which we then call with the operands
        q_text = "{} {} {} = ?".format(operands[0], symbol, operands[1])
        print("Question {}".format(num))
        print(q_text)
        user_answer = input(">>")
        if float(user_answer) == answer:
            print("correct!")
        else:
            print("incorrect!")
|
How to gzip the result while writing in Python?
Question: I am parsing some code and writing the results into 11 text files, but the
problem is that my SSD cannot hold them at their normal sizes. That is why I
am looking for a way to write the results in a gzipped format. Any idea?
My code:
from __future__ import print_function, division
import os
from glob import glob
from os import getpid
from time import time
from sys import stdout
import resource
from multiprocessing import Pool
import subprocess
try:
import ujson as json
except ImportError:
try:
import simplejson as json
except ImportError:
import json
path = '/data/data/A.1/unzipped/*.A.1'
print("Running with PID: %d" % getpid())
def process_file(file):
start = time()
filename =file.split('/')[-1]
print(file)
with open('/data/data/A.1/%s_DI' %filename, 'w') as w:
with open(file, 'r') as f:
for n, line in enumerate(f):
d = json.loads(line)
try:
domain = d['rrname']
ips = d['rdata']
for i in ips:
print("%s|%s" % (i, domain), file=w)
except:
pass
if __name__ == "__main__":
files_list = glob(path)
cores = 12
print("Using %d cores" % cores)
pp = Pool(processes=cores)
pp.imap_unordered(process_file, files_list)
pp.close()
pp.join()
Answer:
import gzip
import logging
logging.basicConfig(level=logging.DEBUG, filename='logfile')
# ....
with gzip.GzipFile('/data/data/A.1/{0}_DI.gz'.format(filename), 'wb') as w:
logging.debug('{} Opened for writing'.format(filename))
with open(file, 'r') as f:
logging.debug('{} opened for reading'.format(file))
for n, line in enumerate(f):
d = json.loads(line)
logging.debug('{0} <=== json loaded from {1}'.format(d, line))
try:
domain = d['rrname']
logging.debug('domain: {}'.format(domain))
ips = d['rdata']
logging.debug('ips: {}'.format(ips))
for i in ips:
print("%s|%s" % (i, domain), file=w)
except:
pass
Hope that helps...
|
Python : How to call dictionnary-contained callables at a given time?
Question: I'm working with dictionary objects in Python which contain numerous keys.
Some of the associated values are callables.
Something like:
dico = {
'key1' : 1,
'key2' : 'cars',
'key3' : <bound method MyClass.MyMethod_A of <MyClass: MyClass object >>,
'deeperkeys':{
subkey1 : 'space',
subkey2 : <bound method MyClass.MyMethod_B of <MyClass: MyClass object >>,
},
}
I know I can do:
dico['key3'] = dico['key3']()
dico['deeperkeys']['subkey2'] = dico['deeperkeys']['subkey2']()
Furthermore, notice that I do not work on a single `dico` as presented above,
but on a huge list of `dico`-like dictionaries. Also, I sometimes do not know
which keys hold callables. What is the best way to detect and call all the
callables, wherever they appear?
# upd
Following Jon Kiparsky's suggestion, I added a `@property` decorator above
each method definition in the source of `MyClass`, and it did the trick by
turning the problem upside down: the dictionaries no longer hold callables,
only the values the decorated methods return. My lists of dictionaries are
generated from that class!
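For reference, a minimal sketch of that change (the method body and helper are
hypothetical):
    class MyClass(object):
        @property
        def MyMethod_A(self):
            # accessing instance.MyMethod_A now returns the value directly,
            # instead of a bound method that still has to be called
            return compute_value_somehow()  # hypothetical helper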
Answer: The literal answer to your question is the `callable` function, which returns
true if you pass it something callable and false if you don't.
For example:
def undefer(func_or_value):
if callable(func_or_value):
return func_or_value()
else:
return func_or_value
And now you can write:
dico['key3'] = undefer(dico['key3'])
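Since the real data also contains nested dictionaries like 'deeperkeys', a
recursive variant of the same idea could look like this sketch:
    def undefer_all(d):
        # walk the dict, calling callable values and recursing into sub-dicts
        result = {}
        for key, value in d.items():
            if isinstance(value, dict):
                result[key] = undefer_all(value)
            elif callable(value):
                result[key] = value()
            else:
                result[key] = value
        return result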
However, this is probably going to turn out to be a very bad idea. You might
not care about the limitations (e.g., now you can't actually store anything
callable as a value, and you might want to, say, store a type in there one
day), but you obviously care about the confusion engendered by mixing up
nullary functions and values as the same kind of thing, because that confusion
is what leads to this question in the first place.
What I think you _want_ to do is store a dict of nothing but callables, so you
always call the value. Some of these callables just return a constant value,
but that's fine: just store `lambda: 42` instead of 42.
Or you might want to make things even more explicit. It's not really
callability that you care about here; the point of those methods is that they
act like deferred values, or futures. You can write a very simple
deferred/future class that can be constructed from either a constant or a
function call. That also lets you add features like "only call this function
the first time, from then on cache the value". But, more importantly, it makes
it explicit what you're doing; there's no way to confuse a Future with
anything else, error messages from missing a deferral or undeferral will be
obvious and trivial to fix, etc.
|
How should a clock object be made in Python?
Question: I'm making a very simple clock object in Python. I want to be able to
instantiate a clock giving it a name and have it record the time it is running
for, with pause functionality. I think I've nearly got it, but the pause
functionality is giving me some trouble -- the clock continues to accumulate
time even when stopped. I think the problem lies in the way the accumulated
run time (the `_runTime` data attribute) is updated (in the method
`update()`). I would welcome assistance in getting this code working sensibly
and would welcome any other comments on making the ideas here better.
import time
import datetime
def style_datetime_object(
datetimeObject = None,
style = "YYYY-MM-DDTHHMMSS"
):
# filename safe
if style == "YYYY-MM-DDTHHMMSSZ":
return(datetimeObject.strftime('%Y-%m-%dT%H%M%SZ'))
# microseconds
elif style == "YYYY-MM-DDTHHMMSSMMMMMMZ":
return(datetimeObject.strftime('%Y-%m-%dT%H%M%S%fZ'))
# elegant
elif style == "YYYY-MM-DD HH:MM:SS UTC":
return(datetimeObject.strftime('%Y-%m-%d %H:%M:%SZ'))
# UNIX time in seconds with second fraction
elif style == "UNIX time S.SSSSSS":
return((datetimeObject - datetime.datetime.utcfromtimestamp(0)).total_seconds())
# UNIX time in seconds rounded
elif style == "UNIX time S":
return(int((datetimeObject - datetime.datetime.utcfromtimestamp(0)).total_seconds()))
# filename safe
else:
return(datetimeObject.strftime('%Y-%m-%dT%H%M%SZ'))
class Clock(object):
def __init__(
self,
name = None,
start = True
):
# options
self._name = name
self._start = start # Boolean start clock on instantiation
# internal
self._startTime = None
self._stopTime = None
self._runTime = None
self._running = False
# If a global clock list is detected, add a clock instance to it.
if "clocks" in globals():
clocks.add(self)
if self._start:
self.start()
def name(
self
):
return(self._name)
def start(
self
):
self._running = True
self._startTime = datetime.datetime.utcnow()
def stop(
self
):
self._stopTime = datetime.datetime.utcnow()
self._running = False
self.update()
def startTime(
self
):
return(style_datetime_object(datetimeObject = self._startTime))
def update(
self
):
# If the clock is running, the run time is the difference between the
# current time and the start time (added to any previously accumulated
# run time). If the clock is not running, the run time is the difference
# between the stop time and the start time (added to any previously
# accumulated run time).
if self._running:
if self._runTime:
self._runTime = self._runTime + datetime.datetime.utcnow() - self._startTime
else:
self._runTime = datetime.datetime.utcnow() - self._startTime
else:
if self._runTime:
self._runTime = self._runTime + self._stopTime - self._startTime
else:
self._runTime = self._stopTime - self._startTime
def time(
self
):
self.update()
return(self._runTime.total_seconds())
print("create clock")
a = Clock(name = "hello")
print("clock start time: {time}".format(time = a.startTime()))
print("sleep 2 seconds")
time.sleep(2)
print("clock current time (s): {time}".format(time = a.time()))
print
print("create new clock")
b = Clock(name = "world")
print("clock start time: {time}".format(time = b.startTime()))
print("sleep 2 seconds")
time.sleep(2)
print("clock current time (s): {time}".format(time = b.time()))
print("stop clock")
b.stop()
print("sleep 2 seconds")
time.sleep(2)
print("clock current time (s): {time}".format(time = b.time()))
Answer: I found your code hard to follow. Here is my (much smaller) clock anyway;
maybe you can draw some inspiration from it to implement any missing features later.
import datetime
class Clock(object):
def __init__(self):
self.reset()
def reset(self):
self.accumulator = datetime.timedelta(0)
self.started = None
def start_stop(self):
if self.started:
self.accumulator += (
datetime.datetime.utcnow() - self.started
)
self.started = None
else:
self.started = datetime.datetime.utcnow()
@property
def elapsed(self):
if self.started:
return self.accumulator + (
datetime.datetime.utcnow() - self.started
)
return self.accumulator
def __repr__(self):
return "<Clock {} ({})>".format(
self.elapsed,
'started' if self.started else 'stopped'
)
Tests:
c = Clock()
print c
print "Starting..."; c.start_stop()
for i in range(4):
time.sleep(2)
print c
print "Stopping..."; c.start_stop()
for i in range(4):
time.sleep(2)
print c
print "Starting..."; c.start_stop()
for i in range(4):
time.sleep(2)
print c
print "Resetting..."; c.reset()
print c
Result:
<Clock 0:00:00 (stopped)>
Starting...
<Clock 0:00:02.002085 (started)>
<Clock 0:00:04.004263 (started)>
<Clock 0:00:06.006483 (started)>
<Clock 0:00:08.007675 (started)>
Stopping...
<Clock 0:00:08.007756 (stopped)>
<Clock 0:00:08.007756 (stopped)>
<Clock 0:00:08.007756 (stopped)>
<Clock 0:00:08.007756 (stopped)>
Starting...
<Clock 0:00:10.009876 (started)>
<Clock 0:00:12.012034 (started)>
<Clock 0:00:14.013143 (started)>
<Clock 0:00:16.015291 (started)>
Resetting...
<Clock 0:00:00 (stopped)>
|
Python: Custom sort a list of lists
Question: I know this has been asked before, but I have not been able to find a
solution.
I'm trying to alphabetize a list of lists according to a custom alphabet.
The alphabet is a representation of the [Burmese
script](http://en.wikipedia.org/wiki/Burmese_alphabet) as used by [Sgaw
Karen](http://en.wikipedia.org/wiki/Karen_languages) in plain ASCII. The
Burmese script is an alphasyllabary—a few dozen onsets, a handful of medial
diacritics, and a few dozen rhymes that can be combined in thousands of
different ways, each of which is a single "character" representing one
syllable. The `map.txt` file has these syllables, listed in (Karen/Burmese)
alphabetical order, but converted in some unknown way into ASCII symbols, so
the first character is `u>m;.Rf` rather than `က` or `[ka̰]`. For example:
u>m;.Rf ug>m;.Rf uH>m;.Rf uX>m;.Rf uk>m;.Rf ul>m;.Rf uh>m;.Rf uJ>m;.Rf ud>m;.Rf uD>m;.Rf u->m;.Rf uj>m;.Rf us>m;.Rf uV>m;.Rf uG>m;.Rf uU>m;.Rf uS>m;.Rf u+>m;.Rf uO>m;.Rf uF>m;.Rf
c>m;.Rf cg>m;.Rf cH>m;.Rf cX>m;.Rf ck>m;.Rf cl>m;.Rf ch>m;.Rf cJ>m;.Rf cd>m;.Rf cD>m;.Rf c->m;.Rf cj>m;.Rf cs>m;.Rf cV>m;.Rf cG>m;.Rf cU>m;.Rf cS>m;.Rf c+>m;.Rf cO>m;.Rf cF>m;.Rf
Each list in the list of lists has, as its first element, a word of Sgaw Karen
converted into ASCII symbols in the same way. For example:
[['u&X>', 'n', 'yard'], ['vk.', 'n', 'yarn'], ['w>ouDxD.', 'n', 'yawn'], ['w>wuDxD.', 'n', 'yawn']]
* * *
This is what I have so far:
def alphabetize(word_list):
alphabet = ''.join([line.rstrip() for line in open('map.txt', 'rb')])
word_list = sorted(word_list, key=lambda word: [alphabet.index(c) for c in word[0]])
return word_list
I would like to alphabetize `word_list` by the first element of each list (eg.
'u&X>', 'vk.'), according to the pattern in `alphabet`.
My code's not working yet and I'm struggling to understand the sorted command
with lambda and the for loop.
Answer: First, if you're trying to look up the entire `word[0]` in `alphabet`, rather
than each character individually, you shouldn't be looping over the characters
of `word[0]`. Just use `alphabet.index(word[0])` directly.
From your comments, it sounds like you're trying to look up each
_transliterated-Burmese-script character_ in `word[0]`. That isn't possible
unless you can write an algorithm to split a word up into those characters.
Splitting it up into the ASCII bytes of the transliteration doesn't help at
all.
* * *
Second, you probably shouldn't be using `index` here. When you think you need
to use `index` or similar functions, 90% of the time, that means you're using
the wrong data structure. What you want here is a _mapping_ (presumably why
it's called `map.txt`), like a dict, keyed by words, not a list of words that
you have to keep explicitly searching. Then, looking up a word in that
dictionary is trivial. (It's _also_ a whole lot more efficient, but the fact
that it's easy to read and understand can be even more important.)
* * *
Finally, I suspect that your `map.txt` is supposed to be read as a whitespace-
separated list of transliterated characters, and what you want to find is the
index into that list for any given word.
* * *
So, putting it all together, something like this:
with open('map.txt', 'rb') as f:
mapping = {word: index for index, word in enumerate(f.read().split())}
word_list = sorted(word_list, key=lambda word: mapping[word[0]])
* * *
But, again, that's only going to work for one-syllable words, because until
you can figure out how to split a word up into the units that should be
alphabetized (in this case, the symbols), there is no way to make it work for
multi-syllable words.
And once you've written the code that does that, I'll bet it would be pretty
easy to just convert everything to proper Unicode representations of the
Burmese script. Each syllable still takes 1-4 code points in Unicode—but
that's fine, because the standard Unicode Collation Algorithm (available to
Python via, e.g., `locale.strxfrm` or the third-party `pyuca` module) already
knows how to alphabetize things properly for that script, so you don't have to
write it yourself.
Or, even better, unless this is some weird transliteration that you or your
teacher invented, there's probably already code to translate between this
format and Unicode, which means you shouldn't even have to write anything
yourself.
|
Python - efficient way to search file names based on multiple filters
Question: I have a small bit of code to list file names that match a filter string. I'm
trying to extend this to match against multiple filters. I have some working
code which takes a very straightforward loop approach but it is
sloooooooow.... basically running `os.walk` for every filter.
Given the function (shown below) is there a way to test against multiple
filters at once, rather than 1 at a time? i.e. can I past a list of filter
strings to `find_files`?
import os
import fnmatch
# stolen from http://stackoverflow.com/questions/8625991/use-python-os-walk-to-identify-a-list-of-files
def find_files(dir_look, filt):
matches = []
for root, dirnames, filenames in os.walk(dir_look):
for filename in fnmatch.filter(filenames, filt):
matches.append(os.path.join(root, filename))
return matches
#create empty list to store results
filelist=[]
#some example filters, my real data has about 5000 filters
filts = [r'*60830007*',r'*60910259*',r'*60910299*']
#find files for each filter entry
for filter in filts:
filelist.append(find_files(r'C:\some directory', filter))
**EDIT:**
I found a rather obvious way to speed things up by passing the list of filters
to the function then testing each inside the os.walk
def find_files(dir_look, filters):
matches = []
for root, dirnames, filenames in os.walk(dir_look):
for filt in filters:
for filename in fnmatch.filter(filenames, filt):
matches.append(os.path.join(root, filename))
return matches
Answer: This answer will be about algorithms and data structures, instead of python
programming.
1. If you want to test a lot of patterns against a string, you should choose a better representation. Instead of a plain character array, use a [suffix tree](https://en.wikipedia.org/wiki/Suffix_tree). (For Python implementations see [this question](https://stackoverflow.com/questions/9347078/python-library-for-generalized-suffix-trees).)
2. If some of your filters have common parts (especially if they share the same prefix), you should represent them as [trie(s)](https://en.wikipedia.org/wiki/Trie). This way you can test against more than one pattern simultaneously. This solution adds the overhead of building the tree(s), but if you use the same filters multiple times it's worth it. A simpler middle ground is sketched below.
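In pure Python, one can translate every fnmatch pattern into a regex fragment
and OR them into a single compiled pattern, so each filename is tested only
once (a sketch built on the question's `find_files`; the inline-flag placement
that `fnmatch.translate` produces is fine on the Python 2.7 of the question's
era):
    import os
    import re
    import fnmatch

    def find_files_combined(dir_look, filters):
        # fnmatch.translate turns each shell-style pattern into a regex
        combined = re.compile("|".join(fnmatch.translate(f) for f in filters))
        matches = []
        for root, dirnames, filenames in os.walk(dir_look):
            matches.extend(os.path.join(root, name)
                           for name in filenames if combined.match(name))
        return matches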
|
Get performance data from CloudStack API
Question: Please help me get data about hypervisor performance using a simple API (in
my case I used Python).
Here is a simple example that gets the list of machines:
#!/usr/bin/python
import CloudStack
api = 'http://example.com:8080/client/api'
apikey = 'API KEY'
secret = 'API SECRET'
cloudstack = CloudStack.Client(api, apikey, secret)
vms = cloudstack.listVirtualMachines()
for vm in vms:
print "%s %s %s" % (vm['id'], vm['name'], vm['state'])
How can I change this script to get, for example, the CPU usage of all
hypervisor machines? (Ideally information not only about CPU, but about HDD
utilization, memory, etc...)
Thanks very much.
Answer: You can get all the VM details, along with the related CPU, memory, etc. data,
for a particular host. Pass the host id to get the details for all VMs under
that host.
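For example (a sketch reusing the client from the question; the exact
parameter-passing convention and field names depend on your client library and
CloudStack version, so treat them as assumptions):
    # 'hostid' filters VMs to one hypervisor host; 'cpuused' and 'memory'
    # are among the usage fields returned by listVirtualMachines
    vms = cloudstack.listVirtualMachines({'hostid': 'your-host-id'})
    for vm in vms:
        print "%s cpu=%s memory=%s" % (vm['name'], vm.get('cpuused'), vm.get('memory'))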
I use response=json as param in the URL for getting back all these details in
json format for easy parsing. Look at below URL for actual input and output
params.
<https://cloudstack.apache.org/docs/api/apidocs-4.5/root_admin/listVirtualMachines.html>
hope it helps!!
|
Fastest Way To Round Number Inside List of Strings Python
Question: Given a list (can be numpy array) of addresses:
>input: ['F/O 1751 HOBART PL NW', '11TH ST NW 2301', '801 MT VERNON ST NW']
where the number doesn't always occur at the same place in the string. Is
there a faster way than first extracting the number with regex and then
inserting it back somehow, in order to round it to the nearest (let's say) 100?
>output: ['F/O 1800 HOBART PL NW', '11TH ST NW 2300', '800 MT VERNON ST NW']
Any help would be appreciated. Thank you,
EDIT:
Only numbers delimited by word boundaries (space, period, comma) would need to
be converted, so r'\b\d+\b' would work.
Answer: You could do a little [text
munging](https://docs.python.org/2/library/re.html#text-munging) with
`re.sub()`:
import re
def replace_number(n):
return str(int(round(int(n.group(0)), -2)))
data = ['F/O 1751 HOBART PL NW', '11TH ST NW 2301', '801 MT VERNON ST NW']
## I'm assuming you only want to munge numbers with 3 or more digits:
for datum in data:
print re.sub(r"(\d{3,})", replace_number, datum)
output:
F/O 1800 HOBART PL NW
11TH ST NW 2300
800 MT VERNON ST NW
**Note** \- this will give you potentially undesirable results if there is a
number like 020 in the string:
'020 MT VERNON ST NW'
becomes
'0 MT VERNON ST NW'
If you expect to find that in your data, you'll need to add some checks to the
`replace_number(n)` function
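Given the word-boundary rule from the question's EDIT, you could also run the
substitution with that pattern (same `replace_number` function; note it would
then touch 1- and 2-digit numbers too):
    print re.sub(r"\b\d+\b", replace_number, datum)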
|
How do you make a unique map key in Python?
Question: I want to make a map containing various attributes of a single item and share
it across several modules. The most obvious answer is to use a string, but
since the map will be used across several modules, I don't want maintainers to
have to know about all existing attributes. For example, if two modules want
something associated with "color", they shouldn't clobber each other by
accidentally picking the same name.
A few ideas I've thought of which don't work:
1) Strings, integer keys: As said above, this isn't easily extensible.
2) Create instances of a particular class and use id(): I thought it would work
to make a Key() class, for example, but this doesn't work in the case of modules
imported from two different places. For example, you might want to make the
"color" attribute described above as follows:
color = Key()
This doesn't work if you do
# module named foo
bar = Key()
import foo
id(foo.bar) == id(bar)
3) Look at the stack trace to see where the Key was created and use that as
part of the identifier. Definitely wouldn't want to use line number, as this
would be too brittle. Using module and name of item fails in the case of the
main module, though. For example:
in foo.py:
key = Key() # will be named foo.py-key
elsewhere, in bar.py:
import foo
foo.key # will be named /Users/person/foo.py-key
Thanks! Any advice is appreciated.
Answer: Use a string, but prefix it with the current module name. Then you won't get
any collisions.
key = __name__ + '.color'
|
Does python zlib library support uuencode?
Question: My python code is trying to decompress a uuencoded file using the zlib
library. Here is the code snippet:
self.decompress = zlib.decompressobj(wbits)
.
.
buf = self.fileobj.read(size)
.
.
uncompress = self.decompress.decompress(buf)
My current value for wbits is '-zlib.MAX_WBITS'. This throws an error:
Error -3 while decompressing: invalid literal/lengths set
I realize that the python zlib library supports:
RFC 1950 (zlib compressed format)
RFC 1951 (deflate compressed format)
RFC 1952 (gzip compressed format)
and the choice for wbits is to be:
to (de-)compress deflate format, use wbits = -zlib.MAX_WBITS
to (de-)compress zlib format, use wbits = zlib.MAX_WBITS
to (de-)compress gzip format, use wbits = zlib.MAX_WBITS | 16
So my questions are:
Where does a uuencoded file fall in this list?
Is it supported by zlib?
If yes, what should be the value for wbits?
If no, how do I proceed with this?
Thanks in advance!
Answer: uuencode is a binary-to-text encoding, not a compression format, so it does not
fall anywhere in zlib's list and no wbits value covers it; decode the
uuencoding first, then decompress the result with zlib. Here's a quick demo of
how to compress with zlib and encode with uuencode, and then reverse the
procedure.
#!/usr/bin/env python
import zlib
data = '''This is a short piece of test data
intended to test uuencoding and decoding
using the uu module, and compression and
decompression using zlib.
'''
data = data * 5
# encode
enc = zlib.compress(data, 9).encode('uu')
print enc
# decode
dec = zlib.decompress(enc.decode('uu'))
#print `dec`
print dec == data
**output**
begin 666 <data>
M>-KMCLL-A# ,1.^I8@I 5$,#(?822V C[%RV>CXY; %[19K+/,U(;ZKBN)+A
MU8[ +EP8]D&P!RA'3J+!2DP(Z[0UUF(DNB K@;B7U/Q&4?E:8#-J*P_/HMBV
;'^PNID]/]^6'^N^[RCRFZ?5Y??[P.0$_I03L
end
True
|
Python set update with iterable
Question: What I have at hand is a text file of decent size (~ 23MB). I am reading the
file line by line and extracting a few words from each line based on some
external criteria. For the sake of this example, let's say each line contains
at least half a dozen tab-separated values and I am picking up all except the
first and the last.
I want to output the set of _unique_ words obtained this way, so obviously I
want to store the extracted words in a `set`. Also, since `set.update(other)`
is significantly faster than looping through `other` and adding one word at a
time with `set.add(elem)`, I tried doing this:
all_words = set()
with open(my_tsv_file) as tsv_file:
for line in tsv_file:
wordlist = based_on_some_criteria(line) # this is a list, not a set
all_words.update(wordlist)
And this works fine. But when I replaced `all_words.update(wordlist)` with
`all_words |= wordlist`, I got the following error:
TypeError: unsupported operand type(s) for |=: 'set' and 'list'
From the documentation, I understand that `update` and `|=` are equivalent.
Further, since `|=` is supposed to accept any iterable, I also confirmed by
doing this:
import collections
isinstance(wordlist, collections.Iterable) # returns True
Why is the first approach using `set.update` working, but not the second one
using `|=`?
Answer: > From the documentation, I understand that update and |= are equivalent.
> Further, since |= is supposed to accept any iterable...
From the documentation:
> Note, the non-operator versions of the `update()`, `intersection_update()`,
> `difference_update()`, and `symmetric_difference_update()` methods will
> accept any iterable as an argument.
The documentation does not seem to agree with your understanding.
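In other words, the operator form requires both operands to be sets. If you
want to keep using it, convert the list first:
    all_words |= set(wordlist)
though that builds a temporary set each time, so `all_words.update(wordlist)`
remains the better choice here.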
|
Applying a regular expression to a text file Python 3
Question:
#returns same result i.e. only the first line as many times as 'draws'
import re
from itertools import islice

infile = open("results_from_url.txt",'r')
file =infile.read() # essential to get correct formatting
for line in islice(file, 0, draws): # allows you to limit number of draws
for line in re.split(r"Wins",file)[1].split('\n'):
mains.append(line[23:38]) # slices first five numbers from line
stars.append(line[39:44]) # slices last two numbers from line
infile.close()
I am trying to use the above code to iterate through a list of numbers to
extract the bits of interest. In this attempt to learn how to use regular
expressions in Python 3, I am using lottery results opened from the internet.
All this does is read one line and return it as many times as I instruct in
the value of 'draws'. Could someone tell me what I have done incorrectly,
please? Does re 'terminate' somehow? The strange thing is that if I copy the
file into a string and run this routine, it works. I am at a loss: is the
problem in 'reading' the file or in my use of the regular expression?
Answer: I can't tell you why your code doesn't work, because I cannot reproduce the
result you're getting. I'm also not sure what the purpose of
for line in islice(file, 0, draws):
is, because you never use the `line` variable after that, you immediately
overwrite it with
for line in re.split(r"Wins",file)[1].split('\n'):
Plus, you could have used `file.split('Wins')` instead of
`re.split(r"Wins",file)`, so you aren't really using regex at all.
* * *
Regex is a tool to find data of a certain format. Why do you use it to split
the input text, when you could use it to find the data you're looking for?
What is it you're looking for? A sequence of seven numbers, separated by
commas. Translated into regex:
(?:\d+,){7}
However, we want to group the first 5 numbers - the "mains" - and the last 2
numbers - the "stars". So we'll add two named capture groups, named "mains"
and "stars":
(?P<mains>(?:\d+,){5})(?P<stars>(?:\d+,){2})
This pattern will find all numbers you're looking for.
* * *
import re
data= open("infile.txt",'r').read()
mains= []
stars= []
pattern= r'(?P<mains>(?:\d+,){5})(?P<stars>(?:\d+,){2})'
iterator= re.finditer(pattern, data)
for count in range(int(input('Enter number of draws to examine: '))):
try:
match= next(iterator)
except StopIteration:
print('no more matches')
break
mains.append(match.group('mains'))
stars.append(match.group('stars'))
print(mains,stars)
This will print something like `['01,03,31,42,46,'] ['04,11,']`. You may want
to remove the commas and convert the numbers to ints, but in essence, this is
how you would use regex.
|
Python (Tkinter) - canvas for-loop color change
Question: I generated a grid using a for-loop in Tkinter, but want to know how I would
be able to bind an on-click function such that when I click on each
individual generated rectangle, the rectangle will change color.
from Tkinter import *
master = Tk()
def rowgen(row, col):
for i in range(row):
for j in range(col):
w.create_rectangle(25+50*i, 25+50*j, 50+50*i, 50+50*j, fill="green")
w = Canvas(master, width=225, height=225)
w.pack()
rowgen(4, 4)
master.resizable(0,0)
mainloop()
I'm thinking that I have to first iterate through another for-loop to make an
event where, if I click within certain coordinates, I'd reconfigure the color
of one of the rectangles.
Answer: By following Curly Joe's hints and making some mistakes, I got the following,
which requires only one tag_bind. You might want to try it yourself first.
from tkinter import *
master = Tk()
def box_click(event):
box = event.widget.find_closest(event.x, event.y)
print(box) # remove later
w.itemconfig(box, fill='red')
def rowgen(row, col):
for i in range(row):
for j in range(col):
w.create_rectangle(25+50*i, 25+50*j, 50+50*i, 50+50*j,
fill="green", tag='BOX')
w = Canvas(master, width=225, height=225)
w.pack()
rowgen(4, 4)
w.tag_bind('BOX', '<Button-1>', box_click)
master.resizable(0,0)
mainloop()
|
Python module error:Randint
Question:
from random import randint
This is the code I've used to import the random module. When I run the code,
it instead imports a file of mine for testing code called **random.py**. This
was leading to all sorts of errors, so I 'permanently' deleted it via the
Recycle Bin, along with all its shortcuts I could find. Yet it is still there
somewhere and it causes errors. My entire program depends on
x = randint(1, 100)
if x % 2 == 0:
b = 'c'
The game I'm making depends on randint to randomly create ships and
submarines. I am running Windows 7.
Thanks
Answer: Python creates a _bytecode cache_ for each python module you import; for
Python 2 the same name as the original file with the extension `.pyc` is used.
It is that file that is imported when you import `random`, even if the source
`.py` file was deleted.
Use:
import random
print(random.__file__)
to locate the stale `random.pyc` bytecode file and delete it.
In Python 3 these files are normally stored in a subdirectory called
`__pycache__`, where they are ignored if there is no corresponding `.py` source
file. People who want to distribute just the bytecode files (to obfuscate
their code in commercial software distributions, for example) have to
use the [`compileall` script with the `-b`
switch](https://docs.python.org/3/library/compileall.html#cmdoption-
compileall-b), something you almost certainly did not do.
|
How to clear python console (i.e. Ctrl+L command line equivalent)
Question: OS = Linux
[boris@E7440-DELL ~]$ uname -a
Linux E7440-DELL 3.17.4-200.fc20.x86_64 #1 SMP Fri Nov 21 23:26:41 UTC 2014 x86_64 x86_64 x86_64 GNU/Linux
From python console (Spyder 2.2.4, Python 2.7.5 64bits, Qt 4.8.5) it is seen
as:
>>> import os
>>> print(os.name)
posix
I'm trying to find a way to clear the Python console. Not just any solution is
suitable; it must produce exactly the same result as pressing Ctrl+L.
From other threads I have already tried several options:
>>> import os
>>> os.system('clear')
256
>>> import subprocess
>>> subprocess.call("clear", shell=True)
1
>>> print '\n'*1000
As you can see, neither `os.system('clear')` nor `subprocess.call("clear",
shell=True)` produces the desired result; they just print a return value (256
or 1 respectively). `print '\n'*1000` is so far the closest to the desired
outcome. However, there are two issues with it:
However, there are two issues with it:
1. the cursor is not at the top of the screen (as Ctrl+L does), but it stays at the bottom, so all new lines printed by my code are being scrolled upwards, which makes it impossible to read.
2. the visual experience is highly dependent on the value, so in order to make it somewhat readable I have to use `print '\n'*100000` instead
Does anyone know the right solution, the one that can really do Ctrl+L from
the command line? (yes I am using linux, and I have no interest in windows
solutions)
Answer: You can try:
os.system('tput reset')
To hide the return value, use:
variable = os.system('tput reset')
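Alternatively (not part of the original answer), you can write the ANSI escape
sequence that clears the screen and homes the cursor, which is what Ctrl+L
effectively does, without spawning a subprocess:
    import sys
    # ESC[H moves the cursor to the top-left corner, ESC[2J clears the screen
    sys.stdout.write("\x1b[H\x1b[2J")
    sys.stdout.flush()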
|
Where can I locate the library for python base functions in Linux?
Question: I am mainly trying to get examples from the library of the base functions to
aid me in my studies of Python. I am running Linux Mint 17, and I would simply
like to know the path to the base functions, so I can open them and view the
Python code they contain.
Answer: Each non-built-in module has a `__file__` attribute. It contains the full path
of the loaded file, so if it's a module written in Python you will get a
'.pyc' file; if it's a C module, a '.so'.
>>> import collections # from the std lib in pure python
>>> collections.__file__
'/usr/lib/python2.7/collections.pyc'
>>> import datetime # from the std lib as a C module
>>> datetime.__file__
'/usr/lib/python2.7/lib-dynload/datetime.so'
>>> import itertools # from the std lib but a built-in module
>>> itertools.__file__
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute '__file__'
You can also use the inspect module, which has a `.getsourcefile` function;
this works not only at module level but at function level too. **If the
function is declared in Python!**
>>> import inspect
>>> inspect.getsourcefile(collections) # Pure python
'/usr/lib/python2.7/collections.py'
>>> inspect.getsourcefile(collections.namedtuple) # Work with a function too.
'/usr/lib/python2.7/collections.py'
>>> inspect.getsourcefile(datetime) # C module so it will return just None
>>> inspect.getsourcefile(itertools) # Built-in so it raise an exception
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python2.7/inspect.py", line 444, in getsourcefile
filename = getfile(object)
File "/usr/lib/python2.7/inspect.py", line 403, in getfile
raise TypeError('{!r} is a built-in module'.format(object))
TypeError: <module 'itertools' (built-in)> is a built-in module
As you can see, if it's an external C library, `.getsourcefile` returns
nothing. And if it's a built-in module/function/class, it raises a `TypeError`
exception.
Another advantage of `.getsourcefile` over `__file__` is that if the
function/class is declared in a subfile of the module, it returns the right
file. You can even use it on the type of an unknown object and do
`inspect.getsourcefile(type(obj))`.
(It also checks that the source file exists, and returns `None` if the '.pyc'
is loaded but the '.py' doesn't exist.)
|
Define writable method in asyncore client makes sending data very slow
Question: I wrote an asynchronous client using Python asyncore, and met some problems. I
have solved those with the help of this:
[Asyncore client in thread makes the whole program crash when sending data
immediately](http://stackoverflow.com/questions/27399960/asyncore-client-in-
thread-makes-the-whole-program-crash-when-sending-data-immedi)
But now I meet some other problem.
My client program:
import asyncore, threading, socket
class Client(threading.Thread, asyncore.dispatcher):
def __init__(self, host, port):
threading.Thread.__init__(self)
self.daemon = True
self._thread_sockets = dict()
asyncore.dispatcher.__init__(self, map=self._thread_sockets)
self.host = host
self.port = port
self.output_buffer = []
self.start()
def send(self, msg):
self.output_buffer.append(msg)
def writable(self):
return len("".join(self.output_buffer)) > 0
def handle_write(self):
all_data = "".join(self.output_buffer)
bytes_sent = self.socket.send(all_data)
remaining_data = all_data[bytes_sent:]
self.output_buffer = [remaining_data]
def handle_close(self):
self.close()
def handle_error(self):
print("error")
def handle_read(self):
print(self.recv(10))
def run(self):
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((self.host, self.port))
asyncore.loop(map = self._thread_sockets)
mysocket = Client("127.0.0.1",8400)
while True:
a=str(input("input"))
mysocket.send("popo")
And my server program:
import socket
HOST="127.0.0.1"
PORT=8400
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("socket created")
s.bind((HOST, PORT))
s.listen(1)
print("listen")
conn,addr = s.accept()
print("Accepted. Receiving")
while True:
data = conn.recv(20)
print("Received: ")
print(data)
data = input("Please input reply message:\n").encode("utf-8")
conn.send(data)
print("Data sended. Receiving")
My problem is that sending data from the client to the server is very slow,
about 20 to 30 seconds! But it always sends the data successfully. And if I
comment out the writable method in the client, the sending becomes very fast.
Why does it behave like this? How can I fix it if I want to keep the writable
method? Thanks!
I start the server with Python 3 and the client with Python 2, on Ubuntu 14.04.
Answer: The `asyncore` loop calls `writable()` when it is ready to do something with
the socket. If `writable()` reports that there is something to write, then
`handle_write()` is called. The default `writable()` always returns `True`, so
in that case there is a busy loop calling `handle_write()` and `writable()`.
In the above implementation the method `writable()` is called immediately when
the client loop is started. At that moment there is nothing in the buffer, so
`writable()` reports that there is nothing to write.
The `asyncore` loop calls `select()`. Now the loop is in "standby" state. It
can be wakened only when some data is received by the socket or by timeout
event. After any of those events the loop again checks `writable()`.
The server sends nothing to the client and the client waits for a timeout. The
default `timeout` is 30 seconds, which is why up to 30 seconds pass before
anything is sent. It is possible to reduce the timeout when starting
`asyncore.loop()`:
asyncore.loop(map = self._thread_sockets, timeout = 0.5)
* * *
Another idea that may come to mind is to check in `send()` whether the buffer
is empty, and if it is, send the data immediately. However, it is a bad idea:
`send()` is called in the main thread, but the socket is managed by the
`asyncore` loop in another thread.
For the same reason it makes sense to protect usage of `output_buffer` for
concurrent access from different threads. The lock object `threading.Lock()`
can be used here:
def __init__(self, host, port):
#...
self.lock = threading.Lock()
def send(self, msg):
self.lock.acquire()
try:
self.output_buffer.append(msg)
finally:
self.lock.release()
def writable(self):
is_writable = False;
self.lock.acquire()
try:
is_writable = len("".join(self.output_buffer)) > 0
finally:
self.lock.release()
return is_writable
def handle_write(self):
self.lock.acquire()
try:
all_data = "".join(self.output_buffer)
bytes_sent = self.socket.send(all_data)
remaining_data = all_data[bytes_sent:]
self.output_buffer = [remaining_data]
finally:
self.lock.release()
There is no thread-safe mechanism to wake `asyncore` from another thread. So
the only solution is to reduce the loop timeout, although too small a timeout
increases CPU usage.
|
Google App Engine Python -> configure yaml and websockets
Question: I'm a beginning programmer with the Google API, working in Python. I have
PyCharm 3.4.1.
I'm trying to make a project (the backend) of the game Bomberman. I have to
use the content of a game already written in JavaScript. I have my project
folder, and there I have this content: ![link at the bottom][1]
1. I don't understand how exactly I should modify my yaml file to use the content of the web folder. I need handlers for the JavaScript, CSS, etc. content of web. If someone could show me an example of a JavaScript import, I think I would be able to do the same with the other handlers.
2. What's more, I've been searching for information about implementing websockets in Python using the Google API, but I just can't find anything useful, or I just don't understand the info. I would appreciate it if somebody could show me how to implement websockets, with a short explanation. Please go easy on me because it's my first approach to the Google API and I don't know much about it.
Thanks for the help, and sorry for mistakes, if I made any in the text:)
[1]<http://i.stack.imgur.com/aNJCz.png>
Answer: 1 - To handle static files in your App Engine instance, you can add something
similar to the following in your app.yaml file:
handlers:
- url: /css
static_dir: static/css
- url: /js
static_dir: static/js
Just place your files in the static/css or static/js directories in your
project. This allows you to access the css and javascript files from the /css
and /js paths on your site. An example js import in your html file would be:
<script src=“/js/your-file.js"></script>
More options for configuring your static files is in the App Engine docs at:
<https://cloud.google.com/appengine/docs/python/config/appconfig>
2 - App Engine has the Channel Python API available. You can find more info on
that at;
<https://cloud.google.com/appengine/docs/python/channel/>
However, depending on your needs, some have indicated this is too slow for
some situations. You can also create a Compute Engine instance and run Node.js
with socket.io for real-time communication between users. This is a little
more involved than the basic App Engine instance but you can see that
environment layout in this real-time game example:
<https://cloud.google.com/appengine/docs/python/channel/>
|
How to convert API timestamp into python datetime object
Question: I am getting the following string from an API call:
s = '2014-12-11T20:46:12Z'
How would I then convert this into a python object? Is there an easy way, or
should I be splitting up the string, for example:
year = s.split('-')[0]
month = s.split('-')[1]
day = s.split('-')[2]
time = s.split('T')[1]
...etc...
Answer: You can use the
[`datetime.datetime.strptime`](https://docs.python.org/3/library/datetime.html)
function:
>>> from datetime import datetime
>>> s = '2014-12-11T20:46:12Z'
>>> datetime.strptime(s, '%Y-%m-%dT%H:%M:%SZ')
datetime.datetime(2014, 12, 11, 20, 46, 12)
>>>
For a complete list of the available format codes, see [`strftime()` and
`strptime()`
Behavior](https://docs.python.org/3/library/datetime.html#strftime-and-
strptime-behavior).
|
Convert datetime obj to timezone-aware datetime
Question: I have the following date I am trying to save:
timestamp = datetime.datetime.strptime(timestamp_raw, '%Y-%m-%dT%H:%M:%SZ')
When I save it, I get the following Warning:
/Library/Python/2.7/site-packages/django/db/models/fields/__init__.py:808:
RuntimeWarning: DateTimeField received a naive datetime
(2014-12-11 21:42:58) while time zone support is active. RuntimeWarning)
How would I convert my datetime object to a tz-aware datetime object?
Answer: Django provides a [utility
function](https://docs.djangoproject.com/en/dev/ref/utils/#module-
django.utils.timezone) for this:
from django.utils.timezone import make_aware
timestamp = make_aware(timestamp)
By default this will use the [current time
zone](https://docs.djangoproject.com/en/dev/topics/i18n/timezones/#default-
time-zone-and-current-time-zone), which by default is `TIME_ZONE`. If that's
not what you want you can pass the desired timezone as an argument to
`make_aware`.
|
how to access the first result of a google search result ( prominently a video link)?
Question: [https://www.google.co.in/search?q=black+sabbath+%E2%80%93+iron+man&oq=black+sabbath+%E2%80%93+iron+man&aqs=chrome..69i57.461j0j4&sourceid=chrome&es_sm=122&ie=UTF-8](https://www.google.co.in/search?q=black+sabbath+%E2%80%93+iron+man&oq=black+sabbath+%E2%80%93+iron+man&aqs=chrome..69i57.461j0j4&sourceid=chrome&es_sm=122&ie=UTF-8)
In the link provided above, the very first result is the video link to
YouTube; I want to access that link. How can I do that in Python?
EDIT: My input will be a string that I query in the Google search box, like in
this case "black sabbath iron man"
Answer: Scraping HTML is _fragile_ \-- yes you can do it with beautifulsoup4, e.g
import bs4
soup = bs4.BeautifulSoup(html_string)
href = soup.find('h3').find('a').get('href')
print(href)
will show
**/url?q=[http://www.youtube.com/watch%3Fv%3D9LjbMVXj0F8&sa=U&ei=ESCPVPD6NcT3yQS-04C4DA&ved=0CBQQtwIwAA&usg=AFQjCNGV1u7FshGW4K_Ffu0zLzwaW7sCzw](http://www.youtube.com/watch%3Fv%3D9LjbMVXj0F8&sa=U&ei=ESCPVPD6NcT3yQS-04C4DA&ved=0CBQQtwIwAA&usg=AFQjCNGV1u7FshGW4K_Ffu0zLzwaW7sCzw)**
or the like. However, the slightest cosmetic change to Google's search results
might break your application.
Better to register your app with Google and use the provided API, as per
[Google's own docs](https://developers.google.com/youtube/v3/). The Python
client library nicely supports App Engine, see
<https://developers.google.com/youtube/v3/code_samples/python_appengine> for
example.
|
Python : which importing module is calling my function at runtime
Question: I don't know if there is a way to get the information I want, so maybe there
isn't. Anyway, here is my question:
I have a module, say "m.py", with a function, say "def f(): ..."
Now imagine some other modules are importing m.py.
In f, I would like to know which module is calling me, at runtime.
Is that possible?
Thanks for your help
Answer:
import sys,inspect,os
_logLevel = 10
def debugPrint(*args,**kwargs):
if _logLevel > kwargs.get('required_level',0):
args = list(args)
info = inspect.stack()[1]
info = tuple([os.path.basename(info[1]),info[2]])
args.insert(0,"%s %s:"%info)
sys.stdout.write(" ".join("%s" % arg for arg in args) + "\n")  # newline keeps messages apart
debugPrint("Hello world!")
is a method I like to use that might help you
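To answer the question more directly, `inspect.getmodule` can map the caller's
frame back to its module (a minimal sketch):
    import inspect

    def f():
        caller_frame = inspect.stack()[1][0]
        module = inspect.getmodule(caller_frame)
        # module is None when the caller has no module (e.g. interactive session)
        print(module.__name__ if module else "<unknown>")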
|
Determine which Radiobutton has been selected
Question: I am sorry for the silly question but there doesn't seem to be too much
documentation on this that is understandable.
So far I have this:
self.firstRadioButton = Radiobutton(self.__canvas, text="ONE", fg='white', bg=BACKGROUND_COLOR, variable=self.selectedONE, value=1)
self.secondRadioButton = Radiobutton(self.__canvas, text="TWO", fg='white', bg=BACKGROUND_COLOR, variable=self.selectedTWO, value=2)
Well, first of all, the reason it's a radio button is that the user has to
pick one, so one automatically has to be selected (I think Python already does
that).
Second of all, I am trying to get the value of whichever one the user picks.
That is my problem; I cannot for the life of me figure this out.
Answer: The key is to make sure that both radiobuttons share the same variable. Then,
to know which one is selected you simply need to get the value of the
variable.
Here is an example:
import tkinter
# function that is called when you select a certain radio button
def selected():
print(var.get())
root = tkinter.Tk()
var = tkinter.StringVar() #used to get the 'value' property of a tkinter.Radiobutton
# Note that I added a command to each radio button and a different 'value'
# When you press a radio button, its corresponding 'command' is called.
# In this case, I am linking both radio buttons to the same command: 'selected'
rb1 = tkinter.Radiobutton(text='Radio Button 1', variable=var,
value="Radio 1", command=selected)
rb1.pack()
rb2 = tkinter.Radiobutton(text='Radio Button 2', variable=var,
value="Radio 2", command=selected)
rb2.pack()
root.mainloop()
|
How to fix ImportError: No module named packages.urllib3?
Question: I'm running Python 2.7.6 on an Ubuntu machine. When I run `twill-sh` (Twill is
a browser used for testing websites) in my Terminal, I'm getting the
following:
Traceback (most recent call last):
File "dep.py", line 2, in <module>
import twill.commands
File "/usr/local/lib/python2.7/dist-packages/twill/__init__.py", line 52, in <module>
from shell import TwillCommandLoop
File "/usr/local/lib/python2.7/dist-packages/twill/shell.py", line 9, in <module>
from twill import commands, parse, __version__
File "/usr/local/lib/python2.7/dist-packages/twill/commands.py", line 75, in <module>
browser = TwillBrowser()
File "/usr/local/lib/python2.7/dist-packages/twill/browser.py", line 31, in __init__
from requests.packages.urllib3 import connectionpool as cpl
ImportError: No module named packages.urllib3
However, I can import urllib in Python console just fine. What could be the
reason?
Answer: There is a difference between the standard `urllib` and `urllib2` and the
third-party `urllib3`.
It looks like twill does not install its dependencies, so you have to do it
yourself. Twill depends on the `requests` library, which bundles and uses
`urllib3` behind the scenes. You also need the `lxml` and `cssselect` libraries.
You can install them from the terminal as follows:
`pip install requests`
`pip install lxml`
and
`pip install cssselect`
|
Fast Fourier Transform for Harmonic Analysis
Question: I'm analyzing the harmonics present in the wave motion as a function of where
along the string the pluck occurs. I hope to obtain a plot like those
exhibited on this page:
<https://softwaredevelopmentperestroika.wordpress.com/2013/12/10/fast-fourier-
transforms-with-python-the-noise-and-the-signal/>. To do this, I've written
code modelling an asymmetric triangle and implemented numpy's fft. The data
being output, however, is not what I expect, and it's peaked about a frequency
of 0 Hz. Here is my code and its output:
import numpy as np
import matplotlib.pyplot as plt
from numpy.fft import fft as npfft, fftfreq as npfftfreq
#triangular pulse
def triangular_pulse(x, xmean, sigma):
for i in x:
if x[i]<=xmean:
y[i] = x[i]*(sigma/xmean)
else :
y[i] = sigma-(x[i]-xmean)*(sigma/(200-xmean))
return y
N_masses = 200
T = 0.0669264714
mu = .03937
cSq = T/mu
c = np.sqrt(cSq)
dx = 1.0
dt = dx/c
print dt
#Initialize some arrays
x0 = np.arange(N_masses)*dx
y = np.zeros(N_masses)
vy = np.zeros(N_masses)
ay = np.zeros(N_masses)
#Set Initial conditions (pluck)
# # half-pluck
# y = 30*gaussian_pulse(x0,x0[N_masses/2],2)
# quarter-pluck
y = triangular_pulse(x0,x0[N_masses/4],1)
rhat=npfft(y)
freaq=npfftfreq(len(y),dt)
plt.plot(freaq,np.abs(rhat)/len(rhat))
plt.show()
Please let me know if you spot the source of my problem. Thanks!
# Update
Added `y = triangular_pulse(x0, x0[N_masses/40], 1)` followed by `y -= np.mean(y)`, with the result
of a broader non-zero band; the peak is still centered around 0, however.
Answer: Just subtract the mean of the signal before running the frequency analysis,
i.e. after calling `triangular_pulse`:
y-=y.mean()
and you will obtain the peak at a non-zero frequency. This is because the
signal has a mean component that is not zero, that will show up as the
component at zero frequency.
EDIT: as a comment, you can rewrite the triangular pulse function using numpy
[where](http://docs.scipy.org/doc/numpy/reference/generated/numpy.where.html)
(assuming `import numpy as np`):

    def triangular_pulse2(x, xmean, sigma):
        return np.where(x < xmean, x*sigma/xmean, sigma - (x - xmean)*(sigma/(200 - xmean)))
|
Django Query Natural Sort
Question: Let's say I have this Django model:
class Question(models.Model):
question_code = models.CharField(max_length=10)
and I have 15k questions in the database.
I want to sort it by _question_code_, which is alphanumeric. This is quite a
classical problem and has been talked about in:
* <http://blog.codinghorror.com/sorting-for-humans-natural-sort-order/>
* [Does Python have a built in function for string natural sort?](http://stackoverflow.com/questions/4836710/does-python-have-a-built-in-function-for-string-natural-sort)
I tried the code in the 2nd link (which is copied below, changed a bit), and
noticed it takes up to 3 seconds to sort the data. To make sure about the
function's performance, I wrote a test which creates a list of 100k random
alphanumeric strings. It takes only 0.76s to sort that list. So what's
happening?
This is what I think. The function needs to get the _question_code_ of each
question for comparing, thus calling this function to sort 15k values means
requesting mysql 15k separate times. And this is the reason why it takes so
long. Any idea? And any solution to natural sort for Django in general? Thanks
a lot!
def natural_sort(l, ascending, key=lambda s:s):
def get_alphanum_key_func(key):
convert = lambda text: int(text) if text.isdigit() else text
return lambda s: [convert(c) for c in re.split('([0-9]+)', key(s))]
sort_key = get_alphanum_key_func(key)
return sorted(l, key=sort_key, reverse=ascending)
Answer: As far as I'm aware there isn't a generic Django solution to this. You can
reduce your memory usage and limit your db queries by building an
id/question_code lookup structure
from natsort import natsorted
question_code_lookup = Question.objects.values('id','question_code')
ordered_question_codes = natsorted(question_code_lookup, key=lambda i: i['question_code'])
Assuming you want to page the results, you can then slice up
ordered_question_codes, perform another query to retrieve all the questions
you need, and order them according to their position in that slice:
#get the first 20 questions
ordered_question_codes = ordered_question_codes[:20]
question_ids = [q['id'] for q in ordered_question_codes]
questions = Question.objects.filter(id__in=question_ids)
#put them back into question code order
id_to_pos = dict(zip((question_ids), range(len(question_ids))))
questions = sorted(questions, key = lambda x: id_to_pos[x.id])
If the lookup structure still uses too much memory, or takes too long to sort,
then you'll have to come up with something more advanced. This certainly
wouldn't scale well to a huge dataset.
|
Pyplot colormap line by line
Question: I'm beginning with plotting in Python using the very nice pyplot. I aim to
show the evolution of two series of data along time. Instead of doing a
casual plot of data as a function of time, I'd like to have a scatter plot
(data1, data2) where the time component is shown as a color gradient.
In my two-column file, the time would be described by the line number, either
written as a 3rd column in the file or obtained through the intrinsic
capability of pyplot to get the line number on its own.
Can anyone help me in doing that?
Thanks a lot.
Nicolas
Answer: When plotting using
[`matplotlib.pyplot.scatter`](http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.scatter)
you can pass a third array via the keyword argument `c`. This array can choose
the colors that you want your scatter points to be. You then also pick an
appropriate colormap from
[`matplotlib.cm`](http://matplotlib.org/api/cm_api.html) and assign that with
the `cmap` keyword argument.
This toy example creates two datasets `data1` and `data2`. It then also
creates an array `colors`, an array of continual values equally spaced between
0 and 1, and with the same length as `data1` and `data2`. It doesn't need to
know the "line number", it just needs to know the total number of data points,
and then equally spaces the colors.
I've also added a `colorbar`. You can remove this by removing the
`plt.colorbar()` line.
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
N = 500
data1 = np.random.randn(N)
data2 = np.random.randn(N)
colors = np.linspace(0,1,N)
plt.scatter(data1, data2, c=colors, cmap=cm.Blues)
plt.colorbar()
plt.show()

|
How to make subprocess run for 60 sec
Question: I have the following Python script. What I want is for the subprocess
to run for 60 sec, then send the SIGINT signal to the subprocess and write the
output to a file. If I use sleep, the subprocess doesn't run.
#!/usr/bin/python
import os
import subprocess
PIPE = subprocess.PIPE
import signal
import time
def handler(signum, frame):
pass
signal.signal(signal.SIGALRM, handler)
signal.alarm(60)
command = "strace -c ./server"
os.chdir("/root/Desktop/")
p = subprocess.Popen(command, stdout=PIPE, stderr=PIPE)
time.sleep(60)
p.send_signal(signal.SIGINT)
signal.alarm(0)
print p.communicate()[1]
Answer: In Python 3.3 and newer, there is a simpler version of the answer (untested
code):

    import signal
    import subprocess

    with open('output', 'wb', 0) as output_file:
        p = subprocess.Popen("strace -c ./server".split(),
                             stdout=output_file, stderr=subprocess.STDOUT,
                             cwd="/root/Desktop/",
                             close_fds=True)
        try:
            p.wait(60)  # Wait for the child to finish, or for 60 seconds, whichever comes first.
        except subprocess.TimeoutExpired:
            p.send_signal(signal.SIGINT)
            p.wait()  # Wait for the process to actually exit after receiving the signal.
See also:
<https://docs.python.org/3/library/subprocess.html#subprocess.Popen.wait>
|
Scala - best API for doing work inside multiple threads
Question: In Python, I am using a library called `futures`, which allows me to do my
processing work with a pool of N worker processes, in a succinct and crystal-
clear way:
schedulerQ = []
for ... in ...:
workParam = ... # arguments for call to processingFunction(workParam)
schedulerQ.append(workParam)
with futures.ProcessPoolExecutor(max_workers=5) as executor: # 5 CPUs
for retValue in executor.map(processingFunction, schedulerQ):
print "Received result", retValue
_(The `processingFunction` is CPU bound, so there is no point in async
machinery here - this is about plain old arithmetic calculations)_
I am now looking for the closest possible way to do the same thing in Scala.
Notice that in Python, to avoid the GIL issues, I was using processes (hence
the use of `ProcessPoolExecutor` instead of `ThreadPoolExecutor`) - and the
library automagically marshals the `workParam` argument to each process
instance executing `processingFunction(workParam)` \- and it marshals the
result back to the main process, for the executor's `map` loop to consume.
Does this apply to Scala and the JVM? My processingFunction can, in principle,
be executed from threads too (there's no global state at all) - but I'd be
interested to see solutions for both multiprocessing and multithreading.
The key part of the question is whether there is anything in the world of the
JVM with as clear an API as the Python `futures` you see above... I think this
is one of the best SMP APIs I've ever seen - prepare a list with the function
arguments of all invocations, and then just two lines: create the
poolExecutor, and `map` the processing function, getting back your results as
soon as they are produced by the workers. Results start coming in as soon as
the first invocation of `processingFunction` returns and keep coming until
they are all done - at which point the for loop ends.
Answer: You have way less boilerplate than that using parallel collections in Scala.
myParameters.par.map(x => f(x))
will do the trick if you want the default number of threads (same as number of
cores).
If you insist on setting the number of workers, you can like so:
import scala.collection.parallel._
import scala.concurrent.forkjoin._
val temp = myParameters.par
temp.tasksupport = new ForkJoinTaskSupport(new ForkJoinPool(5))
temp.map(x => f(x))
The exact details of return timing are different, but you can put as much
machinery as you want into `f(x)` (i.e. both compute and do something with the
result), so this may satisfy your needs.
In general, simply having the results appear as completed is not enough; you
then need to process them, maybe fork them, collect them, etc.. If you want to
do this in general, Akka Streams (follow links from
[here](http://akka.io/docs/)) are nearing 1.0 and will facilitate the
production of complex graphs of parallel processing.
|
Install dpkt on python 3
Question: I am trying to install dpkt on python 3 and I get the following error when I
am installing:
(venv)[root@miura dpkt-1.8]# python setup.py install
Traceback (most recent call last):
File "setup.py", line 4, in <module>
import dpkt
File "/root/dpkt-1.8/dpkt/__init__.py", line 13, in <module>
import ah
ImportError: No module named 'ah'
What am I missing? I see that "ah" is the first module imported, and ah.py is
inside the dpkt directory.
I get the same thing if I try to install using pip.
Answer: I also ran into this issue today.
I gave installing under Python 2.7 a go, and it worked straight away. Dpkt
hasn't been updated to work in 3.x yet by the looks of things.
I used the 2to3 tool that comes with Python 3.4 to convert the source:
python C:\Python34\Tools\Scripts\2to3.py -w dpkt-1.8
This succeeded for all files except ieee80211.py, which I had to edit manually
to separate the imports so that it read:
from . import dpkt
import socket, struct
Once you've done that, you can run python 3.4 and use setup.py install as you
would normally.
I haven't done any extensive testing, however, so keep in mind that the source
was converted by a script...
**Update:** I am not able to read a pcap file with this method - it looks like
more careful porting is required.
|
Python How to convert 8-bit ASCII string to 16-Bit Unicode
Question: Although Python 3.x solved the problem of uppercasing and lowercasing for some
locales (for example tr_TR.utf8), the Python 2.x branch lacks this. Several
workarounds exist for this issue, like <https://github.com/emre/unicode_tr/>, but I did
not like that kind of solution.
So I am implementing a new upper/lower/capitalize/title methods for monkey-
patching unicode class with
[string.maketrans](https://docs.python.org/2/library/string.html#string.maketrans)
method.
The problem with maketrans is that the two strings must have the same
length. The nearest solution that came to my mind is "How can I convert a 1-byte char
to 2 bytes?"
* * *
**Note:** the `translate` method works only with ASCII encoding; when I pass `u'İ'`
(\u0130) as an argument, `translate` gives an ASCII encoding error.
from string import maketrans
import unicodedata
c1 = unicodedata.normalize('NFKD',u'i').encode('utf-8')
c2 = unicodedata.normalize('NFKD',u'İ').encode('utf-8')
c1,len(c1)
('\xc4\xb1', 2)
# c2,len(c2)
# ('I', 1)
'istanbul'.translate( maketrans(c1,c2))
ValueError: maketrans arguments must have same length
Answer: Unicode objects allow multicharacter translation via a dictionary instead of
two byte strings mapped through `maketrans`.
#!python2
#coding:utf8
D = {ord(u'i'):u'İ'}
print u'istanbul'.translate(D)
Output:
İstanbul
If you start with an ASCII byte string and want the result in UTF-8, simply
decode/encode around the translation:
#!python2
#coding:utf8
D = {ord(u'i'):u'İ'}
s = 'istanbul'.decode('ascii')
t = s.translate(D)
s = t.encode('utf8')
print repr(s)
Output:
'\xc4\xb0stanbul'
The following technique can do the job of `maketrans`. Note that the
dictionary keys must be Unicode ordinals, but the value can be Unicode
ordinals, Unicode strings or `None`. If `None`, the character is deleted when
translated.
#!python2
#coding:utf8
def maketrans(a,b):
return dict(zip(map(ord,a),b))
D = maketrans(u'àáâãäå',u'ÀÁÂÃÄÅ')
print u'àbácâdãeäfåg'.translate(D)
Output:
ÀbÁcÂdÃeÄfÅg
Reference:
[str.translate](https://docs.python.org/2.7/library/stdtypes.html?highlight=maketrans#str.translate)
|
Why does my Python regular expression pattern run so slowly?
Question: Please see my regular expression pattern code:
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import re
print 'Start'
str1 = 'abcdefgasdsdfswossdfasdaef'
m = re.match(r"([A-Za-z\-\s\:\.]+)+(\d+)\w+", str1) # Want to match something like 'Moto 360x'
print m # None is expected.
print 'Done'
It takes 49 seconds to finish, any problem with the pattern?
Answer: See [Runaway Regular Expressions: Catastrophic
Backtracking](http://www.regular-expressions.info/catastrophic.html).
In brief, if there are extremely many combinations a substring can be split
into the parts of the regex, the regex matcher may end up trying them all.
Constructs like `(x+)+` and `x+x+` practically guarantee this behaviour.
To detect and fix the problematic constructs, the following concept can be
used:
* At conceptual level, the presence of a problematic construct means that your regex is _ambiguous_ \- i.e. _if you disregard greedy/lazy behaviour, there's no single "correct" split of some text into the parts of the regex_ (or, equivalently, a subexpression thereof). So, to avoid/fix the problems, you need to see and eliminate all ambiguities.
* One way to do this is to
* always split the text into its _meaningful parts_ (=parts that have separate meanings for the task at hand), and
* define the parts in such a way that they cannot be confused (=using the same characteristics that you yourself would use to tell which is which if you were parsing it by hand)
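Applied to the pattern from the question, a minimal sketch of a fix, on the assumption that the outer `+` around the first group was unintentional (it is exactly the `(x+)+` construct named above):

    import re

    str1 = 'abcdefgasdsdfswossdfasdaef'
    # ([...]+)+ lets the same letters be split among repetitions of the group
    # in exponentially many ways; one '+' already means "one or more",
    # so dropping the outer quantifier removes the ambiguity.
    m = re.match(r"([A-Za-z\-\s:.]+)(\d+)\w+", str1)
    print(m)  # None, and the answer comes back immediately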
|
python script showing error in php execution
Question: I am executing a `python script` in PHP, but it is showing an error
`ValueError: invalid literal for int() with base 10: 'param2'`, whereas it
runs fine from the terminal.
Here is my code:
$String = "Hello there, how are you.";
$no_of_chars_per_word = "3";
$special_word = "is";
$stop_list = "is are";
$param1 = $String;
$param2 = $no_of_chars_per_word;
$param3 = $special_word;
$param4 = $stop_list;
$command = "/usr/bin/python2.7 /home/anupam/public_html/Text_Analysis_Python/Total_word_count.py";
$command .= " param1 param2 param3 param4 2>&1";
header('Content-Type: text/html; charset=utf-8');
echo '<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />';
echo "<style type='text/css'>
body{
background:#000;
color: #7FFF00;
font-family:'Lucida Console',sans-serif !important;
font-size: 12px;
}
</style>";
$pid = popen( $command,"r");
echo "<body><pre>";
while( !feof( $pid ) )
{
echo fread($pid, 256);
flush();
ob_flush();
echo "<script>window.scrollTo(0,99999);</script>";
usleep(100000);
}
pclose($pid);
echo "</pre><script>window.scrollTo(0,99999);</script>";
echo "<br /><br />Script finalizado<br /><br />";
?>
but it is showing an error :
Traceback (most recent call last):
File "/home/anupam/public_html/Text_Analysis_Python/Total_word_count.py", line 37, in
main()
File "/home/anupam/public_html/Text_Analysis_Python/Total_word_count.py", line 17, in main
min_char_per_word = int(minimum_characters_per_word)
ValueError: invalid literal for int() with base 10: 'param2'
Script finalizado
whereas it is running fine on my terminal:
anupam@JAZZ:~/public_html/Text_Analysis_Python$ python Total_word_count.py "hello there, how are you." "4" the "so the are"
3
here is my python code::
import sys
def main():
"""Accessing the command line arguments as per constraint"""
test_data = sys.argv[1] #string to be analysed
minimum_characters_per_word = sys.argv[2] #minimum characters per word to be analysed
special_word = sys.argv[3] #special word to be analysed
stop_list_string = sys.argv[4] #stop list to be ignored in the anaysis
"""Conversion of minimum characters per word to integer type"""
min_char_per_word = int(minimum_characters_per_word)
"""conversion of stop list as a string type to a list type"""
stop_list = [] #stop list initially empty
for words in stop_list_string.split():
stop_list.append(words) #appending the stop list as per spaces between them
total_word_count_res = total_word_count(test_data,min_char_per_word,special_word,stop_list)
print total_word_count_res
def total_word_count(test_data,min_no_chars,spec_words,stop_list):
"""Counts the total number of words in test data"""
word_count = 0
for words in test_data.split():
if len(words) >= min_no_chars and words not in(stop_list) :
word_count += 1
return word_count
if __name__ == '__main__':
main()
How could I resolve this?
Answer: I think that
$command .= " param1 param2 param3 param4 2>&1";
should be
$command .= ' "'.$param1.'" "'.$param2.'" "'.$param3.'" "'.$param4.'" 2>&1';
else it just gives "paramX" strings.
|
Msgpack on Cloudant
Question: I am trying to use msgpack with Cloudant, and I couldn't find any
documentation on it.
2 years ago, Tim Anglade [present msgpack as a wonderfull way to pack your
data instead of
JSON](https://www.youtube.com/watch?feature=player_detailpage&v=zEMfvCqVL4E#t=887
"CouchDB & Ruby: You're Doing It Wrong by Tim Anglade"). I was thinking that
now it was stable to post some msgpack data out of the box ([like the Tim fork
of couchDB seems to
do](https://github.com/timanglade/couchdb/commit/b601286dae04bdc2488a0d9bf028c58e6feb3449
"Replace JSON w/ MessagePack for non-chunked responses.")).
Here my try in python:
import requests, msgpack
requests.post("https://me.cloudant.com/mydb",
data=msgpack.packb({"type": "Test", "ok": True}),
auth=(username, password),
headers={
"Content-Type" : "application/x-msgpack"
})
I get an `Unsupported Media Type, Content-Type must be
application/json` response...
Do you have any solutions or suggestions for compressing my data? We are
pushing a huge amount of data from a mobile app, and we really need to stay
small.
Answer: So, I got a response from Cloudant: it's simply not possible to use MsgPack to
transfer my data.
> It looks like 'Content-Type:application/msgpack' is not supported by
> Cloudant and there is no development work currently being done to do so.
> Sorry for any inconvenience this may cause.
It looks like there is no way of encoding the data more efficiently than
JSON, which is sad.
|
How to use Pygtk in a C/C++ application?
Question: I would like to integrate a simple PyGtk window in a C/C++ application (the
reason being that a previously designed GUI in PyGtk is to be integrated into a
bigger GTK+ (in C) application).
I get a segmentation fault error.
Here is what I did:
=====> In python (tmp.py):
#!/usr/bin/python
#from gi.repository import Gtk, Gio
win=Gtk.Window()
win.connect("delete-event",Gtk.main_quit)
win.show_all()
Gtk.main()
======> In C/C++ (simple.cpp):
I want to just execute that little window:
#include <Python.h>
int main(int argc, char *argv[])
{
Py_SetProgramName(argv[0]); /* optional but recommended */
Py_Initialize();
FILE *fp = fopen ("tmp.py", "r+");
PyObject *mainModule = PyImport_AddModule("__main__");
PyObject * subModules = PyList_New(0);
PyList_Append(subModules, PyString_FromString("Gtk"));
PyList_Append(subModules, PyString_FromString("Gio"));
PyObject *Gtkstuff = PyImport_ImportModuleEx("gi.repository",NULL,NULL,subModules);
PyObject *GtkMod =PyObject_GetAttr(Gtkstuff,PyString_FromString("Gtk"));
PyObject *GioMod =PyObject_GetAttr(Gtkstuff,PyString_FromString("Gio"));
PyModule_AddObject(mainModule, "Gtk", GtkMod);
PyModule_AddObject(mainModule, "Gio", GioMod);
PyRun_SimpleFile(fp,"tmp.py");
Py_Finalize();
return 0;
}
The way how i compile is :
g++ $(python-config --cflags) -o simple $(python-config --ldflags) ./simple.cpp
The output :
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/usr/lib64/python2.7/site-packages/gi/importer.py", line 68, in load_module
dynamic_module._load()
File "/usr/lib64/python2.7/site-packages/gi/module.py", line 291, in _load
overrides_modules = __import__('gi.overrides', fromlist=[self._namespace])
File "/usr/lib64/python2.7/site-packages/gi/overrides/Gtk.py", line 1502, in <module>
initialized, argv = Gtk.init_check(sys.argv)
AttributeError: 'module' object has no attribute 'argv'
Traceback (most recent call last):
File "tmp.py", line 5, in <module>
win=Gtk.Window()
File "/usr/lib64/python2.7/site-packages/gi/overrides/Gtk.py", line 415, in __init__
if not initialized:
NameError: global name 'initialized' is not defined
Answer: This works perfectly, the rest of the code doesn't make sense to me, since you
can do all the initialization in the python script.
#include <Python.h>
int main(int argc, char *argv[])
{
Py_SetProgramName(argv[0]); /* optional but recommended */
Py_Initialize();
FILE *fp = fopen("tmp.py", "r");
if (fp == NULL)
return 2;
PyRun_SimpleFile(fp, "tmp.py");
Py_Finalize();
fclose(fp);
return 0;
}
|
Tango With Django: User Authentication - User being saved but not receiving confirmation
Question: I am on chapter 9 of Tango With Django:
<http://www.tangowithdjango.com/book17/chapters/login.html#demo>
Whenever I create a user, I get an error page on my browser as shown below:
IntegrityError at /rango/register/
rango_userprofile.user_id may not be NULL
Request Method: POST
Request URL: http://127.0.0.1:8000/rango/register/
Django Version: 1.7.1
Exception Type: IntegrityError
Exception Value:
rango_userprofile.user_id may not be NULL
Exception Location: C:\Python27\lib\site-packages\django\db\backends\sqlite3\base.py in execute, line 485
Python Executable: C:\Python27\python.exe
Python Version: 2.7.8
Python Path:
['C:\\Users\\Paul.Zovighian\\desktop\\tango\\tango_with_django_project',
'C:\\Python27\\lib\\site-packages\\pandas-0.14.1-py2.7-win32.egg',
'C:\\Python27\\lib\\site-packages\\pytz-2014.7-py2.7.egg',
'C:\\Windows\\system32\\python27.zip',
'C:\\Python27\\DLLs',
'C:\\Python27\\lib',
'C:\\Python27\\lib\\plat-win',
'C:\\Python27\\lib\\lib-tk',
'C:\\Python27',
'C:\\Python27\\lib\\site-packages',
'C:\\Python27\\lib\\site-packages\\win32',
'C:\\Python27\\lib\\site-packages\\win32\\lib',
'C:\\Python27\\lib\\site-packages\\Pythonwin']
Server time: Fri, 12 Dec 2014 16:50:14 +0000
I can see that there is an integrity error, but I am not sure why this is the
message I get. If I try registering that user again, it won't let me because
it says that that username already exists. So it's like, working for
registering new users, but it just doesn't acknowledge the successful
registration.
Here is my code:
models.py
from django.db import models
from django.template.defaultfilters import slugify
from django.contrib.auth.models import User
class Category(models.Model):
name = models.CharField(max_length=128, unique=True)
views = models.IntegerField(default=0)
likes = models.IntegerField(default=0)
slug = models.SlugField(unique=True)
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super(Category, self).save(*args, **kwargs)
def __unicode__(self):
return self.name
class Page(models.Model):
category = models.ForeignKey(Category)
title = models.CharField(max_length=128)
url = models.URLField()
views = models.IntegerField(default=0)
def __unicode__(self):
return self.title
class UserProfile(models.Model):
# This line is required. Links UserProfile to a User model instance.
user = models.OneToOneField(User)
# The additional attributes we wish to include.
website = models.URLField(blank=True)
picture = models.ImageField(upload_to='profile_images', blank=True)
# Override the __unicode__() method to return out something meaningful!
def __unicode__(self):
return self.user.username
views.py
from django.http import HttpResponse
from django.shortcuts import render
from rango.models import Category
from rango.models import Page
from rango.forms import CategoryForm
from rango.forms import PageForm
from rango.forms import UserForm, UserProfileForm
def index(request):
# Query the database for a list of ALL categories currently stored.
# Order the categories by no. likes in descending order.
# Retrieve the top 5 only - or all if less than 5.
# Place the list in our context_dict and dictionary which will be passed to the template engine.
category_list = Category.objects.order_by('-likes')[:5]
page_list = Page.objects.order_by('-views')[:5]
context_dict = {'categories': category_list, 'pages': page_list}
# Render the response and send it back!
return render(request, 'rango/index.html', context_dict)
def about(request):
context_dict = {'italicmessage': "I am italicised font from the context"}
return render(request, 'rango/about.html', context_dict)
def category(request, category_name_slug):
# Create a context dictionary which we can pass to the template rendering engine
context_dict = {}
try:
# Can we find a category name slug with the given name?
# If we can't, the .get() method raises a DoesNotExist exception.
# So the .get() method returns one model instance or raises an exception.
category = Category.objects.get(slug=category_name_slug)
context_dict['category_name'] = category.name
# Retrieve all the associated pages.
# Note that filter returns >= 1 model instance.
pages = Page.objects.filter(category=category)
# Adds our results list to the template context under name pages.
context_dict['pages'] = pages
# We also add the category object from the database to the context dictionary.
# We'll use this in the template to verify that the category exists.
context_dict['category'] = category
context_dict['category_name_slug'] = category_name_slug
except Category.DoesNotExist:
# We get here if we didn't find the specified category.
# Don't do anything - the template displayes the "no category message for us."
pass
# Go render the response and return it to the client.
return render(request, 'rango/category.html', context_dict)
def add_category(request):
# A HTTP POST?
if request.method == 'POST':
form = CategoryForm(request.POST)
# Have we been provided with a valid form?
if form.is_valid():
# save the new category to the database.
form.save(commit=True)
# Now call the index() view.
# The user will be shown the homepage.
return index(request)
else:
# The supplied form contained errors - just print them to the terminal.
print form.errors
else:
# If the request was not a POST, display the form to enter details.
form = CategoryForm()
# Bad form (or form details), no form supplied...
# Render the form with error messages (if any).
return render(request, 'rango/add_category.html', {'form': form})
def add_page(request, category_name_slug):
try:
cat = Category.objects.get(slug=category_name_slug)
except Category.DoesNotExist:
cat = None
if request.method == 'POST':
form = PageForm(request.POST)
if form.is_valid():
if cat:
page = form.save(commit=False)
page.category = cat
page.views = 0
page.save()
return category(request, category_name_slug)
else:
form = PageForm()
context_dict = {'form': form, 'category': cat, 'category_name_slug': category_name_slug}
return render(request, 'rango/add_page.html', context_dict)
def register(request):
# A boolean value for telling the template whether the registration was successful.
# Set to False initially. Code changes value to True when registration succeeds.
registered = False
# If it's a HTTP POST, we're interested in processing form data.
if request.method == 'POST':
# Attempt to grab information from the raw form information
# Note that we make use of both UserForm and UserProfileForm.
user_form = UserForm(data=request.POST)
profile_form = UserProfileForm(data=request.POST)
# If the two forms are valid...
if user_form.is_valid() and profile_form.is_valid():
# Save the user's form data to the database.
user = user_form.save()
# Now we hash the password with the set_password method.
# Once hashed, we can update the user object.
user.set_password(user.password)
user.save()
# Now we sort out the UserProfile instance.
# Since we need to set the user attribute ourselves, we set commit=False
# This delays saving the model until we're ready to avoid integrity problems.
profile = profile_form.save()
profile.user = user
# Did the user provide a profile picture?
# If so, we need to get it from the input form and put it in the UserProfile model.
if 'picture' in request.FILES:
profile.picture = request.FILES['picture']
# Now we save the UserProfile model instance.
profile.save()
# Update our variables to tell the template registration was successful.
registered = True
# Invalid form or forms - mistakes or something else?
# Print problems to the terminal.
# They'll also be shown to the user.
else:
print user_form.errors, profile_form.errors
# Not a HTTP POST, so we render our form using two ModuleForm instances.
# These forms will be blank, ready for user input.
else:
user_form = UserForm()
profile_form = UserProfileForm()
# Render the template depending on the context.
return render(request,'rango/register.html', {'user_form': user_form, 'profile_form': profile_form, 'registered': registered} )
forms.py
from django import forms
from django.contrib.auth.models import User
from rango.models import Page, Category, UserProfile
class CategoryForm(forms.ModelForm):
name = forms.CharField(max_length=128, help_text="Please enter the category name.")
views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
slug = forms.CharField(widget=forms.HiddenInput(), required=False)
# An inline class to provide additional information on the form.
class Meta:
# Provide an association between the ModelForm and a model
model = Category
fields = ('name',)
class PageForm(forms.ModelForm):
title = forms.CharField(max_length=128, help_text="Please enter the title of the page.")
url = forms.URLField(max_length=200, help_text="Please enter the URL of the page.")
views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
class Meta:
model = Page
exclude = ('category',)
# or specify the fields to include (.i.e. not include the category field)
#fields = ('title', 'url', 'views')
def clean(self):
cleaned_data = self.cleaned_data
url = cleaned_data.get('url')
# If url is not empty and doesn't start with 'http://', prepend 'http://'.
if url and not url.startswith('http://'):
url = 'http://' + url
cleaned_data['url'] = url
return cleaned_data
class UserForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput())
class Meta:
model = User
fields = ('username','email','password')
class UserProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ('website', 'picture')
I think I have all relevant files included here, but let me know if I could
provide any other info to clear things up!
Thanks in advance
Answer: Your comment says "Since we need to set the user attribute ourselves, we set
commit=False", but you don't actually do that. It should be:
profile = profile_form.save(commit=False)
In future, please cut your code down to the minimum that exhibits your
problem: the error was occurring on register, you didn't need to show all the
code relating to categories.
|
Parsing text with regex in Python: determine match order and mapping value to object type?
Question: I'm attempting to use an 'estate description' field to obtain information
regarding different houses as an exercise for learning python. What I'm
interested in (what a lot of sites do _not_ show) is how many rooms there are
and, more importantly, how big each room/area is. I have the total number of
rooms per property (which was scraped separately), which I'll use as a guide
to approximate how many rooms I'll allow per description.
The following examples are strings that I want to dissect; they are snippets
that occur in sentences, as exemplified in the first example.
This wonderful apartment has a size of 250m2...which consists of a \
Light living room (ca. 5,81 x 5,75 m)... the view from building is xxx and leads into the
...Lovely kitchen with a lovely fridge (300x575)...
three bedrooms (5m2, 12m2 and 15m2)
two bedrooms (3,55 x 3,40 and 3,54 x 2,27 m)
a nice balcony (15, 15m2)
beautiful outdoor space (20m2)
The issue is as follows: getting most room sizes is not that difficult
utilizing the following regex:
(\d{1,4}.{0,2}m[ .]?[2|xb2])| # Finds 16m2
( \d{1,4}[.,]?\d{0,3} # Finds 3.90 x 3,00 m
[ ]?[x]?[ ]\d{1,4}[.,]?\d{0,3}[ ?]m)| #
(\d{1,4}[ ]?x[ ]?\d{1,4}) # Finds 640x390
The issue I'm having is twofold:
* How do I match them in order? I.e.: first find option A, then find option B? Observing the current behavior, I think regex prefers shorter strings over longer strings and is not matching them from left to right. I could maybe do a regex, replace matches with nothing, and then move on in a loop?
* Second: I have a hard time trying to envision how I'd be able to go from a list of matches with these room/area sizes to mapping them to the kind of area they describe (if possible). For example: 250m2 found => Look for a list of 'area types' (["bedroom", "kitchen", "apartment") in the previous ~ 20 characters or so in the past x words (going backwards), or perhaps via another regex looking back?
I'm not expecting a 100% solution, I'm just trying to grasp how I'd go about
parsing such text and trying to obtain interesting info from blobs of text.
Any help is greatly appreciated.
Answer: First part of your question: If you intend to match them in order, you could
strictly sequence the regexes into something like a list.
regexes = [re.compile(r'expression 1'), re.compile(r'expression 2'), ...]
for regex in regexes:
match = regex.search(input_text)
if match: break
if not match:
print("We didn't find anything!")
Second part of your question: If you anticipate those blobs coming in similar
structure to the samples you've posted, perhaps isolate the portion of the
input string up to the point where a digit first appears and analyze just that
portion.
fuzzy_regex = re.compile(r'^(\D*)(.*)$')
(before_numbers, part_with_numbers) = fuzzy_regex.search(input_string).groups()
# Analyze before_numbers to see what kind of room, etc.
# Analyze part_with_numbers to see square footage, etc.
A more concrete example:
>>> import re
>>> def dissect(text):
... return re.search(r'^(\D*)(.*)$', text).groups()
...
>>> dissect('The entire apartment is 250m2')
('The entire apartment is ', '250m2')
>>> dissect('three bedrooms (5m2, 12m2 and 15m2)')
('three bedrooms (', '5m2, 12m2 and 15m2)')
|
How to flatten a list of tuples into a pythonic list
Question: Given the following list of tuples:
INPUT = [(1,2),(1,),(1,2,3)]
How would I flatten it into a list?
OUTPUT ==> [1,2,1,1,2,3]
Is there a one-liner to do the above?
Similar: [Flatten list of Tuples in
Python](http://stackoverflow.com/questions/15994124/flatten-list-of-tuples-in-
python)
Answer: You could use a [list
comprehension](https://docs.python.org/3/tutorial/datastructures.html#list-
comprehensions):
>>> INPUT = [(1,2),(1,),(1,2,3)]
>>> [y for x in INPUT for y in x]
[1, 2, 1, 1, 2, 3]
>>>
[`itertools.chain.from_iterable`](https://docs.python.org/3/library/itertools.html#itertools.chain.from_iterable)
is also used a lot in cases like this:
>>> from itertools import chain
>>> INPUT = [(1,2),(1,),(1,2,3)]
>>> list(chain.from_iterable(INPUT))
[1, 2, 1, 1, 2, 3]
>>>
That's not exactly a one-liner though.
|
how can I save the output of a search for files matching *.txt to a variable?
Question: I'm fairly new to Python. I'd like to save the text that is printed by this
script as a variable. (The variable is meant to be written to a file later, if
that matters.) How can I do that?
import fnmatch
import os
for file in os.listdir("/Users/x/y"):
if fnmatch.fnmatch(file, '*.txt'):
print(file)
Answer: You can store it in a list:
import fnmatch
import os
matches = []
for file in os.listdir("/Users/x/y"):
if fnmatch.fnmatch(file, '*.txt'):
matches.append(file)
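If it helps, the same list can also be built in one line, either with a list comprehension or with the standard `glob` module (note that `glob.glob` returns paths that include the directory part, not bare file names):

    import glob
    import fnmatch
    import os

    matches = [f for f in os.listdir("/Users/x/y") if fnmatch.fnmatch(f, '*.txt')]
    # or:
    paths = glob.glob("/Users/x/y/*.txt")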
|
get integer from textfile and write to excel with python
Question: I already get the values from the text file and write them to the Excel file. But
somehow, in the Excel cell the integer is written as a string, so there's a green
triangle in the cell, like this: 
I want it printed like this:

and this is the code
from itertools import chain
import glob ,csv, sys, os
sys.path.insert(0,'D:/apera/Python27/xlwt-0.7.5')
import xlwt
openMesureFile = 'D:/apera/Workspace/Python scripting test 2/sounding0.txt'
savePlace = 'D:/apera/Workspace/Sounding/sounding{0:03d}.txt'
openSoundingFile = 'D:/apera/Workspace/Sounding/*.txt'
with open(openMesureFile, 'rb') as inf:
header = next(inf)
for index, line in enumerate(inf,start=0):
with open(savePlace.format(index) ,'w') as outf:
outf.write('Filename:;%s\n\n' %outf.name)
outf.write(header)
for line in chain([line], inf):
if 'Sounding :;Sondage n°' in line:
header = line
break
outf.write(line)
for filename in glob.glob(openSoundingFile):
wb = xlwt.Workbook(encoding="latin1")
sheet = wb.add_sheet('INPUT')
newName = filename
spamReader = csv.reader(open(filename, 'rb'), delimiter=';',quotechar='"')
for rowx, row in enumerate(spamReader):
for colx, value in enumerate(row):
sheet.write(rowx, colx, value)
sheet.col(0).width = 5555
sheet.col(1).width = 11110
sheet.col(2).width = 5555
sheet.col(3).width = 3333
wb.save(newName[:-4] + ".xls")
print "success"
Answer: From the
[docs](https://secure.simplistix.co.uk/svn/xlwt/trunk/xlwt/doc/xlwt.html?p=4966)
> write(r, c, label="", style=Style.default_style) [#]
> label The data value to be written. An int, long, or decimal.Decimal
> instance is converted to float.
Thus a `str` input will be stored as a string in Excel, so you have to
cast it explicitly.
In your program this can be done from changing a single line:
from
sheet.write(rowx, colx, value)
to
sheet.write(rowx, colx, float(value))
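One caveat: the files written earlier also contain header text such as `Filename:`, and `float()` raises `ValueError` on those cells. A hedged refinement is to fall back to the string when the conversion fails:

    try:
        sheet.write(rowx, colx, float(value))
    except ValueError:
        sheet.write(rowx, colx, value)  # keep non-numeric cells as text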
|
Python regex alternative for join
Question: Suppose I have a string `string = 'abcdefghi'` and I want the output as `'a-b-
c-d-e-f-g-h-i'` I can easily use `'-'.join(string)` and get the required
output. But what if I want to do the same using regex? How would I do the same
using regex?
I am asking because I'm learning to use regex and would like to know how to
think in it.
Answer: A solution using lookarounds would be:
>>> import re
>>> str="abcdefghi"
>>> re.sub(r'(?<=\w)(?=\w)', '-', str)
'a-b-c-d-e-f-g-h-i'
* `(?<=\w)` asserts that the position is preceded by a letter
* `(?=\w)` asserts that the position is followed by a letter
**OR**
>>> re.sub(r'(?<=.)(?=.)', '-', str)
'a-b-c-d-e-f-g-h-i'
|
Python And Random Forest Algorithm
Question: I'm trying to use Python's Random Forest ML (machine learning) algorithm with
a *.csv file, and this is the information inside that *.csv file:
DateTime;Status;Energy
28-02-2014 19:30:00;True;10,1
28-02-2011 06:15:00;False;15,6;
28-02-2011 06:30:00;False;15,2;
28-02-2011 06:45:00;False;15,6;
......
Which packages or libraries (random forest models) do I need to use to analyze
that information?
My code:
from sklearn.ensemble import RandomForestClassifier
from numpy import genfromtxt, savetxt
def main():
dataset = genfromtxt(open("C:\\Users\\PVanDro\\Desktop\\Ddata\\Building0.csv"), delimiter=';', dtype='f8')[1:]
target = [x[0] for x in dataset]
train = [x[1:] for x in dataset]
rf = RandomForestClassifier(n_estimators=100)
rf.fit(train, target)
savetxt("C:\\Users\\PVanDro\\Desktop\\Ddata\\Building0_0.csv", delimiter=';', fmt='%f')
if __name__=='__main__':
main()
But I had errors:
File "C:/Users/PVanDro/Desktop/Folder for test/RandomForestExamples1/MainFile.py", line 17, in main
rf.fit(train, target)
File "C:\Python27\lib\site-packages\sklearn\ensemble\forest.py", line 224, in fit
X, = check_arrays(X, dtype=DTYPE, sparse_format="dense")
File "C:\Python27\lib\site-packages\sklearn\utils\validation.py", line 283, in check_arrays
_assert_all_finite(array)
File "C:\Python27\lib\site-packages\sklearn\utils\validation.py", line 43, in _assert_all_finite
" or a value too large for %r." % X.dtype)
ValueError: Input contains NaN, infinity or a value too large for dtype('float32').
How can I fix those errors?
Answer: This is a [great tutorial](https://www.kaggle.com/c/digit-
recognizer/forums/t/2299/getting-started-python-sample-code-random-forest)
that explains what you are looking for. Here is some sample code to try out.
from sklearn.ensemble import RandomForestClassifier
from numpy import genfromtxt, savetxt
def main():
#create the training & test sets, skipping the header row with [1:]
dataset = genfromtxt(open('Data/train.csv','r'), delimiter=',', dtype='f8')[1:]
target = [x[0] for x in dataset]
train = [x[1:] for x in dataset]
test = genfromtxt(open('Data/test.csv','r'), delimiter=',', dtype='f8')[1:]
#create and train the random forest
#multi-core CPUs can use: rf = RandomForestClassifier(n_estimators=100, n_jobs=2)
rf = RandomForestClassifier(n_estimators=100)
rf.fit(train, target)
savetxt('Data/submission2.csv', rf.predict(test), delimiter=',', fmt='%f')
if __name__=="__main__":
main()
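As for the `ValueError` in your traceback: judging from the sample rows, the file has a date column, a boolean column, and decimal commas (`10,1`), all of which `genfromtxt` turns into `nan` when forced to `dtype='f8'`. A minimal sketch of one way to load it, based only on the three columns shown (an assumption; rows with a trailing `;` may also need cleanup):

    import numpy as np

    dataset = np.genfromtxt('Building0.csv', delimiter=';', skip_header=1,
                            usecols=(1, 2),  # skip the DateTime column
                            converters={1: lambda s: 1.0 if s.strip() == 'True' else 0.0,
                                        2: lambda s: float(s.replace(',', '.'))})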
Once you have created your new, say predictive data set, you can use a
multitude of libraries to visualize that data with graphics. Here are a few:
1. [Bokeh](http://bokeh.pydata.org/) \- A Python-based visualization library for web-based representations
2. [D3](http://d3js.org/) \- Another web-based JavaScript library for visualizing data. [Here](http://bl.ocks.org/mbostock/3887235) is one example you could use with CSV.
3. [Plotly](https://plot.ly/python/) \- A Python-based visualization library
There are more, but you can ask Google for the rest. ;)
|
How to start a python program recursive with os.execv?
Question: I have the following lines:
#!/root/p34/bin/python
import os
import sys
for i in range(10):
print(i)
currentFile = os.path.abspath(__file__)
print(currentFile)
os.execv(currentFile, sys.argv)
When I try to run `./restart.py` from the console (Ubuntu 14.04) I get:
': [Errno 2] No such file or directory'
When I run `/root/p34/bin/python restart.py` I get a Python error:
Traceback (most recent call last):
File "restart.py", line 10, in <module>
os.execv(currentFile, sys.argv)
FileNotFoundError: [Errno 2] No such file or directory
Can anyone help me with this problem?
Answer: `os.execv` does not look for shebang lines; that is a shell function instead.
Use
[`sys.executable`](https://docs.python.org/2/library/sys.html#sys.executable)
to get the path to the current Python binary:
os.execv(sys.executable, [sys.executable] + sys.argv)
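Putting it together, a minimal sketch of the corrected script (note that, as written, it re-executes itself forever; real code would want a stop condition):

    #!/root/p34/bin/python
    import os
    import sys

    for i in range(10):
        print(i)

    # replace this process with a fresh interpreter running the same script
    os.execv(sys.executable, [sys.executable] + sys.argv)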
|
Please help me in debugging this python code
Question: This is a program that finds anagrams for words longer than 15
letters. There's no syntax error, but the output is not as expected. I would be
extremely grateful if you could point out the logical error in the program.
I downloaded the word list from <http://thinkpython.com/code/words.txt>.
PS: I am new to Python. If there's a better approach, please tell me...
def anagrams():
fin1=open('words.txt')
val=-1
fin2=open('dic.txt')
for i in range(100000):
fin2.seek(0)
word1=fin1.readline()
if len(word1)>18:
for j in range(100000):
word2=fin2.readline()
flag=1
if len(word2)==len(word1)and word1!=word2:
for k in range(len(word1)-1):
if word1.find(word2[k])==-1:
flag=1
break
else:
flag=0
if flag==0:
print word2,'is anagram of ',word1,'\n'
Answer: Not related to the code you've posted, but you can do this using simple
[`Counter`](https://docs.python.org/2/library/collections.html#counter-
objects) objects, which will maintain the Count of all the characters for you.
>>> from collections import Counter
>>> def is_anagram(string1, string2):
... return Counter(string1) == Counter(string2)
...
>>> is_anagram("anagram anagram", "gramana anagram")
True
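For completeness, a minimal sketch of a full solution built on the same idea (assuming one word per line in words.txt): two words are anagrams exactly when their sorted letters match, so grouping by that key finds every anagram family in a single pass.

    groups = {}
    with open('words.txt') as fin:
        for line in fin:
            word = line.strip()
            if len(word) > 15:
                # anagrams share the same letters, hence the same sorted key
                groups.setdefault(''.join(sorted(word)), []).append(word)

    for family in groups.values():
        if len(family) > 1:
            print(family)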
|
Django Rest Framework 3.0 to_representation not implemented
Question: I'm upgrading from Django Rest Framework 2.4 to 3.0.1 using Django 1.7.1 and
Python 2.7 and can't get past the following error:
File "/Users/bjacobel/.virtualenvs/hey/lib/python2.7/site-packages/rest_framework/fields.py", line 375, in to_representation
raise NotImplementedError('to_representation() must be implemented.')
The code I'm using worked just fine under 2.4 and I'm struggling to find any
documentation on what changed in the DRF classes I'm using. I commented out
everything but one of my endpoints (the one that provides CRUD for
`django.contrib.auth.models.User`) and I still get the error.
_serializers.py_ :
from django.contrib.auth.models import User
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'first_name', 'last_name', 'email', 'username')
_views.py_ :
from django.contrib.auth.models import User
from hey.apps.api import serializers
from rest_framework import viewsets, permissions, filters
class User(viewsets.ModelViewSet):
queryset = User.objects.all()
serializer_class = serializers.UserSerializer
permission_classes = (permissions.IsAuthenticated,)
filter_backends = (filters.OrderingFilter,)
_urls.py_ :
from django.conf.urls import patterns, url, include
from hey.apps.api import views
from rest_framework.routers import SimpleRouter
router = SimpleRouter()
router.register(r'user', views.User)
urlpatterns = patterns('',
url(r'^', include(router.urls)),
)
_pagination.py_
from rest_framework import pagination
from rest_framework import serializers
class LinksSerializer(serializers.Serializer):
next = pagination.NextPageField(source='*')
prev = pagination.PreviousPageField(source='*')
class CustomPaginationSerializer(pagination.BasePaginationSerializer):
links = LinksSerializer(source='*') # Takes the page object as the source
total_results = serializers.Field(source='paginator.count')
results_field = 'objects'
_settings.py_
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_SERIALIZER_CLASS': 'hey.apps.api.pagination.CustomPaginationSerializer',
'PAGINATE_BY': 20, # Default to 20
'PAGINATE_BY_PARAM': 'limit', # Allow client to override, using `?limit=xxx`.
'MAX_PAGINATE_BY': 100, # Maximum limit allowed when using `?limit=xxx`.
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
)
}
Thank you.
Answer: The issue is in your pagination serializer, as you are using
`serializers.Field`, which is now `serializers.ReadOnlyField` in Django REST
Framework 3.0. This is a subtle change that was made, though it is called out
in the release announcement, and it's most noticeable for those who were
overriding pagination serializers.
The [updated default pagination
serializer](https://github.com/tomchristie/django-rest-
framework/blob/dd712a1c2620b5dc9ad8eef5ff78ef232feb12e8/rest_framework/pagination.py#L81)
uses `ReadOnlyField` for the `count` field. You should be able to fix your
serializer by just swapping out the field.
class CustomPaginationSerializer(pagination.BasePaginationSerializer):
links = LinksSerializer(source='*') # Takes the page object as the source
total_results = serializers.ReadOnlyField(source='paginator.count')
results_field = 'objects'
|
My program crashes(python.exe has stopped working)
Question: So I've made a project for school, and it won't run and I have no idea why.
Every time I press the "start" button I made, the whole program freezes and
stops working, and seeing as I've made a similar program earlier (this one is
just much cleaner), I get really confused. Here is the code:
from tkinter import *
root=Tk()
root.minsize(width=3, height=100)
root.title("Counter")
list=[]
list.append(0)
def counter():
t = 10
c = 0
for row in list:
if (t < 60) or (c < 5):
l=Label(root, text=t).grid(row=1, column=c)
t=10+10
c=0+1
list.append(t)
print("") #This is just for debugging purpose
for row in list:
if (t < 110) or (c < 10):
c=0
ll=Label(root, text=t).grid(row=2, column=c)
c=0+1
t+10
list.append(t)
return
label=Label(root, text="...").grid(row=0, column=0)
b=Button(root, text="Start", command=counter).grid(row=0, column=1)
label=Label(root, text="...").grid(row=0, column=2)
root.mainloop()
Thanks for all the tips, figured it out due to you! :D
Answer: The problem is that you're modifying the list you're iterating over: the loop
body appends an element to the list on every pass, so the iteration never ends.
Besides that, `list` isn't a good name, as it shadows the built-in one. Additionally,
`t` is always assigned `20` (`t=10+10`) and `c` is always assigned `1` (`c=0+1`)
instead of being incremented, so the values never progress the way you expect.
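A minimal sketch of the loop rewritten without mutating the list, keeping what appears to be the intent (two rows of five labels counting up in steps of ten; this is a guess at the desired output):

    def counter():
        t = 10
        for c in range(5):
            Label(root, text=t).grid(row=1, column=c)
            t += 10
        for c in range(5):
            Label(root, text=t).grid(row=2, column=c)
            t += 10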
|
How can I get the proper capitalization for a path?
Question: Let's say I have a class which represents a directory (simplified example of
course):
import os
class Dir:
def __init__(self, path):
self.path = os.path.normcase(path)
To make things easier to implement internally, I am calling
[`os.path.normcase`](https://docs.python.org/3/library/os.path.html#os.path.normcase)
on the `path` argument before I save it into an attribute. This works great,
but it lowercases the path:
>>> import os
>>> os.path.normcase(r'C:\Python34\Lib')
'c:\\python34\\lib'
>>>
I would like a way to turn the path back into its properly capitalized form of
`C:\Python34\Lib`. I plan to do this inside the `__repr__` method so that I
can get nice outputs such as:
>>> my_dir
Dir(r'C:\Python34\Lib')
>>>
when I am in the interactive interpreter. Is there anything like this in the
standard library?
* * *
**Note:** I am not referring to the string that the user supplied as the
`path` argument. If a user does:
my_dir = Dir('c:\PYTHON34\lib')
I still want `Dir('C:\Python34\Lib')` to be printed in the interpreter because
that is the proper capitalization. Basically, I want the outputed paths to be
the same as they are in the file explorer.
Answer: **Update:**
For those using the newer versions of Python, the new [`pathlib`
module](https://docs.python.org/3.4/library/pathlib.html) possesses this
functionality in the form of
[`pathlib.Path.resolve`](https://docs.python.org/3.4/library/pathlib.html#pathlib.Path.resolve):
>>> from pathlib import Path
>>> Path(r'c:\python34\lib').resolve()
WindowsPath('C:/Python34/Lib')
>>> str(Path(r'c:\python34\lib').resolve())
'C:\\Python34\\Lib'
>>>
So, you could store the user-supplied path as a `Path` object:
from pathlib import Path
class Dir:
def __init__(self, path):
self.path = Path(path)
and then implement the `__repr__` method like so:
def __repr__(self):
return "Dir('{}')".format(self.path.resolve())
As an added bonus, we no longer need the `os.path.normcase` function since
`Path` objects support case-insensitive comparisons directly.
One downside to `pathlib` though is that it is only available in Python 3.4
(the currently newest version). So, those using earlier versions will need to
either get a backport to their version or use the `os.path._getfinalpathname`
function as demonstrated below.
* * *
While I was digging through the standard library, I came across an
undocumented function in the `os.path` module named `_getfinalpathname`:
>>> import os
>>> os.path._getfinalpathname(r'c:\python34\lib')
'\\\\?\\C:\\Python34\\Lib'
>>>
Using
[`str.lstrip`](https://docs.python.org/3/library/stdtypes.html#str.lstrip), I
can get the output I need:
>>> os.path._getfinalpathname(r'c:\python34\lib').lstrip(r'\?')
'C:\\Python34\\Lib'
>>>
The only downside to this approach is that the function is undocumented and
somewhat hidden. But it suits my needs for now (of course, I'd love to hear a
better approach if you know of one :)
|
Python 2.2.3 HTTP Basic Authentication Implementation
Question: I am trying to implement HTTP Basic Authentication in Python 2.2.3. This
is the code:
import urllib2
proxyUserName1='<proxyusername>'
proxyPassword1='<proxypassword>'
realmName1='<realm>'
proxyUri1='<uri>'
passman=urllib2.HTTPPasswordMgr()
passman.add_password(realm=realmName1, uri=proxyUri1, user=proxyUserName1, passwd=proxyPassword1)
auth_handler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(auth_handler)
urllib2.install_opener(opener)
# Setting up the request & request parameters
login_url_request = urllib2.Request('<URL To be Accessed>')
# Getting the Response & reading it.
try:
url_socket_connection = urllib2.urlopen(login_url_request)
except urllib2.URLError, urlerror:
print ("URL Error Occured:")
print (urlerror.code)
print (urlerror.headers)
except urllib2.HTTPError, httperror:
print ("HTTP Error Occured:")
print (httperror.code)
print (httperror.headers)
else:
login_api_response = str(url_socket_connection.read())
print (login_api_response)
I always get URL Error 401. This code works perfectly in Python 3.4.
Unfortunately, I need to get this running in Python 2.2.3. Can someone please
tell me where I am going wrong?
Answer: It worked after changing the code:
import urllib2
import base64
proxyUserName1='<proxyusername>'
proxyPassword1='<proxypassword>'
realmName1='<realm>'
proxyUri1='<uri>'
base64encodedstring = base64.encodestring('%s:%s' % (proxyUserName1, proxyPassword1)).replace('\n', '')
passman=urllib2.HTTPPasswordMgr()
passman.add_password(realm=realmName1, uri=proxyUri1, user=proxyUserName1, passwd=proxyPassword1)
auth_handler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(auth_handler)
urllib2.install_opener(opener)
# Setting up the request & request parameters
login_url_request = urllib2.Request('<URL To be Accessed>')
login_url_request.add_header('Authorization', 'Basic %s' % base64encodedstring)
# Getting the Response & reading it.
try:
url_socket_connection = urllib2.urlopen(login_url_request)
except urllib2.HTTPError, httperror:
    # HTTPError is a subclass of URLError, so it must be caught first
    print ("HTTP Error Occurred:")
    print (httperror.code)
    print (httperror.headers)
except urllib2.URLError, urlerror:
    print ("URL Error Occurred:")
    print (urlerror.reason)
else:
login_api_response = str(url_socket_connection.read())
print (login_api_response)
|
python socket, how to receive the whole message when the buffer is not big enough?
Question:
# addition_server.py
import socket
buf_size = 4
host = ''
port = 8000
server_addr = (host, port)
def get_msg(soc):
msg = ''
while True:
temp = soc.recv(buf_size)
if not temp:
break
msg += temp
return msg
if __name__ == '__main__':
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# socket.error: [Errno 98] Address already in use
soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
soc.bind(server_addr)
soc.listen(0)
runnnig = True
while runnnig:
client_soc, client_addr = soc.accept()
print client_addr
# socket.error: [Errno 104] Connection reset by peer
message = client_soc.recv(buf_size)
#message = get_msg(client_soc)
if message == 'q':
runnnig = False
numbers = message.split(' ')
numbers = filter(None, numbers)
try:
numbers = map(int, numbers)
s = sum(numbers)
numbers = map(str, numbers)
answer = ' + '.join(numbers)
answer = '%s = %s' % (answer, s)
except Exception as e:
print e
answer = 'error'
client_soc.sendall(answer)
client_soc.close()
soc.close()
* * *
# addition_client.py
import socket
from addition_server import get_msg
from addition_server import server_addr
buf_size = 1
runnnig = True
while runnnig:
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
soc.bind(('', 6060)) # without this, random port will be assigned
soc.connect(server_addr)
msg = raw_input('> ')
if not msg:
soc.close()
break
else:
if msg == 'q':
runnnig = False
soc.sendall(msg)
#reply = soc.recv(buf_size)
# socket.error: [Errno 104] Connection reset by peer
reply = get_msg(soc)
print reply
soc.close()
* * *
~/nuts/git/socket_learn/pairs$ python addition_client.py
> 1 2
1 + 2 = 3
> 1 2 3
Traceback (most recent call last):
File "addition_client.py", line 23, in <module>
reply = get_msg(soc)
File "/home/phy/nuts/git/socket_learn/pairs/addition_server.py", line 14, in get_msg
temp = soc.recv(buf_size)
socket.error: [Errno 104] Connection reset by peer
* * *
The buffer size in the server is intentionally small, so that you can see the
above error. But the `get_msg(client_soc)` method does not work in the server,
and I don't know why.
Answer: The socket stream protocol implements a **stream** and there are no implicit
message boundaries.
There is no way for the reader to know if a message is complete or not except
if this is specified in the message content itself. No extra boundaries are
added on the wire.
When you call `recv` and there is no data in the buffer the call will either
raise an exception (non-blocking socket) or will just wait (blocking socket).
The `sendall` facility is just for avoiding writing a loop when sending a
buffer but of course there is no way to implement `recvall` because there's no
way to know when the message is complete.
You need to add a message boundary to your protocol; this could be a newline
separating messages or prefixing each message with a message size. This way
the reader will know when a message is complete before starting processing.
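As an illustration (not from the original answer), here is a minimal sketch of the size-prefix approach, where every message is preceded by a fixed 4-byte length header:
import struct

def send_msg(sock, data):
    # prefix each message with its length, packed as a 4-byte big-endian int
    sock.sendall(struct.pack('>I', len(data)) + data)

def recv_exactly(sock, n):
    # keep reading until exactly n bytes have arrived
    buf = b''
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise EOFError('socket closed mid-message')
        buf += chunk
    return buf

def recv_msg(sock):
    (size,) = struct.unpack('>I', recv_exactly(sock, 4))
    return recv_exactly(sock, size)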
|
How does `tkinter.Spinbox()` behaviour depend on the type of value passed to its `textvariable`?
Question: Been trying to pick up on Python (3.4) and this is my first posting on Stack
Overflow. I have questions regarding the behaviour of the `textvariable`
option in the `tkinter.Spinbox()` widget constructor.
The following code works. It's a layout of 6 spinboxes, arranged as 3 pairs
of spinboxes (each pair side by side). Each spinbox in a pair is supposed to
be independent of the other, i.e. when one changes, it's not supposed to
affect the other.
Pair A receives its `textvariable` parameter as type `StringVar()` (e.g.
`var = StringVar()`)
Pair B receives its `textvariable` parameter as a string declared between
quotes (e.g. `var = ''`)
Pair C receives its `textvariable` parameter as type `integer` (e.g. `var =
0`)
Note: I'd like to post a screen capture of the resulting window but the page
said "You need at least 10 reputation to post images"... which I apparently
do not have at the moment...
I'd just like to understand the reason why the members of Pair C seem to be
"non-independent" of each other: change one, and the other follows with the
same change.
How is the interpretation of the `textvariable` parameter in the
`tkinter.Spinbox()` different for the different types of value assigned?
Here are my codes:
from tkinter import *
class spinModuleStrA():
''' spinModuleNum() - Spinbox taking textvariable as <StringVar() value> '''
def __init__(self, master, moduleName):
self.root = master
self.moduleName = moduleName
self.root.grid()
self.var = StringVar()
self.var.set('r')
self.label1 = Label(self.root, text=self.moduleName, bg=self.root["bg"])
self.label1.pack(expand = True)
self.spinBox1 = Spinbox(self.root ,from_=0, to=100, width=10, textvariable=self.var)
self.spinBox1.pack(expand = True)
class spinModuleStrB():
''' spinModuleNum() - Spinbox taking textvariable as <'' string value> '''
def __init__(self, master, moduleName):
self.root = master
self.moduleName = moduleName
self.root.grid()
self.var = ''
self.label1 = Label(self.root, text=self.moduleName, bg=self.root["bg"])
self.label1.pack(expand = True)
self.spinBox1 = Spinbox(self.root ,from_=0, to=100, width=10, textvariable=self.var)
self.spinBox1.pack(expand = True)
class spinModuleNum():
''' spinModuleNum() - Spinbox taking textvariable as <numerical value> '''
def __init__(self, master, moduleName):
self.root = master
self.moduleName = moduleName
self.root.grid()
self.var = 0
self.label1 = Label(self.root, text=self.moduleName, bg=self.root["bg"])
self.label1.pack(expand = True)
self.spinBox1 = Spinbox(self.root ,from_=0, to=100, width=10, textvariable=self.var)
self.spinBox1.pack(expand = True)
class app():
def __init__(self):
self.root = Tk()
self.root.geometry('300x300+500+200')
for i in range(2): # number of columns
self.root.columnconfigure(i, weight=1)
for i in range(3): # number of rows
self.root.rowconfigure(i, weight=1)
self.frame1 = Frame(self.root, bg='#f55')
self.frame1.grid(row=0, column=0, padx=10, pady=10, sticky=W+E+N+S)
self.module1 = spinModuleStrA(self.frame1, "Spin <StringVar()> A")
self.frame2 = Frame(self.root, bg='#faa')
self.frame2.grid(row=0, column=1, padx=10, pady=10, sticky=W+E+N+S)
self.module2 = spinModuleStrA(self.frame2, "Spin <StringVar() B>")
self.frame3 = Frame(self.root, bg='#5f5')
self.frame3.grid(row=1, column=0, padx=10, pady=10, sticky=W+E+N+S)
self.module3 = spinModuleStrB(self.frame3, "Spin <''> A")
self.frame4 = Frame(self.root, bg='#5fa')
self.frame4.grid(row=1, column=1, padx=10, pady=10, sticky=W+E+N+S)
self.module4 = spinModuleStrB(self.frame4, "Spin <''> B")
self.frame5 = Frame(self.root, bg='#55f')
self.frame5.grid(row=2, column=0, padx=10, pady=10, sticky=W+E+N+S)
self.module5 = spinModuleNum(self.frame5, "Spin <numerical> A")
self.frame6 = Frame(self.root, bg='#5af')
self.frame6.grid(row=2, column=1, padx=10, pady=10, sticky=W+E+N+S)
self.module6 = spinModuleNum(self.frame6, "Spin <numerical> B")
app1 = app()
Answer: The reason is irrelevant. Passing a string or integer to the textvariable
option is incorrect usage. While it will sometimes work (depending on how you
define "work"), it is incorrect. The textvariable requires one of the special
variable classes provided by tkinter. Without using one of those, there's no
point in defining the textvariable attribute at all.
However, to answer the specific question:
**Pair A** each get a unique textvariable, so it works as expected. The
underlying tcl/tk engine gets a unique string for each instance, which is why
the two are independent.
**Pair B** effectively gets _no_ textvariable because you specify the empty
string. To the underlying tcl/tk engine an empty string in this situation is
equivalent to python's `None`. If, instead of `''` you had used a non-empty
string (eg: `'w00t'`), you would notice the same behavior as pair C. This is
because each spinbox gets a variable with the same name, which to the
underlying tcl/tk engine means they are the same textvariable and thus tied
together.
**Pair C** uses a constant 0 (zero). Because it is a constant, both widgets
effectively get the same textvariable because ultimately this parameter
becomes the name of an tcl/tk variable. Since the name is identical for both
spinboxes, they are tied together.
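To make a pair like C independent, give each spinbox its own variable object. A minimal sketch (using `IntVar` since the values are numeric; the widget names are illustrative):
from tkinter import Tk, Spinbox, IntVar

root = Tk()
var_a = IntVar(value=0)  # one variable object per widget ...
var_b = IntVar(value=0)  # ... keeps the two spinboxes independent
Spinbox(root, from_=0, to=100, textvariable=var_a).pack()
Spinbox(root, from_=0, to=100, textvariable=var_b).pack()
root.mainloop()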
|
How to update a label in Tkinter, StringVar() not working
Question: I am working on this short code that compares the single characters of two
strings. After the first run, when I change the strings in the entry boxes,
I would like to replace the label created before instead of creating a new
one. I have already tried with StringVar() but it doesn't seem to work. (If
it helps, I'm using Python 2.7.6.) Could you please give me a hint?
from Tkinter import *
app = Tk()
app.geometry('450x300')
labelTF = Label(app, text="Insert sequence of TF").pack()
eTF = Entry(app, width=50)
eTF.pack()
eTF.focus_set()
labelSpazio = Label(app, text="\n").pack()
labelResultedSequence = Label(app, text="Insert sequence of ResultedSequence").pack()
eResultedSequence = Entry(app, width=50)
eResultedSequence.pack()
eResultedSequence.focus_set()
def prova():
count = 0
uno = eTF.get().lower()
due = eResultedSequence.get().lower()
if len(uno)==len(due):
for i in range(0,len(uno)):
if uno[i] == due[i]:
if uno[i] in ("a", "c","g","t"):
count = count + 1
if uno[i] == "r" and due[i] in ("a", "g"):
count = count + 1
if uno[i] == "y" and due[i] in ("t", "c"):
count = count + 1
percentage = int(float(count)/float(len(uno))*100)
labelSpazio = Label(app, text="\n").pack()
mlabel3=Label(app,text= "The final similarity percentage is: "+(str(percentage) + " %")).pack()
if len(uno)!=len(due):
mlabel2 = Label(app,text="The length of the sequences should be the same").pack()
b = Button(app, text="get", width=10, command=prova)
b.pack()
mainloop()
Answer: Create the labels only once outside of the for loop and use a StringVar to
modify its value. It would look like this:
# initialization
app = Tk()
label3text = StringVar()
mlabel3 = Label(app, textvariable=label3text, width=100)
mlabel3.pack()
Then in the for loop inside your function:
label3text.set("The final similarity percentage is: "+(str(percentage) + " %"))
|
How to import sqlite3 in my python3.4 successfully?
Question: There are two python version in my debian7, one is python2.7 the system
default version, the other is python3.4 which compiled to install this way.
apt-get update
apt-get upgrade
apt-get install build-essential
wget http://www.python.org/ftp/python/3.4.0/Python-3.4.0.tgz
tar -zxvf Python-3.4.0.tgz
cd Python-3.4.0
mkdir /usr/local/python3.4
./configure --prefix=/usr/local/python3.4
make
make install
ln -s /usr/local/python3.4/bin/python3.4 /usr/bin/python3.4
ln -s /usr/local/python3.4/bin/pip3.4 /usr/bin/pip3.4
I have installed sqlite this way on my debian.
sudo apt-get install sqlite3 libsqlite3-dev
In python2.7
root@rebuild:~# python
Python 2.7.3 (default, Mar 14 2014, 11:57:14)
[GCC 4.7.2] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import sqlite3
In python3.4
root@rebuild:~# python3.4
Python 3.4.0 (default, Nov 27 2014, 13:54:17)
[GCC 4.7.2] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import sqlite3
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/python3.4/lib/python3.4/sqlite3/__init__.py", line 23, in <module>
from sqlite3.dbapi2 import *
File "/usr/local/python3.4/lib/python3.4/sqlite3/dbapi2.py", line 26, in <module>
from _sqlite3 import *
ImportError: No module named '_sqlite3'
How can i import sqlite3 in my python3.4 successfully?
Answer: From the information provided and the order presented, it looks like you
installed Python 3.4 from source BEFORE installing the sqlite dev package. If
you watch the Python 3.4 build closely you will notice a number of modules it
could not build (one of which is _sqlite3).
Solution: Reinstall 3.4 now that sqlite3 dev is available, i.e. rerun the
`./configure --prefix=/usr/local/python3.4`, `make` and `make install` steps
from your original install.
|
Python. Django - How to call a python function when submitting form with Ajax
Question: I have an error when loading the app which says that url in form's _action_
attribute is not correct. I've googled for several hours and tried different
variants, still no result. I'm totally new to django and would appreciate any
help. Thanks!
This is my views.py:
from django.shortcuts import render
from django.http import HttpResponse
from django.template import RequestContext, loader
from form import UserForm
def index(request):
text = "Welcome"
template = loader.get_template('myapp/index.html')
context = RequestContext(request, {
'text': text,
})
return HttpResponse(template.render(context))
def get_data(request):
if request.POST:
form = UserForm(request.POST)
if form.is_valid():
login = form.cleaned_data['login']
password = form.cleaned_data['password']
return render(request, 'myapp/index.html', {
'login': login,
'password': password, })
urls.py:
from django.conf.urls import patterns, url
from myapp import views
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
url(r'^$/login/$', views.get_data, name='get_data'),
)
index.html:
<h1>{{ text }}</h1>
<form action="{% url get_data %}" method="post" id="auth">
{% csrf_token %}
<p>Login:<br>
<input type="text" name="login" id="login"> </p>
<p>Password:<br>
<input type="text" name="password" id="password"> </p>
<input type="submit" value="Log in"">
</form>
<div id="check"></div>
<div>{{login}}</br>
{{password}}
</div>
<script type="text/javascript">
var frm = $('#auth');
frm.submit(function () {
$.ajax({
type: frm.attr('method'),
url: frm.attr('action'),
data: frm.serializeArray(),
cache: false,
success: function (data) {
$("#check").html(data);
},
error: function(data) {
$("#check").html("Something went wrong!");
}
});
return false;
});
</script>
Answer: The action attribute of the form should be `action="{% url 'get_data' %}"`,
not `action="{% url get_data %}"`.
|
Redirect screen output to text file
Question: My python script calls an executable (binary compiled from C++) like this:
subprocess.call(["./rti", '1', '0.05', fileForRTI])
where `rti` executable name, `1`, `0.05` and `fileForRTI` are arguments.
This executable generates output to the console, but I want to output it to
the text file as well. What I tried is this (according to
[this](http://stackoverflow.com/questions/4675728/redirect-stdout-to-a-file-
in-python)):
import sys
someFile = 'testFile'
sys.stdout = open(someFile, "w")
print(str(subprocess.call(["./rti", '1', '0.05', fileForRTI])))
But for some reason what is written to the file is just '1', while the output
is much bigger.
I also tried this:
p = Popen(['./rti', '1', '0.05', fileForRTI], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = p.communicate(b"input data that is passed to subprocess' stdin")
rc = p.returncode
print(rc)
But it's once again shows 1.
EDIT: also, Kasra's solution gives output 1 instead of what I normally see on
the screen.
Not sure if allowed, but a side question: if that `./rti` executable takes a
long time to generate output, is it possible to terminate it and still write
to the file what has already been shown on the screen?
Answer: You can use `subprocess.check_output()` :
import subprocess
try:
out_bytes = subprocess.check_output(["./rti", '1', '0.05', fileForRTI])
except subprocess.CalledProcessError as e:
out_bytes = e.output # Output generated before error
code= e.returncode # Return code
This runs the specified command and returns its output as a byte string. If
you need to interpret the resulting bytes as text, add a further decoding
step. For example:
out_text = out_bytes.decode('utf-8')
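To also keep a copy of that output in a text file, which was the original goal, one simple sketch (reusing the question's `testFile` name):
out_text = out_bytes.decode('utf-8')
print(out_text)                    # still show the output on screen
with open('testFile', 'w') as f:   # ... and save it to a text file too
    f.write(out_text)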
The `check_output()` function is the easiest way to execute an external
command and get its output. However, if you need to perform more advanced
communication with a subprocess, such as sending it input, you'll need to take
a different approach. For that, use the `subprocess.Popen` class directly.
|
handling command output in python
Question: I want to work with the output of a wifi scan command. The output is several
lines and I am interested in 2 information out of it. The goal is to have the
ESSID and the address in a two dimmension array (hope thats right?) Here is
what I got so far:
#!/usr/bin/python
import subprocess
import re
from time import sleep
# set wifi interface
wif = "wlan0"
So I get the command stdout and I find out that to work with this output in a
loop I have to use iter
# check for WiFis nearby
wifi_out = subprocess.Popen(["iwlist", wif ,"scan"],stdout=subprocess.PIPE)
wifi_data = iter(wifi_out.stdout.readline,'')
Then I used enumerate to get the index, so I can search for the line with the
address; the next line (index + 1) would then contain the ESSID
for index, line in enumerate(wifi_data):
searchObj = re.search( r'.* Cell [0-9][0-9] - Address: .*', line, re.M|re.I)
if searchObj:
print index, line
word = line.split()
wifi = [word[4],wifi_data[index + 1]]
Now I have two problems
1) wifi_data is the wrong Type
`TypeError: 'callable-iterator' object has no attribute '__getitem__'`
2) I guess with
`wifi = [word[4], wifi_data[index + 1]]`
I set the variable anew every time instead of having something that appends.
But I want a variable that in the end holds all ESSIDs together with their
corresponding addresses. I am new to Python, so currently I imagine
something like
WIFI[0][0] returns ESSID
WIFI[0][1] returns address to ESSID in WIFI[0][0]
WIFI[1][0] returns next ESSID
WIFI[1][1] returns address to ESSID in WIFI[1][0]
and so on. Or would something else in Python be better for working with this
kind of information?
Answer: I think you want
next(wifi_data)
since you cannot index into an iterator ... this will give you the next item
... but it may screw up your loop ...
although really you could just do
wifi_out = subprocess.Popen(["iwlist", wif ,"scan"],stdout=subprocess.PIPE)
wifi_data = wifi_out.communicate()[0].splitlines()
or even easier perhaps
wifi_data = subprocess.check_output(["iwlist",wif,"scan"]).splitlines()
and then you will have a list ... which will work more like you expect with
regard to accessing the data via index (there's not really a good reason to
use an iter for this that I can tell)
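Building on that, here is a rough sketch of collecting the ESSID/address pairs the question asks for (assuming, as the question does, that the ESSID appears on the line right after the Address line in the `iwlist` output):
import re
import subprocess

wifi_data = subprocess.check_output(["iwlist", "wlan0", "scan"]).splitlines()

wifi = []  # each entry: [essid, address]
for index, line in enumerate(wifi_data):
    match = re.search(r'Cell \d+ - Address: (\S+)', line)
    if match:
        essid = re.search(r'ESSID:"([^"]*)"', wifi_data[index + 1])
        if essid:
            wifi.append([essid.group(1), match.group(1)])

# wifi[0][0] -> first ESSID, wifi[0][1] -> its address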
|
How to end Tkinter properly if not using the Quit button
Question: I tried to find out a solution for my problem, but I couldn't find one. I am
using Python27 on Windows 7.
I have an easy Tkinter GUI with a button:
import Tkinter
import sys
def close_window():
root.destroy()
sys.exit()
root = Tkinter.Tk()
#exit button
draw_button = Tkinter.Button(root, text="Quit", command = close_window)
draw_button.grid(row=1, column=1)
root.mainloop()
Now if I use the Quit button the program closes and there is not task left of
the program. The problem is if someone uses the X-Button to close the Windows
or for examples uses Alt+F4, the task is still running. For later use I freeze
the script to make an executable and if someone uses some method to close the
program except the Quit button the task is still running. And if the task is
still running he or she can't open the program again, because it is still
running in the background and Windows raise an error that the program is still
running.
I tried to add some commands after the mainloop but they are all ignored. How
can I solve this problem?
Thanks for your help! Max
Answer: What about using `WM_DELETE_WINDOW`. For example:
import Tkinter as tkinter  # Python 2.7 as in the question; on Python 3 use: import tkinter
import sys
def close_window():
root.destroy()
sys.exit()
def win_deleted():
print("closed");
close_window();
root = tkinter.Tk()
#exit button
draw_button = tkinter.Button(root, text="Quit", command = close_window)
draw_button.grid(row=1, column=1)
root.protocol("WM_DELETE_WINDOW", win_deleted)
root.mainloop()
This will also close the app when the X button or Alt+F4 is used.
|
application folder not showing up on sys.path [Python]
Question: I have a web application that I am developing on my local machine. I copy over
the files to my server, and now my program will not run.
I examined the sys.path on both machines and on my server (where the code wont
run) i am missing the top-level directory from my path. I am using virtualenvs
on both machines. See the structure below
myapplication/
code/
main.py
db.py
app/
models.py
On my home machine, when I call `$ python2.7 main.py`, everything works fine.
I also see that `myapplication/` is on my path, as is `myapplication/app/`
(`print sys.path` from within `db.py`)
On the server, I am getting an import error `ImportError: No module named
app.models`
The import line in `db.py` is `from app import models`
Looking at the output of `print sys.path` on the server shows that the top
level `myapplication/` is missing from the path (only `myapplication/code`)
I have tried deleting and resyncing all of the `__init__.py` files and
deleting all of the `*.pyc` files
Any suggestions on why the top-level directory is getting omitted?
Answer: I'm not sure of the reason for the missing path on the server. But what you
can do is add the root directory to the system path. In your main.py:
import os
import sys

# abspath guards against __file__ being a bare relative name like 'main.py'
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
Portable code: __import__ parameter string type between Python 2 and Python 3
Question: What should I do, in a world where all text literals are Unicode by default,
to make `__import__` work in both Python 2 and 3?
I'm slowly learning about making Python code that will run under both Python 2
(version 2.6 or above) and Python 3 (version 3.2 or above).
This entails, I believe, the admonition to ensure text literals are Unicode by
default:
from __future__ import unicode_literals
and to specify bytes literals explicitly with `b'wibble'` if needed.
The `__import__` built-in function, though, is tripping up.
A contrived, trivial project layout:
$ mkdir fooproject/
$ cd fooproject/
$ mkdir foo/
$ printf "" > foo/__init__.py
$ mkdir foo/bar/
$ printf "" > foo/bar/__init__.py
Here's a simple `fooproject/setup.py` for that project:
from __future__ import unicode_literals
main_module_name = 'foo'
main_module = __import__(main_module_name, fromlist=['bar'])
assert main_module.bar
That fails under Python 2, but runs fine under Python 3:
$ python2 ./setup.py
Traceback (most recent call last):
File "./setup.py", line 4, in <module>
main_module = __import__(main_module_name, fromlist=['bar'])
TypeError: Item in ``from list'' not a string
$ python3 ./setup.py
We've deliberately made unadorned strings Unicode by default. By “not a
string”, I presume Python 2 means “not a ‘bytes’ object”.
Okay, so we'll explicitly set that to a `bytes` literal:
from __future__ import unicode_literals
main_module_name = 'foo'
main_module = __import__(main_module_name, fromlist=[b'bar'])
assert main_module.bar
Now Python 2 is satisfied, but Python 3 complains:
$ python2 ./setup.py
$ python3 ./setup.py
Traceback (most recent call last):
File "./setup.py", line 4, in <module>
main_module = __import__(main_module_name, fromlist=[b'bar'])
File "<frozen importlib._bootstrap>", line 2281, in
_handle_fromlist
TypeError: hasattr(): attribute name must be string
So I've deliberately set unadorned strings to be Unicode by default, just as
I'm supposed to; but that's apparently breaking the expectations of
`__import__` between Python 2 and Python 3.
How can I get that `__import__` call, complete with its `fromlist` parameter,
working correctly under both Python 2 and Python 3, keeping the
`unicode_literals` setting?
Answer: recall `str` works differently for python2 and python3 (see @BrenBarn's
comment), so:
main_module = __import__(main_module_name, fromlist=[str('bar')])
or more generally
main_module = __import__(main_module_name, fromlist=list(map(str, ['bar'])))
|
How to properly implement tkMessageBox in Python3.4?
Question: I want to launch a warning using tkMessageBox in Python3. This warning is
supposed to launch when a user doesn't select an element from a listbox.
Unfortunately whenever I try to implement message box it does not launch like
it is supposed to. I have code for a script called pietalkgui.py which
contains the code where I want to implement the message box:
from tkinter import messagebox
# Gives warning if no user is selected for whisper
def whisperwarning(self):
# show warning to user
showwarning("Select User","Select a user to whisper to!")
# Handles whisper
def whispermessage(self):
# stores element selected in temp variable
temp = self.userslist.get(self.userslist.curselection())
# if no item is selected from userslist (listbox)
if temp == "":
# launch warning to user if no item is selected
self.whisperwarning()
else:
# retrieves usernames from userslist
username = temp
# storing whisper
outwhisper = ' /w "' + username +'" ' + self.messagebox.get("0.0",END)
# handling whisper
self.handler(outwhisper)
# erase message in message box
self.messagebox.delete("0.0",END)
Am I doing something wrong in the implementation of tkMessageBox? Or am I not
properly checking if not item is selected from the listbox?
Answer: It appears that you are calling the method `showwarning`, but haven't defined
it or imported it. That is the name of a function in the `messagebox` module,
so perhaps you need to change this:
showwarning("Select User","Select a user to whisper to!")
... to this:
messagebox.showwarning("Select User","Select a user to whisper to!")
Also, FWIW, this code is slightly incorrect:
`self.messagebox.delete("0.0",END)` -- text indices start at "1.0", not
"0.0".
|
running time of python program is much smaller in ubuntu compared to windows. why?
Question: I implemented [Dijkstra's
algorithm](http://en.wikipedia.org/wiki/Dijkstra%27s_algorithm) in Python and
ran the script under Ubuntu and windows 8. Both x64 architecture.
I profiled the script using `python -m cProfile name.py`
I was surprised to see that program took half the time in Ubuntu compared to
Windows.
Ubuntu run time for 1000 x 1000 matrix : `0.4 sec`
Windows run time for 1000 x 1000 matrix = `1.2 sec`
This is on the same, dual booted, machine.
How this is happening ?
Answer: First - the obvious is that while the versions of python might be the same,
they have been compiled with different compilers and naturally that means the
optimization levels are completely different. You could recompile python for
both systems using gcc for example and run the whole thing again. However I
will advise you to do this only if you are sure about what you are doing. More
specifically for linux since many of the processes which run depend heavily on
python. Take a look at [this](http://stackoverflow.com/questions/10150881/why-
is-python-so-much-slower-on-windows) discussion.
Second - windows naturally takes up a lot more resources than linux and out of
the box windows starts significantly more processes and every process is
generally an infinite loop, taking up resources.
Comparing performance based on OS is a wrong concept. It's basically like
comparing apples to oranges. Each system has different ways of managing it's
memory and the running processes. The file system is another very important
part - since python is in most cases an interpreted language, each import is
performing disk operations.
|
df.to_sql gives TypeError with dtype=sqlalchemy.timestamp(timezone=True)
Question: I'm trying to use DataFrame().to_sql to input a time aware dataframe series.
Here is an example of my code.
times = ['201412120154', '201412110254']
df = pd.DataFrame()
df['time'] = pd.to_datetime(times, utc=True)
df.time.to_sql('test', engine,
dtype={'time': sqlalchemy.TIMESTAMP(timezone=True)})
The error I recieve is:
TypeError: issubclass() arg 1 must be a class
The following code works but obviously results in a postgresql column that is
not timezone aware.
times = ['201412120154', '201412110254']
df = pd.DataFrame()
df['time'] = pd.to_datetime(times, utc=True)
df.time.to_sql('test', engine,
dtype={'time': sqlalchemy.TIMESTAMP})
I'm using python 2.7, pandas 0.15.2, postsgresql 9.3 and SQLAlchemy 0.9.7
Answer: Update: this is fixed in pandas 0.16.
This is a bug in pandas 0.15.2 that prevents you from providing an instantiated
sqlalchemy type with arguments (like `TIMESTAMP(timezone=True)` instead of
`TIMESTAMP`). It is fixed in the next version, but for now you can use
the patch below.
* * *
I will post the workaround here as well. If you run this, you will be able to
specify sqlalchemy types instantiated with arguments to the `dtype` keyword in
`to_sql`:
from pandas.io.sql import SQLTable
def to_sql(self, frame, name, if_exists='fail', index=True,
index_label=None, schema=None, chunksize=None, dtype=None):
"""
patched version of https://github.com/pydata/pandas/blob/v0.15.2/pandas/io/sql.py#L1129
"""
if dtype is not None:
from sqlalchemy.types import to_instance, TypeEngine
for col, my_type in dtype.items():
if not isinstance(to_instance(my_type), TypeEngine):
raise ValueError('The type of %s is not a SQLAlchemy '
'type ' % col)
table = SQLTable(name, self, frame=frame, index=index,
if_exists=if_exists, index_label=index_label,
schema=schema, dtype=dtype)
table.create()
table.insert(chunksize)
pd.io.sql.SQLDatabase.to_sql = to_sql
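With the patch applied, the call from the question (reusing its `df`, `engine` and imports) should then work:
df.time.to_sql('test', engine,
               dtype={'time': sqlalchemy.TIMESTAMP(timezone=True)})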
|
Zed Shaw exercise 20 doesn't work
Question: When I type this below:
from sys import argv
script, input_file = argv
def print_all(f):
print f.read()
def rewind(f):
f.seek(0)
def print_a_line(line_count, f):
print line_count, f.readline()
current_file = open(input_file)
print "First let's print the whole file:\n"
print_all(current_file)
print "Now let's rewind, kind of like a tape."
rewind (current_file)
print "Let's print three lines:"
current_line = 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
It doesn't print in my PowerShell or work; it should print this:
First let’s print the whole file:
This is line 1
This is line 2
This is line 3
Now let’s rewind, kind of like a tape.
Let’s print three lines:
1 This is line 1
2 This is line 2
3 This is line 3
in powershell type this: `$ python ex20.py test.txt`
Answer: You are not _calling anything_. Your script defines functions but never
actually uses any of them.
That's because all the lines that are supposed to call the functions are
indented to be part of the `print_a_line()` function. Un-indent those to move
them to the module level so that they are executed when the script is run:
def print_a_line(line_count, f):
print line_count, f.readline()
current_file = open(input_file)
print "First let's print the whole file:\n"
print_all(current_file)
print "Now let's rewind, kind of like a tape."
rewind (current_file)
print "Let's print three lines:"
current_line = 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
|
Installing pydot and graphviz packages in Anaconda environment
Question: I want to be able to create graphical decision trees in Python, and I am
currently trying to install both `pydot` and `graphviz`.
I am using Anaconda as my environment (along with Spyder), and have tried to
run the following lines of code
conda install -c https://conda.binstar.org/t/TOKEN/j14r pydot
with the result
Error: unknown host: http://repo.continuum.io/pkgs/pro/win-32/
Error: unknown host: http://repo.continuum.io/pkgs/free/win-32/
Error: unknown host: https://conda.binstar.org/t/TOKEN/j14r/win-32/
Error: No packages found matching: pydot
I have also tried using `pip install pydot` and `pip install graphviz` with
similar results:
Downloading/unpacking pydot
Cannot fetch index base URL https://pypi.python.org/simple/
Could not find any downloads that satisfy the requirement pydot
Cleaning up...
No distributions at all found for pydot
Storing complete log in [...]
I am getting pretty bored at trying to figure out how to go about this, so I
was hoping anyone out there could give me some tips perhaps.
Thanks
Answer: I had the same issue and solved it by (order is **important**):
1. Installing `graphviz`, simply via `sudo apt-get install graphviz`
2. Installing `graphviz` for Python via conda `sudo ~/anaconda2/bin/conda install graphviz`
3. Finally, by installing `pydot` using conda `sudo ~/anaconda2/bin/conda install pydot`
This answer is overdue but this post helped me (together with [this
one](http://stackoverflow.com/questions/18438997/why-is-pydot-unable-to-find-
graphvizs-executables-in-windows-8) that mentions the installation order), so
hopefully this answer will help someone else. I'm using Ubuntu 14.04 and
Python 2.7.
P.S. apparently, there could be some issues with step 1 of the above
algorithm, [this post](http://askubuntu.com/questions/196230/how-do-i-install-
graphviz-2-29-in-12-04) mentions how to fix them.
|
Python Pyserial read data from multiple serial ports at the same time
Question: I'm trying to read out multiple serial ports at the same time with Python 2.7
and PySerial.
Features should be:
1. in the main program I get all open serial ports, open them and append the serial object to serialobjects
2. I want to read each serial port data in one subprocess for parallelization
The big problem is: how do I pass the serial port object to the subprocess?
OR:
Does another (and maybe better) solution exist to this? (Maybe
[this](http://stackoverflow.com/questions/16255807/pyserial-is-there-a-way-to-
select-on-multiple-ports-at-once): How do I apply twisted serial ports to my
problem?)
## EDIT
I think I wasn't totally clear about what I want to achieve.
I want to read out 2 or more serial ports at the same time. Because of
timeouts and readout times it isn't possible to read them out at the same
time in one process.
The following approach
ser1 = serial.Serial(port="COM1",baudrate=9600)
ser2 = serial.Serial(port="COM2",baudrate=9600)
ser1.write('command for reading out device 1')
output1 = ser1.readline()
ser2.write('command for reading out device 2')
# now you have to wait at least 100ms for device 2 to respond
output2 = ser2.readline()
doesn't serve my needs.
Another approach is to parallelize the serial readings in subprocesses.
## main.py
import serial # serial communication
from subprocess import Popen, PIPE
ports = ["COM1", "COM2"]
for port in ports:
ser = serial.Serial()
ser.port=port
ser.baudrate=9600
# set parity and ...
serialobjects.append(ser)
# call subprocess
# pass the serial object to subprocess
# read out serial port
# HOW TO PASS SERIAL OBJECT HERE to stdin
p1 = Popen(['python', './ReadCOM.py'], stdin=PIPE, stdout=PIPE, stderr=PIPE) # read COM1 permanently
p2 = Popen(['python', './ReadCOM.py'], stdin=PIPE, stdout=PIPE, stderr=PIPE) # read COM2 permanently
for i in range(10):
print "received from COM1: %s" % p1.stdout.readline() # print output from ReadCOM.py for COM1
print "received from COM2: %s" % p2.stdout.readline() # print output from ReadCOM.py for COM2
## ReadCOM.py (taken from [related
post](http://stackoverflow.com/questions/8260502/python-multiple-threads-
multiple-processes-for-reading-serial-ports) and edited)
import sys
while True: # The program never ends... will be killed when master is over.
# sys.stdin.readline()
ser.write('serial command here\n') # send command to serial port
output = ser.readline() # read output
sys.stdout.write(output) # write output to stdout
sys.stdout.flush()
Thanks in advance!
Answer: First change `ReadCOM.py` to receive arguments
import sys
import serial
ser = serial.Serial(port=sys.argv[1],baudrate=int(sys.argv[2]))
while True: # The program never ends... will be killed when master is over.
# sys.stdin.readline()
ser.write('serial command here\n') # send command to serial port
output = ser.readline() # read output
sys.stdout.write(output) # write output to stdout
sys.stdout.flush()
and after pass it in `main.py`:
from subprocess import Popen, PIPE
# call subprocess
# pass the serial object to subprocess
# read out serial port
# HOW TO PASS SERIAL OBJECT HERE to stdin
p1 = Popen(['python', './ReadCOM.py', "COM1", "9600"], stdin=PIPE, stdout=PIPE, stderr=PIPE) # read COM1 permanently
p2 = Popen(['python', './ReadCOM.py', "COM2", "9600"], stdin=PIPE, stdout=PIPE, stderr=PIPE) # read COM2 permanently
for i in range(10):
print "received from COM1: %s" % p1.stdout.readline() # print output from ReadCOM.py for COM1
print "received from COM2: %s" % p2.stdout.readline() # print output from ReadCOM.py for COM2
|
Migrate an existant project from Django 1.6 to 1.7
Question: I have a project running Django 1.6 with Python 2.7 and, for several reasons,
I would like to upgrade to Django 1.7.
Consider that South has never been used in this project and that we have a
custom user model (this is important to keep in mind because custom user
models seem to clash with `convert_to_south` according to [this
ticket](http://south.aeracode.org/ticket/1179)).
What would be the different steps to take in order to upgrade to Django 1.7
smoothly? My secret dream would be to avoid using South and directly take
advantage of the native schema migration support of Django 1.7.
Answer: from the db point of view:
* pip install django==1.7
* run `./manage.py makemigrations`
* run `./manage.py migrate`
that's all
for other compatibilities check release notes:
<https://docs.djangoproject.com/en/dev/releases/1.7/>
The main problems you may find are related to the new [application
loading](https://docs.djangoproject.com/en/dev/releases/1.7/#app-loading-refactor)
mechanism (i.e. you cannot call `get_user_model()` at import time). There is
no single way to solve this; it depends on the code structure.
some generally valid points are (see the sketch after this list):
* if you use a custom user model, always use `settings.AUTH_USER_MODEL` in ForeignKeys
* do not access any Model in your `__init__`
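A minimal sketch of the first point (the `Article` model and its fields are illustrative, not from the original project):
from django.conf import settings
from django.db import models

class Article(models.Model):
    # reference the user model via the setting, never a hard-coded class
    author = models.ForeignKey(settings.AUTH_USER_MODEL)
    title = models.CharField(max_length=200)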
some reference:
* <https://docs.djangoproject.com/en/1.7/howto/upgrade-version/>
* <https://groups.google.com/forum/#!topic/django-developers/lmT1JxsWHPo>
* <http://www.biggleszx.com/2014/09/notes-on-upgrading-to-django-1-6-and-1-7/>
|
Extracting data from xml format in python
Question: I have the following nmap output as xml format:
<ports><extraports state="closed" count="991">
<extrareasons reason="conn-refused" count="991"/>
</extraports>
<port protocol="tcp" portid="22"><state state="open" reason="syn-ack" reason_ttl="0"/><service name="ssh" method="table" conf="3"/></port>
<port protocol="tcp" portid="25"><state state="open" reason="syn-ack" reason_ttl="0"/><service name="smtp" method="table" conf="3"/></port>
<port protocol="tcp" portid="139"><state state="open" reason="syn-ack" reason_ttl="0"/><service name="netbios-ssn" method="table" conf="3"/></port>
<port protocol="tcp" portid="443"><state state="open" reason="syn-ack" reason_ttl="0"/><service name="https" method="table" conf="3"/></port>
I want to get port numbers that are open:
print 'Port Number: '+host.find('ports').find('port').get('portid')
But the result is just `22`.
How can I have the results:
22
25
139
443
Answer: Find all `port` elements, and get `portid` attributes.
Using
[`Element.findall`](https://docs.python.org/2/library/xml.etree.elementtree.html#xml.etree.ElementTree.Element.findall)
and [list
comprehension](https://docs.python.org/2/tutorial/datastructures.html#list-
comprehensions):
>>> import xml.etree.ElementTree as ET
>>> root = ET.fromstring('''
<ports><extraports state="closed" count="991">
<extrareasons reason="conn-refused" count="991"/>
</extraports>
<port protocol="tcp" portid="22"><state state="open" reason="syn-ack" reason_ttl="0"/><service name="ssh" method="table" conf="3"/></port>
<port protocol="tcp" portid="25"><state state="open" reason="syn-ack" reason_ttl="0"/><service name="smtp" method="table" conf="3"/></port>
<port protocol="tcp" portid="139"><state state="open" reason="syn-ack" reason_ttl="0"/><service name="netbios-ssn" method="table" conf="3"/></port>
<port protocol="tcp" portid="443"><state state="open" reason="syn-ack" reason_ttl="0"/><service name="https" method="table" conf="3"/></port>
</ports>
''')
>>> [port.get('portid') for port in root.findall('.//port')]
['22', '25', '139', '443']
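Since the question asks specifically for open ports, you could also guard against closed ones by checking each port's nested `state` element, e.g.:
>>> [port.get('portid') for port in root.findall('.//port')
...  if port.find('state').get('state') == 'open']
['22', '25', '139', '443']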
|
Python pandas - filter rows after groupby
Question: For example I have following table:
index,A,B
0,0,0
1,0,8
2,0,8
3,1,0
4,1,5
After grouping by `A`:
0:
index,A,B
0,0,0
1,0,8
2,0,8
1:
index,A,B
3,1,5
4,1,3
What I need is to drop rows from each group, where the number in column `B` is
less than maximum value from all rows from group's column `B`. Well I have a
problem translating and formulating this problem to English so here is the
example:
Maximum value from rows in column `B` in group `0`: **8**
So I want to drop row with index `0` and keep rows with indexes `1` and `2`
Maximum value from rows in column `B` in group `1`: **5**
So I want to drop row with index `4` and keep row with index `3`
I have tried to use pandas filter function, but the problem is that it is
operating on all rows in group at one time:
data = <example table>
grouped = data.groupby("A")
filtered = grouped.filter(lambda x: x["B"] == x["B"].max())
**So what I ideally need is some filter, which iterates through all rows in
group.**
Thanks for help!
P.S. Is there also a way to just delete the rows within the groups, without
returning a new `DataFrame` object?
Answer: You just need to use `apply` on the `groupby` object. I modified your example
data to make this a little more clear:
import pandas
from io import StringIO
csv = StringIO("""index,A,B
0,1,0.0
1,1,3.0
2,1,6.0
3,2,0.0
4,2,5.0
5,2,7.0""")
df = pandas.read_csv(csv, index_col='index')
groups = df.groupby(by=['A'])
print(groups.apply(lambda g: g[g['B'] == g['B'].max()]))
Which prints:
         A  B
A index
1 2      1  6
2 5      2  7
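As a side note (a common alternative, not part of the original answer), the same rows can be kept without `apply` by comparing `B` against its group-wise maximum via `transform`:
# keep only rows where B equals the max of B within their A-group
print(df[df['B'] == df.groupby('A')['B'].transform('max')])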
|
Is HTTP Post blocked by Cloud9?
Question: I've been playing around with Python/Flask on the Cloud9 IDE. Pretty fun so far.
But when I try to add an HTTP POST to my test project, Flask returns either a
403 or a 500. From what I can tell, when I attach data or send the POST
method, the 'request' object is None. It doesn't make sense though. This is
pretty straight forward and should work as far as I can tell. Here's the
python:
from flask import Flask, jsonify, abort, request
@app.route('/test', methods = ['POST'])
def post():
print ('started')
print request
if request.method == 'POST':
something = request.get_json()
print something
Flask is running correctly. I can hit a GET url, returning data just fine. I
get an error when I land on 'print request' because request is None.
Thanks,
Answer: You have two problems here:
* You're getting a 500 error
* "something" is always None
The first problem is because you're not returning anything from your route
function.
127.0.0.1 - - [15/Dec/2014 15:08:59] "POST /test HTTP/1.1" 500 -
Traceback (most recent call last):
...snip...
ValueError: View function did not return a response
You can remedy this by adding a return statement at the end of the function.
Don't forget it needs to be a string.
@app.route('/hi', methods = ['POST'])
def post():
return "Hello, World!"
The second problem isn't what it seems. I suspect that the object isn't None,
but the function that returns the string representation returns None, so
that's what gets printed. Try `print type(request)` to see this in action.
What I think you want access to is the `form` field. Here is a complete
example:
from flask import Flask, request
app = Flask(__name__)
@app.route('/test', methods = ['POST'])
def post():
print type(request)
if request.method == 'POST':
print request.form
return str(request.form)
app.run(debug=True)
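One more note, since the question's original handler called `get_json`: that accessor returns `None` unless the client actually sends a JSON body with a `Content-Type: application/json` header. A hypothetical route for that case (reusing the `app` from above):
@app.route('/test-json', methods=['POST'])
def post_json():
    payload = request.get_json()  # None unless the body really is JSON
    return str(payload)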
|
Syntax error in if statement Python
Question: For some reason I get a syntax error on this function.
def log(user, successful):
if successful == True:
with open('userlog.txt', 'a') as logfile:
logfile.append(datetime.datetime + ' User ' + user + ' Logged in'
else:
with open('userlog.txt', 'a') as logfile:
logfile.append(datetime.datetime + ' Unsuccessful login attempt as ' + user) )
Here is the error
Traceback (most recent call last):
File "C:\conf.py", line 4, in <module>
import func
File "C:\func.py", line 48
else:
^
SyntaxError: invalid syntax
Now, i've seen other posts that say I should add an extra parenthesis when
this happens. I tried it and it didnt work. Thanks in advance.
Answer: It looks like you copy/pasted some code wrong. This line needs a closing
parenthesis:
logfile.append(datetime.datetime + ' User ' + user + ' Logged in'
# ^
and this one has an extra closing parenthesis:
logfile.append(datetime.datetime + ' Unsuccessful login attempt as ' + user) )
# ^
Simply move the extra parenthesis from the second line to the first and all
will be well.
* * *
Actually, you have one more problem. As @MartijnPieters said in the comments,
you cannot concatenate a `datetime.datetime` object with strings.
I _think_ you actually meant to do:
logfile.write(str(datetime.datetime.now()) + ' Unsuccessful login attempt as ' + user)
(note that file objects have a `write` method; they have no `append` method,
so the original calls would also fail with an AttributeError).
This will return the current time as a string, which can then be concatenated
with other strings:
>>> import datetime
>>> str(datetime.datetime.now())
'2014-12-15 13:03:49.242187'
>>>
|
python: using file handle to print contents of file
Question: I am following this advice: [File as command line argument for argparse -
error message if argument is not
valid](http://stackoverflow.com/questions/11540854/file-as-command-line-
argument-for-argparse-error-message-if-argument-is-not-va) to print the
contents of a file. Here is an MWE:
import argparse
import os
def is_valid_file(parser, arg):
"""
:rtype : open file handle
"""
if not os.path.exists(arg):
parser.error("The file %s does not exist!" % arg)
else:
return open(arg, 'r') # return an open file handle
parser = argparse.ArgumentParser(description='do shit')
parser.add_argument("-i", dest="filename", required=True,
help="input file with two matrices", metavar="FILE",
type=lambda x: is_valid_file(parser, x))
args = parser.parse_args()
print(args.filename.read)
However, I am getting this instead of the file content:
<built-in method read of _io.TextIOWrapper object at 0x7f1988b3bb40>
What am I doing wrong?
Answer: replace this:
print(args.filename.read)
with:
print(args.filename.read())
Without the parentheses, `read` is just the bound method object; you have to
call it to actually read the file's contents.
Read about Class and object here: [Class and
Object](https://docs.python.org/2/tutorial/classes.html)
|
Python: Why "__all__" doesn't work for import?
Question: File structure;
./__init__.py
a.py
/lib
__init__.py
Foo1.py # contains `class Foo1`
Foo2.py # contains `class Foo2`
# so on ...
Tested this in `a.py` and worked, doing this;
from lib.Foo1 import Foo1
from lib.Foo2 import Foo2
But when I do `from lib import *` with `__all__ = ["Foo1", "Foo2"]` in
`__init__.py`, it doesn't work.
Error: `<type 'exceptions.TypeError'>: 'module' object is not callable`
What am I missing?
Here is `a.py`:
#!/usr/bin/python
import cgi, cgitb
cgitb.enable()
from lib import *
print "Content-Type: text/html"
print ""
print "Test!"
foo1 = Foo1()
foo2 = Foo2()
// used these refs:
// <http://docs.python.org/2/tutorial/modules.html#importing-from-a-package>
// [Loading all modules in a folder in
Python](http://stackoverflow.com/q/1057431/362780)
Answer:
from lib import *
Will import everything in `lib` to the current module, so our `globals()`
looks like:
{'Foo1':<module lib.Foo1>,
'Foo2':<module lib.Foo2>}
Whereas
from lib.Foo1 import *
from lib.Foo2 import *
Makes our `globals()` become
{'Foo1':<class lib.Foo1.Foo1>,
'Foo2':<class lib.Foo2.Foo2>}
So in the first case we're just importing modules, not the classes inside them
which is what we want.
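So a minimal fix, assuming you want the classes available via `from lib import *`, is to re-export them from the package's `__init__.py`:
# lib/__init__.py
from lib.Foo1 import Foo1
from lib.Foo2 import Foo2

__all__ = ["Foo1", "Foo2"]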
|
Handle multiple messages with Queue get()
Question: Thanks to @user5402 for the previous
[solution](https://stackoverflow.com/questions/27487207/how-to-run-a-thread-
more-than-once-in-python/27488251?noredirect=1#comment43421261_27488251).
I am trying to handle multiple messages that are queued up. Here is the code:
import sys
import socket
from multiprocessing import Process, Queue
UDP_ADDR = ("", 13000)
def send(m):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(m, UDP_ADDR)
def receive(q):
buf = 1024
Sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
Sock.bind(UDP_ADDR)
while True:
(data, addr) = Sock.recvfrom(buf)
q.put(data)
In the client function I want to handle multiple messages that have knock-on
effects.
def client():
q = Queue()
r = Process(target = receive, args=(q,))
r.start()
print "client loop started"
while True:
m = q.get()
print "got:", m
while m == "start":
print "started"
z = q.get()
if z == "stop":
return
print "loop ended"
r.terminate()
So when `start` is sent, it then goes into a while loop that is infinitely
printing `"started"`, and waiting for the `stop` message to come through. The
above `client` code does not work.
Here is the code to start the function:
if __name__ == '__main__':
args = sys.argv
if len(args) > 1:
send(args[1])
else:
client()
Answer: You can write the client loop like this:
print "client loop started"
while True:
m = q.get()
print "waiting for start, got:", m
if m == "start":
while True:
try:
m = q.get(False)
except:
m = None
print "waiting for stop, got:", m
if m == "stop":
break
Based on your comments, this would be a better approach:
import sys
import socket
import Queue as Q
import time
from multiprocessing import Process, Queue
UDP_ADDR = ("", 13000)
def send(m):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(m, UDP_ADDR)
def receive(q):
buf = 1024
Sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
Sock.bind(UDP_ADDR)
while True:
(data, addr) = Sock.recvfrom(buf)
q.put(data)
def doit():
# ... what the processing thread will do ...
while True:
print "sleeping..."
time.sleep(3)
def client():
q = Queue()
r = Process(target = receive, args=(q,))
r.start()
print "client loop started"
t = None # the processing thread
while True:
m = q.get()
if m == "start":
if t:
print "processing thread already started"
else:
t = Process(target = doit)
t.start()
print "processing thread started"
elif m == "stop":
if t:
t.terminate()
t = None
print "processing thread stopped"
else:
print "processing thread not running"
elif m == "quit":
print "shutting down"
if t:
t.terminate()
t = None # play it safe
break
else:
print "huh?"
r.terminate()
if __name__ == '__main__':
args = sys.argv
if len(args) > 1:
send(args[1])
else:
client()
|