AttributeError: 'tuple' object has no attribute 'high0'
Question: I am trying to write a list of data to an excel spreadsheet. Something is
going wrong when I try to iterate over my entire list in parallel. I get the
following error:
File "canadascript.py", line 57, in <module>
sheet.write(row, high0_col, c.high0)
AttributeError: 'tuple' object has no attribute 'high0'
I am importing a separate script that successfully returns all the variables
(high0, low0, etc.). This is the script I am running when I receive the error:
#!/usr/bin/env python
from xlutils.copy import copy
from xlrd import open_workbook
import canada
#import os
#os.chdir("/data/ops/Ops Documents/MexTemps")
cities = canada.getCities()
for c in cities :
c.retrieveTemps()
##
# writing to excel
##
file_name = 'fcst_hilo_TEST.xls'
new_file_name = 'fcst_hilo.xls'
row = 1
# column constants
high0_col = 1
low1_col = 2
high1_col = 3
low2_col = 4
high2_col = 5
low3_col = 6
high3_col = 7
low4_col = 8
high4_col = 9
low5_col = 10
high5_col = 11
low6_col = 12
high6_col = 13
low7_col = 14
high7_col = 15
workbook_file = None
try :
# currently xlwt does not implement this option for xslx files
workbook_file = open_workbook(file_name, formatting_info=True)
except :
workbook_file = open_workbook(file_name)
workbook = copy(workbook_file)
sheet = workbook.get_sheet(0)
# iterate over list in parallel, zip returns a tuple
for c in zip(cities) :
sheet.write(row, high0_col, c.high0)
sheet.write(row, low1_col, c.low1)
sheet.write(row, high1_col, c.high1)
sheet.write(row, low2_col, c.low2)
sheet.write(row, high2_col, c.high2)
sheet.write(row, low3_col, c.low3)
sheet.write(row, high3_col, c.high3)
sheet.write(row, low4_col, c.low4)
sheet.write(row, high4_col, c.high4)
sheet.write(row, low5_col, c.low5)
sheet.write(row, high5_col, c.high5)
sheet.write(row, low6_col, c.low6)
sheet.write(row, high6_col, c.high6)
sheet.write(row, low7_col, c.low7)
sheet.write(row, high7_col, c.high7)
workbook.save(new_file_name)
EDIT:
Here is the script I import into this one:
#!usr/bin/env python
import urllib
from datetime import datetime
from datetime import timedelta
date = datetime.now()
date1 = date + timedelta(days=1)
date2 = date + timedelta(days=2)
date3 = date + timedelta(days=3)
date4 = date + timedelta(days=4)
date5 = date + timedelta(days=5)
date6 = date + timedelta(days=6)
class city :
def __init__(self, city_name, link) :
self.name = city_name
self.url = link
self.high0 = 0
self.high1 = 0
self.high2 = 0
self.high3 = 0
self.high4 = 0
self.high5 = 0
self.high6 = 0
self.high7 = 0
self.low1 = 0
self.low2 = 0
self.low3 = 0
self.low4 = 0
self.low5 = 0
self.low6 = 0
self.low7 = 0
def retrieveTemps(self) :
filehandle = urllib.urlopen(self.url)
# get lines from result into array
lines = filehandle.readlines()
# (for each) loop through each line in lines
line_number = 0 # a counter for line number
for line in lines:
line_number = line_number + 1 # increment counter
# find string, position otherwise position is -1
position0 = line.rfind('title="{}"'.format(date.strftime("%A")))
position1 = line.rfind('title="{}"'.format(date1.strftime("%A")))
position2 = line.rfind('title="{}"'.format(date2.strftime("%A")))
position3 = line.rfind('title="{}"'.format(date3.strftime("%A")))
position4 = line.rfind('title="{}"'.format(date4.strftime("%A")))
position5 = line.rfind('title="{}"'.format(date5.strftime("%A")))
position6 = line.rfind('title="{}"'.format(date6.strftime("%A")))
if position0 > 0 :
self.high0 = lines[line_number + 4].split('&')[0].split('>')[-1]
self.low1 = lines[line_number + 18].split('&')[0].split('>')[-1]
if position1 > 0 :
self.high1 = lines[line_number + 4].split('&')[0].split('>')[-1]
self.low2 = lines[line_number + 19].split('&')[0].split('>')[-1]
if position2 > 0 :
self.high2 = lines[line_number + 4].split('&')[0].split('>')[-1]
self.low3 = lines[line_number + 19].split('&')[0].split('>')[-1]
if position3 > 0 :
self.high3 = lines[line_number + 4].split('&')[0].split('>')[-1]
self.low4 = lines[line_number + 19].split('&')[0].split('>')[-1]
if position4 > 0 :
self.high4 = lines[line_number + 4].split('&')[0].split('>')[-1]
self.low5 = lines[line_number + 19].split('&')[0].split('>')[-1]
if position5 > 0 :
self.high5 = lines[line_number + 4].split('&')[0].split('>')[-1]
self.low6 = lines[line_number + 19].split('&')[0].split('>')[-1]
self.low7 = lines[line_number + 19].split('&')[0].split('>')[-1]
if position6 > 0 :
self.high6 = lines[line_number + 4].split('&')[0].split('>')[-1]
self.high7 = lines[line_number + 4].split('&')[0].split('>')[-1]
break # done with loop, break out of it
filehandle.close()
#BRITISH COLUMBIA CITIES
def getCities():
c1 = city('Prince George', 'http://www.weatheroffice.gc.ca/city/pages/bc-79_metric_e.html')
c2 = city('Kamloops', 'http://www.weatheroffice.gc.ca/city/pages/bc-45_metric_e.html')
c3 = city('Blue River', 'http://www.weatheroffice.gc.ca/city/pages/bc-22_metric_e.html')
c4 = city('High Level', 'http://www.weatheroffice.gc.ca/city/pages/ab-24_metric_e.html')
c5 = city('Peace River', 'http://www.weatheroffice.gc.ca/city/pages/ab-25_metric_e.html')
c6 = city('Jasper', 'http://www.weatheroffice.gc.ca/city/pages/ab-70_metric_e.html')
c7 = city('Edmonton', 'http://www.weatheroffice.gc.ca/city/pages/ab-50_metric_e.html')
c8 = city('Calgary', 'http://www.weatheroffice.gc.ca/city/pages/ab-52_metric_e.html')
#SASKATCHEWAN CITIES
c9 = city('Biggar', 'http://www.weatheroffice.gc.ca/city/pages/sk-2_metric_e.html')
c10 = city('Saskatoon', 'http://www.weatheroffice.gc.ca/city/pages/sk-40_metric_e.html')
c11 = city('Melville', 'http://www.weatheroffice.gc.ca/city/pages/sk-8_metric_e.html')
c12 = city('Canora', 'http://www.weatheroffice.gc.ca/city/pages/sk-3_metric_e.html')
c13 = city('Yorkton', 'http://www.weatheroffice.gc.ca/city/pages/sk-33_metric_e.html')
#MANITOBA CITIES
c14 = city('Winnipeg', 'http://www.weatheroffice.gc.ca/city/pages/mb-38_metric_e.html')
c15 = city('Sprague', 'http://www.weatheroffice.gc.ca/city/pages/mb-23_metric_e.html')
#ONTARIO CITIES
c16 = city('Thunder Bay', 'http://www.weatheroffice.gc.ca/city/pages/on-100_metric_e.html')
c17 = city('Sioux Lookout', 'http://www.weatheroffice.gc.ca/city/pages/on-135_metric_e.html')
c18 = city('Armstrong', 'http://www.weatheroffice.gc.ca/city/pages/on-111_metric_e.html')
c19 = city('Hornepayne', 'http://www.weatheroffice.gc.ca/city/pages/on-78_metric_e.html')
c20 = city('Sudbury', 'http://www.weatheroffice.gc.ca/city/pages/on-40_metric_e.html')
c21 = city('South Parry', 'http://www.weatheroffice.gc.ca/city/pages/on-103_metric_e.html')
c22 = city('Toronto', 'http://www.weatheroffice.gc.ca/city/pages/on-143_metric_e.html')
c23 = city('Kingston', 'http://www.weatheroffice.gc.ca/city/pages/on-69_metric_e.html')
c24 = city('Cornwall', 'http://www.weatheroffice.gc.ca/city/pages/on-152_metric_e.html')
#QUEBEC CITIES
c25 = city('Montreal', 'http://www.weatheroffice.gc.ca/city/pages/qc-147_metric_e.html')
c26 = city('Quebec', 'http://www.weatheroffice.gc.ca/city/pages/qc-133_metric_e.html')
c27 = city('La Tuque', 'http://www.weatheroffice.gc.ca/city/pages/qc-154_metric_e.html')
c28 = city('Saguenay', 'http://www.weatheroffice.gc.ca/city/pages/qc-166_metric_e.html')
c29 = city('Riviere-du-loup', 'http://www.weatheroffice.gc.ca/city/pages/qc-108_metric_e.html')
#NOVA SCOTIA CITIES
c30 = city('Truro', 'http://www.weatheroffice.gc.ca/city/pages/ns-25_metric_e.html')
c31 = city('Halifax', 'http://www.weatheroffice.gc.ca/city/pages/ns-19_metric_e.html')
#NEW BRUNSWICK CITIES
c32 = city('Edmundston', 'http://www.weatheroffice.gc.ca/city/pages/nb-32_metric_e.html')
c33 = city('Moncton', 'http://www.weatheroffice.gc.ca/city/pages/nb-36_metric_e.html')
c34 = city('Sarnia', 'http://www.weatheroffice.gc.ca/city/pages/on-147_metric_e.html')
cities = []
cities.append(c1)
cities.append(c2)
cities.append(c3)
cities.append(c4)
cities.append(c5)
cities.append(c6)
cities.append(c7)
cities.append(c8)
cities.append(c9)
cities.append(c10)
cities.append(c11)
cities.append(c12)
cities.append(c13)
cities.append(c14)
cities.append(c15)
cities.append(c16)
cities.append(c17)
cities.append(c18)
cities.append(c19)
cities.append(c20)
cities.append(c21)
cities.append(c22)
cities.append(c23)
cities.append(c24)
cities.append(c25)
cities.append(c26)
cities.append(c27)
cities.append(c28)
cities.append(c29)
cities.append(c30)
cities.append(c31)
cities.append(c32)
cities.append(c33)
cities.append(c34)
return (cities)
Any ideas? Thanks!
Answer: The error is on the expression `c.high0`.
If you look at what `c` is, it's one of the elements of a
[`zip`](http://docs.python.org/3/library/functions.html#zip):
for c in zip(cities) :
And that means it's a tuple. That's what `zip` does: it takes one or more
iterables and turns them into an iterable of tuples, where each tuple has one
member from each input. Called with a single list, it just wraps every element
in a 1-tuple.
If you can explain what you expected each `c` to be, or why you're calling
`zip`, we could probably explain how to do it properly.
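For example, if the goal was simply to loop over the cities, a minimal sketch of the fix (keeping the rest of the script as-is) would be to drop `zip` entirely:

    row = 1
    for c in cities:  # c is now a city object, not a 1-tuple
        sheet.write(row, high0_col, c.high0)
        # ... the remaining sheet.write calls ...
        row += 1  # note: the original never advanced the row

(Alternatively, `for (c,) in zip(cities):` would unpack the 1-tuples, but plain iteration is clearer.)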
|
MySQLdb on MAC OS 10.6.8
Question:
>>> import MySQLdb
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/MySQL_python-1.2.4b4-py2.7-macosx-10.6-intel.egg/MySQLdb/__init__.py", line 19, in <module>
import _mysql
ImportError: dlopen(/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/MySQL_python-1.2.4b4-py2.7-macosx-10.6-intel.egg/_mysql.so, 2): no suitable image found. Did find:
/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/MySQL_python-1.2.4b4-py2.7-macosx-10.6-intel.egg/_mysql.so: mach-o, but wrong architecture
I'm having a lot of trouble with the MySQLdb installation on Mac OS 10.6.8. I have
read many guides on how to solve this, without success.
Answer: What's the output of:
`# which python`
`# which pip`
If pip is installed, try (the PyPI package that provides MySQLdb is named `MySQL-python`):
# pip install MySQL-python
|
How to share Ipython notebook kernels?
Question: I have some very large IPython (1.0) notebooks, which I find very unhandy to
work with. I want to split the large notebook into several smaller ones, each
covering a specific part of my analysis. However, the notebooks need to share
data and (unpickleable) objects. Now, I want these notebooks to connect to the
same kernel. How do I do this? How can I change the kernel to which a notebook
is connected? (And any ideas how to automate this step?)
I don't want to use the parallel computing mechanism (which would be a trivial
solution), because it would add much code overhead in my case.
Answer: When I have a long notebook, I create functions from my code and move them into
Python modules, which I then import in the notebook. That way I can have huge
chunks of code hidden in the background, and my notebook stays smaller and handier
to manipulate.
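For instance, a minimal sketch (the module and function names here are made up):

    # analysis_helpers.py -- the heavy code lives here
    def summarize(values):
        """Return (mean, min, max) for a sequence of numbers."""
        return sum(values) / float(len(values)), min(values), max(values)

Then a single line in the notebook, `from analysis_helpers import summarize`, brings it in.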
|
Moving Around A Python Canvas Without Using Scrollbars
Question: **Background:**
I have a program using Tkinter as the basis of the GUI. The program has a
canvas which is populated with a large number of objects. Currently, in order
to move all objects on the screen, I am simply binding a movement function to
the tag 'all' which of course moves all objects on the screen. However, it is
vital for me to keep track of _all_ canvas object positions- i.e. after every
move I log the new position, which seems unnecessarily complicated.
**Question:**
What is the best way to effectively scroll/drag around the whole canvas
(several times the size of the screen) using only the mouse (not using
scrollbars)?
**My Attempts:**
I have implemented scrollbars and found several guides to setting up
scrollbars, but none that deal with this particular requirement.
**Example of disused scrollbar method:**
from Tkinter import *
class Canvas_On:
def __init__(self, master):
self.master=master
self.master.title( "Example")
self.c=Canvas(self.master, width=find_res_width-20, height=find_res_height, bg='black', scrollregion=(0,0,5000,5000))
self.c.grid(row=0, rowspan=25, column=0)
self.c.tag_bind('bg', '<Control-Button-1>', self.click)
self.c.tag_bind('bg', '<Control-B1-Motion>', self.drag)
self.c.tag_bind('dot', '<Button-1>', self.click_item)
self.c.tag_bind('dot', '<B1-Motion>', self.drag_item)
draw=Drawing_Utility(self.c)
draw.drawer(self.c)
def click(self, event):
self.c.scan_mark(event.x, event.y)
def drag(self, event):
self.c.scan_dragto(event.x, event.y)
def click_item(self, event):
self.c.itemconfigure('dot 1 text', text=(event.x, event.y))
self.drag_item = self.c.find_closest(event.x, event.y)
self.drag_x, self.drag_y = event.x, event.y
self.c.tag_raise('dot')
self.c.tag_raise('dot 1 text')
def drag_item(self, event):
self.c.move(self.drag_item, event.x-self.drag_x, event.y-self.drag_y)
self.drag_x, self.drag_y = event.x, event.y
class Drawing_Utility:
def __init__(self, canvas):
self.canvas=canvas
self.canvas.focus_set()
def drawer(self, canvas):
self.canvas.create_rectangle(0, 0, 5000, 5000,
fill='black', tags='bg')
self.canvas.create_text(450,450, text='', fill='black', activefill='red', tags=('draggable', 'dot', 'dot 1 text'))
self.canvas.create_oval(400,400,500,500, fill='orange', activefill='red', tags=('draggable', 'dot', 'dot 2'))
self.canvas.tag_raise(("dot"))
root=Tk()
find_res_width=root.winfo_screenwidth()
find_res_height=root.winfo_screenheight()
run_it=Canvas_On(root)
root.mainloop()
**My Particular Issue**
My program generates all canvas object coordinates and then draws them. The
objects are arranged in various patterns, but critically they must 'know'
where each other is. When moving around the canvas using the method @abarnert
kindly supplied, and a similar method I wrote that moved all canvas objects,
the issue arises that each object 'thinks' it is at the canvas coordinates
generated before the objects were drawn. For example if I drag the canvas 50
pixels to the left and clicked on an object in my program, it jumps 50 pixels
back to the right to its original position. My solution to this was to write
some code that, upon release of the mouse button, logged the last position and
updated the coordinate data of each object. However, I'm looking for a way to
remove this last step- I was hoping there was a way to move the canvas such
that the object positions were absolute, and assumed a function similar to a
'scroll' function would do this. I realise I've rambled here, but I've added a
couple of lines to the example above which highlights my issue- by moving the
canvas you can see that the coordinates change. Thank you again.
Answer: I'll give you the code for the simplest version first, then explain it so you
can expand it as needed.
class Canvas_On:
def __init__(self, master):
# ... your original code here ...
self.c.bind('<Button-1>', self.click)
self.c.bind('<B1-Motion>', self.drag)
def click(self, event):
self.c.scan_mark(event.x, event.y)
def drag(self, event):
self.c.scan_dragto(event.x, event.y)
First, the easy part: scrolling the canvas manually. As the
[documentation](http://effbot.org/tkinterbook/canvas.htm#Tkinter.Canvas.xview-method) explains, you use the `xview` and `yview` methods, exactly as your
scrollbar `command`s do. Or you can just directly call `xview_moveto` and
`yview_moveto` (or the `foo_scroll` methods, but they don't seem to be what
you want here). You can see that I didn't actually use these; I'll explain
below.
Next, to capture click-and-drag events on the canvas, you just bind
`<B1-Motion>`, as you would for a normal drag-and-drop.
The tricky bit here is that the drag event gives you screen pixel coordinates,
while the `xview_moveto` and `yview_moveto` methods take a fraction from 0.0
for the top/left to 1.0 for the bottom/right. So, you'll need to capture the
coordinates of the original click (by binding `<Button-1>`; with that, the
coordinates of the drag event, and the canvas's bbox, you can calculate the
`moveto` fractions. If you're using the `scale` method and want to drag
appropriately while zoomed in/out, you'll need to account for that as well.
But unless you want to do something unusual, the `scan` helper methods do
exactly that calculation for you, so it's simpler to just call them.
* * *
Note that this will also capture click-and-drag events on the items on the
canvas, not just the background. That's probably what you want, unless you
were planning to make the items draggable within the canvas. In the latter
case, add a background rectangle item (either transparent, or with whatever
background you intended for the canvas itself) below all of your other items,
and `tag_bind` that instead of `bind`ing the canvas itself. (IIRC, with older
versions of Tk, you'll have to create a tag for the background item and
`tag_bind` that… but if so, you presumably already had to do that to bind all
your other items, so it's the same here. Anyway, I'll do that even though it
shouldn't be necessary, because tags are a handy way to create groups of items
that can all be bound together.)
So:
class Canvas_On:
def __init__(self, master):
# ... your original code here ...
self.c.tag_bind('bg', '<Button-1>', self.click)
self.c.tag_bind('bg', '<B1-Motion>', self.drag)
self.c.tag_bind('draggable', '<Button-1>', self.click_item)
self.c.tag_bind('draggable', '<B1-Motion>', self.drag_item)
# ... etc. ...
def click_item(self, event):
x, y = self.c.canvasx(event.x), self.c.canvasy(event.y)
self.drag_item = self.c.find_closest(x, y)
self.drag_x, self.drag_y = x, y
self.c.tag_raise(self.drag_item)
def drag_item(self, event):
x, y = self.c.canvasx(event.x), self.c.canvasy(event.y)
self.c.move(self.drag_item, x-self.drag_x, y-self.drag_y)
self.drag_x, self.drag_y = x, y
class Drawing_Utility:
# ...
def drawer(self, canvas):
self.c.create_rectangle(0, 0, 5000, 5000,
fill='black', tags='bg')
self.c.create_oval(50,50,150,150, fill='orange', tags='draggable')
self.c.create_oval(1000,1000,1100,1100, fill='orange', tags='draggable')
Now you can drag the whole canvas around by its background, but dragging other
items (the ones marked as 'draggable') will do whatever else you want instead.
* * *
If I understand your comments correctly, your remaining problem is that you're
trying to use window coordinates when you want canvas coordinates. The section
Coordinate Systems in the docs explains the distinction.
So, let's say you've got an item that you placed at 500, 500, and the origin
is at 0, 0. Now, you scroll the canvas to 500, 0. The window coordinates of
the item are now 0, 500, but its canvas coordinates are still 500, 500. As the
docs say:
> To convert from window coordinates to canvas coordinates, use the `canvasx`
> and `canvasy` methods
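As a rough illustration with the numbers above (a sketch, inside one of the handlers):

    x = self.c.canvasx(0)    # -> 500.0, the item's canvas x
    y = self.c.canvasy(500)  # -> 500.0, the item's canvas y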
|
Iterate over two nested 2D lists where list2 has list1's row numbers
Question: I'm new to Python. So I want to get this done with loops without using some
fancy stuff like generators. I have two 2D arrays, one integer array and the
other string array like this:
1. Integer 2D list:
Here, dataset2d[0][0] is the number of rows in the table and dataset2d[0][1] is the number
of columns. So the 2D list below has 6 rows and 4 columns:
dataset2d = [
[6, 4],
[0, 0, 0, 1],
[1, 0, 2, 0],
[2, 2, 0, 1],
[1, 1, 1, 0],
[0, 0, 1, 1],
[1, 0, 2, 1]
]
2. String 2D list:
partition2d = [
['A', '1', '2', '4'],
['B', '3', '5'],
['C', '6']
]
`partition2d[*][0]`, i.e. the first column, is a label. For group A, 1, 2 and 4 are the
row numbers that I need to pick up from dataset2d and apply a formula. So it
means I will read 1, go to row 1 in `dataset2d` and read the first column
value i.e `dataset2d[1][0]`, then I will read 2 from `partition2d`, go to row
2 of dataset 2d and read the first column i.e `dataset2d[2][0]`. Similarly
next one I'll read `dataset2d[4][0]`.
Then I will do some calculations, get a value and store it in a 2D list, then
go to the next column in dataset2d for those rows. So in this example, next
column values read would be `dataset2d[1][1]`, `dataset2d[2][1]`,
`dataset2d[4][1]`. And again do some calculation and get one value for that
column, store it. I'll do this until I reach the last column of `dataset2d`.
The next row in `partition2d` is `[B, 3, 5]`. So I'll start with
`dataset2d[3][0]`, `dataset2d[5][0]`, and get a value for that column by a
formula. Then I read `dataset2d[3][1]`, `dataset2d[5][1]`, etc. until I reach the
last column. I do this until all rows in partition2d are read.
What I tried:
for partitionRow in partition2d:
for partitionCol in partitionRow:
for colDataset in dataset2d:
print dataset2d[partitionCol][colDataset]
What problem I'm facing:
1. partition2d is a string array where I need to skip the first column which has characters like A,B,C.
2. I want to iterate in dataset2d column wise only over the row numbers given in partition2d. So the colDataset should increment only after I'm done with that column.
Update1:
I'm reading the contents from a text file, and the data in 2D lists can vary,
depending on file content and size, but the structure of file1 i.e dataset2d
and file2 i.e partition2d will be the same.
Update2: Since Eric asked how the output should look:
0.842322 0.94322 0.34232 0.900009 (For A)
0.642322 0.44322 0.24232 0.800009 (For B)
This is just an example and the numbers are randomly typed by me. So the first
number 0.842322 is the result of applying the formula to column 0 of dataset2d
i.e dataset2d[parttionCol][0] for group A having considered rows 1,2,4.
The second number, 0.94322 is the result of applying formula to column 1 of
dataset2d i.e dataset2d[partitionCol][1] for group A having considered rows
1,2 4.
The third number, 0.34232 is the result of applying formula to column 2 of
dataset2d i.e dataset2d[partitionCol][2] for group A having considered rows
1,2 4. Similarly we get 0.900009.
The first number in second row, i.e 0.642322 is the result of applying the
formula to column 0 of dataset2d i.e dataset2d[parttionCol][0] for group B
having considered rows 3,5. And so on.
Answer: You can use [Numpy](http://www.numpy.org) (I hope this is not too fancy for you):
import numpy
dataset2D = [ [6, 4], [0, 0, 0, 1], [1, 0, 2, 0], [2, 2, 0, 1], [1, 1, 1, 0], [0, 0, 1, 1], [1, 0, 2, 1] ]
dataset2D_size = dataset2D[0]
dataset2D = numpy.array(dataset2D)
partition2D = [ ['A', '1', '2', '4'], ['B', '3', '5'], ['C', '6'] ]
for partition in partition2D:
label = partition[0]
row_indices = [int(i) for i in partition[1:]]
# Take the specified rows
rows = dataset2D[row_indices]
# Iterate the columns (this is the power of Python!)
for column in zip(*rows):
# Now, column will contain one column of data from specified row indices
print column, # Apply your formula here
print
or **if you don't want to install Numpy**, here is what you can do (this is
what you want, actually):
dataset2D = [ [6, 4], [0, 0, 0, 1], [1, 0, 2, 0], [2, 2, 0, 1], [1, 1, 1, 0], [0, 0, 1, 1], [1, 0, 2, 1] ]
partition2D = [ ['A', '1', '2', '4'], ['B', '3', '5'], ['C', '6'] ]
dataset2D_size = dataset2D[0]
for partition in partition2D:
label = partition[0]
row_indices = [int(i) for i in partition[1:]]
rows = [dataset2D[row_idx] for row_idx in row_indices]
for column in zip(*rows):
print column,
print
both will print:
(0, 1, 1) (0, 0, 1) (0, 2, 1) (1, 0, 0)
(2, 0) (2, 0) (0, 1) (1, 1)
(1,) (0,) (2,) (1,)
**Explanation of the second code (without Numpy)**:
[dataset2D[row_idx] for row_idx in row_indices]
This basically takes each row (`dataset2D[row_idx]`) and collates them
together into a list. So the result of this expression is a list of lists (built
from the specified row indices).
for column in zip(*rows):
Then `zip(*rows)` will **iterate column-wise** (the one you want). This works
by taking the first element of each row, then combine them together to form a
[tuple](http://docs.python.org/release/1.5.1p1/tut/tuples.html). In each
iteration, the result is stored in variable `column`.
Then inside the `for column in zip(*rows):` loop you already have your intended
column-wise iterated elements from the specified rows!
To apply your formula, just change the `print column,` into whatever you
want to do. For example, I modified the code to include the row and column numbers:
print 'Processing partition %s' % label
for (col_num, column) in enumerate(zip(*rows)):
print 'Column number: %d' % col_num
for (row_num, element) in enumerate(column):
print '[%d,%d]: %d' % (row_indices[row_num], col_num, element)
which will result in:
Processing partition A
Column number: 0
[1,0]: 0
[2,0]: 1
[4,0]: 1
Column number: 1
[1,1]: 0
[2,1]: 0
[4,1]: 1
Column number: 2
[1,2]: 0
[2,2]: 2
[4,2]: 1
Column number: 3
[1,3]: 1
[2,3]: 0
[4,3]: 0
Processing partition B
Column number: 0
[3,0]: 2
[5,0]: 0
Column number: 1
[3,1]: 2
[5,1]: 0
Column number: 2
[3,2]: 0
[5,2]: 1
Column number: 3
[3,3]: 1
[5,3]: 1
Processing partition C
Column number: 0
[6,0]: 1
Column number: 1
[6,1]: 0
Column number: 2
[6,2]: 2
Column number: 3
[6,3]: 1
I hope this helps.
|
NetLocalGroupGetMembers returns different members number according to LOCALGROUP_MEMBERS_INFO value
Question: I'd like to retrieve the number of users belonging to some Windows UserGroup.
From the documentation of the Python API:
win32net.NetLocalGroupGetMembers(server, group, *level*)
I understand that according to the _level_ param, I'll get differently
detailed data, corresponding to Windows LOCALGROUP_MEMBERS_INFO_0,
LOCALGROUP_MEMBERS_INFO_1, LOCALGROUP_MEMBERS_INFO_2 or
LOCALGROUP_MEMBERS_INFO_3 structures.
Thus, if 93 users belong to the specified userGroup, I expect to **always**
get 93 objects/structures of one of those types.
But my results are quite different. Here's what I get:
>>> import win32net
>>> import win32api
>>> server = "\\\\" + win32api.GetComputerName()
>>> users = []
>>> group = u"MyGroup"
>>> (users, total, res) = win32net.NetLocalGroupGetMembers(server, group, 0)
>>> len(users)
93
>>> (users, total, res) = win32net.NetLocalGroupGetMembers(server, group, 1)
>>> len(users)
56
>>> (users, total, res) = win32net.NetLocalGroupGetMembers(server, group, 2)
>>> len(users)
39
>>> (users, total, res) = win32net.NetLocalGroupGetMembers(server, group, 3)
>>> len(users)
68
I expect to get 93 users, and then I want the 93 usernames. The username is
accessible when specifying level=1, but with that param only 56 are returned.
Any clue? Thanks.
Answer: The call returns different numbers of results due to the size of the data for
the requested level. You can use the returned resume handle to continue
fetching the rest, or increase the buffer size to get all results in one call.
Here's the full parameter list from the pywin32 help file:
NetLocalGroupGetMembers(server, groupName , level , resumeHandle , prefLen )
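A minimal sketch of draining the full member list with the resume handle (level 1, so the `name` field is available; the return value is the (data, total, resumeHandle) triple from pywin32):

    import win32api, win32net

    server = "\\\\" + win32api.GetComputerName()
    group = u"MyGroup"
    names = []
    resume = 0
    while True:
        users, total, resume = win32net.NetLocalGroupGetMembers(server, group, 1, resume)
        names.extend(u['name'] for u in users)
        if not resume:  # 0 means nothing left to fetch
            break
    print len(names)  # should now be the full 93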
|
wxPython: program runs but not displaying
Question: Before I made some changes to the following program, everything went fine:
## Program before modification:
#! /usr/bin/env python
""" A bare-minimum wxPython program """
import wx
class MyApp(wx.App):
def OnInit(self):
return True
class MyFrame(wx.Frame):
def __init__(self, parent, title):
wx.Frame.__init__(self, parent, title=title)
if __name__ == '__main__':
app = wx.App()
frame = MyFrame(None, "Sample")
frame.Show(True)
app.MainLoop()
But after I put `frame` into the definition of `OnInit`, the program runs
without a syntax error, but nothing is displayed. :(
## Program after modification:
#! /usr/bin/env python
""" A bare-minimum wxPython program """
import wx
class MyApp(wx.App):
def OnInit(self):
self.frame = MyFrame(None, "Sample") ## add two lines here
self.frame.Show(True)
return True
class MyFrame(wx.Frame):
def __init__(self, parent, title):
wx.Frame.__init__(self, parent, title=title)
if __name__ == '__main__':
app = wx.App()
app.MainLoop()
I tried to use the debugger and step through the program. It seems that
`self.frame` is never defined (it doesn't even appear from beginning to end).
What am I doing wrong with the program? I'm very new to Python and wxPython,
please help. Thx.
## EDIT:
app = MyApp()
stdout/stderr:
`NameError: global name 'Show' is not defined`
Answer: You should create a `MyApp` object (not a plain `wx.App`). The `wx.App` constructor is what calls `OnInit`, so instantiating the base class means your overridden `OnInit` never runs and the frame is never created:
#! /usr/bin/env python
""" A bare-minimum wxPython program """
import wx
class MyApp(wx.App):
def OnInit(self):
self.frame = MyFrame(None, "Sample") ## add two lines here
self.frame.Show(True)
return True
class MyFrame(wx.Frame):
def __init__(self, parent, title):
wx.Frame.__init__(self, parent, title=title)
if __name__ == '__main__':
app = MyApp() # <---
app.MainLoop()
|
Python Tkinter indeterminate progress bar not running
Question: I'm currently creating a Tkinter GUI for Python 2.7 and having trouble getting
the progress bar to work. I need to load largish files into my program, which takes
some time, so I wanted a progress bar to show the user that the program
isn't frozen while loading the files. Unfortunately my progress bar does not seem to
update while loading files :( I've tried creating a new thread for the
progress bar with no luck. So I'm wondering: what do I need to do to get an
indeterminate progress bar to run during a heavy function call?
import Tkinter as tk
import ttk as ttk
import pandas as pd
import tkFileDialog as tfgiag
self.pb = ttk.Progressbar(frame, orient=tk.VERTICAL, mode='indeterminate')
mynewdata = tfgiag.askopenfilenames(parent=root,title='Choose a file',filetypes=[('CSV files', '.csv')])
self.t = threading.Thread(target = self.pb.start)
self.t.start()
#read in each CSV file selected by the user
for myfile in root.tk.splitlist(mynewdata):
foo = pd.read_csv(myfile)
self.data.appendMainData(foo)
self.pb.stop()
Answer: Python "threads" are all still sort of locked together sequentially by what's
called the GIL, global interpreter lock. It basically means that threads
spawned from the same python process won't run in parallel like you want them
to. Instead, they all fight for time on the main python process.
In your case, if there's an intensive process you're trying to monitor with
once process, its probably hogging the GIL, and not releasing it to the
thread.
One option: Try using a readline method, so it splits up the file input work
enough to insert a progress bar update line.
openfile = open(filename, 'r')
for eachline in openfile:  # iterate lazily; readlines() would slurp the whole file at once
    append_line(eachline)
    update_progressBar()
Another option that may be easier is to offload the csv opening to another
process using python's
[multiprocessing](http://docs.python.org/2/library/multiprocessing.html)
module. This emulates the threads you're probably more used to. I'd kick off a
new process that reads in the csv, and appends the lines to a queue. When it's
done, put a sentinel value on the queue signalling it's done, so the main
process knows when to stop updating the progress bar and join the spawned
process. Something like:
import Tkinter as tk
import ttk as ttk
import pandas as pd
import tkFileDialog as tfgiag
from multiprocessing import Process, Queue
self.pb = ttk.Progressbar(frame, orient=tk.VERTICAL, mode='indeterminate')
mynewdata = tfgiag.askopenfilenames(parent=root,title='Choose a file',filetypes=[('CSV files', '.csv')])
csvqueue = Queue(1) #A mp-enabled queue with one slot for results.
#read in each CSV file selected by the user
offloadedProcess = Process(target=csvreadOffload, args=(mynewdata, csvqueue))
offloadedProcess.start()
procNotDone = True
while procNotDone:
    result = getNewResultFromQueue(csvqueue) #pseudo code
    update_ProgressBar() #<--- this should get hit more often now
    if result.isLastValue:
        procNotDone = False
        offloadedProcess.join() #Join the process, since it's now done
    else:
        csvresults.append(result)

def csvreadOffload(filenames, outputqueue):
    for myfile in root.tk.splitlist(filenames):
        foo = pd.read_csv(myfile)
        if foo is not END: #pseudo code here
            outputqueue.put(foo)
        else:
            outputqueue.put('Empty results') #pseudo code
|
ssh using sshpass in python seems to not work
Question: I have a Python script which is supposed to SSH into a client and execute a
bash script on the client. As a test scenario I am using just one machine to connect
to, but the objective is to connect to several clients and execute bash scripts
on those machines.
My Python code:
import os
import subprocess
import time
def ssh_login_execute():
if device['PWD'] != "":
run=('sshpass -p %s ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -t -p %s %s@%s' % (device['PWD'], device['PORT'], device['USER'], device['IP']))
else:
run=('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -t -p %s %s@%s' % (device['PORT'], device['USER'], device['IP']))
cmd = ('cd %s' % (script_path))
run2=run.split()
run2.append(cmd)
t=subprocess.Popen(run2, stdout=subprocess.PIPE, stderr=open(os.devnull, 'w'))
print "I am in 192.168.1.97"
execute_tg()
return t
def execute_tg():
path = "/home/"
os.chdir(path)
print os.getcwd()
cmd=("sh my_script.sh")
t=subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if __name__ == "__main__":
device = {}
device['PORT']=22
device['PWD']= "abcd"
device['USER']= "root"
device['IP']= "192.168.1.97"
script_path= "/home/"
ssh_login_execute()
On running the code "python script.py", I see output as:
I am in 192.168.1.97
/home/
Output is sh: 0: Can't open my_script.sh
Although the "my_script.sh" is in /home directory in 192.168.1.97. How do I
get rid of this issue and at the same time make it scalable to ssh to multiple
clients and execute bash.
Answer: Your script `my_script.sh` is probably not in `/home/` on the machine where
the Python script runs; note that `execute_tg()` launches `sh` with a local
`subprocess.Popen`, not over the SSH connection, so the copy on 192.168.1.97 is
never used.
path = "/home/"
os.chdir(path)
print os.getcwd()
cmd = ("sh my_script.sh")
It should also print the current directory with `print os.getcwd()`, which tells
you where it is actually looking.
You should change those values based on the real location of your script.
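A rough sketch of running the script remotely instead (reusing the `run` string built in `ssh_login_execute`, mirroring the existing `run2` approach):

    # pass the command to ssh so it executes on 192.168.1.97, not locally
    remote_cmd = 'cd %s && sh my_script.sh' % script_path
    t = subprocess.Popen(run.split() + [remote_cmd],
                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = t.communicate()
    print out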
|
Python simplejson quoting for R rjson input
Question: I'm processing data within Python, and would like to stream records to R using
JSON formatting and `simplejson` on the Python side, and `rjson` on the `R`
side.
How can I output records out of Python so that R's `fromJSON` can process them
into a one-line dataframe? Thanks
try:
import simplejson as json
except ImportError:
import json
record = {'x':1,'y':1}
print json.dumps( record )
Result:
{"y": 1, "x": 1}
However, I'd need the result to be `"{\"x\":1,\"y\":2}"`, as `R` needs that
formatting to use the data:
library(rjson)
as.data.frame( fromJSON( "{\"x\":1,\"y\":2}" ) )
x y
1 1 2
Thanks.
Answer: Two options:
> (1) If your JSON does **not** contain both single & double quotes, wrap the
> entire JSON in the quote type not being used.
>
> (2) If you need to escape the quotes (ie, because both are in your JSON),
> then you need to escape the escape character. That is, use double slashes:
> `\\"y\\"`
(note that the second point applies to any string in R that needs an
escape character)
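From the Python side, option (1) can be a one-liner: wrap the dumped JSON in single quotes (a sketch; `sort_keys` just makes the key order predictable):

    import json
    record = {'x': 1, 'y': 2}
    print "'%s'" % json.dumps(record, separators=(',', ':'), sort_keys=True)
    # prints: '{"x":1,"y":2}' -- paste-able straight into R's fromJSON()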
|
Python set Parallel Port data pins high/low
Question: I am wondering how to set the data pins on a parallel port high and low. I
believe I could use PyParallel for this, but I am unsure how to set a specific
pin.
Thanks!
Answer: You're talking about a software-hardware interface here. They are usually set
low and high by assigning a 1-byte value to a register. A [parallel
port](http://en.wikipedia.org/wiki/Parallel_port) has 8 pins for data to
travel across. In a low-level language like C or C++, there would be a register,
let's call it 'A', somewhere holding 8 bits corresponding to the 8 pins of
data. So for example:
Assuming register A is set up like pins: [7,6,5,4,3,2,1,0]
C-like pseudocode
A=0x00 // all pins are set low
A=0xFF // all pins are high
A=0xF0 // Pins 0:3 are low, Pins 4:7 are high
This idea follows through with
[PyParallel](http://en.wikipedia.org/wiki/Parallel_port)
import parallel
p = parallel.Parallel() # open LPT1
p.setData(0x55) #<--- this is your bread and butter here
p.setData is the function you're interested in. 0x55 converted to binary is
0b01010101
-or-
[L H L H L H L H]
So now you can set the data to a certain byte, but how would I send a bunch of
data... let's say 3 bytes: 0x00, 0x01, 0x02? Well, you need to watch the ack line
for when the receiving machine has confirmed receipt of whatever was just
sent.
A naive implementation:
data = [0x00, 0x01, 0x02]
while data:
    onebyte = data.pop(0)  # take the bytes in the order given
    p.setDataStrobe('low')  # signal that we're sending data
    p.setData(onebyte)
    while p.getInAcknowledge() == 'high':  # wait for this line to go 'low'
        pass                               # to indicate an ACK
    p.setDataStrobe('high')  # Ok, we're done sending that byte.
Ok, that doesn't directly answer your question. Let's say I ONLY want to set
pin 5 high or low. Maybe I have an LED on that pin. Then you just need a bit
of binary operations.
portState = 0b01100000 #Somehow the parallel port has this currently set
newportState = portState | 0b00010000 #<-- this is called a bitmask
print bin(newportState)
>>> 0b1110000
Now let's clear that bit...
newportState = 0b01110000
clearedPin5 = newportState & 0b11101111
print bin(clearedPin5)
>>> 0b1100000
Notice the bit for that pin flipping between the two printouts.
If these binary operations are foreign, I recommend this excellent
[tutorial](http://www.avrfreaks.net/index.php?name=PNphpBB2&file=viewtopic&p=40348&highlight=programming%20101#40348)
over on avrfreaks. I would become intimate with them before progressing
further. Embedded software concepts like these are full of bitmasks and
bitshifting.
|
Necessary data structure for making heatmaps in Python
Question: **EDIT** Just realized the way I was parsing in the data was deleting numbers,
so I didn't have an array of the correct shape. Thanks mgilson, you provided
fantastic answers!
I'm trying to make a heatmap of data using python. I have found this basic
code that works:
import matplotlib.pyplot as plt
import numpy as np
data = np.random.rand(3,3)
fig, ax = plt.subplots()
heatmap = ax.pcolor(data, cmap=plt.cm.Blues)
plt.show()
f.close()
However, when I try to put in my data, which is currently formatted as a list
of lists (data=[[1,2,3],[1,2,3],[1,2,3]]), it gives me the error:
AttributeError: 'list' object has no attribute 'shape'.
What is the data structure that np.random.rand() produces/ python uses for
heatmaps? How do I convert my list of lists into that data structure? Thanks
so much!
This is what my data looks like, if that helps:
[[0.174365079365079, 0.147356200527704, 0.172903394255875, 0.149252948885976, 0.132479381443299, 0.279736780258519, 0.134908163265306, 0.127802340702211, 0.131209302325581, 0.100632627646326, 0.127636363636364, 0.146028409090909],
[0.161473684210526, 0.163691529709229, 0.166841698841699, 0.144, 0.13104, 0.146225563909774, 0.131002409638554, 0.125977358490566, 0.107940372670807, 0.100862068965517, 0.13436641221374, 0.130921518987342],
[0.15640362225097, 0.152472361809045, 0.101713567839196, 0.123847328244275, 0.101428924598269, 0.102045112781955, 0.0999014778325123, 0.11909887359199, 0.186751958224543, 0.216221343873518, 0.353571428571429],
[0.155185378590078, 0.151626168224299, 0.112484210526316, 0.126333764553687, 0.108763358778626],
[0.792675, 0.681526248399488, 0.929269035532995, 0.741649167733675, 0.436010126582278, 0.462519447929736, 0.416332480818414, 0.135318181818182, 0.453331639135959, 0.121893919793014, 0.457028132992327, 0.462558139534884],
[0.779800766283525, 1.02741401273885, 0.893561712846348, 0.710062015503876, 0.425114754098361, 0.388704980842912, 0.415049608355091, 0.228122605363985, 0.128575796178344, 0.113307392996109, 0.404273195876289, 0.414923673997413],
[0.802428754813864, 0.601316326530612, 0.156620689655172, 0.459367588932806, 0.189442875481386, 0.118344827586207, 0.127080939947781, 0.2588, 0.490834196891192, 0.805660574412533, 3.17598959687906],
[0.873314136125655, 0.75143661971831, 0.255721518987342, 0.472793854033291, 0.296584980237154]]
Answer: It's a `numpy.ndarray`. You can construct it easily from your data:
import numpy as np
data = np.array([[1,2,3],[1,2,3],[1,2,3]])
(`np.asarray` would also work -- if given an array, it just returns it;
otherwise it constructs a new one. `np.array`, by contrast, always
constructs a new array.)
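Plugged back into the original snippet, a minimal working version looks like this (all rows must have the same length, otherwise NumPy builds an object array that `pcolor` rejects):

    import matplotlib.pyplot as plt
    import numpy as np

    data = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
    fig, ax = plt.subplots()
    heatmap = ax.pcolor(data, cmap=plt.cm.Blues)
    plt.show()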
|
Parsing args and kwargs in decorators
Question: I've got a function that takes args and kwargs, and I need to do something in
my decorator based on the value of the **2nd** arg in the function, like in
the code below:
def workaround_func():
def decorator(fn):
def case_decorator(*args, **kwargs):
if args[1] == 2:
print('The second argument is a 2!')
return fn(*args, **kwargs)
return case_decorator
return decorator
@workaround_func()
def my_func(arg1, arg2, kwarg1=None):
print('arg1: {} arg2: {}, kwargs: {}'.format(arg1, arg2, kwarg1))
The problem is that python allows users to call the function with the second
argument as a regular argument OR a keyword-argument, so if the user calls
`my_func` with `arg2` as a kwarg, it raises an `IndexError`, see below:
In [8]: d.my_func(1, 2, kwarg1=3)
The second argument is a 2!
arg1: 1 arg2: 2, kwargs: 3
In [9]: d.my_func(1, arg2=2, kwarg1=3)
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-9-87dc89222a9e> in <module>()
----> 1 d.my_func(1, arg2=2, kwarg1=3)
/home/camsparr/decoratorargs.py in case_decorator(*args, **kwargs)
2 def decorator(fn):
3 def case_decorator(*args, **kwargs):
----> 4 if args[1] == 2:
5 print('The second argument is a 2!')
6 return fn(*args, **kwargs)
IndexError: tuple index out of range
Is there a way around this without just doing a `try/except` and catch the
`IndexError`?
Answer: This is the most robust way that I can think of to handle it... The trick is
to inspect the name of the second argument. Then, in the decorator, you check
to see if that name is present in `kwargs`. If yes, then you use that. If no,
then you use `args`.
from inspect import getargspec
def decorate(fn):
argspec = getargspec(fn)
second_argname = argspec[0][1]
def inner(*args, **kwargs):
special_value = (kwargs[second_argname]
if second_argname in kwargs else args[1])
if special_value == 2:
print "foo"
else:
print "no foo for you"
return fn(*args, **kwargs)
return inner
@decorate
def foo(a, b, c=3):
pass
foo(1,2,3)
foo(1,b=2,c=4)
foo(1,3,5)
foo(1,b=6,c=5)
running this results in:
foo
foo
no foo for you
no foo for you
as expected.
|
python xml to string, insert into postgres
Question: Here is my code:
#!/usr/bin/python
import psycopg2
import sys
from lxml import etree
def main():
#Define our connection string
conn_string = ("host=host dbname=lal user=user password=pass")
# get a connection, if a connect cannot be made an exception will be raised here
conn = psycopg2.connect(conn_string)
# conn.cursor will return a cursor object
cursor = conn.cursor()
print "Connected!\n"
# Open file
parser = etree.parse("XML/epg.xml")
for row in parser:
print row
postgres = ('INSERT INTO epg_live (channel_id, program, start, duration) VALUES (%s, %s, %s, %s)', (row, row, row, row))
cursor.execute(parser,postgres)
cursor.commit()
print "Gotovo!"
if __name__ == "__main__":
main()
Can you help me parse the XML file and insert the values into a table in
PostgreSQL? When I run the script I get errors like:
File "./xml.py", line 32, in <module>
main()
File "./xml.py", line 22, in main
parser = etree.parse("XML/epg.xml")
File "lxml.etree.pyx", line 2953, in lxml.etree.parse (src/lxml/lxml.etree.c:56204)
File "parser.pxi", line 1533, in lxml.etree._parseDocument (src/lxml/lxml.etree.c:82287)
File "parser.pxi", line 1562, in lxml.etree._parseDocumentFromURL (src/lxml/lxml.etree.c:82580)
File "parser.pxi", line 1462, in lxml.etree._parseDocFromFile (src/lxml/lxml.etree.c:81619)
File "parser.pxi", line 1002, in lxml.etree._BaseParser._parseDocFromFile (src/lxml/lxml.etree.c:78528)
File "parser.pxi", line 569, in lxml.etree._ParserContext._handleParseResultDoc (src/lxml/lxml.etree.c:74472)
File "parser.pxi", line 650, in lxml.etree._handleParseResult (src/lxml/lxml.etree.c:75363)
File "parser.pxi", line 590, in lxml.etree._raiseParseError (src/lxml/lxml.etree.c:74696)
lxml.etree.XMLSyntaxError: Opening and ending tag mismatch: epg line 2 and item, line 26, column 10
My XML is fine; it looks like:
<item><program> Program 3
</program><start> Start 20130918 15:00:00
</start><duration> Duration 04:30:00
</duration><title> Title Nujna seja Odbora za finance in monetarno politiko
</title></item>
Can you help me with a solution in Python? Thx guys for reading this post.
Answer: You can read xml into parameters and send to PostgreSQL like this:
root = etree.parse("XML/epg.xml")
for i in root.findall("item"):
    p = [i.find(n).text for n in ("program", "start", "duration")]
    # now you have a list with the values of the parameters
    sql = 'INSERT INTO epg_live (program, start, duration) VALUES (%s, %s, %s)'
    cursor.execute(sql, p)
conn.commit()  # commit on the connection, not the cursor
don't know where to get `channel_id` parameter
|
Google Drive SDK not returning headRevisionId for google Docs format
Question: I have been working on a Google Drive sync mechanism, using the Google Drive
Python SDK. The issue I am having is that the SDK does not return a
headRevisionId in the file resource's metadata if the file is a Google
MIME type, i.e. it was created with Google Docs. It's important for me to
store the headRevisionId.
Files uploaded by the user from a local machine do have a headRevisionId in
their metadata; this issue affects only Google Docs files.
How do I get the headRevisionId of such files? Any workaround for this?
Thanks, Akif
Answer: I'm seeing the same behavior, despite messages indicating the issue was
addressed: [Head revision not working as intended for Google Docs formats](http://stackoverflow.com/questions/13410459/head-revision-not-working-as-intended-for-google-docs-formats).
For now the way I retrieve headRevisionId on a Google Doc is to make a
separate call to list revisions (drive.revisions.list) on the fileId.
Details on managing revisions: <https://developers.google.com/drive/manage-revisions>
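A sketch of that separate call with the Python client (`service` is assumed to be an authorized Drive v2 service object, `file_id` the file in question):

    revisions = service.revisions().list(fileId=file_id).execute()
    # revisions come back oldest-first, so the last item is the head revision
    head_revision_id = revisions['items'][-1]['id']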
|
NameError: name 'FileAttachment' is not defined
Question: I have two different Django apps; one of them is `fileUpload`. In fileUpload
I added a generic relation as follows:
from django.db import models
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
class FileAttachment(models.Model):
file = models.FileField(upload_to='fileuploads/%Y-%m-%d/')
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
attachee = generic.GenericForeignKey()
In my other app, named `XYZ`, I added the following reverse generic relation in
`models.py`:
attachment = generic.GenericRelation(FileAttachment)
Now, if I run `manage.py syncdb` or any other manage command, I get the error:
**NameError: FileAttachment**
In INSTALLED_APPS I have the following:
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.markup',
'django.contrib.humanize',
'south',
'taggit',
'taggit_autocomplete_modified',
'compressor',
'bootstrapform',
'fileupload',
'XYZ'
)
Both apps, `XYZ` and `fileupload`, are at the same root level.
I am using Django 1.5 with Python 2.7
Answer: Did you import FileAttachment in the XYZ models?
Try this in **XYZ/models.py** :
`from fileupload.models import FileAttachment`
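So `XYZ/models.py` would look something like this (the `Document` model name is hypothetical):

    from django.contrib.contenttypes import generic
    from django.db import models

    from fileupload.models import FileAttachment

    class Document(models.Model):
        # the reverse generic relation from the question, now with its import
        attachment = generic.GenericRelation(FileAttachment)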
|
why my coroutine blocks whole tornado instance?
Question:
from tornado import web, gen
import tornado, time
class CoroutineFactorialHandler(web.RequestHandler):
@web.asynchronous
@gen.coroutine
def get(self, n, *args, **kwargs):
n = int(n)
def callbacker(iterator, callback):
try:
value = next(iterator)
except StopIteration:
value = StopIteration
callback(value)
def factorial(n):
x = 1
for i in range(1, n+1):
x *= i
yield
yield x
iterator = factorial(n)
t = time.time()
self.set_header("Content-Type", "text/plain")
while True:
response = yield gen.Task(callbacker, iterator)
#log.debug("response: %r" %response)
if response is StopIteration:
break
elif response:
self.write("took : %f sec" %(time.time() - t))
self.write("\n")
self.write("f(%d) = %d" %(n, response))
self.finish()
application = tornado.web.Application([
(r"^/coroutine/factorial/(?P<n>\d+)", CoroutineFactorialHandler),
#http://localhost:8888/coroutine/factorial/<int:n>
])
if __name__ == "__main__":
application.listen(8888)
ioloop = tornado.ioloop.IOLoop.instance()
ioloop.start()
The 21 lines yanked above are a simple factorial calculator; it loops N times, in
generator fashion.
The problem is that while this code is executing, it blocks the whole Tornado instance.
What I want to achieve is writing some helper for Tornado that treats
generators as coroutines, and can therefore serve requests in an asynchronous
manner. (I have read [Using a simple python generator as a co-routine in a Tornado async handler?](http://stackoverflow.com/questions/8812715/using-a-simple-python-generator-as-a-co-routine-in-a-tornado-async-handler?rq=1))
why does the simple increase-and-multiply-by-n loop block the whole tornado?
Edit: I edited the code to include the whole application, so that you can run
and test it. I'm running Tornado 3.1.1 on Python 2.7.
Answer: You have to remember that Tornado runs in one thread. The code is split into
tasks that are called sequentially in the main loop. If one of these tasks takes
long to finish (because of blocking functions like `time.sleep()` or some
heavy computation like a factorial), it will block the entire loop as a result.
`IOLoop.add_callback()`:
from tornado import web, gen
import tornado, time
class CoroutineFactorialHandler(web.RequestHandler):
def factorial(self, limit=1):
count = 1
fact = 1
while count <= limit:
yield fact
count = count + 1
fact = fact * count
def loop(self):
try:
self.fact = self.generator.next()
tornado.ioloop.IOLoop.instance().add_callback(self.loop)
except StopIteration:
self.write("took : %f sec" %(time.time() - self.t))
self.write("\n")
self.write("f(%d) = %d" % (self.n, self.fact))
self.finish()
@web.asynchronous
def get(self, n, *args, **kwargs):
self.n = int(n)
self.generator = self.factorial(self.n)
self.t = time.time()
self.set_header("Content-Type", "text/plain")
tornado.ioloop.IOLoop.instance().add_callback(self.loop)
application = tornado.web.Application([
(r"^/coroutine/factorial/(?P<n>\d+)", CoroutineFactorialHandler),
#http://localhost:8888/coroutine/factorial/<int:n>
])
if __name__ == "__main__":
application.listen(8888)
ioloop = tornado.ioloop.IOLoop.instance()
ioloop.start()
Every multiplication is a separate task here, which allows mixing `factorial`
generator calls from different requests. This is a good approach if every call
to the generator takes the same amount of time. However, if you are computing
100000!, then at some point the tasks in the sequence will look like
90000!*90001, 90001!*90002 and so on. Each of those takes noticeable time to
compute, even though it's only one multiplication instead of the whole loop, so
other requests will still be delayed. For such big input integers you have to do
the computation in another thread to give each request a fair share of processor
time. Here is an example of how to do this:
<http://lbolla.info/blog/2013/01/22/blocking-tornado>
As a side note, the factorial computation has a lot of redundancy, so you should
keep the results for already-computed values of n in memory and return them
instantly, without wasting processor time on the same computation over and over again.
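For example, a small memo table (a sketch) that reuses earlier results:

    factorial_cache = {0: 1}

    def cached_factorial(n):
        # extend the cache up to n, reusing every previously computed value
        for i in range(1, n + 1):
            if i not in factorial_cache:
                factorial_cache[i] = factorial_cache[i - 1] * i
        return factorial_cache[n]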
|
RRD wrong values
Question: I'm playing with RRDtool, but it shows wrong values. I have a little Python
script:
import sys
import rrdtool
import time
i = 0
rrdtool.create(
'tempo.rrd',
'--step', '10',
'DS:temp:GAUGE:20:-40:100',
'RRA:LAST:0.5:1:1500'
)
while 1:
ret = rrdtool.update('tempo.rrd','N:' + `i`);
print "i %i" % i
rrdtool.graph(
'test.png',
'--imgformat', 'PNG',
'--width', '540',
'--height', '200',
'--start', "-%i" % 60,
'--end', "-1",
'--vertical-label', 'Temperatura',
'--title', 'Temperatura lauke',
'--lower-limit', '-1',
'DEF:actualtemp=tempo.rrd:temp:LAST',
'LINE1:actualtemp#ff0000:Actual',
'GPRINT:actualtemp:LAST:Actual %0.1lf C'
)
i += 1
time.sleep(10)
After inserting [0, 1, 2], I get a graph with wrong values -
<http://i.imgur.com/rfWWDMm.png> (sorry, I can't post images). As you can see,
after inserting 0 the graph shows 0; after inserting 1 the graph shows 0.8; and
after inserting 2 the graph shows 1.8. Sometimes after inserting 1 the graph
shows 0.6, and so on. Am I doing something wrong?
Answer: This is how RRDtool works. RRDtool works with rates, exclusively. You can
input gauge data (discrete values in time) but RRDtool will always treat them
internally as rates.
When you created your RRD file (tempo.rrd), internally RRDtool created buckets
with a starting timestamp at creation time and each subsequent bucket +10s
from that timestamp. For example
bucket 1 - 1379713706
bucket 2 - 1379713716
bucket 3 - 1379713726
...
bucket 100 - 1379714706
bucket 101 - 1379714716
bucket 102 - 1379714726
If you were to insert your integer values at exactly the timestamps matching
the buckets, you'd be ok but you're not. Your script is inserting values using
the _current_ timestamp which is almost certainly not going to be equal to a
bucket value. Hypothetically, let's say the current timestamp is 1379714708 and you
want to insert a value of 2. When you insert your value, RRDtool needs to
choose which bucket to put it in. In this case 1379714706 is the nearest so it
will choose that one (there's a bit more logic here but that's the gist). You
might think it would insert '2' into the bucket, but to RRDtool, that would be
a lie. It might be 2 now, but it probably wasn't 2 a few seconds ago. Bearing
in mind that it sees all these values as rates, it tries to figure out how
much it should subtract from that value to make it right by looking at the
rate of change of previous values. That's why you see values such as 1.8 and
2.8 and not the integer values you expect. Things get more complicated if you
insert multiple values between buckets or skip buckets.
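If you want the stored values to match your inputs exactly, one option is to update at timestamps aligned to the step boundaries instead of using 'N' (a sketch against the script in the question):

    import time
    step = 10
    now = int(time.time())
    aligned = now - (now % step)  # lands exactly on a bucket boundary
    rrdtool.update('tempo.rrd', '%d:%d' % (aligned, i))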
There's an excellent tutorial at
<http://oss.oetiker.ch/rrdtool/tut/rrdtutorial.en.html> that goes into more
detail.
|
Using Iron Python with Solidworks API
Question: I've been working on scripting some repetitive behaviors in SolidWorks using
Python. I spent a while trying to go through the win32com library and managed
to get a lot to work, but ran into a roadblock. So I'm now trying to control
the API via IronPython. Just trying to get rolling, I have run into an
issue. I've tried to run the code below:
import clr
clr.AddReferenceToFileAndPath('..\\Redist\\SolidWorks.Interop.sldworks.dll')
clr.AddReference('SolidWorks.Interop.swconst')
from SolidWorks.Interop import sldworks
from SolidWorks.Interop import swconst
print sldworks
swApp = sldworks.ISldWorks()
swApp.Visible = True
On running this code, I get "TypeError: Cannot create instances of ISldWorks
because it is abstract"
Upon looking at the solidworks documentation
[here](http://help.solidworks.com/2013/English/api/sldworksapi/SolidWorks.Interop.sldworks~SolidWorks.Interop.sldworks.ISldWorks.html?id=11f79586cf2348529713b8667e1fdba3#Pg0)
I see this information: "This interface is the highest-level object in the
SolidWorks API. This interface provides a general set of functions that allow
application-level operations such as create, open, close, and quit documents,
arrange icons and windows, change the active document, and create attribute
definitions.
Use CreateObject, GetObject, New, or similar functions to obtain the ISldWorks
object from a Dispatch application (Visual Basic or C++ Dispatch). Standalone
.exe C++ COM applications can use CoCreateInstance. All of the SolidWorks API
add-in wizards automatically create the ISldWorks object for you.
Events are implemented with delegates in the Microsoft .NET Framework. See the
Overview topic for a list of delegates for this interface."
Now, while I'm quite familiar with Python programming, this whole .NET thing is
a new animal for me, so I'm sure I'm doing something simple wrong, but I'm
certainly struggling to figure out exactly what that is. Thanks for your help.
--UPDATE: So I have gone through and looked into how the .NET system works,
and I feel like I have a better handle on it. If I understand correctly, my
goal is to create an instance of the SolidWorks application object, or
ISldWorks, and then I should be able to access all of its members. In my
research I've come across these two articles: [Solidworks standalone app](http://www.linkedin.com/groups/Create-simple-SolidWorks-StandAlone-Application-4961405.S.235214502) and the IronPython documentation. From these, and
your very helpful response, it seems like the code below should work. Though
when run, I get an error that says "EnvironmentError:
System.Runtime.InteropServices.COMException (0x8002802B): Element not found.
(Exception from HRESULT: 0x8002802B (TYPE_E_ELEMENTNOTFOUND))", which would
lead me to believe that the object is still not instantiating correctly.
import System
t = System.Type.GetTypeFromProgID('SldWorks.Application')
swApp = System.Activator.CreateInstance(t)
swApp.Visible = True
Answer: Simplifying a bit:
In .NET, and in COM, you don't normally create instances by directly calling
the constructor of a class. In fact, most services don't even expose the
actual class. Instead, they expose an interface—that is, an abstract type
which is a supertype of the actual class, and just defines the public methods
they want you to have—and then either (a) a factory function that generates an
instance of some concrete subclass of that interface, or (b) a concrete class
that COM can use in "automatic factory" functions like `CreateObject`.
That's what the docs mean when they say:
> Use CreateObject, GetObject, New, or similar functions to obtain the
> ISldWorks object from a Dispatch application (Visual Basic or C++ Dispatch).
> Standalone .exe C++ COM applications can use CoCreateInstance.
`CreateObject` takes a "ProgID", a string representing a concrete type name,
and does all the magic needed to get a concrete type from that name, pass it
through .NET/COM interop, ask the service to create an object of that concrete
type, verify that it matches the appropriate interface, and wrap it up. If
there's an example of using SolidWorks from VB.NET, it will probably use
`CreateObject`, and you can do the same thing from IronPython.
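For instance, here is a minimal sketch of that approach from IronPython; it
assumes the VB `CreateObject` helper in the `Microsoft.VisualBasic` assembly
and the usual `SldWorks.Application` ProgID (both are assumptions, not taken
from the question):
import clr
clr.AddReference('Microsoft.VisualBasic')
from Microsoft.VisualBasic import Interaction
# CreateObject resolves the ProgID, asks COM for a concrete instance,
# and hands back a wrapped object you can call methods on
swApp = Interaction.CreateObject('SldWorks.Application')
swApp.Visible = True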
However, really, at some point you're going to need to read some documentation
on .NET and understand what all of it means.
|
At what point does a cache key get correctly generated
Question: According to the
[docs](https://docs.djangoproject.com/en/dev/topics/cache/#cache-key-
prefixing), you should use a `KEY_PREFIX` when sharing a cache instance
between servers. My question is: at what point does the KEY_PREFIX apply?
Using [MemcachedStats](https://github.com/dlrust/python-memcached-stats),
here is a basic example:
from memcached_stats import MemcachedStats
from django.core.cache import get_cache
cache = get_cache('default')
assert len(cache._servers) == 1
mem = MemcachedStats(*cache._servers[0].split(":"))
# Now let's verify there is no key yet
cache.get("TEST") == None
key = next((x for x in mem.keys() if "TEST" in x))
# Create a key
cache.set("TEST", "X", 30)
key = next((x for x in mem.keys() if "TEST" in x))
print key
':1:TEST'
At this point it looks OK - I mean, the prefix is set, or so I think...
from django.conf import settings
print settings.KEY_PREFIX
'beta'
print settings.SITE_ID
2
print settings.CACHE_MIDDLEWARE_KEY_PREFIX
'beta'
At this point is this just a bug?
Answer: Interesting problem. Turns out you need to look very closely at the
[documentation](https://docs.djangoproject.com/en/dev/ref/settings/#std%3asetting-
CACHES-KEY_FUNCTION) and notice that KEY_PREFIX is a subkey within each
CACHES[`<cache>`] entry. You need to define it like this:
CACHE_MIDDLEWARE_KEY_PREFIX = 'staging'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': 'production_cache_server:11211',
'KEY_PREFIX': CACHE_MIDDLEWARE_KEY_PREFIX,
}
}
This is also the way to define a `KEY_FUNCTION`. I verified that this will
work as well.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': 'production.jxycyn.cfg.usw1.cache.amazonaws.com:11211',
'KEY_FUNCTION': 'apps.core.cache_utils.make_key',
}
}
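As a quick sanity check, a sketch along the lines of the snippet in the
question (`make_key` is the cache helper that applies the configured prefix
and version):
from django.core.cache import get_cache
cache = get_cache('default')
# With KEY_PREFIX set, the prefix shows up in every generated key
print cache.make_key('TEST')  # 'staging:1:TEST' instead of ':1:TEST'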
|
Corrupted VIM Indentation?
Question: From time to time the indentation within my Python scripts suddenly becomes,
I suppose you could say, corrupted. The indentation will suddenly alter,
making my programs fail.
If I look at the file using cat I can see the indentation is wrong. But within
VIM it shows as fine. Here are the outputs and settings.
Any ideas???
**via a 'cat -e'**
validate_hostname = RegexValidator(regex=r'[a-zA-Z0-9-_]*\.[a-zA-Z]{2,6}',message="Enter a valid hostname.")$
validate_hostname(host_input)$
except ValidationError, e:$
print type(e)$
print str(e[0])$
error = str(e)$
else:$
error = "Please complete all fields." $
$
print error$
return [error,host_input,record_input]$
**within in VIM** ,
validate_hostname = RegexValidator(regex=r'[a-zA-Z0-9-_]*\.[a-zA-Z]{2,6}',message="Enter a valid hostname.")
validate_hostname(host_input)
except ValidationError, e:
print type(e)
print str(e[0])
error = str(e)
else:
error = "Please complete all fields."
print error
return [error,host_input,record_input]
my **.vimrc** looks like,
syntax on
se bg=dark
set tabstop=4 " insert 4 spaces when a tab is pressed
set shiftwidth=4 " change the number of space characters inserted for indentation
set expandtab " insert spaces whenever a tab key is pressed
Answer: It looks like you have mixed spaces and tabs. The code looks different in
`vim` and in `cat -e` (or simply `less`) because they use different width for
the tab, due to your `set tabstop=4`.
If in `vim` it looks fine, then doing `:retab` should fix it: it will replace
tab characters with the amount of spaces as you see it. The result will look
the same, but all tab characters will be gone.
It's important to have the right `tabstop` value before you do `retab`. For
example, if you have the opposite problem (code looks correct in `less` but
broken in `vim`) and you do `:retab` in that state, that will break the Python
script.
Check out this fantastic article about tabs in vim:
<http://vimcasts.org/episodes/tabs-and-spaces/>
In particular, I think you should add these settings to your `.vimrc`:
set softtabstop=4
set smarttab
|
facing errors in installing django-tracking
Question: I am trying to install django-tracking and have extracted it into a directory,
and now when I am running python setup.py I am getting the following error:
vikas@vikas-laptop:~/djcode/django-tracking-0.4.1$ python setup.py
Traceback (most recent call last):
File "setup.py", line 6, in <module>
import tracking
File "/home/vikas/djcode/django-tracking-0.4.1/tracking/__init__.py", line 1, in <module>
import listeners
File "/home/vikas/djcode/django-tracking-0.4.1/tracking/listeners.py", line 6, in <module>
from django.core.cache import cache
File "/usr/local/lib/python2.7/dist-packages/django/core/cache/__init__.py", line 70, in <module>
if DEFAULT_CACHE_ALIAS not in settings.CACHES:
File "/usr/local/lib/python2.7/dist-packages/django/conf/__init__.py", line 53, in __getattr__
self._setup(name)
File "/usr/local/lib/python2.7/dist-packages/django/conf/__init__.py", line 46, in _setup
% (desc, ENVIRONMENT_VARIABLE))
django.core.exceptions.ImproperlyConfigured: Requested setting CACHES, but settings are not configured. You must either define the environment variable DJANGO_SETTINGS_MODULE or call settings.configure() before accessing settings.
Can anyone please help me out in solving this problem?
Answer: django-tracking needs some fixes to work with Django 1.5 and 1.6.
I've created a fork here <https://github.com/pcraston/django-tracking>
(fixes for Django 1.5 were copied from
<https://bitbucket.org/romanalexander/django-tracking>)
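As an aside, the traceback itself points at why `python setup.py` dies:
importing `tracking` pulls in `django.core.cache`, which needs configured
settings. A minimal sketch of the workaround the error message suggests (the
settings module name here is illustrative):
import os
# Point Django at a settings module before anything imports django.core.cache
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite.settings')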
|
Trying to use timeit.timeit
Question: I'd like to measure the running time of two pieces of code. I tried looking up
the Python documentation for timeit, but I didn't really understand it. Could
someone explain it in more beginner-level vocabulary?
Answer: _Note:_ Copied to [How to use timeit
module](http://stackoverflow.com/questions/8220801/how-to-use-timeit-module/).
I'll let you in on a secret: the best way to use `timeit` is on the command
line.
On the command line, `timeit` does proper statistical analysis: it tells you
how long the shortest run took. This is good because _all_ error in timing is
positive. So the shortest time has the least error in it. There's no way to
get negative error because a computer can't ever compute faster than it can
compute!
So, the command-line interface:
%~> python -m timeit "1 + 2"
10000000 loops, best of 3: 0.0468 usec per loop
That's quite simple, eh?
You can set stuff up:
%~> python -m timeit -s "x = range(10000)" "sum(x)"
1000 loops, best of 3: 543 usec per loop
which is useful, too!
If you want multiple lines, you can either use the shell's automatic
continuation or use separate arguments:
%~> python -m timeit -s "x = range(10000)" -s "y = range(100)" "sum(x)" "min(y)"
1000 loops, best of 3: 554 usec per loop
That gives a setup of
x = range(10000)
y = range(100)
and times
sum(x)
min(y)
* * *
If you want to have longer scripts you might be tempted to move to `timeit`
inside a Python script. I suggest avoiding that because the analysis and
timing is simply better on the command line. Instead, I tend to make shell
scripts:
SETUP="
... # lots of stuff
"
echo Minmod arr1
python -m timeit -s "$SETUP" "Minmod(arr1)"
echo pure_minmod arr1
python -m timeit -s "$SETUP" "pure_minmod(arr1)"
echo better_minmod arr1
python -m timeit -s "$SETUP" "better_minmod(arr1)"
... etc
This can take a bit longer due to the multiple initialisations, but normally
that's not a big deal.
* * *
But what if you _want_ to use `timeit` inside your module?
Well, the simple way is to do:
def function(...):
...
timeit.Timer(function).timeit(number=NUMBER)
and that gives you cumulative (_not_ minimum!) time to run that number of
times.
To get a good analysis, use `.repeat` and take this min:
min(timeit.Timer(function).repeat(repeat=REPEATS, number=NUMBER))
You should normally combine this with `functools.partial` instead of `lambda:
...` to lower overhead. Thus you could have something like:
from functools import partial
def to_time(items):
...
test_items = [1, 2, 3] * 100
times = timeit.Timer(partial(to_time, test_items)).repeat(3, 1000)
# Divide by the number of repeats
time_taken = min(times) / 1000
* * *
You can also do:
timeit.timeit("...", setup="from __main__ import ...", number=NUMBER)
which would give you something closer to the _interface_ from the command-
line, but in a much less cool manner. The `"from __main__ import ..."` lets
you use code from your main module inside the artificial environment created
by `timeit`.
It's worth noting that this is a convenience wrapper for
`Timer(...).timeit(...)` and so isn't particularly good at timing. I
personally far prefer using `Timer` as I've shown above.
* * *
### Warnings
There are a few caveats with `timeit` that hold everywhere.
* Overhead is not accounted for. Say you want to time `x += 1`, to find out how long addition takes:
>>> python -m timeit -s "x = 0" "x += 1"
10000000 loops, best of 3: 0.0476 usec per loop
Well, it's _not_ 0.0476 µs. You only know that it's _less_ than that. All
error is positive.
So try and find _pure_ overhead:
>>> python -m timeit -s "x = 0" ""
100000000 loops, best of 3: 0.014 usec per loop
That's a good **30%** overhead just from timing! This can massively skew
relative timings. But you only really cared about the _adding_ timings; the
look-up timings for `x` also need to be included in overhead:
>>> python -m timeit -s "x = 0" "x"
100000000 loops, best of 3: 0.0166 usec per loop
The difference isn't much larger, but it's there.
* Mutating methods are dangerous.
python -m timeit -s "x = [0]*100000" "while x: x.pop()"
10000000 loops, best of 3: 0.0436 usec per loop
But that's _completely wrong!_ `x` is the empty list after the first
iteration. You'll need to reinitialize:
>>> python -m timeit "x = [0]*100000" "while x: x.pop()"
100 loops, best of 3: 9.79 msec per loop
But then you have lots of overhead. Account for that separately.
>>> python -m timeit "x = [0]*100000"
1000 loops, best of 3: 261 usec per loop
Note that subtracting the overhead is reasonable here _only because_ the
overhead is a small-ish fraction of the time being measured.
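To make that subtraction concrete, using the numbers from the two runs above:
setup_and_pops = 9.79e-3  # seconds per loop: setup plus the while loop
setup_only = 261e-6       # seconds per loop: setup alone
print setup_and_pops - setup_only  # ~9.53e-3 s spent on the pops themselves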
|
Python Reading Multiple NetCDF Rainfall files of variable size
Question: The issue I have is that the Australian Bureau of Meteorology has supplied me
with rainfall data files that contain rainfall records recorded every 30
minutes for all active gauges. The problem is that for one day there are 48
30-minute files. I want to create a time series for a particular gauge, which
means reading all 48 files and searching for the gauge ID, while making sure
it doesn't fail if the gauge did not record anything for a given 30-minute
period. Here are links to the file format:
<https://dl.dropboxusercontent.com/u/15223371/14/gauge_30min_20100214_000000.nc>
<https://dl.dropboxusercontent.com/u/15223371/14/gauge_30min_20100214_003000.nc>
<https://dl.dropboxusercontent.com/u/15223371/14/gauge_30min_20100214_010000.nc>
This is what I have tried so far:
"""
This script is used to read a directory of raingauge data from a Data Directory
"""
from anuga.file.netcdf import NetCDFFile
from anuga.config import netcdf_mode_r, netcdf_mode_w, netcdf_mode_a, \
netcdf_float
import os
import glob
from easygui import *
import string
import numpy
"""
print 'Default file Extension...'
msg="Enter 3 letter extension."
title = "Enter the 3 letter file extension to search for in DIR "
default = "csv"
file_extension = enterbox(msg,title,default)
"""
print 'Present Directory Open...'
title = "Select Directory to Read Multiple rainfall .nc files"
msg = "This is a test of the diropenbox.\n\nPick the directory that you wish to open."
d = diropenbox(msg, title)
fromdir = d
filtered_list = glob.glob(os.path.join(fromdir, '*.nc'))
filtered_list.sort()
nf = len(filtered_list)
print nf
import numpy
rain = numpy.zeros(nf,'float')
t = numpy.arange(nf)
Stn_Loc_File='Station_Location.csv'
outfid = open(Stn_Loc_File, 'w')
prec = numpy.zeros((nf,1752),numpy.float)
gauge_id_list = ['570002','570021','570025','570028','570030','570032','570031','570035','570036',
'570047','570772','570781','570910','570903','570916','570931','570943','570965',
'570968','570983','570986','70214','70217','70349','70351']
"""
title = "Select Gauge to plot"
msg = "Select Gauge"
gauge_id = int(choicebox(msg=msg,title=title, choices=gauge_id_list))
"""
#for gauge_id in gauge_id_list:
# gauge_id = int(gauge_id)
try:
for i, infile in enumerate(filtered_list):
infilenet = NetCDFFile(infile, netcdf_mode_r)
print infilenet.variables
raw_input('Hold.... check variables...')
stn_lats = infilenet.variables['latitude']
stn_longs = infilenet.variables['longitude']
stn_ids = infilenet.variables['station_id']
stn_rain = infilenet.variables['precipitation']
print stn_ids.shape
#print stn_lats.shape
#print stn_longs.shape
#print infile.dimensions
stn_ids = numpy.array(stn_ids)
l_id = numpy.where(stn_ids == gauge_id)
if stn_ids in gauge_id_list:
try:
l_id = l_id[0][0]
rain[i] = stn_rain[l_id]
except:
rain[i] = numpy.nan
print 'End for i...'
#print rain
import pylab as pl
pl.bar(t,rain)
pl.title('Rain Gauge data')
pl.xlabel('time steps')
pl.ylabel('rainfall (mm)')
pl.show()
except:
pass
raw_input('END....')
Answer: OK, you got the data in a format that's more convoluted than it would need to
be. They could easily have stuffed the whole day into a netCDF file. And
indeed, one option for you to solve this would have been to combine all files
into one with a times dimension, using for example the NCO command line tools.
But here is a solution that uses the scipy netcdf module. I believe it is
deprecated; myself, I prefer the NetCDF4 library. The main approach is:
preset your output data structure with `np.nan` values; loop through your
input files and retrieve precipitation and station ids; for each of your
stationids of interest, retrieve index, and then precipitation at that index;
add to the output structure. (I didn't do the work to extract timestamps -
that's up to you.)
import glob
import numpy as np
from scipy.io import netcdf
# load data file names
stationdata = glob.glob('gauge*.nc')
stationdata.sort()
# initialize np arrays of integer gauging station ids
gauge_id_list = ['570002','570021','570025','570028','570030','570032','570031','570035','570036',
'570047','570772','570781','570910','570903','570916','570931','570943','570965',
'570968','570983','570986','70214','70217','70349','70351']
gauge_ids = np.array(gauge_id_list).astype('int32')
ngauges = len(gauge_ids)
ntimesteps = 48
# initialize output dictionary
dtypes = zip(gauge_id_list, ['float32']*ngauges)
timeseries_per_station = np.empty((ntimesteps,))
timeseries_per_station.fill(np.nan)
timeseries_per_station = timeseries_per_station.astype(dtypes)
# Instead of using the index, you could extract the datetime stamp
for timestep, datafile in enumerate(stationdata):
data = netcdf.NetCDFFile(datafile, 'r')
precip = data.variables['precip'].data
stid = data.variables['stid'].data
# create np array of indices of the gaugeid present in file
idx = np.where(np.in1d(stid, gauge_ids))[0]
for i in idx:
timeseries_per_station[str(stid[i])][timestep] = precip[i]
data.close()
np.set_printoptions(precision=1)
for gauge_id in gauge_id_list:
print "Station %s:" % gauge_id
print timeseries_per_station[gauge_id]
The output looks like this:
Station 570002:
[ 1.9 0.3 0. nan nan nan nan nan nan nan nan nan nan nan nan
nan nan nan nan nan nan nan nan nan nan nan nan nan nan nan
nan nan nan nan nan nan nan nan nan nan nan nan nan nan nan
nan nan nan]
Station 570021:
[ 0. 0. 0. nan nan nan nan nan nan nan nan nan nan nan nan
nan nan nan nan nan nan nan nan nan nan nan nan nan nan nan
nan nan nan nan nan nan nan nan nan nan nan nan nan nan nan
nan nan nan]
...
(Obviously, there were only three files.)
**Edit:** The OP noted that the code wasn't running without errors for him
because his variable names are "precipitation" and "station_id". The code runs
for me on the files he posted. Obviously, he should be using whatever variable
names are used in the files that he was supplied with. As they seem to be
custom-produced files for his use, it is conceivable that the authors may not
be consistent in variable naming.
|
Tornado websockets supporting binary
Question: I am using tornado as a server. I would like it to receive binary data. The
server side is as simple as simple gets:
import tornado.websocket
import tornado.httpserver
import tornado.ioloop
import tornado.web
class WebSocketServer(tornado.websocket.WebSocketHandler):
def open(self):
print 'OPEN'
def on_message(self, message):
print 'GOT MESSAGE: {}'.format(message)
def on_close(self):
print 'CLOSE'
app = tornado.web.Application([
(r'/', WebSocketServer)
])
http_server = tornado.httpserver.HTTPServer(app)
http_server.listen(9500)
tornado.ioloop.IOLoop.instance().start()
This server is just used to visualize incoming data, nothing too special. The
server works just fine with standard ascii, but it explodes when it gets any
unicode (my test for fake binary data). I used the site
<http://www.websocket.org/echo.html> and redirected the sending to go to
`ws://127.0.0.1:9500/`, which is where I set up the server. The server then
prompted me with this very nasty error:
ERROR:tornado.application:Uncaught exception in /
Traceback (most recent call last):
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site packages/tornado/websocket.py", line 303, in wrapper
return callback(*args, **kwargs)
File "test.py", line 11, in on_message
print 'GOT MESSAGE: {}'.format(message)
UnicodeEncodeError: 'ascii' codec can't encode character u'\xa1' in position 0: ordinal not in range(128)
The character was `¡`, an upside down !. Now I know that tornado can [send
binary](http://www.tornadoweb.org/en/branch2.1/websocket.html#tornado.websocket.WebSocketProtocol8.write_message),
but apparently not receive? I am probably making some petty mistake, but where
is it?
Answer: In the line
print 'GOT MESSAGE: {}'.format(message)
you advise Python to format a character string into a byte string, which fails
if the character string contains non-ASCII characters. Simply use a character
string (prefixed with `u` in Python 2.x) instead (parentheses optional):
print (u'GOT MESSAGE: {}'.format(message))
# ^
Alternatively, if you want to inspect binary characters, use
[`repr`](http://docs.python.org/2/library/functions.html#func-repr):
print ('GOT MESSAGE: {}'.format(repr(message)))
# ^^^^^ ^
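As for the title's actual question: Tornado does receive binary frames. A
sketch of an `on_message` that distinguishes the two cases (text frames arrive
as unicode, binary frames as byte strings):
def on_message(self, message):
    # Text frames arrive as unicode, binary frames as byte strings
    if isinstance(message, unicode):
        print u'GOT TEXT: {}'.format(message)
    else:
        print 'GOT BINARY: {}'.format(repr(message))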
|
Stopping a turtle when it reaches a point, Python
Question: I am currently in a beginning programming class, and I am blowing through the
assignments. Right now, I have to make 3 houses with the module turtle (which
I accomplished):
def drawBody(mover):
#Rectangle part
mover.fillcolor("blue")
mover.begin_fill()
for i in range(2):
mover.forward(100)
mover.right(90)
mover.forward(75)
mover.right(90)
mover.end_fill()
#Triangle part
mover.fillcolor("red")
mover.begin_fill()
mover.left(45)
for i in range(2):
mover.forward(70.5)
mover.right(90)
mover.right(45)
mover.forward(100)
mover.end_fill()
#Create preproduction turtle
import turtle
wn = turtle.Screen()
josh = turtle.Turtle()
pointGoTo = -175
for houses in range(3):
josh.penup()
josh.goto(pointGoTo,0)
josh.pendown()
drawBody(josh)
josh.right(180)
pointGoTo = pointGoTo + 125
wn.exitonclick()
Here is the code with the while loop. I want the turtle to stop at a certain
point, the top left corner of the red square. I have tried multiple points but
the while loop just doesn't break/stop. Is my syntax off? Or am I approaching
this whole line-of-the-house thing all wrong? If I am being vague, please ask
what you don't understand; I really want to figure this out, but I am all out
of ideas:
def drawBody(mover):
#Rectangle part
mover.fillcolor("blue")
mover.begin_fill()
for i in range(2):
mover.forward(100)
mover.right(90)
mover.forward(75)
mover.right(90)
mover.end_fill()
#Triangle part
mover.fillcolor("red")
mover.begin_fill()
mover.left(45)
for i in range(2):
mover.forward(70.5)
mover.right(90)
mover.right(45)
mover.forward(100)
mover.end_fill()
mover.left(90)
mover.forward(75)
mover.left(90)
n = mover.position()
print(n)
while True:
mover.forward(100)
n = mover.position()
print(n)
mover.left(90)
mover.forward(5)
mover.left(90)
n = mover.position()
print(n)
mover.forward(100)
mover.right(90)
mover.forward(5)
mover.right(90)
if n == (-75.30,0.00):
break
#Create preproduction turtle
import turtle
wn = turtle.Screen()
josh = turtle.Turtle()
pointGoTo = -175
for houses in range(3):
josh.penup()
josh.goto(pointGoTo,0)
josh.pendown()
drawBody(josh)
josh.right(180)
pointGoTo = pointGoTo + 125
wn.exitonclick()
Answer: First of all, you're checking in the wrong place; you have to check it here:
while True:
mover.forward(100)
mover.left(90)
mover.forward(5)
mover.left(90)
n = mover.position()
if abs(n - (-75.30, 0.00)) < 0.01:
break
mover.forward(100)
mover.right(90)
mover.forward(5)
mover.right(90)
Your check is not successful because n is actually a
[turtle.Vec2D](http://docs.python.org/2/library/turtle.html#turtle.Vec2D), and
its coordinates are floats; you can see it if you do `print n[0], n[1]`. There
are many links on SO about comparing floats, like [Floating point equality in
python](http://stackoverflow.com/questions/4028889/floating-point-equality-in-
python), for example. In your case you can do:
if abs(n - (-75.30, 0.00)) < 0.01:
break
But I think the best way for you would be to just draw a fixed number of
times:
mover.left(90)
mover.forward(70)
mover.left(90)
for i in xrange(7):
mover.forward(100)
mover.left(90)
mover.forward(5)
mover.left(90)
mover.forward(100)
mover.right(90)
mover.forward(5)
mover.right(90)
Also you have to change your code like this:
for houses in range(3):
josh.penup()
josh.goto(pointGoTo,0)
josh.pendown()
drawBody(josh)
pointGoTo = pointGoTo + 125
|
Python script for transforming and sorting columns in ascending order, decimal cases
Question: I wrote a script in Python removing tabs/blank spaces between two columns of
strings (x,y coordinates), separating the columns by a comma, and listing the
maximum and minimum values of each column (two values each for the x and y
coordinates). E.g.:
100000.00 60000.00
200000.00 63000.00
300000.00 62000.00
400000.00 61000.00
500000.00 64000.00
became:
100000.00,60000.00
200000.00,63000.00
300000.00,62000.00
400000.00,61000.00
500000.00,64000.00
10000000 50000000 60000000 640000000
This is the code I used:
import string
input = open(r'C:\coordinates.txt', 'r')
output = open(r'C:\coordinates_new.txt', 'wb')
s = input.readline()
while s <> '':
s = input.readline()
liste = s.split()
x = liste[0]
y = liste[1]
output.write(str(x) + ',' + str(y))
output.write('\n')
s = input.readline()
input.close()
output.close()
I need to change the above code to also transform the coordinates from two
decimal places to one, and to sort the two new columns in ascending order
based on the values of the x coordinate (left column).
I started by writing the following, but not only is it not sorting the values,
it is placing the y coordinates on the left and the x on the right. In
addition, I don't know how to transform the decimals since the values are
strings, and the only function I know of is %f, which needs floats. Any
suggestions to improve the code below?
import string
input = open(r'C:\coordinates.txt', 'r')
output = open(r'C:\coordinates_sorted.txt', 'wb')
s = input.readline()
while s <> '':
s = input.readline()
liste = string.split(s)
x = liste[0]
y = liste[1]
output.write(str(x) + ',' + str(y))
output.write('\n')
sorted(s, key=lambda x: x[o])
s = input.readline()
input.close()
output.close()
thanks!
Answer: Your code looks more like C than like Python; it is quite unidiomatic. I
suggest you read the [Python
tutorial](http://docs.python.org/2/tutorial/index.html) to find some
inspiration. For example, iterating using a `while` loop is usually the wrong
approach. The `string` module is deprecated for the most part, `<>` should be
`!=`, you don't need to call `str()` on an object that's already a string...
Then, there are some errors. For example, `sorted()` returns a sorted version
of the iterable you're passing - you need to assign that to something, or the
result will be discarded. But you're calling it on a string, anyway, which
won't give you the desired result. You also wrote `x[o]` where you clearly
meant `x[0]`.
You should be using something like this (assuming Python 2):
with open(r'C:\coordinates.txt') as infile:
values = []
for line in infile:
values.append(map(float, line.split()))
values.sort()
with open(r'C:\coordinates_sorted.txt', 'w') as outfile:
for value in values:
outfile.write("{:.1f},{:.1f}\n".format(*value))
|
Dynamic Semantic errors in Python
Question: I came across this as an interview question. This question seemed interesting,
so I am posting it here.
Consider an operation which gives a semantic error, like division by zero. By
default, the Python interpreter gives output like "Invalid Operation" or
something similar. Can we control the output that is given out by the Python
interpreter, like printing some other error message, skipping that division by
zero operation, and carrying on with the rest of the instructions?
And also, how can I evaluate the cost of run-time semantic checks? There are
many Python experts here. I am hoping someone will throw some light on this.
Thanks in advance.
Answer: > Can we control the output that is given out by Python compiler, like print
> some other error message, skip that division by zero operation, and carry on
> with rest of the instructions?
No, you cannot. You can manually wrap every dangerous command with a
`try...except` block, but I'm assuming you're talking about an automatic
recovery to specific lines _within_ a `try...except` block, or even completely
automatically.
By the time the error has fallen through such that `sys.excepthook` is called,
or whatever outer scope if you catch it early, the inner scopes are gone. You
can change line numbers with
[`sys.settrace`](http://docs.python.org/2/library/sys.html#sys.settrace) in
CPython although that is **only an implementation detail** , but since the
outer scopes are gone there is no reliable recovery mechanism.
If you try to use the humorous `goto` April fools module (that uses the method
I just described) to jump blocks _even within a file_ :
from goto import goto, label
try:
1 / 0
label .foo
print("recovered")
except:
goto .foo
you get an error:
Traceback (most recent call last):
File "rcv.py", line 9, in <module>
goto .foo
File "rcv.py", line 9, in <module>
goto .foo
File "/home/joshua/src/goto-1.0/goto.py", line 272, in _trace
frame.f_lineno = targetLine
ValueError: can't jump into the middle of a block
so I'm pretty certain it's impossible.
* * *
> And also, how can i evaluate the cost of run-time semantic checks?
I don't know what that is, but you're probably looking for a
[`line_profiler`](http://pythonhosted.org/line_profiler/):
import random
from line_profiler import LineProfiler
profiler = LineProfiler()
def profile(function):
profiler.add_function(function)
return function
@profile
def foo(a, b, c):
if not isinstance(a, int):
raise TypeError("Is this what you mean by a 'run-time semantic check'?")
d = b * c
d /= a
return d**a
profiler.enable()
for _ in range(10000):
try:
foo(random.choice([2, 4, 2, 5, 2, 3, "dsd"]), 4, 2)
except TypeError:
pass
profiler.print_stats()
output:
Timer unit: 1e-06 s
File: rcv.py
Function: foo at line 11
Total time: 0.095197 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
11 @profile
12 def foo(a, b, c):
13 10000 29767 3.0 31.3 if not isinstance(a, int):
14 1361 4891 3.6 5.1 raise TypeError("Is this what you mean by a 'run-time semantic check'?")
15
16 8639 20192 2.3 21.2 d = b * c
17 8639 20351 2.4 21.4 d /= a
18
19 8639 19996 2.3 21.0 return d**a
So the "run-time semantic check", in this case would be taking 36.4% of the
time of running `foo`.
* * *
If you want to time specific blocks manually that are larger than you'd use
`timeit` on but smaller than you'd want for a profiler, instead of using two
`time.time()` calls (which is quite an inaccurate method) I suggest [Steven
D'Aprano's Stopwatch context manager](https://code.google.com/p/my-startup-
file/source/browse/timer.py).
|
GAE Configure for Django App
Question: Can someone please help me transition a Django app into Google App Engine
(GAE)? I would like to be able to take all of the files in my Django app and
copy them to the GAE app. However, I am not sure how the default files for
GAE should be configured. How should the main.py file look so that it runs the
Django app like it was designed to:
main.py
import webapp2
class MainHandler(webapp2.RequestHandler):
def get(self):
self.response.write('Hello world!')
app = webapp2.WSGIApplication([
('/', MainHandler)
], debug=True)
app.yaml
application: appname
version: 1
runtime: python27
api_version: 1
threadsafe: true
libraries:
- name: django
version: "1.3"
builtins:
- django_wsgi: on
Answer: I have a Django app running on App Engine. I followed this link to get it up
and running: <http://www.allbuttonspressed.com/projects/djangoappengine>.
There were a lot of little changes in all of the config files compared to
regular Django. (I now do not use Django, as I love App Engine and hate
Django.) Below are some examples of my files. Note that in your question you
have a webapp2 request handler; you won't use anything like that with Django.
It will be all of the normal view definitions as functions, not classes like
App Engine's webapp2 handlers. If you decide to try this approach out, let me
know how it goes.
This is what my app.yaml looks like after following the link above:
application: app
version: production
runtime: python27
api_version: 1
threadsafe: yes
libraries:
- name: django
version: latest
handlers:
- url: /_ah/queue/deferred
script: djangoappengine.deferred.handler.application
login: admin
- url: /_ah/stats/.*
script: djangoappengine.appstats.application
- url: /.*
script: djangoappengine.main.application
my settings.py
# Initialize App Engine and import the default settings (DB backend, etc.).
# If you want to use a different backend you have to remove all occurences
# of "djangoappengine" from this file.
from djangoappengine.settings_base import *
import os
DEBUG = False
TEMPLATE_DEBUG = DEBUG
# Activate django-dbindexer for the default database
DATABASES['native'] = DATABASES['default']
DATABASES['default'] = {'ENGINE': 'dbindexer', 'TARGET': 'native'}
AUTOLOAD_SITECONF = 'indexes'
INSTALLED_APPS = (
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sessions',
'djangotoolbox',
'autoload',
'dbindexer',
# djangoappengine should come last, so it can override a few manage.py commands
'djangoappengine',
)
MIDDLEWARE_CLASSES = (
# This loads the index definitions, so it has to come first
'autoload.middleware.AutoloadMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.request',
'django.core.context_processors.media',
'context_processors.general'
)
ADMIN_MEDIA_PREFIX = '/media/'
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), 'templates'),
os.path.join(os.path.dirname(__file__), 'media'),
)
main.py
import os,sys
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from google.appengine.dist import use_library
use_library('django', '1.2')
# Google App Engine imports.
from google.appengine.ext.webapp import util
# Force Django to reload its settings.
from django.conf import settings
settings._target = None
import django.core.handlers.wsgi
import django.core.signals
import django.db
import django.dispatch
# Log errors.
#import logging
#def log_exception(*args, **kwds):
# logging.exception('Exception in request:')
#
#django.dispatch.Signal.connect(
# django.core.signals.got_request_exception, log_exception)
# Unregister the rollback event handler.
django.dispatch.Signal.disconnect(
django.core.signals.got_request_exception,
django.db._rollback_on_exception)
def main():
# Create a Django application for WSGI.
application = django.core.handlers.wsgi.WSGIHandler()
# Run the WSGI CGI handler with that application.
util.run_wsgi_app(application)
if __name__ == '__main__':
main()
|
Python Import Error for modules installed with Homebrew
Question: I've already installed PySide using homebrew, but I get an error when my
scripts run things such as
from PySide import QtWebKit
When I try `brew install pyside` I get an error that `pyside-1.2.0 already
installed`
When I try `pip install pyside` I get the following error:
In file included from /Users/fitvalet/wgwt/env/build/pyside/sources/pyside/plugins/customwidgets.cpp:23:
/Users/fitvalet/wgwt/env/build/pyside/sources/pyside/plugins/customwidget.h:27:10: fatal error: 'QtDesigner/QtDesigner' file not found
fatal error: 'QtDesigner/QtDesigner' file not found
#include <QtDesigner/QtDesigner>
^
2 warnings and 1 error generated.
make[2]: *** [plugins/CMakeFiles/uiplugin.dir/customwidgets.cpp.o] Error 1
make[1]: *** [plugins/CMakeFiles/uiplugin.dir/all] Error 2
make: *** [all] Error 2
error: Error compiling pyside
...
Command /Users/fitvalet/WGWT/env/bin/python -c
"import setuptools;__file__='/Users/fitvalet/WGWT/env/build/pyside/setup.py';exec(compile(open(__file__).read().replace('\r\n', '\n'), __file__, 'exec'))"
install --record /var/folders/rb/qjx8psqs3gj48qmpgbqqvrhc0000gn/T/pip-h69ltB-record/install-record.txt --single-version-externally-managed --install-headers
/Users/fitvalet/WGWT/env/include/site/python2.7 failed with error code 1 in
/Users/fitvalet/WGWT/env/build/pyside
Storing complete log in /Users/fitvalet/.pip/pip.log
I also tried `easy_install pyside` and got this error:
2 warnings and 1 error generated.
make[2]: *** [plugins/CMakeFiles/uiplugin.dir/customwidgets.cpp.o] Error 1
make[1]: *** [plugins/CMakeFiles/uiplugin.dir/all] Error 2
make: *** [all] Error 2
error: Setup script exited with error: Error compiling pyside
Answer: I figured out the problem by reinstalling the homebrew installation of PySide.
When you install using homebrew, you get a warning that
For non-homebrew python (2.x), you need to amend your PYTHONPATH like so:
export PYTHONPATH=/usr/local/lib/python2.7/site-packages:$PYTHONPATH
Ran this and the module worked.
To make the change automatic rather than having to type the line each time I
opened a new terminal console, I needed to add that line to my ~/.bash_profile
file.
|
xampp-control-panel import error missing gtk on mac os x lion
Question: I installed XAMPP 1.8.3 on Mac OS X Lion (10.8.5). I'm trying to launch xampp-
control-panel in /Applications/XAMPP/xamppfiles/share/xampp-control-panel. It
asks that I run xampp-control-panel application as root. I get the following
error:
Traceback (most recent call last):
File "xampp-control-panel.py", line 18, in <module>
import gtk
ImportError: No module named gtk
I have only Apple's pre-installed Python. The command `$ which python` outputs
`/usr/bin/python`. I haven't installed any other Python distribution.
Answer: This question is a little old but I recently faced it as well. I found this
solution, [How to start xampp gui](http://askubuntu.com/questions/529500/how-
to-start-xampp-gui), and this guide, [Xampp Installation in
Ubuntu](https://emuhendis.wordpress.com/2009/07/19/ubuntuda-xampp-server-
kurulumu/), neither of which worked for me to run the GUI. So I put them
here in case they might help some of you.
For now, I use the following commands to start and stop XAMPP,
sudo /opt/lampp/lampp start
sudo /opt/lampp/lampp stop
|
Paste formatted Python code in command line
Question: I want to run Python code from an
[example](http://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.vq.kmeans.html#scipy.cluster.vq.kmeans)
in the [Anaconda](https://store.continuum.io/cshop/anaconda/) shell.
Unfortunately the statement I want to paste has lines starting with `...`. Is
there an easy way to run such a statement without having to manually remove
the `...`? I know that other shells exist, but I don't want to have to try
getting them working with Anaconda.
>>> features = array([[ 1.9,2.3],
... [ 1.5,2.5],
... [ 0.8,0.6],
... [ 0.4,1.8],
... [ 0.1,0.1],
... [ 0.2,1.8],
... [ 2.0,0.5],
... [ 0.3,1.5],
... [ 1.0,1.0]])
Answer: Python's native [doctest
parser](http://docs.python.org/3.3/library/doctest.html#doctest.DocTestParser)
is used to dealing with those pesky repr prompts. :)
>>> from doctest import DocTestParser
>>> repr_code = '''
... >>> features = array([[ 1.9,2.3],
... ... [ 1.5,2.5],
... ... [ 0.8,0.6],
... ... [ 0.4,1.8],
... ... [ 0.1,0.1],
... ... [ 0.2,1.8],
... ... [ 2.0,0.5],
... ... [ 0.3,1.5],
... ... [ 1.0,1.0]])
... '''
>>> p = DocTestParser()
>>> code = next(filter(None, p.parse(repr_code.strip()))) # Filter out the useless parts
>>> print(code.source)
features = array([[ 1.9,2.3],
[ 1.5,2.5],
[ 0.8,0.6],
[ 0.4,1.8],
[ 0.1,0.1],
[ 0.2,1.8],
[ 2.0,0.5],
[ 0.3,1.5],
[ 1.0,1.0]])
>>> array = list # Because it's cheaper than numpy
>>> exec(code.source) # If you're feeling very lucky...
>>> len(features)
9
|
Automatically remove hot/dead pixels from an image in python
Question: I am using numpy and scipy to process a number of images taken with a CCD
camera. These images have a number of hot (and dead) pixels with very large
(or small) values. These interfere with other image processing, so they need
to be removed. Unfortunately, though a few of the pixels are stuck at either 0
or 255 and are always at the same value in all of the images, there are some
pixels that are temporarily stuck at other values for a period of a few
minutes (the data spans many hours).
I am wondering if there is a method for identifying (and removing) the hot
pixels already implemented in python. If not, I am wondering what would be an
efficient method for doing so. The hot/dead pixels are relatively easy to
identify by comparing them with neighboring pixels. I could see writing a loop
that looks at each pixel, compares its value to that of its 8 nearest
neighbors. Or, it seems nicer to use some kind of convolution to produce a
smoother image and then subtract this from the image containing the hot
pixels, making them easier to identify.
I have tried this "blurring method" in the code below, and it works okay, but
I doubt that it is the fastest. Also, it gets confused at the edge of the
image (probably since the gaussian_filter function is taking a convolution and
the convolution gets weird near the edge). So, is there a better way to go
about this?
Example code:
import numpy as np
import matplotlib.pyplot as plt
import scipy.ndimage
plt.figure(figsize=(8,4))
ax1 = plt.subplot(121)
ax2 = plt.subplot(122)
#make a sample image
x = np.linspace(-5,5,200)
X,Y = np.meshgrid(x,x)
Z = 255*np.cos(np.sqrt(x**2 + Y**2))**2
for i in range(0,11):
#Add some hot pixels
Z[np.random.randint(low=0,high=199),np.random.randint(low=0,high=199)]= np.random.randint(low=200,high=255)
#and dead pixels
Z[np.random.randint(low=0,high=199),np.random.randint(low=0,high=199)]= np.random.randint(low=0,high=10)
#Then plot it
ax1.set_title('Raw data with hot pixels')
ax1.imshow(Z,interpolation='nearest',origin='lower')
#Now we try to find the hot pixels
blurred_Z = scipy.ndimage.gaussian_filter(Z, sigma=2)
difference = Z - blurred_Z
ax2.set_title('Difference with hot pixels identified')
ax2.imshow(difference,interpolation='nearest',origin='lower')
threshold = 15
hot_pixels = np.nonzero((difference>threshold) | (difference<-threshold))
#Don't include the hot pixels that we found near the edge:
count = 0
for y,x in zip(hot_pixels[0],hot_pixels[1]):
if (x != 0) and (x != 199) and (y != 0) and (y != 199):
ax2.plot(x,y,'ro')
count += 1
print 'Detected %i hot/dead pixels out of 20.'%count
ax2.set_xlim(0,200); ax2.set_ylim(0,200)
plt.show()
And the output: 
Answer: Basically, I think that the fastest way to deal with hot pixels is just to use
a size=2 median filter. Then, poof, your hot pixels are gone and you also kill
all sorts of other high-frequency sensor noise from your camera.
If you really want to remove ONLY the hot pixels, then you can subtract the
median-filtered image from the original image, as I did in the question, and
replace only these outlier values with the values from the median-filtered image.
This doesn't work well at the edges, so if you can ignore the pixels along the
edge, then this will make things a lot easier.
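A minimal sketch of that interior-only idea (the threshold here is chosen by
hand, just for illustration):
import numpy as np
from scipy.ndimage import median_filter

def fix_hot_pixels_simple(image, threshold=20):
    # Replace any pixel that deviates strongly from its median-filtered
    # neighborhood with the filtered value
    blurred = median_filter(image, size=2)
    outliers = np.abs(image - blurred) > threshold
    fixed = image.copy()
    fixed[outliers] = blurred[outliers]
    return fixed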
If you want to deal with the edges, you can use the code below. However, it is
not the fastest:
import numpy as np
import matplotlib.pyplot as plt
import scipy.ndimage
plt.figure(figsize=(10,5))
ax1 = plt.subplot(121)
ax2 = plt.subplot(122)
#make some sample data
x = np.linspace(-5,5,200)
X,Y = np.meshgrid(x,x)
Z = 100*np.cos(np.sqrt(x**2 + Y**2))**2 + 50
np.random.seed(1)
for i in range(0,11):
#Add some hot pixels
Z[np.random.randint(low=0,high=199),np.random.randint(low=0,high=199)]= np.random.randint(low=200,high=255)
#and dead pixels
Z[np.random.randint(low=0,high=199),np.random.randint(low=0,high=199)]= np.random.randint(low=0,high=10)
#And some hot pixels in the corners and edges
Z[0,0] =255
Z[-1,-1] =255
Z[-1,0] =255
Z[0,-1] =255
Z[0,100] =255
Z[-1,100]=255
Z[100,0] =255
Z[100,-1]=255
#Then plot it
ax1.set_title('Raw data with hot pixels')
ax1.imshow(Z,interpolation='nearest',origin='lower')
def find_outlier_pixels(data,tolerance=3,worry_about_edges=True):
#This function finds the hot or dead pixels in a 2D dataset.
#tolerance is the number of standard deviations used to cutoff the hot pixels
#If you want to ignore the edges and greatly speed up the code, then set
#worry_about_edges to False.
#
#The function returns a list of hot pixels and also an image with with hot pixels removed
from scipy.ndimage import median_filter
blurred = median_filter(data, size=2)
difference = data - blurred
threshold = tolerance*np.std(difference)
#find the hot pixels, but ignore the edges
hot_pixels = np.nonzero((np.abs(difference[1:-1,1:-1])>threshold) )
hot_pixels = np.array(hot_pixels) + 1 #because we ignored the first row and first column
fixed_image = np.copy(data) #This is the image with the hot pixels removed
for y,x in zip(hot_pixels[0],hot_pixels[1]):
fixed_image[y,x]=blurred[y,x]
if worry_about_edges == True:
height,width = np.shape(data)
###Now get the pixels on the edges (but not the corners)###
#left and right sides
for index in range(1,height-1):
#left side:
med = np.median(data[index-1:index+2,0:2])
diff = np.abs(data[index,0] - med)
if diff>threshold:
hot_pixels = np.hstack(( hot_pixels, [[index],[0]] ))
fixed_image[index,0] = med
#right side:
med = np.median(data[index-1:index+2,-2:])
diff = np.abs(data[index,-1] - med)
if diff>threshold:
hot_pixels = np.hstack(( hot_pixels, [[index],[width-1]] ))
fixed_image[index,-1] = med
#Then the top and bottom
for index in range(1,width-1):
#bottom:
med = np.median(data[0:2,index-1:index+2])
diff = np.abs(data[0,index] - med)
if diff>threshold:
hot_pixels = np.hstack(( hot_pixels, [[0],[index]] ))
fixed_image[0,index] = med
#top:
med = np.median(data[-2:,index-1:index+2])
diff = np.abs(data[-1,index] - med)
if diff>threshold:
hot_pixels = np.hstack(( hot_pixels, [[height-1],[index]] ))
fixed_image[-1,index] = med
###Then the corners###
#bottom left
med = np.median(data[0:2,0:2])
diff = np.abs(data[0,0] - med)
if diff>threshold:
hot_pixels = np.hstack(( hot_pixels, [[0],[0]] ))
fixed_image[0,0] = med
#bottom right
med = np.median(data[0:2,-2:])
diff = np.abs(data[0,-1] - med)
if diff>threshold:
hot_pixels = np.hstack(( hot_pixels, [[0],[width-1]] ))
fixed_image[0,-1] = med
#top left
med = np.median(data[-2:,0:2])
diff = np.abs(data[-1,0] - med)
if diff>threshold:
hot_pixels = np.hstack(( hot_pixels, [[height-1],[0]] ))
fixed_image[-1,0] = med
#top right
med = np.median(data[-2:,-2:])
diff = np.abs(data[-1,-1] - med)
if diff>threshold:
hot_pixels = np.hstack(( hot_pixels, [[height-1],[width-1]] ))
fixed_image[-1,-1] = med
return hot_pixels,fixed_image
hot_pixels,fixed_image = find_outlier_pixels(Z)
for y,x in zip(hot_pixels[0],hot_pixels[1]):
ax1.plot(x,y,'ro',mfc='none',mec='r',ms=10)
ax1.set_xlim(0,200)
ax1.set_ylim(0,200)
ax2.set_title('Image with hot pixels removed')
ax2.imshow(fixed_image,interpolation='nearest',origin='lower',clim=(0,255))
plt.show()
Output: 
|
how to fetch data from google plus using api key in python?
Question: When I send a request like --
f = urllib.urlopen('https://www.googleapis.com/plus/v1/people/103777531434977807649/activities/public?key=***************')
json=f.read()
print json
it returns something like this, not the required json:
{
"kind": "plus#activityFeed",
"etag": "\"seVFOlIgH91k2i-GrbizYfaw_AM/chWYjTdvKRLG9yxkeAfrCrofGHk\"",
"nextPageToken": "CAIQ__________9_IAAoAA",
"title": "Google+ List of Activities for Collection PUBLIC",
"items": []
}
What do I have to do to get the right response?
This is the code:
import json
import urllib
f = urllib.urlopen('https://www.googleapis.com/plus/v1/people/'+id+'/activities/public?key=*****************&maxResults=100')
s = f.read()
f.close()
ss=json.loads(s)
print ss
try:
nextpagetoken=str(ss['nextPageToken'])
i=0
str_current_datetime=str(datetime.now())
gp_crawldate=str_current_datetime.split(" ")[0]
gp_groupid=id
db = MySQLdb.connect("localhost","root","****","googleplus" )
cursor=db.cursor()
while i<len(ss['items']):
gp_kind=str(ss['items'][i]['kind'])
gp_title=str(ss['items'][i]['title'].encode('utf8'))
gp_published=str(ss['items'][i]['published'][0:10])
check=int(cool(str(ss['items'][i]['published'][0:19])))#this method is defined in the code
gp_activityid=str(ss['items'][i]['id'])
gp_actorid=str(ss['items'][i]['actor']['id'])
gp_verb=str(ss['items'][i]['verb'])
gp_objecttype=str(ss['items'][i]['object']['objectType'])
gp_originalcontent=str(ss['items'][i]['object']['content'].encode('utf8'))
gp_totalreplies=str(ss['items'][i]['object']['replies']['totalItems'])
gp_totalplusone=str(ss['items'][i]['object']['plusoners']['totalItems'])
gp_totalreshare=str(ss['items'][i]['object']['resharers']['totalItems'])
#gp_geocode=str(ss['items'][i]['geocode'])
#gp_placename=str(ss['items'][i]['placeName'])
i=i+1
Is there any change in the G+ API?
Answer: The response you posted is a correct response. If the `items` field is an
empty list, then the user that you are fetching the posts for has probably
never posted anything publicly. In this case, I confirmed that the user has no
public posts simply by visiting their profile.
|
Making a layout with variable number of columns per row in kivy?
Question: In kivy, what is the preferred way to make a screen that has a variable number
of columns per row? Is there a way to accomplish this without explicitly
specifying the positions and sizes of the widgets in a layout (i.e. is there a
way to do this as if you were stacking a bunch of GridLayouts with different
numbers of rows and cols within a Screen)? What is the way to do this using
only python code?
For instance, let's say you have a Screen which contain some type of Layout,
called "layout_scr1". How would you go about arranging things so that, for
example, the first row of layout_scr1 contains 1 column, the second row
contains 2 columns, and the third row contains 4 columns? Thank you.
Answer: There are quite a few options, but I think the simplest way would be using
`BoxLayout` instead of `GridLayout` or even `StackLayout`. `StackLayout` could
go to a second row if the width is not enough, whereas `BoxLayout` and
`GridLayout` stay on the same line. You can find an explanation of the
difference between `BoxLayout` and `GridLayout`
[here](http://stackoverflow.com/questions/18222194/kivy-boxlayout-vs-
gridlayout/18237038#18237038).
Here is the output:

Here is the code:
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.floatlayout import FloatLayout
Builder.load_string("""
<Boxes>:
AnchorLayout:
anchor_x: 'center'
anchor_y: 'top'
ScreenManager:
size_hint: 1, .9
id: _screen_manager
Screen:
name: 'screen1'
BoxLayout:
orientation: 'vertical'
padding: 50
BoxLayout:
orientation: 'horizontal'
Button:
text: "1"
BoxLayout:
orientation: 'horizontal'
Button:
text: "2"
Button:
text: "3"
Button:
text: "4"
BoxLayout:
orientation: 'horizontal'
Button:
text: "5"
Button:
text: "6"
BoxLayout:
orientation: 'horizontal'
Button:
text: "7"
Button:
text: "8"
Button:
text: "9"
Button:
text: "10"
Screen:
name: 'screen2'
Label:
text: 'Another Screen'
AnchorLayout:
anchor_x: 'center'
anchor_y: 'bottom'
BoxLayout:
orientation: 'horizontal'
size_hint: 1, .1
Button:
text: 'Go to Screen 1'
on_press: _screen_manager.current = 'screen1'
Button:
text: 'Go to Screen 2'
on_press: _screen_manager.current = 'screen2'""")
class Boxes(FloatLayout):
pass
class TestApp(App):
def build(self):
return Boxes()
if __name__ == '__main__':
TestApp().run()
If you still want to use `GridLayouts`, you can substitute this:
BoxLayout:
orientation: 'vertical'
for this:
GridLayout:
cols: 1
and this:
BoxLayout:
orientation: 'horizontal'
for this:
GridLayout:
rows: 1
And just in case you were looking for a more dynamic approach:
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
Builder.load_string("""
<Boxes>:
boxes: _boxes
AnchorLayout:
anchor_x: 'center'
anchor_y: 'top'
ScreenManager:
size_hint: 1, .9
id: _screen_manager
Screen:
name: 'screen1'
BoxLayout:
orientation: 'vertical'
padding: 50
id: _boxes
Screen:
name: 'screen2'
Label:
text: 'Another Screen'
AnchorLayout:
anchor_x: 'center'
anchor_y: 'bottom'
BoxLayout:
orientation: 'horizontal'
size_hint: 1, .1
Button:
text: 'Go to Screen 1'
on_press: _screen_manager.current = 'screen1'
Button:
text: 'Go to Screen 2'
on_press: _screen_manager.current = 'screen2'""")
class Boxes(FloatLayout):
def __init__(self, **kwargs):
super(Boxes, self).__init__(**kwargs)
bx1 = BoxLayout(orientation='horizontal')
bx2 = BoxLayout(orientation='horizontal')
bx3 = BoxLayout(orientation='horizontal')
bx4 = BoxLayout(orientation='horizontal')
for i in range(1,2):
bx1.add_widget(Button(text=str(i)))
for i in range(2,5):
bx2.add_widget(Button(text=str(i)))
for i in range(5,7):
bx3.add_widget(Button(text=str(i)))
for i in range(7,11):
bx4.add_widget(Button(text=str(i)))
self.boxes.add_widget(bx1)
self.boxes.add_widget(bx2)
self.boxes.add_widget(bx3)
self.boxes.add_widget(bx4)
class TestApp(App):
def build(self):
return Boxes()
if __name__ == '__main__':
TestApp().run()
|
how to pass char pointer as argument in ctypes python
Question: Please help me convert the below line of C++ code into ctypes Python:
Ret = openFcn(&Handle, "C:\\Config.xml");
below are the declarations of each:
typedef uint16_t (* OpenDLLFcnP)(void **, const char *);
OpenDLLFcnP openFcn = NULL;
openFcn = (OpenDLLFcnP) myLibrary.resolve("Open");
void *Handle = NULL;
Answer: `myLibrary.resolve` is undefined, but the general code you need (untested) is:
import ctypes
dll = ctypes.CDLL('your.dll')
Open = dll.Open
Open.argtypes = [ctypes.POINTER(ctypes.c_void_p),ctypes.c_char_p]
Open.restype = ctypes.c_uint16
Handle = ctypes.c_void_p()
result = Open(ctypes.byref(Handle),'c:\\Config.xml')
This assumes you have a DLL named `your.dll` with a function `Open` you want
to call.
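A possible follow-up, purely illustrative since the meaning of the returned
`uint16` depends on the DLL's own conventions:
# Hypothetical status check -- treating 0 as success is only an assumption
if result != 0:
    raise RuntimeError('Open failed with status code %d' % result)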
|
Calculate entropy of a file
Question: I have searched for this function for over two hours on Google, Wikipedia and
many, many forums, but I couldn't find it. How can I do this? I tried the
following but it didn't work.
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <stdint.h>
static unsigned int mylog2 (unsigned int val) {
unsigned int ret = -1;
while (val != 0) {
val >>= 1;
ret++;
}
return ret;
}
int main(int argc, char **argv)
{
FILE *pFile;
int i; // various loop index
int j; // filename loop index
int n; // Bytes read by fread;
int size; // Filesize
float entropy;
float temp; // temp value used in entropy calculation
long alphabet[256];
unsigned char buffer[1024];
/* do this for all files */
for(j = 1; j < argc; j++)
{
/* initialize all values */
size = 0;
entropy = 0.0;
memset(alphabet, 0, sizeof(long) * 256);
pFile = fopen(argv[j], "rb");
if(pFile == NULL)
{
printf("Failed to open `%s`\n", argv[j]);
continue;
}
/* Read the whole file in parts of 1024 */
while((n = fread(buffer, 1, 1024, pFile)) != 0)
{
/* Add the buffer to the alphabet */
for (i = 0; i < n; i++)
{
alphabet[(int) buffer[i]]++;
size++;
}
}
fclose(pFile);
/* entropy calculation */
for (i = 0; i < 256; i++)
{
if (alphabet[i] != 0)
{
temp = (float) alphabet[i] / (float) size;
entropy += -temp * mylog2(temp);
}
}
printf("%02.5f [ %02.5f ]\t%s\n", entropy, entropy / 8, argv[j]);
}
return 0;
}
I know I am doing it wrong. In Python it seems to be a lot easier; in Python
it is:
import sys
import math
if len(sys.argv) != 2:
print "Usage: file_entropy.py [path]filename"
sys.exit()
# read the whole file into a byte array
f = open(sys.argv[1], "rb")
byteArr = map(ord, f.read())
f.close()
fileSize = len(byteArr)
print 'File size in bytes:'
print fileSize
print
# calculate the frequency of each byte value in the file
freqList = []
for b in range(256):
ctr = 0
for byte in byteArr:
if byte == b:
ctr += 1
freqList.append(float(ctr) / fileSize)
# print 'Frequencies of each byte-character:'
# print freqList
# print
# Shannon entropy
ent = 0.0
for freq in freqList:
if freq > 0:
ent = ent + freq * math.log(freq, 2)
ent = -ent
print 'Shannon entropy (min bits per byte-character):'
print ent
print
print 'Min possible file size assuming max theoretical compression efficiency:'
print (ent * fileSize), 'in bits'
print (ent * fileSize) / 8, 'in bytes'
### Modifications to file_entropy.py to create the Histogram start here ###
### by Ken Hartman www.KennethGHartman.com
import numpy as np
import matplotlib.pyplot as plt
N = len(freqList)
ind = np.arange(N) # the x locations for the groups
width = 1.00 # the width of the bars
#fig = plt.figure()
fig = plt.figure(figsize=(11,5),dpi=100)
ax = fig.add_subplot(111)
rects1 = ax.bar(ind, freqList, width)
ax.set_autoscalex_on(False)
ax.set_xlim([0,255])
ax.set_ylabel('Frequency')
ax.set_xlabel('Byte')
ax.set_title('Frequency of Bytes 0 to 255\nFILENAME: ' + sys.argv[1])
plt.show()
How can I achieve the same in C++? Hopefully somebody answers factually.
Answer: The Shannon entropy is `H = -1*sum(p_i*log(p_i))` where **p_i** is the
frequency of each symbol i in the sum, and the result is in bits per symbol if
the log base is 2, or "nats" if the log base is e. But it changes if you
change how you express the data, i.e. if the same data is expressed as bits,
bytes, etc. So you can **divide by log(n)**, where n is the number of symbols
available (2 for binary, 256 for bytes), and H will range from 0 to 1 (this is
the normalized intensive Shannon entropy).
The above entropy is an **"intensive"** form, i.e. per symbol which is
analogous to specific entropy in physics, per kg or per mole. Regular
"extensive" entropy like physics entropy is `S=N*H` where N is the number of
symbols in the file. A little math with the above H gives **normalized
extensive entropy for a file** , where "n" is number of distinct "i" symbols
(2 for binary, 256 for bytes):
S=N * H / log(n) = sum(count_i*log(N/count_i))/log(n)
For files with equal frequency of each symbol this gives `S=N`. Entropy does
not do any compression on the data and is thereby completely ignorant of any
patterns so 000000111111 has same H and S as 010111101000 (6 1's and 6 0's in
both cases).
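As a quick sanity check of the extensive formula above, here is a minimal Python sketch of my own (assuming byte symbols, so n = 256):
    import math
    def normalized_extensive_entropy(data):
        # S = sum(count_i * log(N / count_i)) / log(n), with n = 256 byte symbols
        N = len(data)
        counts = [0] * 256
        for b in bytearray(data):
            counts[b] += 1
        return sum(c * math.log(float(N) / c) for c in counts if c) / math.log(256)
    print(normalized_extensive_entropy(b"000000111111"))  # same S as for b"010111101000"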
|
Printing net-snmp getbulk results with newlines at each index
Question: I have the following Python code:
#!/usr/bin/python
import netsnmp
session = netsnmp.Session(DestHost='_destination address_', Version=2, Community='_string_')
vars = netsnmp.VarList(netsnmp.Varbind('ifIndex',), netsnmp.Varbind('ifDescr',), netsnmp.Varbind('ifOperStatus',))
print(session.getbulk(0, 48, vars))
The results of `session.getbulk` are as follows:
('1', 'Vlan1', '1', '2', 'Vlan2', '2', '10101', 'GigabitEthernet0/1', '2', '10102',
'GigabitEthernet0/2', '2', '10103', 'GigabitEthernet0/3', '2', '10104',
'GigabitEthernet0/4', '2', '10105', 'GigabitEthernet0/5', '2', '10106',
'GigabitEthernet0/6', '2', '10107', 'GigabitEthernet0/7', '2', '10108',
'GigabitEthernet0/8', '2', '10109', 'GigabitEthernet0/9', '2', '10110',
'GigabitEthernet0/10', '2', '10111', 'GigabitEthernet0/11', '2', '10112',
'GigabitEthernet0/12', '2', '10113', 'GigabitEthernet0/13', '1', '10114',
'GigabitEthernet0/14', '1', '10115', 'GigabitEthernet0/15', '2', '10116',
'GigabitEthernet0/16', '1', '10117', 'GigabitEthernet0/17', '2')
I would like to print the information returned by `session.getbulk` on a
newline per each interface. If my understanding of my program is correct, I
should get three values for each interface, (`ifIndex`, `ifDescr`, and
`ifOperStatus`.)
As it stands, the results are presented in a single block of information, and
it may be hard for my audience to differentiate between the interfaces.
However, being totally new to programming I am having a hard time figuring out
how to do this. If anybody is willing to point me toward an appropriate
tutorial or documentation for this, I'd much appreciate it.
Thanks!
Answer: If I am understanding you correctly, I think this is what you want:
result = session.getbulk(0, 48, vars)
for i in range(0, len(result), 3):
print "ifind: "+result[i]+" ifdesc: "+result[i+1]+" status: "+result[i+2]
|
Plugins usually don't work, how do I debug?
Question: I am trying to write a plugin that will create a bitmap font. However, this is
very frustrating to learn... while I am not familiar with python, it is not
that hard to pick up, and I have not had problems with it outside of GIMP.
Copied some of the code from:
<https://github.com/sole/snippets/blob/master/gimp/generate_bitmap_font/sole_generate_bitmap_font.py>
and from <http://gimpbook.com/scripting/>
Does work:
#!/usr/bin/env python
# Hello World in GIMP Python
from gimpfu import *
def create_font(cwidth, cheight, font, size, color) :
#Set GLOBAL
char_begin = 32
char_end = 127
num_chars = char_end - char_begin
# Figure out total width & height
"""twidth = cwidth * 10
theight = cheight * 10
# Create Image
img = gimp.Image(cwidth * 10, cheight * 10, RGB)
img.disable_undo()
# Save the current foreground color:
pdb.gimp_context_push()
# Set the text color & background color
gimp.set_foreground(color)
gimp.set_background(0, 0, 0)
# Create All Layers & Position Accordingly
for i in range(char_begin, char_end):
string = '%c' % i
offset = i - char_begin
x_pos = offset * cwidth
y_pos = offset * cheight
text_layer = pdb.gimp_text_fontname(img, None, x_pos, y_pos, string, -1, False, size, PIXELS, font)
gimp.progress_update(float(offset) / float(num_chars))
pdb.gimp_image_flatten(img)
img.enable_undo()
# Create a new image window
gimp.Display(img)
# Show the new image window
gimp.displays_flush()
# Restore the old foreground color:
pdb.gimp_context_pop()"""
register(
"python_fu_bitmap_font",
"Bitmap Font",
"Create a new bitmap font",
"*****",
"*****",
"2013",
"Bitmap Font (Py)...",
"", # Create a new image, don't work on an existing one
[
(PF_SPINNER, "cwidth", "Cell Width", 24, (1, 3000, 1)),
(PF_SPINNER, "cheight", "Cell Height", 51, (1, 3000, 1)),
(PF_FONT, "font", "Font face", "Consolas"),
(PF_SPINNER, "size", "Font size", 50, (1, 3000, 1)),
(PF_COLOR, "color", "Text color", (1.0, 0.0, 0.0))
],
[],
create_font, menu="<Image>/File/Create")
main()
Does not work:
#!/usr/bin/env python
# Hello World in GIMP Python
from gimpfu import *
def create_font(cwidth, cheight, font, size, color) :
#Set GLOBAL
char_begin = 32
char_end = 127
num_chars = char_end - char_begin
# Figure out total width & height
twidth = cwidth * 10
theight = cheight * 10
# Create Image
"""img = gimp.Image(cwidth * 10, cheight * 10, RGB)
img.disable_undo()
# Save the current foreground color:
pdb.gimp_context_push()
# Set the text color & background color
gimp.set_foreground(color)
gimp.set_background(0, 0, 0)
# Create All Layers & Position Accordingly
for i in range(char_begin, char_end):
string = '%c' % i
offset = i - char_begin
x_pos = offset * cwidth
y_pos = offset * cheight
text_layer = pdb.gimp_text_fontname(img, None, x_pos, y_pos, string, -1, False, size, PIXELS, font)
gimp.progress_update(float(offset) / float(num_chars))
pdb.gimp_image_flatten(img)
img.enable_undo()
# Create a new image window
gimp.Display(img)
# Show the new image window
gimp.displays_flush()
# Restore the old foreground color:
pdb.gimp_context_pop()"""
register(
"python_fu_bitmap_font",
"Bitmap Font",
"Create a new bitmap font",
"*****",
"*****",
"2013",
"Bitmap Font (Py)...",
"", # Create a new image, don't work on an existing one
[
(PF_SPINNER, "cwidth", "Cell Width", 24, (1, 3000, 1)),
(PF_SPINNER, "cheight", "Cell Height", 51, (1, 3000, 1)),
(PF_FONT, "font", "Font face", "Consolas"),
(PF_SPINNER, "size", "Font size", 50, (1, 3000, 1)),
(PF_COLOR, "color", "Text color", (1.0, 0.0, 0.0))
],
[],
create_font, menu="<Image>/File/Create")
main()
It seems that after moving the start of the comment from line 15 to line 19,
everything goes to hell. And to be honest, I am not even sure how to debug
this. I tried using the console under Filters>Python-Fu>Console - however it
kept telling me line 1 was the issue... which I think we can all agree is not
the case.
I tried running pieces of this code in a plain Python script and it works
perfectly fine.
What should I do?
Answer: First of all, try to remove the shebang at line 1.
Then something that has nothing to do with the actual problem, but why are you
creating such a big string?
# Create Image
"""img = gimp.Image(cwidth * 10, cheight * 10, RGB)
img.disable_undo()
# Save the current foreground color:
pdb.gimp_context_push()
# Set the text color & background color
gimp.set_foreground(color)
gimp.set_background(0, 0, 0)
# Create All Layers & Position Accordingly
for i in range(char_begin, char_end):
string = '%c' % i
offset = i - char_begin
x_pos = offset * cwidth
y_pos = offset * cheight
text_layer = pdb.gimp_text_fontname(img, None, x_pos, y_pos, string, -1, False, size, PIXELS, font)
gimp.progress_update(float(offset) / float(num_chars))
pdb.gimp_image_flatten(img)
img.enable_undo()
# Create a new image window
gimp.Display(img)
# Show the new image window
gimp.displays_flush()
# Restore the old foreground color:
pdb.gimp_context_pop()"""
Is this your way of commenting out the code?
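If so, note that a triple-quoted string is still an expression the interpreter has to build, not a comment; the usual way is to prefix each line with `#`, for example (sketching just the first two lines):
    # img = gimp.Image(cwidth * 10, cheight * 10, RGB)
    # img.disable_undo()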
|
How should data files be included to mrjob on EMR?
Question: I am trying to run a mrjob on Amazon's EMR. I've tested the job locally using
the inline runner, but it fails when running on Amazon. I've narrowed the
failure down to my dependence on an external data file `zip_codes.txt`. If I
run without that dependency using hardcoded zip code data it works just fine.
I've tried to include the necessary data file using the upload file argument.
When I look on S3, the file did make it there, but clearly something is going
wrong so that I cannot access it locally.

Here is my `mrjob.conf` file:
runners:
emr:
aws_access_key_id: FOOBARBAZQUX
aws_secret_access_key: IAMASECRETKEY
aws_region: us-east-1
ec2_key_pair: mapreduce
ec2_key_pair_file: $ENV/keys/mapreduce.pem
ssh_tunnel_to_job_tracker: true
ssh_tunnel_is_open: true
cleanup_on_failure: ALL
cmdenv:
TZ: America/Los_Angeles
This is my `MR_zip.py` file.
from mrjob.job import MRJob
import mrjob
import csv
def distance(p1, p2):
# d = ...
return d
class MR_zip(MRJob):
OUTPUT_PROTOCOL = mrjob.protocol.JSONProtocol
zip_codes = {int(zip_code): (float(latitude), float(longitude)) for zip_code, latitude, longitude in csv.reader(open("zip_codes.txt", "r"))}
def mapper(self, _, line):
zip_code_1, poi = line.split(",")
zip_code_1 = int(zip_code_1)
lat1, lon1 = self.zip_codes[zip_code_1]
for zip_code_2, (lat2, lon2) in self.zip_codes.items():
d = distance((lat1, lon1), (lat2, lon2))
yield zip_code_2, (zip_code_1, poi, d)
def reducer(self, zip_code_1, ds):
result = {}
for zip_code_2, poi, d in ds:
if poi not in result:
result[poi] = (zip_code_2, d)
elif result[poi][1] > d:
result[poi] = (zip_code_2, d)
yield zip_code_1, result
if __name__ == '__main__':
MR_zip.run()
And finally, I run it with the following command:
python MR_zip.py -r emr --conf mrjob.conf --file zip_codes.txt < poi.txt
Where zip_codes.txt looks like:
...
62323,39.817702,-90.66923
62324,39.988988,-90.94976
62325,40.034398,-91.16278
62326,40.421857,-90.80333
...
And poi.txt looks like:
...
210,skate park
501,theatre
29001,theatre
8001,knitting club
20101,food bank
...
Answer: Also, you might find the
[`MRJob.add_file_option`](http://mrjob.readthedocs.org/en/latest/guides/writing-mrjobs.html#file-options)
routine useful. For example, specifying
self.add_file_option('--config-file', dest='config_file',
default=None, help='file with labels', action="append")
you can reference the uploaded files via the `self.options.config_file` list of paths.
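Separately, a hedged guess about the original failure: the class-level dict comprehension in `MR_zip` runs at import time, which on EMR may happen before `zip_codes.txt` has been copied into the task's working directory. A minimal sketch of loading it lazily instead (`MRZipLazy` is a hypothetical name):
    import csv
    from mrjob.job import MRJob
    class MRZipLazy(MRJob):
        def mapper_init(self):
            # zip_codes.txt is shipped via --file and lands in the task's working directory
            with open("zip_codes.txt", "r") as f:
                self.zip_codes = {int(z): (float(lat), float(lon))
                                  for z, lat, lon in csv.reader(f)}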
|
Commands starting with "%" and "-" characters
Question: I've seen these in books and over the internet but never understood them.
Something like this
...modify changer.py without stopping Python...
% vi changer.py
and
python -mtimeit -s
What do they mean? And where are they applied?
Answer: `%` is the C shell prompt. It's not universal for all shells. Depending on
what kind of shell you're using, you could get a different kind of prompt when
its waiting for your commands. For example UNIX shells commonly use the `$`
sign and the Windows CMD prompt uses the current directory followed by a `>`
character.
`vi` is simply a _shell command_ which calls vi, a Unix-platform text editor.
In your example, he's just using the text-editor vi to modify the Python
script changer.py. It has nothing to do with Python other than the fact that
the text editor can be used to edit Python scripts.
`python -mtimeit -s` is using the `-m` _command line argument_ to run the
`timeit` module as a script. Everything after the module name is passed to
`timeit` itself, so the `-s` here is `timeit`'s own _setup_ option: it
specifies code (such as an import) that is executed once before the statement
being timed. It is not the interpreter's `-s` flag.
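For example, a typical invocation (the reported timings will vary by machine):
    python -m timeit -s "import math" "math.sqrt(2)"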
For more information on command line arguments for the python interpreter,
check out the [official
documentation.](http://docs.python.org/2/using/cmdline.html)
|
Python SHA512 encryption with PHP
Question: This is the first time I am trying to deal with Python code. My client has
recently given me this Python one-liner:
python -c 'import crypt; print crypt.crypt("Pa55w0rd!", "$6$x88yEvVg")'
which is used to encrypt the password. In the above code, the password is
**Pa55w0rd!** and the salt value is **x88yEvVg**. Can I
execute the above code in PHP? I have tried doing this:
echo exec(`python -c "import crypt;print crypt.crypt('Pa55w0rd!', '\$6\$x88yEvVg\')"`);
Thanks.
Answer: Do you absolutely need to encrypt using python? Depending on your PHP version
you could do this for PHP >= 5.3:
openssl_digest("Pa55w0rd!"."$6$x88yEvVg", "sha512");
and this for PHP 5.2 or 5.1
hash("sha512", "Pa55w0rd!"."$6$x88yEvVg");
This assumes that your salt value is just being concatenated with your
password value.
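A hedged side note: `crypt.crypt` with a `$6$` salt produces a SHA-512-crypt hash (an iterated scheme, not a single digest), so a plain `hash("sha512", ...)` call will generally not reproduce it exactly. If an exact match matters, PHP's own `crypt('Pa55w0rd!', '$6$x88yEvVg')` (available since PHP 5.3) should give the same result as the Python one-liner.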
|
Python3: TkInter
Question: I've just coded this simple program but need help with Tkinter! I want to use
what the user types into the adult ticket box, so I set it as global; but
because the user hasn't clicked the button yet, `input_adult.get()` only
returns a blank string instead of the integer the user typed. Is there any way
to get around this? Thanks in advance!!
from tkinter import *
import sys
adult_fare = 7.45
child_fare = 5.00
adult_tickets = 0
def child_Gui():
mGui = Tk()
labelNo = Label(mGui, text = "How many child/concession tickets do you need?").pack()
input_child = Entry(mGui)
input_child.pack()
input_child.focus_set()
b = Button(mGui, text = "GO", width = 10, command = child_travel)
b.pack()
def adult_travel():
print(adult_tickets)
def adult_Gui():
global adult_tickets
mGui = Tk()
labelNo = Label(mGui, text = "How many adult tickets do you need?").pack()
input_adult = Entry(mGui)
input_adult.pack()
input_adult.focus_set()
b = Button(mGui, text = "GO", width = 10, command = adult_travel)
b.pack()
adult_tickets = input_adult.get()
def compare_sunday():
sunday_answer = sundayEntry.get().lower()
if sunday_answer == "yes":
global child_fare
global adult_fare
adult_fare = 3.00
child_fare = 3.00
labelNo = Label(sundayGui, text = "Ok your traveling on a sunday. All prices will be $3.00!!").pack()
okButton = Button(sundayGui, text = "Click here to continue", width = 40, command = adult_Gui).pack()
elif sunday_answer == "no":
labelNo = Label(sundayGui, text = "Ok your not traveling on a sunday.").pack()
okButton = Button(sundayGui, text = "Click here to continue", width = 40, command = adult_Gui).pack()
else:
labelElse = Label(sundayGui, text = "Please type yes or no!!").pack()
sundayGui = Tk()
sundayGui.title("Travel Calculator")
label_sunday = Label(sundayGui, text = "Are you traveling on a sunday?").pack()
sundayEntry = Entry(sundayGui)
sundayEntry.pack()
sundayEntry.focus_set()
sundayB = Button(sundayGui, text = "Go", width = 10, command = compare_sunday).pack()
Answer: You need to call the `get` method in the callback for the button. This
requires that you make the entry widget available globally:
def adult_Gui():
global input_adult
...
input_adult = Entry()
...
def adult_travel():
adult_tickets = input_adult.get()
print(adult_tickets)
|
Python - How to run a specific function with variables
Question: I'm new to Python. This is the code I am trying to use, basically I am trying
to start by adding a hosted zone:
<http://ijabour.com/myfaceapp/build/boto/bin/route53>
The function for this is: **create**
If I want to add a hosted zone called "test.com", how would I use this library
to do this? I want to know how to invoke a specific function in this Python
file and pass an argument to it.
Answer: When you want to call the `create` function in that module, just import the
module and call it:
import route53
conn = .... # init connection here
route53.create(conn, "test.com")
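A minimal sketch of the connection setup, assuming the linked script comes from the boto package (so `boto.connect_route53()` is the entry point, with credentials taken from your boto config):
    import boto
    import route53  # the linked bin/route53 script, saved somewhere on your path
    conn = boto.connect_route53()
    route53.create(conn, "test.com")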
|
Python module import error with command prompt of Windows 7
Question: I have a script in Python where i import several modules
from __future__ import division
import os
import glob
import sys
import tempfile
import shutil
import math
import datetime
import gdal
import random
from shapely.geometry import Point
from shapely.geometry import Polygon
from shapely.geometry import box
from liblas import file
When I use an IDE (e.g. PyCharm or PyScripter) I have no problem importing the
external modules (gdal, shapely.geometry, and liblas). When I run the script
from the command prompt I get this error message
C:\PythonScript\myscript.py
Traceback (most recent call last):
File "C:\PythonScript\myscript.py", line 10, in <module>
import gdal
ImportError: No module named gdal
where
print(gdal.__file__)
C:\Python27\lib\site-packages\gdal.pyc
and
print(sys.path)
['C:\\Program Files (x86)\\JetBrains\\PyCharm 2.7.3\\helpers\\pydev', 'C:\\Python27\\lib\\site-packages\\progressbar-2.3-py2.7.egg', 'C:\\windows\\system32\\python27.zip', 'C:\\Python27\\DLLs', 'C:\\Python27\\lib', 'C:\\Python27\\lib\\plat-win', 'C:\\Python27\\lib\\lib-tk', 'C:\\Python27', 'C:\\Python27\\lib\\site-packages', 'C:\\PythonScript']
I installed gdal using the Unofficial Windows Binaries for Python Extension
Packages (an *.exe installer).
Answer: Try checking the path of your Python install and its site-packages. If
something is wrong there, your system might not be able to find the modules
and site-packages, which may cause such errors.
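One concrete check, offered as a guess: the `sys.path` above was printed from inside PyCharm, so the command prompt may simply be launching a different interpreter. Running this from the failing prompt should tell you:
    python -c "import sys; print(sys.executable); print(sys.path)"
If the `site-packages` directory containing gdal is missing from that output, the command prompt's `python` is not the one your IDE uses.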
|
Filtering data with pandas
Question: I'm a newbie to Pandas and I'm trying to apply it to a script that I have
already written. I have a csv file from which I extract the data, and use the
columns '_candidate_', '_final track_' and '_status_' for my data frame.
My problem is, I would like to filter the data, using perhaps the method shown
in Wes Mckinney's 10min tutorial
('<http://nbviewer.ipython.org/urls/gist.github.com/wesm/4757075/raw/a72d3450ad4924d0e74fb57c9f62d1d895ea4574/PandasTour.ipynb>').
In the section `In [80]:` he uses `aapl_bars.close_price['2009-10-15']`.
I would like to use a similar method to select all the data which have `*` as
a status. Data from the other columns are also deleted if there is no * in
that row.
My **code** at the moment:
def establish_current_tacks(filename):
df=pd.read_csv(filename)
cols=[df.iloc[:,0], df.iloc[:,10], df.iloc[:,11]]
current_tracks=pd.concat(cols, axis=1)
return current_tracks
My **DataFrame** :
>>> current_tracks
<class 'pandas.core.frame.DataFrame'>
Int64Index: 707 entries, 0 to 706
Data columns (total 3 columns):
candidate 695 non-null values
final track 670 non-null values
status 670 non-null values
dtypes: float64(1), object(2)
I would like to use something such as `current_tracks.status['*']`, but this
does not work
Apologies if this is obvious; I'm struggling a little to get my head around it.
Answer: Since the data you want to filter based on is not part of the data frame's
index, but instead is a regular column, you need to do something like this:
current_tracks[current_tracks.status == '*']
Full example:
import pandas as pd
current_tracks = pd.DataFrame({'candidate': ['Bob', 'Jim', 'Alice'],
'final_track': [10, 15, 13], 'status': ['*', '.', '*']})
current_tracks
Out[3]:
candidate final_track status
0 Bob 10 *
1 Jim 15 .
2 Alice 13 *
current_tracks[current_tracks.status == '*']
Out[4]:
candidate final_track status
0 Bob 10 *
2 Alice 13 *
If `status` was part of your dataframe's index, your original syntax would
have worked:
current_tracks = current_tracks.set_index('status')
current_tracks.candidate['*']
Out[8]:
status
* Bob
* Alice
Name: candidate, dtype: object
|
Python - Uniquely determining which text file an element originated from
Question: I'm loading in three separate text files using numpy with the code:
str = 'data'
Di = np.loadtxt(str+'i.txt', dtype=np.float64)
Dj = np.loadtxt(str+'j.txt', dtype=np.float64)
Dk = np.loadtxt(str+'k.txt', dtype=np.float64)
The text files contain 2-dimensional data with `2` columns and roughly `6000`
rows (they all contain `2` columns but the number of rows is variable). Given
an element `[a,b]`, how can I uniquely determine which text file it
originated from?
I can't entirely ensure that the elements are unique: the pair
`[a,b]` may occur in both (for example) the datai and dataj text files.
It is very unlikely, but I can't rule it out entirely.
Edit:
Loading in the text files, for example, gives:
Di = [[1 4] Dj = [[9 4] Dk = [[2 4]
[1 5] [5 5] [5 6]
[4 5] [3 6]] [4 7]]
datai.txt dataj.txt datak.txt
So given the element `[1 4]` the output would be `datai.txt`, letting me know
the element `[1 4]` originated from the `datai.txt` file.
Answer: Something like:
import numpy
Di = numpy.array([[1, 4], [1, 5], [4, 5]])
Dj = numpy.array([[9, 4], [5, 5], [3, 6]])
Dk = numpy.array([[2, 4], [5, 6], [4, 7]])
#>>>
next(array for array in [Di, Dj, Dk] if ([5, 5] == array).all(1).any())
#>>> array([[9, 4],
#>>> [5, 5],
#>>> [3, 6]])
?
If you want the index:
next(i for i, array in enumerate([Di, Dj, Dk]) if ([5, 5] == array).all(1).any())
#>>> 1
or the name:
next(k for k, array in {"Di":Di, "Dj":Dj, "Dk":Dk}.items() if ([5, 5] == array).all(1).any())
#>>> 'Dj'
* * *
The
([5, 5] == array).all(1).any()
is the key part, it does (using [9, 4] for explanation)
[9, 4] == array
#>>> array([[ True, True],
#>>> [False, False],
#>>> [False, False]], dtype=bool)
Then you call `all` along the axis going across.
([9, 4] == Dj).all(1)
#>>> array([ True, False, False], dtype=bool)
And then you check if any of the axis matched.
* * *
The
next(array for array in [Di, Dj, Dk] if CONDITION)
makes an iterable that only contains those arrays that satisfy CONDITION,
`next` gets the first.
You can use `next(..., fallback)` if you don't like catching `StopIteration`.
|
How do you listen for Mediakey events under gnome 3 using python?
Question: I'm trying to listen for MediaKey events under Gnome 3 (Gnome Shell). All the
examples I find refer to using DBus to connect to
org.gnome.SettingsDaemon.MediaKeys. This service doesn't seem to exist on my
platform.
I'm trying to do this using Python via GObject-Introspection. The examples say
do something like this
    from gi.repository import Gio
connection = Gio.bus_get_sync(Gio.BusType.SESSION, None)
proxy = Gio.DBusProxy.new_sync(connection, 0, None, 'org.gnome.SettingsDaemon', '/org/gnome/SettingsDaemon/MediaKeys', 'org.gnome.SettingsDaemon.MediaKeys', None)
This fails, unsurprisingly. Am I missing an install which provides this
service, or do I have to do this another way?
## UPDATE
This is for a media key listener, which listens for key events no matter which
window has the focus. It's meant for an app which doesn't even have its own
GUI, and is Desktop-wide. That's why I tried the MediaKeys DBus service, only
to find it is missing from my Desktop.
## UPDATE 2
I should be clear: the MediaKeys service is not present. I can't even connect
to the service and create the proxy, as it's not there. What I'm wanting to
know is, am I missing an install, or was this service removed in one of the
Gnome 3 updates? If it was removed, then how can I listen for Media Keys in
this new environment? I'm running Gnome 3.8.2.
## UPDATE 3
Sorry should have mentioned this in the first place. I'll perfect my question
asking one day :-}. I'm running Gentoo.
Answer: Have you actually seen this question? [can't get dbus signal listener to work
in C with gnome multimedia
keys](http://stackoverflow.com/questions/5744041/cant-get-dbus-signal-listener-to-work-in-c-with-gnome-multimedia-keys?rq=1)
The questioner said this code works:
#!/usr/bin/env python
"""Printing out gnome multi media keys via dbus-python.
"""
import gobject
import dbus
import dbus.service
import dbus.mainloop.glib
def on_mediakey(comes_from, what):
""" gets called when multimedia keys are pressed down.
"""
print ('comes from:%s what:%s') % (comes_from, what)
if what in ['Stop','Play','Next','Previous']:
print ('Got a multimedia key!')
else:
print ('Got a multimedia key...')
# set up the glib main loop.
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
bus = dbus.Bus(dbus.Bus.TYPE_SESSION)
bus_object = bus.get_object('org.gnome.SettingsDaemon',
'/org/gnome/SettingsDaemon/MediaKeys')
# this is what gives us the multi media keys.
dbus_interface='org.gnome.SettingsDaemon.MediaKeys'
bus_object.GrabMediaPlayerKeys("MyMultimediaThingy", 0,
dbus_interface=dbus_interface)
# connect_to_signal registers our callback function.
bus_object.connect_to_signal('MediaPlayerKeyPressed',
on_mediakey)
# and we start the main loop.
mainloop = gobject.MainLoop()
mainloop.run()
**Update** :
It seems that your problem is with your Gnome distribution, as someone else
had encountered previously in [this bug
report](https://bugs.launchpad.net/ubuntu/+source/checkbox/+bug/967211). So
probably you should upgrade your distribution.
|
Differences between Matlab and Numpy and Python's `round` function
Question: ## Simplified question
Can I make Numpy agree with Matlab and Python's `round`?
Matlab 2013a:
>> round(-0.5)
ans =
-1
Python (using a Numpy array, or just a scalar, same result):
>>> import numpy
>>> round(numpy.array(-0.5))
-1.0
Numpy, the odd one out:
>>> import numpy
>>> numpy.round(numpy.array(-0.5))
-0
Is this difference in round platform dependent?
## Original question
Matlab comes with a file "handel.mat" containing some audio data:
>> which handel.mat
C:\Program Files\MATLAB\R2013a\toolbox\matlab\audiovideo\handel.mat
>> load handel
>> soundsc(y) % play the short audio clip
I want to work with this data in Python so I use `scipy.io.loadmat` [1].
Specifically, I want to scale the audio's values to span the entire range of
16-bit signed integer, i.e., the smallest value of the audio signal gets
mapped to -2^15 and the largest one to 2^15-1. I was surprised when doing this
in Matlab gave me different results than Python:
Matlab:
>> load handel
>> int16(round(interp1([min(y), max(y)], [-2^15, 2^15-1], y(1:10))))
ans =
-1 %%% <-- Different from Python
-253
-3074
-1277
252
1560
772
-1025
-1277
-3074
Python:
In [1]: import numpy as np
In [2]: import scipy.io as io
In [3]: mat = io.loadmat('handel.mat')
In [4]: np.int16(np.round(np.interp(mat['y'][:10], [mat['y'].min(), mat['y'].max()], [-2.0**15, 2.0**15-1.0])))
Out[4]:
array([[ 0], ### <-- Different from Matlab
[ -253],
[-3074],
[-1277],
[ 252],
[ 1560],
[ 772],
[-1025],
[-1277],
[-3074]], dtype=int16)
There are actually 1231 samples (out of 73113 total) where the Python and
Matlab differ. I think I'm being careful with my types, but really, there's
very little error surface for type bugs to creep in here: `loadmat` should
infer the types from the MAT file, and int16 can't differ that much between
the two systems.
**Added** The first element of the output of the `interp`/`interp1d` commands
are both -0.5 (printing it to the 100th decimal place in both Python and
Matlab confirms this), but rounding in Numpy (`np.round`) yields 0, while
Matlab rounds it to -1. Is this a matter of Matlab rounding semantics?
**Furthermore** Python's built-in non-Numpy `round` for -0.5 gives me -1!
Whence this difference between Numpy's and Python's `round` functions? And
will Python's `round` always match Matlab's?
Windows64, Matlab 8.1 (2013a), Python 2.7.4.
[1]
<http://docs.scipy.org/doc/scipy/reference/generated/scipy.io.loadmat.html>
Answer: `numpy.round`, also known as `numpy.around`, rounds to the _nearest even
value_ for half-integers. This is **not** platform dependent, but a purposeful
implementation detail.
If you wish to change the rounding behaviour rather than reimplement it, see
[this answer](http://stackoverflow.com/a/16000851/1763356).
The summary is that there's a platform-dependent hack to set the
floating-point rounding mode using `fesetround` via `ctypes`. From the post:
import numpy as np
import ctypes
FE_TONEAREST = 0x0000
FE_DOWNWARD = 0x0400
FE_UPWARD = 0x0800
FE_TOWARDZERO = 0x0c00
    libc = ctypes.CDLL('libc.dylib')  # macOS name; on Linux use e.g. 'libc.so.6'
v = 1. / (1<<23)
print repr(np.float32(1+v) - np.float32(v/2)) # prints 1.0
libc.fesetround(FE_UPWARD)
print repr(np.float32(1+v) - np.float32(v/2)) # prints 1.0000002
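To address the simplified question directly, here is a minimal sketch of round-half-away-from-zero (the behaviour of Matlab and of Python 2's built-in `round`); `matlab_round` is a hypothetical helper name:
    import numpy as np
    def matlab_round(x):
        # round half away from zero, like Matlab's round()
        return np.sign(x) * np.floor(np.abs(x) + 0.5)
    print(matlab_round(np.array([-0.5, 0.5, 1.5, 2.5])))  # [-1.  1.  2.  3.]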
|
Python. GUI(input and output matrices)?
Question: Python 3.3.2. Hello. My problem: I need to create a GUI for entering data (a
matrix or table) and then take the data from that form. For example:
`A=[[1.02,-0.25,-0.30,0.515],[-0.41,1.13,-0.15,1.555],[-0.25,-0.14,1.21,2.780]]`
Ideally the input form would be restricted to floats only.
Questions: What can I use? Tkinter doesn't have a table widget. wxPython is
not supported by Python 3. PyQt4? (Maybe you have an example of how to take
data from a table into `[[],[],[]]`?) Anyone have an idea?
Thanks!
Answer: Using tkinter, you don't need a special table widget to do this -- just create
a grid of normal entry widgets. If you have so many that you need a scrollbar
it's slightly more difficult (and there are examples on this site for how to
do that), but just to create a grid of something small it's very
straightforward.
Here's an example that also includes some input validation:
import tkinter as tk
class SimpleTableInput(tk.Frame):
def __init__(self, parent, rows, columns):
tk.Frame.__init__(self, parent)
self._entry = {}
self.rows = rows
self.columns = columns
# register a command to use for validation
vcmd = (self.register(self._validate), "%P")
# create the table of widgets
for row in range(self.rows):
for column in range(self.columns):
index = (row, column)
e = tk.Entry(self, validate="key", validatecommand=vcmd)
                    e.grid(row=row, column=column, sticky="nsew")
self._entry[index] = e
# adjust column weights so they all expand equally
for column in range(self.columns):
self.grid_columnconfigure(column, weight=1)
# designate a final, empty row to fill up any extra space
self.grid_rowconfigure(rows, weight=1)
def get(self):
'''Return a list of lists, containing the data in the table'''
result = []
for row in range(self.rows):
current_row = []
for column in range(self.columns):
index = (row, column)
current_row.append(self._entry[index].get())
result.append(current_row)
return result
def _validate(self, P):
'''Perform input validation.
Allow only an empty value, or a value that can be converted to a float
'''
if P.strip() == "":
return True
try:
f = float(P)
except ValueError:
self.bell()
return False
return True
class Example(tk.Frame):
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.table = SimpleTableInput(self, 3, 4)
self.submit = tk.Button(self, text="Submit", command=self.on_submit)
self.table.pack(side="top", fill="both", expand=True)
self.submit.pack(side="bottom")
def on_submit(self):
print(self.table.get())
root = tk.Tk()
Example(root).pack(side="top", fill="both", expand=True)
root.mainloop()
More about input validation can be found here: [Python/Tkinter: Interactively
validating Entry widget
content](http://stackoverflow.com/questions/4140437/python-tkinter-interactively-validating-entry-widget-content)
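One follow-up worth noting: `get()` returns the raw entry strings, so a small conversion step is needed to obtain the numeric matrix the question asked for. A sketch (inside `on_submit`, for example; empty cells are mapped to 0.0 here as an assumption):
    data = [[float(v) if v.strip() else 0.0 for v in row]
            for row in self.table.get()]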
|
Can Python optimize this simple variable call?
Question: It seems like Python (2.6) is not able to optimize away this simple temp
variable 'a'?
I tend to assign a local variable to some expression in order to reduce the
line length.
To me this is a simple optimization any correct compiler can do automatically.
from dis import dis
def f(func):
func()
def functioncall():
print ' => function called'
def unoptimized():
print 'in unoptimized'
a = functioncall
f(func=a)
def optimized():
print 'in optimized'
f(func=functioncall)
unoptimized()
optimized()
print 'dis(unoptimized)'
dis(unoptimized)
print 'dis(optimized)'
dis(optimized)
Output :
in unoptimized
=> function called
in optimized
=> function called
dis(unoptimized)
10 0 LOAD_CONST 1 ('in unoptimized')
3 PRINT_ITEM
4 PRINT_NEWLINE
11 5 LOAD_GLOBAL 0 (functioncall)
8 STORE_FAST 0 (a)
12 11 LOAD_GLOBAL 1 (f)
14 LOAD_CONST 2 ('func')
17 LOAD_FAST 0 (a)
20 CALL_FUNCTION 256
23 POP_TOP
24 LOAD_CONST 0 (None)
27 RETURN_VALUE
dis(optimized)
15 0 LOAD_CONST 1 ('in optimized')
3 PRINT_ITEM
4 PRINT_NEWLINE
16 5 LOAD_GLOBAL 0 (f)
8 LOAD_CONST 2 ('func')
11 LOAD_GLOBAL 1 (functioncall)
14 CALL_FUNCTION 256
17 POP_TOP
18 LOAD_CONST 0 (None)
21 RETURN_VALUE
Why can't it remove this automatically?
11 5 LOAD_GLOBAL 0 (functioncall)
8 STORE_FAST 0 (a)
Answer: CPython, the standard implementation of Python, doesn't do much optimization;
it is a pretty straightforward translation of source code to bytecode. There
are a few optimizations thrown in (elimination or hoisting of code under `if
__debug__` being one of them) and I do believe there's a peephole optimizer of
some kind, but I assume the compilation phase is meant to be reasonably quick,
which precludes doing a lot of analysis.
Maintainability also seems key to the core Python developers. Here is [a
thread from 2009](http://www.gossamer-threads.com/lists/python/dev/755165) in
which a patch for dead code removal is considered, then rejected, because it
would make the compiler less maintainable.
If you want high performance, you want something other than CPython; maybe
PyPy would help. Python in general is optimized to save programmer time, not
runtime. If you don't want dead code in your bytecode, don't put it in your
source code. :-)
|
Python Barcode Generation
Question: I'm using the elaphe package for python to generate ean-13 barcode images. The
package was installed from source using the tar file found at
<https://pypi.python.org/pypi/elaphe>.
When I run the code:
BARCODE_IMAGE_PATH = "/tmp/"
def create_barcode_image(product_barcode):
path = BARCODE_IMAGE_PATH + product_barcode + '.png'
img = barcode('ean13', product_barcode,
options=dict(includetext=True, height=0.4), margin=1)
img.save(path, 'PNG')
return path
from the python interpreter it seems to work perfectly. The correct barcode is
generated to the path that I specify. When I run it from apache using web.py
as my web framework I receive the error:
Traceback (most recent call last):
...
img_path = create_barcode_image(barcode)
File "/var/www/py/documents/barcode_images.py", line 27, in create_barcode_image
img.save(path, 'PNG')
File "/usr/local/lib/python2.7/dist-packages/PIL/Image.py", line 1406, in save
self.load()
File "/usr/local/lib/python2.7/dist-packages/PIL/EpsImagePlugin.py", line 283, in load
self.im = Ghostscript(self.tile, self.size, self.fp)
File "/usr/local/lib/python2.7/dist-packages/PIL/EpsImagePlugin.py", line 75, in Ghostscript
raise IOError("gs failed (status %d)" % status)
IOError: gs failed (status 256)
Does anyone know what might be causing this error or how to go about debugging
it?
Answer: Add in some debug statements that you can walk through:
import sys
BARCODE_IMAGE_PATH = "/tmp/"
def create_barcode_image(product_barcode):
print >> sys.stderr, "product_barcode: %s" % product_barcode
path = BARCODE_IMAGE_PATH + product_barcode + '.png'
print >> sys.stderr, "path: %s" % path
img = barcode('ean13', product_barcode,
options=dict(includetext=True, height=0.4), margin=1)
print >> sys.stderr, "img data: %s" % img.tostring()
img.save(path, 'PNG')
print >> sys.stderr, "Saved to %s" % path
return path
Then in your shell:
$ tail -F /var/log/httpd/error.log # or wherever you put it
You're looking for two things. First, the output of "`product_barcode: ...`" -
hopefully that's not blank. If it is, the problem lies elsewhere, maybe in
your server config. Then the output of "`img data: ...`" - hopefully it's a
PNG and not blank. If it is blank, the problem lies with your ghostscript
installation.
This is a very rudimentary way of debugging, and I feel that for small
projects it is just as easy to throw in some debug statements rather than
messing around with the debugger, which can be difficult to set up properly.
|
Cannot import FTP_TLS on EC2
Question: I'm writing an FTP script in python on EC2 where I need to be able to import
FTP_TLS for the connection.
`from ftplib import FTP_TLS`
Except, it gives me:
`ImportError: cannot import name FTP_TLS`
I can import FTP_TLS on my local python shell, but it fails on EC2.
What's going on?
EC2 Python 2.6.5 / Local Python 2.7.3
Answer: FTP_TLS is only supported in Python 2.7+.
You could upgrade Python on your server, or just grab `Lib/ftplib.py` from the
source tarball:
    wget http://www.python.org/ftp/python/2.7.1/Python-2.7.1.tgz
    tar -xzf Python-2.7.1.tgz Python-2.7.1/Lib/ftplib.py
Put the extracted `ftplib.py` somewhere on your load path and your `import` will work.
|
python threading Queue producer-consumer with thread safety
Question: I am using threading and Queue to fetch URLs and store data to a database.
I just want one thread to do the storing job,
so I wrote the code below:
import threading
import time
import Queue
site_count = 10
fetch_thread_count = 2
site_queue = Queue.Queue()
proxy_array=[]
class FetchThread(threading.Thread):
def __init__(self,site_queue,proxy_array):
threading.Thread.__init__(self)
self.site_queue = site_queue
self.proxy_array = proxy_array
def run(self):
while True:
index = self.site_queue.get()
self.get_proxy_one_website(index)
self.site_queue.task_done()
def get_proxy_one_website(self,index):
print '{0} fetched site :{1}\n'.format(self.name,index)
self.proxy_array.append(index)
def save():
while True:
if site_queue.qsize() > 0:
if len(proxy_array) > 10:
print 'save :{0} to database\n'.format(proxy_array.pop())
else:
time.sleep(1)
elif len(proxy_array) > 0:
print 'save :{0} to database\n'.format(proxy_array.pop())
elif len(proxy_array) == 0:
print 'break'
break
else:
print 'continue'
continue
def start_crawl():
global site_count,fetch_thread_count,site_queue,proxy_array
print 'init'
for i in range(fetch_thread_count):
ft = FetchThread(site_queue,proxy_array)
ft.setDaemon(True)
ft.start()
print 'put site_queue'
for i in range(site_count):
site_queue.put(i)
save()
print 'start site_queue join'
site_queue.join()
print 'finish'
start_crawl()
Executed output:
init
put site_queue
Thread-1 fetched site :0
Thread-2 fetched site :1
Thread-1 fetched site :2
Thread-2 fetched site :3
Thread-1 fetched site :4
Thread-2 fetched site :5
Thread-1 fetched site :6
Thread-2 fetched site :7
Thread-1 fetched site :8
Thread-2 fetched site :9
save :9 to database
save :8 to database
save :7 to database
save :6 to database
save :5 to database
save :4 to database
save :3 to database
save :2 to database
save :1 to database
save :0 to database
break
start site_queue join
finish
[Finished in 1.2s]
Why does `save()` not save anything until all the sites have been fetched,
even though it is called before `site_queue.join()`?
I have also tried running `save()` in a separate thread, but it doesn't work
either.
Does it mean I must change `proxy_array=[]` to `proxy_queue=Queue.Queue()`,
so that I can use threading to store the data?
I just want one thread to do this, and no other threads would get data from
`proxy_array`, so why should I join it? Using a Queue seems very weird.
Is there a better solution?
**UPDATE:**
I don't want to wait until all the FetchThreads complete their work. I want to
save data while fetching; it would be much faster. I want the result to be
something like below (because I use array.pop(), save 0 may appear very
late; this is just an example for easy understanding):
Thread-2 fetched site :1
Thread-1 fetched site :2
save :0 to database
Thread-2 fetched site :3
Thread-1 fetched site :4
save :2 to database
save :3 to database
Thread-2 fetched site :5
Thread-1 fetched site :6
save :4 to database
.......
**UPDATE 2, for anyone with the same questions as below:**
question:
As I said above, no other threads get data from `proxy_array`. I just cannot
see why this would break thread safety.
answer:
It is the [producer-consumer
problem](http://en.wikipedia.org/wiki/Producer%E2%80%93consumer_problem); I
understood it after reading misha's answer carefully.
question:
One more question: can the program's main thread play the consumer together
with the FetchThreads (in other words, without creating a StoreThread)?
This is what I cannot figure out; I will update after I find the answer.
Answer: I had to come up with something similar, a producer-consumer. The producer
generates an 'id' and the consumer consumes that id to fetch a URL and process
it. Here is my skeleton code which does that:
import Queue
import random
import threading
import time
import sys
data_queue = Queue.Queue()
lock = threading.Lock()
def gcd(a, b):
while b != 0:
a,b = b, a%b
        return a  # note: the loop ends when b == 0, so returning b would always give 0
def consumer(idnum):
while True:
try:
data = data_queue.get(block=False)
except Exception, e:
print 'Exception ' + str(e)
else:
with lock:
print('\t consumer %d: computed gcd(%d, %d) = %d' %(idnum, data[0], data[1], gcd(data[0], data[1])))
time.sleep(1)
data_queue.task_done()
def producer(idnum, count):
for i in range(count):
a,b = random.randint(1, sys.maxint), random.randint(1, sys.maxint)
with lock:
print('\t producer %d: generated (%d, %d)'% (idnum, a, b))
data_queue.put((a,b))
time.sleep(0.5)
if __name__ == '__main__':
num_producers = 1
num_consumers = 2
num_integer_pairs = 10
for i in range(num_consumers):
t = threading.Thread(target=consumer, args=(i,))
t.daemon = True
t.start()
threads = []
for ii in range(num_producers):
thread = threading.Thread(target=producer, args=(ii, num_integer_pairs))
threads.append(thread)
thread.start()
# wait for the producers threads to finish
for thread in threads:
thread.join()
print 'done with producer threads'
# wait till all the jobs are done in the queue
data_queue.join()
with lock:
print 'all consumer threads finished'
with lock:
print 'main thread exited'
|
Storing a list, then reading it as integer
Question: I have a question. It may be an easy one, but anyway I could not find a good
solution. I have 2 Python programs. The first of them gives 2
outputs: one output is a huge list (containing thousands of other lists)
and the other is a simple csv file for Weka. I need to store this list
(the first output) somehow, to be able to use it as input to the other program
later. I cannot just send it to the second program, because when the first
program is done, Weka should also produce new output for the second program.
Hence, the second program has to wait for the outputs of both the first program and Weka.

The problem is that the output list consists of lots of lists holding numerical
values. A simple example could be:
    list1 = [[1,5,7],[14,3,27], [19,12,0], [23,8,17], [12,7]]
If I write this to a txt file, then when I try to read it back, all the
values come in as strings. Is there any easy and fast way (since the data is
big) to read all the values back as integers? Or maybe, in the first place,
to write them as integers?
Answer: How about pickling the list output rather than outputting it as a plaintext
representation? Have a look at [the
documentation](http://docs.python.org/2/library/pickle.html) for your version:
it's basically a way to write Python objects to file, which you can then read
from Python at any point to get identical objects.
Once you have the file open that you want to output to, the outputting
difference will be quite minor, e.g.
import pickle
my_list = [[1, 2], [134, 76], [798, 5, 2]]
with open('outputfile.pkl', 'wb') as output:
        pickle.dump(my_list, output, -1)  # -1 selects the highest available pickle protocol
And then just use the following way to read it in from your second program:
import pickle
my_list = pickle.load(open('outputfile.pkl', 'rb'))
|
opening a batch file but when it opens my python code is stopped because the batch file opens in the same window
Question: So I am basically trying to open and close a batch file (that runs a Minecraft
server) at specific times. The thing that is bothering me is that when
the file opens, it does so in the same window as my script and therefore
basically stops my code, which consequently means that the file has to be
closed manually. I was wondering if there is a way to open it in a separate
window. This is the code I have so far:
import os
import time
print "updates ever 15 mins = 0.25 hours"
hours = input("Current time (nearest 0.25 hour): ")
x = input("Opening time (hour): ")
y = input("Closing time (hour): ")
os.system ("cls")
os.chdir ("C:\\Users\\USERNAME\\Desktop\\Server")
while True:
os.system("cls")
time.sleep(900)
hours += 0.25
difx = x - hours
dify = y - hours
if difx == 0:
os.popen("run.bat")
if dify == 0:
os.system ("say CLOSING in 10 SECONDS")
time.sleep (10)
os.system ("stop")
os.system ("taskkill /f /im cmd.exe")
if hours == 25:
hours = 1
stop is a command that stops the server running and saves all the info but
leaves the window open (hence the taskkill command). Any help would be much
appreciated.
Answer: Replacing `os.popen("run.bat")` with `os.startfile("filepath\\run.bat")` should
work. (Thanks)
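If `os.startfile` doesn't behave as wanted, a hedged alternative on Windows is `subprocess` with its own console window:
    import subprocess
    subprocess.Popen(["run.bat"], creationflags=subprocess.CREATE_NEW_CONSOLE)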
|
'Drunk' input from readline, OK from other programs (reading smart meters P1 port)
Question: I'm new to Python and want to read my smart meters P1 port using a Raspberry
Pi and Python. Problem: the input looks like some component is drunk. I'm sure
it's pretty simple to fix, but after several hours of searching and trying,
had to seek help.
When reading the P1 port with CU etc. everything is fine so the hardware etc.
is OK. Using a serial to USB converter from dx.com ([this
one](http://dx.com/p/usb-to-rs232-serial-port-adapter-transparent-
green-24512?item=4 "this"))
Command and (part of) the output: **cu -l /dev/ttyUSB0 -s 9600 --parity=none**
0-0:96.1.1(205A414246303031363631323463949271)
1-0:1.8.1(03118.000*kWh)
However, when trying to read it from Python, the input becomes gibberish (but
at least sort of consistant):
0-0:96.±.±(²05A´±´²´630303±39363±3²3´639·3±3²©
±-0:±.¸.±(03±±¸.000ªë×è©
How to fix this? The code I'm using is:
import serial
ser = serial.Serial()
ser.baudrate = 9600
ser.bytesize=serial.SEVENBITS
ser.parity=serial.PARITY_EVEN
ser.stopbits=serial.STOPBITS_ONE
ser.xonxoff=0
ser.rtscts=0
ser.timeout=20
ser.port="/dev/ttyUSB0"
ser.close()
ser.open()
print ("Waiting for P1 output on " + ser.portstr)
counter=0
#read 20 lines
while counter < 20:
print ser.readline()
counter=counter+1
try:
ser.close()
print ("Closed serial port.")
except:
sys.exit ("Couldn't close serial port.")
Have already tried messing with baudrate etc. but that doesn't make any
difference.
Answer: I'm not very familiar with the `serial` module, but I noticed that your `cu`
command assumes there is no parity bit (`--parity=none`), but your python
script assumes there is an even parity bit (`ser.parity=serial.PARITY_EVEN`).
I would try
ser.parity=serial.PARITY_NONE
And if there's no parity bit, you'll also probably want
ser.bytesize=serial.EIGHTBITS
|
Python time.clock() result not precise and way off
Question: I have the following script with which I measure real elapsed time for several
sleep functions. I use either time.sleep() to pause the program or
psychopy.core.wait() which is said to be more precise and use the high
resolution timer. I'm testing the latter explicitly because the wait()
function appears to cause some trouble (e.g. pauses the program shorter than
it should).
from psychopy import core
import time
import scipy
import sys
times1 = []
times2 = []
times3 = []
times4 = []
testtime = 40 # Time to wait (40 ms)
n = 200 # Iterations
print "Starting timing test with", testtime, "ms as reference; running", n, "times."
for i in range(n):
t1 = time.time()
time.sleep(testtime/1000.0)
measurement = (time.time()-t1)*1000
times1.append(measurement)
time.clock()
time.sleep(testtime/1000.0)
measurement = time.clock()
times2.append(measurement)
t1 = time.time()
core.wait(testtime/1000.0)
measurement = (time.time()-t1)*1000
times3.append(measurement)
t1 = time.clock()
core.wait(testtime/1000.0)
measurement = time.clock()
times4.append(measurement)
if i%60==0:
sys.stdout.write(".")
print
print "Low precision with time.sleep()"
print "Average is", scipy.mean(times1)
print "StdDev is", scipy.std(times1)
print
print "High precision with time.sleep()"
print "Average is", scipy.mean(times2)
print "StdDev is", scipy.std(times2)
print
print "Low precision with PsychoPy core.wait()"
print "Average is", scipy.mean(times3)
print "StdDev is", scipy.std(times3)
print
print "High precision with PsychoPy core.wait()"
print "Average is", scipy.mean(times4)
print "StdDev is", scipy.std(times4)
The output I get however is:
Starting timing test with 40 ms as reference; running 200 times.
....
Low precision with time.sleep()
Average is 39.0950024128
StdDev is 7.77598671811
High precision with time.sleep()
Average is 16.2315164609
StdDev is 9.24644085289
Low precision with PsychoPy core.wait()
Average is 40.830000639
StdDev is 21.7002567107
High precision with PsychoPy core.wait()
Average is 16.3130358691
StdDev is 9.24395572035
The time returned by time.clock() is way too low! And this happens consistently
across several systems we have here.
Is there anyone who has an idea what is going on here and what might cause
this?
Answer: You need to do the same as you are doing for `time.time()`: save the clock
value before you sleep, and subtract it afterwards to get your measurement.
As you have it, all your clock values just measure the time since the process
started.
|
django "Exception Value: No module named urls" for admin site
Question: I am a django newbie, trying to follow a tutorial. When I try to access the
django administration site
http://127.0.0.1:8000/admin/
it gives the error "Exception Value: No module named urls" for the code below
    #urls.py
from django.conf.urls import *
from mysite.views import *
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
#TODO ADD LIST OF URL
urlpatterns = patterns('',
(r'^home\/?$',get_homepage),
(r'^admin\/?$',include('django.contrib.admin.urls')),
)
I have tried multiple solutions available on Stack Overflow; as a last resort
I am publishing the issue here to get it resolved.
#settings.py
ROOT_URLCONF = 'mysite.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'mysite.wsgi.application'
TEMPLATE_DIRS = (
'/home/abuzzar/djcode/mysite/mysite/template',
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
#'django.contrib.sessions',
#'django.contrib.sites',
#'django.contrib.messages',
#'django.contrib.staticfiles',
'mysite.jobpost',
'django.contrib.admin',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
error : <http://pastebin.com/RMvzPd61>
I tried the solution [Django 1.5.1 'ImportError: No module named urls' when
running
tests](http://stackoverflow.com/questions/15758494/django-1-5-1-importerror-no-module-named-urls-when-running-tests)
but it did not work.
Answer: To [include admin
urls](https://docs.djangoproject.com/en/dev/ref/contrib/admin/#hooking-adminsite-instances-into-your-urlconf),
use this:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^admin/', include(admin.site.urls)),
)
Or, to fix your issue, add `site` between `admin` and `urls`
(r'^admin\/?$',include('django.contrib.admin.site.urls'))
On a general note, I would revisit the URL pattern. `^admin\/?$` should not
have the `$` at the end since you are including another url conf.
Also, `^admin/` should be sufficient.
|
Pythonic way to correctly separate Model from application using SQLAlchemy
Question: I'm having a hard time making my application run. The Flask-SQLAlchemy
extension creates an empty database whenever I try to separate modules into
packages. To better explain what I'm doing, let me show how my project is structured:
Project
|
|-- Model
| |-- __init__.py
| |-- User.py
|
|-- Server
| |-- __init__.py
|
|-- API
| |-- __init__.py
The idea is simple: I want to create a package for my model, as I don't like
spreading code in a single package, and separate "sub" projects (like API), as
in the future I will be using blueprints to better isolate sub apps.
The code is very simple:
First, the `Model.__init__.py`:
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
Note that I created this only to use a single `SQLAlchemy()` object across
the package. Now we go to Model.User:
from Model import db
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
Name = db.Column(db.String(80))
Age = db.Column(db.Integer)
...
Once again note the `from Model import db` that I used to get the same db
object.
Finally, the `Server.__init__.py` goes like this:
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import Model, API
db = Model.db
def main():
app = Flask("__main__")
db = SQLAlchemy(app)
db.create_all()
API.SetAPIHookers(app)
app.run(host="0.0.0.0", port=5000, debug=True)
if __name__ == "__main__":
main()
From my point of view, the `db = SQLAlchemy(app)` allows me to pass my app
object without creating a circular reference.
The problem is that whenever I run this code, the sqlite database file is
created empty. That made me think that maybe Python doesn't import things like
I thought it would. So I tested my theory by removing the import of Model and
creating the User directly inside Server... and voilà, it worked!
Now comes my question: Is there a 'pythonic' way to correctly separate modules
like I want or should I leave everything in the same package?
Answer: Right now, you have set up your application using what is a rough equivalent
to the "[Application
Factory](http://flask.pocoo.org/docs/patterns/appfactories/)" pattern (so
called by the Flask documentation). This is a Flask idea, not a Python one. It
has some advantages, but it also means that you need to do things such as
initialize your SQLAlchemy object using the `init_app` method rather than the
SQLAlchemy constructor. There is nothing "wrong" with doing it this way, but
it means that you need to run methods like `create_all()` within an
[application context](http://flask.pocoo.org/docs/appcontext/), which
currently you would not be if you tried to run it in the `main()` method.
There are a few ways you can resolve this, but it's up to you to determine
which one you want (there is no right answer):
## Don't use the Application Factory pattern
In this way, you don't create the app in a function. Instead, you put it
somewhere (like in `project/__init__.py`). Your `project/__init__.py` file can
import the `models` package, while the `models` package can import `app` from
`project`. This is a circular reference, but that's okay as long as the `app`
object is created in the `project` package first before `model` tries to
import `app` from `package`. See the Flask docs on [Larger Application
Patterns](http://flask.pocoo.org/docs/patterns/packages/) for an example where
you can split your package into multiple packages, yet still have these other
packages be able to use the `app` object by using circular references. The
docs even say:
> Every Python programmer hates them, and yet we just added some: circular
> imports. [...] Be advised that this is a bad idea in general but here it is
> actually fine.
If you do this, then you can change your `Models/__init__.py` file to build
the `SQLAlchemy` object with a reference to the app in the constructor. In
that way, you can use `create_all()` and `drop_all()` methods of the
`SQLAlchemy` object, [as described in the documentation for Flask-
SQLAlchemy](http://pythonhosted.org/Flask-SQLAlchemy/api.html#configuration).
## Keep how you have it now, but build in a request_context()
If you continue with what you have now (creating your app in a function), then
you will need to build the `SQLAlchemy` object in the `Models` package without
using the `app` object as part of the constructor (as you've done). In your
main method, change the...
db = SQLAlchemy(app)
...to a...
db.init_app(app)
Then, you would need to move the `create_all()` method into a function inside
of the application context. A common way to do this for something this early
in the project would be to utilize the
[`before_first_request()`](http://flask.pocoo.org/docs/api/#flask.Flask.before_first_request)
decorator....
app = Flask(...)
@app.before_first_request
def initialize_database():
db.create_all()
The "initialize_database" method is run before the first request is handled by
Flask. You could also do this at any point by using the `app_context()`
method:
app = Flask(...)
with app.app_context():
# This should work because we are in an app context.
db.create_all()
Realize that if you are going to continue using the Application Factory
pattern, you should really understand how the application context works; it
can be confusing at first but necessary to realize what errors like
"application not registered on db instance and no application bound to current
context" mean.
|
Why do I not get a good binary image with a grey background using the mahotas python module?
Question: I have some problems with an image with a grey background using the mahotas
library. Example:
This is the code:
import mahotas as mh
path ="./imagepath/a.jpg"
fork = mh.imread(path)
bin = fork[:,:,0]
bfork = bin <230
After that I got this:
What do I have to do to get a black background and a white sign? I tried the
opencv module and it worked well, but I prefer mahotas.
import cv2
path ="./imagepath/a.jpg"
ow = int ((oshape[0]/100 )*7 )
oh = int ((oshape[0]/100 )*7 )
gray = cv2.imread(path,0)
element = cv2.getStructuringElement(cv2.MORPH_CROSS,(ow,oh))
graydilate = cv2.erode(gray, element)
ret,thresh = cv2.threshold(graydilate,127,255,cv2.THRESH_BINARY_INV)
bin = thresh
Answer: You are not doing the same as in the opencv version. If you do, you'll get the
wanted results:
fork = mh.imread(path)
bin = fork[:,:,0]
bin = mh.erode(bin)
bin = (bin < 127)
The erosion step was missing and the threshold was different.
from matplotlib import pyplot as plt
plt.imshow(bin)
plt.gray()

|
how to use scipy.stats.kstest/basic questions about Kolmogorov–Smirnov test
Question: The help link is
<http://docs.scipy.org/doc/scipy-0.7.x/reference/generated/scipy.stats.kstest.html>
I can compute the ks-test value now,but I do not understand it. The code is as
below.
from scipy import stats
import numpy as np
sample =np.loadtxt('mydata',delimiter=",",usecols=(2,),unpack=True)
print stats.kstest(sample, 'poisson', args=(1,))
Q1
If the reference distribution is constant, what word can replace 'poisson'
above?
Q2
What is the meaning of `args=(1,)`?
Q3
If anybody is interested in ks-test,here is the wiki link.
<http://en.wikipedia.org/wiki/Kolmogorov%E2%80%93Smirnov_test>
Can we write our own python code to practice? We can get _max(D)_ easily, but
how do we get _Pr(K <= x)_ in the link? What is the relation between _max(D)_
and _Pr(K <= x)_?
Answer: Q2: look at this, I have an array called `x1`
>>> stats.kstest(x1, 'norm')
(0.50018855199491585, 0.0)
>>> stats.kstest(x1, stats.norm.cdf)
(0.50018855199491585, 0.0)
>>> stats.kstest(x1, stats.norm.cdf, args=(0,))
(0.50018855199491585, 0.0)
>>> stats.kstest(x1, stats.norm.cdf, args=(2,))
(0.84134903906580316, 0.0)
>>> stats.kstest(x1, 'norm', args=(2,))
(0.84134903906580316, 0.0)
If you pass the name of a distribution, i.e. `'norm'`, what actually gets
passed to `kstest` is the standard distribution's `cdf`. By standard, that
means a normal distribution with mean==0 and sigma==1. If you don't want the
standard `cdf`, you can pass additional parameters to the `cdf` using
`args=()`. In this case I only passed the mean. That is, we are testing the
difference between `x1` and a normal distribution with mean==2 and sigma==1.
Q3: The short answer is: yes. But why reinvent the wheel? If you want to
know how it is implemented, just check the source code. It is in
`your_package_folder\scipy\stats\stats.py`, line 3292.
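For the do-it-yourself part of Q3: _max(D)_ is just the largest vertical gap
between the empirical CDF and the reference CDF, and _Pr(K <= x)_ is the
Kolmogorov distribution that turns that gap into a p-value (the part scipy
adds on top). A sketch of computing the statistic by hand against a standard
normal:

    import numpy as np
    from scipy import stats

    x = np.random.normal(size=1000)
    x_sorted = np.sort(x)
    n = len(x_sorted)

    # reference CDF evaluated at the sorted sample points
    cdf_vals = stats.norm.cdf(x_sorted)
    # empirical CDF just after and just before each point
    ecdf_hi = np.arange(1.0, n + 1) / n
    ecdf_lo = np.arange(0.0, n) / n

    # max(D): the largest vertical gap between empirical and reference CDF
    d = max((ecdf_hi - cdf_vals).max(), (cdf_vals - ecdf_lo).max())

    print d
    print stats.kstest(x, 'norm')[0]  # agrees with d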
|
Python3: ZipFile instance has no attribute 'extractall'
Question:
from zipfile import ZipFile
fzip=ZipFile("crackme.zip")
fzip.extractall(pwd=b"mysecretpassword")
The script works fine in IDLE, but when I run it from the command line, it
displays:
> unzip.py
fzip.extractall(pwd=b"mysecretpassword")
^
SyntaxError: invalid syntax
what's wrong?
Answer: It works (Ubuntu 13.04):
>>> import sys
>>> sys.version
'3.3.1 (default, Apr 17 2013, 22:32:14) \n[GCC 4.7.3]'
>>> from zipfile import ZipFile
>>> f = ZipFile('a.zip')
BTW, `pwd` should be a bytes object:
>>> f.extractall(pwd="mysecretpassword")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python3.3/zipfile.py", line 1225, in extractall
self.extract(zipinfo, path, pwd)
File "/usr/lib/python3.3/zipfile.py", line 1213, in extract
return self._extract_member(member, path, pwd)
File "/usr/lib/python3.3/zipfile.py", line 1275, in _extract_member
with self.open(member, pwd=pwd) as source, \
File "/usr/lib/python3.3/zipfile.py", line 1114, in open
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
TypeError: pwd: expected bytes, got <class 'str'>
>>> f.extractall(pwd=b'mysecretpassword')
>>>
According to [`zipfile.ZipFile.extractall`
documentation](http://docs.python.org/3/library/zipfile.html#zipfile.ZipFile.extractall):
> **Warning** Never extract archives from untrusted sources without prior
> inspection. It is possible that files are created outside of path, e.g.
> members that have absolute filenames starting with "/" or filenames with two
> dots "..".
>
> _Changed in version 3.3.1_ : The zipfile module attempts to prevent that.
> See `extract()` note.
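One possible explanation for the original SyntaxError, by the way: bytes
literals like `b"..."` only became valid syntax in Python 2.6, so if the
command line picks up an older interpreter than IDLE does, the `b` prefix
itself will raise a SyntaxError. A quick way to check which interpreter
actually runs your script (an assumption about your setup, worth verifying):

    import sys
    print(sys.version)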
|
How to install pyodbc to be used in ipython
Question: I'm confused. I have installed pyodbc on my computer and I was able to import
it using another IDE, but I'm new to IPython.
I use Anaconda, and was able to install other libraries using something like
pip install BeautifulSoup
But when I do that with pyodbc using
pip install pyodbc
I get this error:
    error: command 'gcc' failed with exit status 1
* * *
C:\Users\jeannie.chirayu>pip install pyodbc
Downloading/unpacking pyodbc
You are installing a potentially insecure and unverifiable file. Future versions of pip will default to disallowing insecure files.
Downloading pyodbc-3.0.7.zip (85kB): 85kB downloaded
Running setup.py egg_info for package pyodbc
warning: no files found matching 'tests\*'
Installing collected packages: pyodbc
Running setup.py install for pyodbc
building 'pyodbc' extension
C:\Anaconda\Scripts\gcc.bat -DMS_WIN64 -mdll -O -Wall -DPYODBC_VERSION=3.0.7 -IC:\Anaconda\include -IC:\Anaconda\PC -c c:\users\jeanni~1.chi\appdata\local\temp\pip_build_jeannie.chirayu\pyodbc\src\buffer.cpp -o c:\users\jeanni~1.chi\appdata\local\temp\pip_build_jeannie.chirayu\pyodbc\src\buffer.o /Wall /wd4668 /wd4820 /wd4711 /wd4100 /wd4127 /wd4191
gcc.exe: error: /Wall: No such file or directory
gcc.exe: error: /wd4668: No such file or directory
gcc.exe: error: /wd4820: No such file or directory
gcc.exe: error: /wd4711: No such file or directory
gcc.exe: error: /wd4100: No such file or directory
gcc.exe: error: /wd4127: No such file or directory
gcc.exe: error: /wd4191: No such file or directory
error: command 'gcc' failed with exit status 1
Complete output from command C:\Anaconda\python.exe -c "import setuptools;__file__='c:\users\jeanni~1.chi\appdata\local\temp\pip_build_jeannie.chirayu\pyodbc\setup.py';exec(compile(open(__file__).read().replace('\r\n', '\n'), __file__, 'exec'))" install --record c:\users\jeanni~1.chi\appdata\local\temp\pip-lqnyba-record\install-record.txt --single-version-externally-managed:
running install
running build
running build_ext
building 'pyodbc' extension
C:\Anaconda\Scripts\gcc.bat -DMS_WIN64 -mdll -O -Wall -DPYODBC_VERSION=3.0.7
-IC :\Anaconda\include -IC:\Anaconda\PC -c
c:\users\jeanni~1.chi\appdata\local\temp\
pip_build_jeannie.chirayu\pyodbc\src\buffer.cpp -o
c:\users\jeanni~1.chi\appdata
\local\temp\pip_build_jeannie.chirayu\pyodbc\src\buffer.o /Wall /wd4668
/wd4820 /wd4711 /wd4100 /wd4127 /wd4191
gcc.exe: error: /Wall: No such file or directory
gcc.exe: error: /wd4668: No such file or directory
gcc.exe: error: /wd4820: No such file or directory
gcc.exe: error: /wd4711: No such file or directory
gcc.exe: error: /wd4100: No such file or directory
gcc.exe: error: /wd4127: No such file or directory
gcc.exe: error: /wd4191: No such file or directory
error: command 'gcc' failed with exit status 1
* * *
Cleaning up...
Command C:\Anaconda\python.exe -c "import setuptools;__file__='c:\users\jeanni~1.chi\appdata\local\temp\pip_build_jeannie.chirayu\pyodbc\setup.py';exec(compile(open(__file__).read().replace('\r\n', '\n'), __file__, 'exec'))" install --record c:\users\jeanni~1.chi\appdata\local\temp\pip-lqnyba-record\install-record.txt --single-version-externally-managed failed with error code 1 in c:\users\jeanni~1.chi\appdata\local\temp\pip_build_jeannie.chirayu\pyodbc
Storing complete log in C:\Users\j\pip\pip.log
Any recommendation would help. Thanks.
Answer: This was annoying, but I got it working. Basically, the pyodbc source
code is missing quite a few pieces.
1) In the pyodbc directory, open setup.py and search for "wd4668".
Change that list to look like this:
settings['extra_compile_args'] = []
2) In the src directory, create a file called "abc_minmax.h". In it, put:
#ifndef min
#define min(a, b) ((a < b) ? a : b)
#define max(a, b) ((a > b) ? a : b)
#endif
3) In the following files in the src directory:
cursor.h
params.h
sqlwchar.h
add the following line near the other includes at the top:
#include "abc_minmax.h"
4) Finally, in the file wrapper.h, add the following 2 lines near the other
includes:
#include <Windows.h>
#include <Winreg.h>
OK, that should do it! Let me know if something doesn't work.
|
Python multiprocessing: calling pool.map within a function
Question: I am trying to use the `multiprocessing` package to use multiple CPUs within
a function. When I run a toy example outside of a function it runs in a
quarter of a second with no problems (see below).
from multiprocessing import Pool
import time
start = time.clock()
def f(x):
return x*x
if __name__ == '__main__':
with Pool(processes=7) as pool:
result = pool.map(f, range(1000))
print(time.clock() - start)
However, when I adapt the same code into a function (see below), it prints
`True` to indicate that `__name__ == '__main__'`, but then it runs forever and
never returns a result. I am running Python 3.3 on Windows 7.
from multiprocessing import Pool
import time
start = time.clock()
def f(x):
return x*x
def testfunc(r):
if __name__ == '__main__':
print(True)
with Pool(processes=7) as pool:
result = pool.map(f, range(r))
return result
result = testfunc(1000)
print(time.clock() - start)
Answer: You are using `if __name__ == '__main__'` in the wrong place.
from multiprocessing import Pool
import time
start = time.clock()
def f(x):
return x*x
def testfunc(r):
print(True)
with Pool(processes=7) as pool:
result = pool.map(f, range(r))
return result
if __name__ == '__main__':
result = testfunc(1000)
print(time.clock() - start)
According to [`multiprocessing - Programming
guidelines`](http://docs.python.org/3/library/multiprocessing.html#multiprocessing-
programming):
> _Safe importing of main module_
>
> Make sure that the main module can be safely imported by a new Python
> interpreter without causing unintended side effects (such as starting a new
> process).
>
> ... one should protect the “entry point” of the program by using if __name__
> == '__main__': as follows:
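The guideline's example is exactly the pattern in the corrected code above.
The reason it matters on Windows (where there is no `fork()`): each worker
process starts a fresh interpreter and imports your main module, so unguarded
module-level code runs again in every child. A small sketch that makes the
re-import visible:

    import os
    from multiprocessing import Pool

    def f(x):
        return x * x

    # Runs once in the parent and once more in every spawned worker process.
    print('module imported in process', os.getpid())

    if __name__ == '__main__':
        with Pool(processes=2) as pool:
            print(pool.map(f, range(4)))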
|
Creating a maze in Python/Pygame but not sure how to make walls
Question:
import pygame, sys
from pygame.locals import *
pygame.init()
windowwidth = 600
windowheight = 800
WALLWIDTH = 30
WALLHEIGHT = 30
PLAYERWIDTH = 20
PLAYERHEIGHT = 20
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
LIMEGREEN = (50, 205, 50)
running = True
while running == True:
for event in pygame.event.get():
if event.type == QUIT:
running = False
        if event.type == KEYDOWN:
            if (event.key == K_UP or event.key == K_w):
                pass  # TODO: move the player up
            elif (event.key == K_DOWN or event.key == K_s):
                pass  # TODO: move the player down
            elif (event.key == K_LEFT or event.key == K_a):
                pass  # TODO: move the player left
            elif (event.key == K_RIGHT or event.key == K_d):
                pass  # TODO: move the player right
thescreen = pygame.display.set_mode((windowwidth, windowheight))
pygame.display.set_caption('Maze Game')
mainclock = pygame.time.Clock()
player1 = pygame.draw.rect(thescreen, LIMEGREEN, (50, 50, PLAYERWIDTH, PLAYERHEIGHT))
I am trying to create a maze game (not generator) for a project in school
using Python/Pygame. I have looked all over for a way to create walls and make
a player (just a green rect) collide with these walls, and how to make several
levels for this. How would I make levels, how would I create walls, and how
would I detect collision between player and walls?
Answer: Here are the two easiest ways I know of making walls. Both ways work with
graph structures and graph search algorithms so you can implement "path-
finding" in the future if you wish. This is all off the top of my head so I
apologize if any of it is unclear, but I have also provided links to relevant
pieces of documentation that you can check out if you are confused.
# Method 1: Tile-Based Maze
This is probably the easiest method to generate maps in because it can be done
simply by making an array of ASCII characters and processing them in Python to
make square "wall" objects.
Take this grid for example:
###########
# #
# ###### #
#S #F #
###########
'S' denotes the starting point and 'F' denotes the finish point. Yes this is
probably the easiest "maze" to solve in the world but it's just an example.
Imagine that each character in my horrible ASCII array represents a square
tile of size N x N. The spaces represent tiles the player can walk on, and the
hash characters '#' represent walls.
In this type of game, walls are game entities themselves. Specifically in the
context of Pygame, they inherit from the [Sprite
class](http://www.pygame.org/docs/ref/sprite.html). Sprite classes are special
classes which represent entities, or basically existing objects in your game.
They are very special because they can represent obstacles, walls, floors,
ceilings, players, enemies, you name it. Basically every object in your game
can inherit from the Sprite class.
So what makes Sprite classes so special? Well for one, you mentioned that you
were having conceptual difficulty understanding wall-collision. Each sprite in
Pygame has its own [Rect attribute](http://www.pygame.org/docs/ref/rect.html).
A rect attribute is basically just an invisible rectangle that is used to
determine things like collision detection, and drawing sprites. By definition,
in a pure tile-based map, a "collision" between entities is defined as
follows: Two entities are colliding if their rectangles overlap each other.
Then there is a method of the Sprite class called
[`pygame.sprite.groupcollide()`](http://www.pygame.org/docs/ref/sprite.html#pygame.sprite.collide_rect).
Each wall in the ASCII map above has a width, height, and location determined
by their position in the array. Thus each hashtag character directly
represents a rectangle that is also a square and has a square "surface" image.
Have your player inherit from the sprite class and place him in one sprite
group, the "player group". Have your wall entities inherit from the sprite
class and place them in another sprite group, call this the "obstacle group"
or something similar. All you have to do then is call
[`pygame.sprite.groupcollide()`](http://www.pygame.org/docs/ref/sprite.html#pygame.sprite.collide_rect)
in every frame of your game loop and use the dictionary it returns to tell if
the player is colliding with any sprites. I've provided links to the
documentation if any of this is unclear. Reading the Pygame documentation will
probably help you understand this better than my answer can.
So anyway, what you end up with at the end of all this is a _dictionary_. I'll
quote the documentation directly to explain my point:
groupcollide(group1, group2, dokill1, dokill2, collided = None) -> Sprite_dict
This will find collisions between all the Sprites in two groups. Collision is
determined by comparing the Sprite.rect attribute of each Sprite or by using the
collided function if it is not None.
Every Sprite inside group1 is added to the return dictionary. The value
for each item is the list of Sprites in group2 that intersect.
You would call this function directly in every iteration of your game loop
_after_ you update the player's movement and _before_ you draw all your
entities using the player's group as the `group1` argument, and the obstacles
group as your `group2` argument. Then you end up with a dictionary of the
following form:
{player_in_group1: [<list of obstacles from group2 that collide with player>] }
So what do you do with that list? Well, it's recommended that you define your
own local function (you can also make it a method of the player class if you
wish) for dealing with this. Here is my extremely high-level pseudo code
implementation which is not close to actual code at all:
def handle_collisions(sprite_dict):
'''given sprite dict, moves all entities in group1 out of group2's
rectangle area'''
for p in sprite_dict:
for o in sprite_dict[p]:
# move p in such a way that its rectangle is no longer overlapping
# with the rectangle of o with the additional constraint that p must
# be moved **as minimally as possible.**
I'm not going to implement the function for you since I feel it would be
better to leave the challenge to you. :) I will warn you that the logic is not
that simple, however.
Incidentally this type of maze/map structure is used in many popular games
including Legend of Zelda, Pacman, Bomberman, Tetris, etc, etc. I couldn't
possibly name them all but you get the point. This is a _proven_ method since
it seamlessly integrates itself with game design. But don't take my word for
it, there is [an entire website which explains why tile-based games are so
powerful.](http://www.tonypa.pri.ee/tbw/tut00.html)
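To make Method 1 concrete, here is a minimal sketch (the `Wall` class, `TILE`
constant, and grid are my own illustration; `spritecollide` is the simpler
single-sprite cousin of `groupcollide`):

    import pygame

    TILE = 30  # assumed tile size in pixels

    LEVEL = ["###########",
             "#         #",
             "# ####### #",
             "#S      #F#",
             "###########"]

    class Wall(pygame.sprite.Sprite):
        def __init__(self, col, row):
            pygame.sprite.Sprite.__init__(self)
            self.image = pygame.Surface((TILE, TILE))
            self.rect = self.image.get_rect(topleft=(col * TILE, row * TILE))

    walls = pygame.sprite.Group()
    for row, line in enumerate(LEVEL):
        for col, char in enumerate(line):
            if char == '#':
                walls.add(Wall(col, row))

    # inside the game loop, after moving the player sprite:
    #     hits = pygame.sprite.spritecollide(player, walls, False)
    # a non-empty 'hits' list means the player overlaps a wall and must be
    # pushed back out (the "move minimally" logic discussed above)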
# Method 2: Vertex-Edge Based Maze
Note this method is much harder to implement. It's purely [Graph
based.](http://en.wikipedia.org/wiki/Graph_%28mathematics%29) Each space in
the graph is a node which the player can traverse. What determines whether an
entity is allowed to move between two nodes (in other words...collision based
on the principle of _restriction_) is whether or not an _edge_ exists between
those two nodes in the underlying undirected (or directed, if you wish) graph.
I'm not really going to explain this in detail because it's just too difficult
to cover in one answer. You'll just have to do your own research if you want
to use this method but keep in mind it's a lot harder since Pygame doesn't
actually support you much for this strategy. If you're really interested in
this, the best place to start is probably [Google](http://google.com).
* * *
And that's it! Make an attempt with the information I've given you and if you
have any trouble with this, you can ask another question here or on [The
GameDev StackExchange](http://gamedev.stackexchange.com/). In the future when
you ask a question on SO, try and make sure that it is a _specific programming
question_ or you will very likely get lots of downvotes.
|
Python 27 can't import shared object from guppy
Question: I installed guppy the memory profiler from its svn#95 via "sudo python
setup.py install".
It looks properly installed.
yey@yey:/usr/local/lib/python2.7/dist-packages/guppy/heapy$ ls *.so *.py
AbstractAlgebra.py ImpSet.py Path.py Remote.py Use.py
Classifiers.py __init__.py pbhelp.py RM.py View.py
Console.py Monitor.py Prof.py Spec.py
Doc.py OutputHandling.py RefPat.py Target.py
heapyc.so Part.py RemoteConstants.py UniSet.py
But I still can't import it. Guppy's Python source does this import so it
should succeed.
>>> import guppy.heapy
>>> import guppy.heapy.heapyc
# trying /usr/local/lib/python2.7/dist-packages/guppy/heapy/heapyc.so
# trying /usr/local/lib/python2.7/dist-packages/guppy/heapy/heapycmodule.so
# trying /usr/local/lib/python2.7/dist-packages/guppy/heapy/heapyc.py
# trying /usr/local/lib/python2.7/dist-packages/guppy/heapy/heapyc.pyc
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
ImportError: No module named heapyc
My question is: Python clearly made an attempt to import the file at the
correct location, so why did it fail? Is it because the .so file is corrupted?
Or is my ld.so.cache bad somehow? Thanks!
Answer: There are many possible problems with the .so file that could cause this—no
read access, a corrupted file, an empty file, a perfectly valid library but
for the wrong platform/architecture, etc. Worse, the .so itself may be fine,
but it may have load-time dependencies on a _different_ file that has any of
the above problems.
Unfortunately, the Python 2.x importer doesn't show you _which_ problem it's
actually hit; all you can tell is that, for some reason, the call to open the
shared library failed.
It's worth noting that in 3.1 or later, you would have gotten a much more
useful error message, something like this:
ImportError: dlopen(/usr/local/lib/python3.3/dist-packages/guppy/heapy/heapyc.so, 2): no suitable image found. Did find:
/usr/local/lib/python3.3/dist-packages/guppy/heapy/heapyc.so: Permission denied
However, that's only possible because the importer was rewritten from scratch
for 3.1, and there's no way such a radical change is ever going to be
backported to 2.7.
* * *
Most platforms come with tools that let you test shared libraries, and this is
really the best way to diagnose the problem.
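On Linux, for example, `ldd` lists a shared library's dependencies and flags
any that can't be found (using the path from the question):

    $ ldd /usr/local/lib/python2.7/dist-packages/guppy/heapy/heapyc.so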
But for a simple and platform-independent test, you can just use the `ctypes`
library that comes with Python itself:
>>> import ctypes
>>> ctypes.CDLL('/usr/local/lib/python2.7/dist-packages/guppy/heapy/heapyc.so')
You should get an error, like this:
OSError: /usr/local/lib/python2.7/dist-packages/guppy/heapy/heapyc.so: cannot open shared object file: Permission denied
In this case, the file isn't readable (or, on platforms that require shared
libraries to be executable, it either isn't readable or isn't executable),
which should be enough to fix the problem. So, a `chmod a+r` should fix it
(although you may want to go further and figure out why it wasn't readable in
the first place).
If the error doesn't tell you enough to fix it yourself, and searching doesn't
help, at least you can come to SO and ask a question that will be much more
likely to get an immediate answer…
|
Discovering Poetic Form with NLTK and CMU Dict
Question: **Edit: This code has been worked on and released as a basic
module:<https://github.com/hyperreality/Poetry-Tools>**
I'm a linguist who has recently picked up python and I'm working on a project
which hopes to automatically analyze poems, including detecting the form of
the poem. I.e. if it found a 10 syllable line with 0101010101 stress pattern,
it would declare that it's iambic pentameter. A poem with 5-7-5 syllable
pattern would be a haiku.
I'm using the following code, part of a larger script, but I have a number of
problems which are listed below the program:
corpus in the script is simply the raw text input of the poem.
import sys, getopt, nltk, re, string
from nltk.tokenize import RegexpTokenizer
from nltk.util import bigrams, trigrams
from nltk.corpus import cmudict
from curses.ascii import isdigit
...
def cmuform():
tokens = [word for sent in nltk.sent_tokenize(corpus) for word in nltk.word_tokenize(sent)]
d = cmudict.dict()
text = nltk.Text(tokens)
words = [w.lower() for w in text]
regexp = "[A-Za-z]+"
exp = re.compile(regexp)
def nsyl(word):
lowercase = word.lower()
if lowercase not in d:
return 0
else:
first = [' '.join([str(c) for c in lst]) for lst in max(d[lowercase])]
second = ''.join(first)
third = ''.join([i for i in second if i.isdigit()]).replace('2', '1')
return third
#return max([len([y for y in x if isdigit(y[-1])]) for x in d[lowercase]])
sum1 = 0
for a in words:
if exp.match(a):
print a,nsyl(a),
sum1 = sum1 + len(str(nsyl(a)))
print "\nTotal syllables:",sum1
I guess that the output that I want would be like this:
1101111101
0101111001
1101010111
The first problem is that I lost the line breaks during the tokenization, and
I really need the line breaks to be able to identify form. This should not be
too hard to deal with though. The bigger problems are that:
* I can't deal with non-dictionary words. At the moment I return 0 for them, but this will confound any attempt to identify the poem, as the syllabic count of the line will probably decrease.
* In addition, the CMU dictionary often says that there is stress on a word - '1' - when there is not - '0 - . Which is why the output looks like this: 1101111101, when it should be the stress of iambic pentameter: 0101010101
So how would I add some fudging factor so the poem still gets identified as
iambic pentameter when it only approximates the pattern? It's no good to code
a function that identifies lines of 01's when the CMU dictionary is not going
to output such a clean result. I suppose I'm asking how to code a 'partial
match' algorithm.
Answer: Welcome to Stack Overflow. I'm not that familiar with Python, but I see you
have not received many answers yet so I'll try to help you with your queries.
First some advice: You'll find that if you focus your questions your chances
of getting answers are greatly improved. Your post is too long and contains
several different questions, so it is beyond the "attention span" of most
people answering questions here.
Back on topic:
Before you revised your question you asked how to make it less messy. That's a
big question, but you might want to use the top-down procedural approach and
break your code into functional units:
1. split corpus into lines
2. For each line: find the syllable length and stress pattern.
3. Classify stress patterns.
You'll find that the first step is a single function call in Python:
    corpus.split("\n")
It can remain in the main function, but the second step would be better placed
in its own function, and the third step would need to be split up itself and
would probably be better tackled with an object-oriented approach. If you're
in academia you might be able to convince the CS faculty to lend you a
post-grad for a couple of months to help you instead of some workshop
requirement.
Now to your other questions:
**Not losing line breaks** : as @ykaganovich mentioned, you probably want to
split the corpus into lines and feed those to the tokenizer.
**Words not in dictionary/errors** : The [CMU dictionary home
page](http://www.speech.cs.cmu.edu/cgi-bin/cmudict) says:
_Find an error? Please contact the developers. We will look at the problem and
improve the dictionary. (See at bottom for contact information.)_
There is probably a way to add custom words to the dictionary / change
existing ones, look in their site, or contact the dictionary maintainers
directly. You can also ask here in a separate question if you can't figure it
out. There's bound to be someone in stackoverflow that knows the answer or can
point you to the correct resource. Whatever you decide, you'll want to contact
the maintainers and offer them any extra words and corrections anyway to
improve the dictionary.
Classifying input corpus when it doesn't exactly match the pattern: You might
want to look at the link ykaganovich provided for fuzzy string comparisons.
Some algorithms to look for:
* Levenshtein distance: gives you a measure of how different two strings are as the number of changes needed to turn one string into another. Pros: easy to implement (a minimal implementation is sketched after this list). Cons: not normalized; a score of 2 means a good match for a pattern of length 20 but a bad match for a pattern of length 3.
* Jaro-Winkler string similarity measure: similar to Levenshtein, but based on how many character sequences appear in the same order in both strings. It is a bit harder to implement but gives you normalized values (0.0 - completely different, 1.0 - the same) and is suitable for classifying the stress patterns. A CS postgrad or last year undergrad should not have too much trouble with it ( _hint hint_ ).
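As promised, a minimal Levenshtein sketch (not tuned for speed) that you could
use to score how far a scanned stress pattern is from an ideal one like
`'0101010101'`:

    def levenshtein(a, b):
        """Minimum number of single-character edits turning a into b."""
        prev = range(len(b) + 1)
        for i, ca in enumerate(a, 1):
            curr = [i]
            for j, cb in enumerate(b, 1):
                curr.append(min(prev[j] + 1,                 # deletion
                                curr[j - 1] + 1,             # insertion
                                prev[j - 1] + (ca != cb)))   # substitution
            prev = curr
        return prev[-1]

    print levenshtein('0101010101', '1101111101')  # score a line against iambic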
I think those were all your questions. Hope this helps a bit.
|
Using with python selenium WebDriverWait in pysaunter for async pages?
Question: I'm trying to code nicely against a web site with AJAX-like functionality,
using pysaunter (<http://element34.ca/products/saunter/pysaunter>).
When I use the available synchronization method wait_for_available, perhaps
improperly, my code does more or less what I want, but the Selenium server
node throws errors like the following while the class is not yet present:
> org.openqa.selenium.remote.ErrorHandler$UnknownServerException: Unable to
> locate element: {"method":"css selector","selector":".ng-scope.ready.idle"}
I'd like to use WebDriverWait, I think like this:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
WebDriverWait(self.driver, 30).until(EC.presence_of_element_located((By.CLASS_NAME,'idle')))
But when I try that, I still get the above exception from a Firefox remote
webdriver, and the following from a chrome remote webdriver:
> 13:09:22.525 WARN - Exception: no such element (Session info:
> chrome=29.0.1547.76) (Driver info: chromedriver=2.0,platform=Mac OS X 10.8.5
> x86_64) (WARNING: The server did not provide any stacktrace information)
Is it possible to avoid exceptions from Selenium Server when looking for an
element that will likely not be present right away, when running remote
webdriver using Python?
Can anyone point me to an example of the proper way to use WebDriverWait from
pysaunter? I'm starting from here:
<http://docs.seleniumhq.org/docs/04_webdriver_advanced.jsp> and (see also
Element 34 blog posting "WebDriverWait and Python" from July 9th, 2012)
TIA
Answer: If you look into the WebDriverWait code you will see that you can give the
constructor a list of exceptions to ignore. One such list is pre-defined,
`IGNORED_EXCEPTIONS`, which is set to `[NoSuchElementException]`. So you can
just add `ignored_exceptions=IGNORED_EXCEPTIONS` to the WebDriverWait
constructor, i.e.:
WebDriverWait(self.driver, 30, ignored_exceptions=IGNORED_EXCEPTIONS).until(...)
Then those exceptions will be ignored and it will continue to try until it
succeeds or times out.
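Putting the pieces together with explicit exception classes (a sketch; adding
`StaleElementReferenceException` is my own suggestion for AJAX-heavy pages,
not something pysaunter requires):

    from selenium.common.exceptions import (NoSuchElementException,
                                            StaleElementReferenceException)
    from selenium.webdriver.common.by import By
    from selenium.webdriver.support.ui import WebDriverWait
    from selenium.webdriver.support import expected_conditions as EC

    wait = WebDriverWait(self.driver, 30,
                         ignored_exceptions=(NoSuchElementException,
                                             StaleElementReferenceException))
    element = wait.until(
        EC.presence_of_element_located((By.CSS_SELECTOR, '.ng-scope.ready.idle')))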
|
Simple game in python
Question: I have looked at the other posts about this topic but still can not find what
I'm doing wrong at the beginning. Instead of rock, paper, and scissors, I am
using python, ruby, and java. It is not close to being done yet. I'm not into
the for loops yet, but if the user inputs something different than
"python", "ruby", or "java", I want it to print "The game is over". I get an
error saying the string I entered is not defined. Could someone guide me in
the direction I need to go? I think I'm confused when comparing userInput to
gameList, since gameList is a list.
import random
def pythonRubyJava():
gameList = ["python","ruby","java"]
userInput = input("python, ruby, or java?:")
randomInput = random.choice(gameList)
if userInput != gameList:
print "The game is over"
I got that part figured out. Do I need to store "python", "ruby", and "java"
as variables to continue now? Or where would you go?
import random
def pythonRubyJava():
gameList = ["python","ruby","java"]
userInput = raw_input("python, ruby, or java?:")
randomInput = random.choice(gameList)
print randomInput
if userInput not in gameList:
print "The game is over"
if userInput == "python" and randomInput == "python":
print "stalemate"
if userInput == "ruby" and randomInput == "ruby":
print "stalemate"
if userInput == "java" and randomInput == "java":
print "stalemate"
Instead of getting the same answer, I want to be able to run the game again:
on a stalemate, don't end the game, just start over. I know I would have to
delete the `print "stalemate"` lines, but I just wanted to show that.
Answer: The error occurs in line 4, which reads the user input. The problem is that
`input(...)` evaluates what was typed as a Python expression after reading it
from the command line, so strings would have to be quoted.
Use `raw_input(...)` instead:
userInput = raw_input("python, ruby, or java?:")
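For the follow-up (replaying on a stalemate instead of ending), a simple
pattern is to wrap the round in a `while` loop; a minimal sketch of your
function with that change:

    import random

    def pythonRubyJava():
        gameList = ["python", "ruby", "java"]
        while True:
            userInput = raw_input("python, ruby, or java?:")
            if userInput not in gameList:
                print "The game is over"
                break
            randomInput = random.choice(gameList)
            print randomInput
            if userInput == randomInput:
                print "stalemate - playing again"
                continue  # start the next round instead of ending
            # a real winner/loser rule would go here
            break

    pythonRubyJava()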
|
Parsing object in python
Question:
{u'jobs': {u'_total': 301746, u'_count': 10, u'_start': 0, u'values': [{u'position': {u'title': u'Director of Sales, New York '}, u'id': 7489651}, {u'position': {u'title': u'Senior Software Development Engineer'}, u'id': 7489610}, {u'position': {u'title': u'Senior Analyst, Marketing Analytics'}, u'id': 7489572}, {u'position': {u'title': u'Director, Quantitative Analytics'}, u'id': 7489559}, {u'position': {u'title': u'Senior Quantitative Analyst'}, u'id': 7489542}, {u'position': {u'title': u'Director - Progam Lead and Project Manager - Collateral Management'}, u'id': 7489520}, {u'position': {u'title': u'Accounting and Finance Manager'}, u'id': 7489519}, {u'position': {u'title': u'Software Development Engineer - Test'}, u'id': 7489508}, {u'position': {u'title': u'Sr. Sales Executive - Technology'}, u'id': 7489462}, {u'position': {u'title': u'Recruitment Manager'}, u'id': 7489264}]}}
Hi, I'm really new to scripting in general (I'm a DB programmer). I'm trying
to use a Python LinkedIn API that returns data formatted like the above. Could
someone please help me parse that data out? I would like to store the data in
some type of Python data structure and then eventually write it into a
database.
Answer: [Python interface to the LinkedIn API](https://github.com/ozgur/python-
linkedin) has already returned you a python dict.
For example, you can extract a list of actual jobs from the data you've
provided:
from pprint import pprint
data = {u'jobs': {u'_total': 301746, u'_count': 10, u'_start': 0, u'values': [{u'position': {u'title': u'Director of Sales, New York '}, u'id': 7489651}, {u'position': {u'title': u'Senior Software Development Engineer'}, u'id': 7489610}, {u'position': {u'title': u'Senior Analyst, Marketing Analytics'}, u'id': 7489572}, {u'position': {u'title': u'Director, Quantitative Analytics'}, u'id': 7489559}, {u'position': {u'title': u'Senior Quantitative Analyst'}, u'id': 7489542}, {u'position': {u'title': u'Director - Progam Lead and Project Manager - Collateral Management'}, u'id': 7489520}, {u'position': {u'title': u'Accounting and Finance Manager'}, u'id': 7489519}, {u'position': {u'title': u'Software Development Engineer - Test'}, u'id': 7489508}, {u'position': {u'title': u'Sr. Sales Executive - Technology'}, u'id': 7489462}, {u'position': {u'title': u'Recruitment Manager'}, u'id': 7489264}]}}
pprint(data['jobs']['values'])
prints a list of dictionaries:
[{u'id': 7489651, u'position': {u'title': u'Director of Sales, New York '}},
{u'id': 7489610,
u'position': {u'title': u'Senior Software Development Engineer'}},
{u'id': 7489572,
u'position': {u'title': u'Senior Analyst, Marketing Analytics'}},
{u'id': 7489559,
u'position': {u'title': u'Director, Quantitative Analytics'}},
{u'id': 7489542, u'position': {u'title': u'Senior Quantitative Analyst'}},
{u'id': 7489520,
u'position': {u'title': u'Director - Progam Lead and Project Manager - Collateral Management'}},
{u'id': 7489519, u'position': {u'title': u'Accounting and Finance Manager'}},
{u'id': 7489508,
u'position': {u'title': u'Software Development Engineer - Test'}},
{u'id': 7489462,
u'position': {u'title': u'Sr. Sales Executive - Technology'}},
{u'id': 7489264, u'position': {u'title': u'Recruitment Manager'}}]
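Since the eventual goal is a database, here is a small sketch of flattening
that structure into rows (the `executemany` line is illustrative, assuming a
sqlite3-style cursor):

    rows = [(job['id'], job['position']['title'])
            for job in data['jobs']['values']]
    # rows == [(7489651, u'Director of Sales, New York '), ...]
    # ready for something like:
    #   cursor.executemany("insert into jobs (id, title) values (?, ?)", rows)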
|
ImportError: DLL load failed: %1 is not a valid Win32 application. But the DLL's are there
Question: I have a situation very much like the one at [ImportError: DLL load failed: %1
is not a valid Win32
application](http://stackoverflow.com/questions/14629818/importerror-dll-load-
failed-1-is-not-a-valid-win32-application), but the answer there isn't working
for me.
My Python code says:
import cv2
But that line throws the error shown in the title of this question.
I have OpenCV installed in `C:\lib\opencv` on this 64-bit machine. I'm using
64-bit Python.
My PYTHONPATH variable: `PYTHONPATH=C:\lib\opencv\build\python\2.7`. This
folder contains `cv2.pyd` and that's all.
My PATH variable: `Path=%OPENCV_DIR%\bin;...` This folder contains 39 DLL
files such as `opencv_core246d.dll`.
OPENCV_DIR has this value: `OPENCV_DIR=C:\lib\opencv\build\x64\vc11`.
The solution at [ImportError: DLL load failed: %1 is not a valid Win32
application](http://stackoverflow.com/questions/14629818/importerror-dll-load-
failed-1-is-not-a-valid-win32-application) says to add "the new opencv
binaries path (`C:\opencv\build\bin\Release`) to the Windows PATH environment
variable". But as shown above, I already have the OpenCV binaries folder
(`C:\lib\opencv\build\x64\vc11\bin`) in my PATH. And my OpenCV installation
doesn't have any Release folders (except for an empty one under build/java).
Any ideas as to what's going wrong? Can I tell Python to verbosely trace the
loading process? Exactly what DLL's is it looking for?
Thanks, Lars
# EDIT:
I just noticed that, according to <http://www.dependencywalker.com/>, the
`cv2.pyd` in `C:\lib\opencv\build\python\2.7` is 32-bit, whereas the machine
and the Python I'm running are 64-bit. Could that be the problem? And if so,
where can I find a 64-bit version of cv2.pyd?
Answer: [Unofficial Windows Binaries for Python Extension
Packages](http://www.lfd.uci.edu/~gohlke/pythonlibs/)
You can find 64-bit builds of many Python libraries there, including OpenCV's `cv2`.
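To the EDIT: yes, a 32-bit `cv2.pyd` cannot be loaded into a 64-bit Python,
and this ImportError is the usual symptom. A quick sanity check of the
interpreter's bitness:

    import struct
    print struct.calcsize("P") * 8  # 64 on a 64-bit Python, 32 on a 32-bit one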
|
python sqlite3 code works in global space but thows error when I place it in a function
Question: I'm trying to run the following 'sqlite3_custom_type.py' example from the book
"Python Standard Library by Example". The following code works 'straight out
of the box':
import os
import sqlite3
db_filename = 'todo.db'
db_is_new = not os.path.exists(db_filename)
conn = sqlite3.connect(db_filename)
if db_is_new:
print('need to create schema')
else:
print('database exists, assume schema does to')
conn.close()
#import sqlite3
try:
import cPickle as pickle
except:
import pickle
db_filename = 'todo.db'
def adapter_func(obj):
"""Convert from in-memory to storage representation.
"""
print 'adapter_func(%s)\n' % obj
return pickle.dumps(obj)
def converter_func(data):
"""Convert from storage to in-memory representation.
"""
print 'converter_func(%r)\n' % data
return pickle.loads(data)
class MyObj(object):
def __init__(self, arg):
self.arg = arg
def __str__(self):
return 'MyObj(%r)' % self.arg
# Register the functions for manipulating the type.
sqlite3.register_adapter(MyObj, adapter_func)
sqlite3.register_converter("MyObj", converter_func)
# Create some objects to save. Use a list of tuples so
# the sequence can be passed directly to executemany().
to_save = [ (MyObj('this is a value to save'),),
(MyObj(42),),
]
with sqlite3.connect(db_filename,
detect_types=sqlite3.PARSE_DECLTYPES) as conn:
# Create a table with column of type "MyObj"
conn.execute("""
create table if not exists obj (
id integer primary key autoincrement not null,
data MyObj
)
""")
cursor = conn.cursor()
# Insert the objects into the database
cursor.executemany("insert into obj (data) values (?)", to_save)
# Query the database for the objects just saved
cursor.execute("select id, data from obj")
for obj_id, obj in cursor.fetchall():
print 'Retrieved', obj_id, obj, type(obj)
print
But if I put all the code in a function such as
def stuff():
~same code as above but indented
if __name__=="__main__":
stuff()
then I get an error code:
cursor.executemany("insert into obj (data) values (?)", to_save)
sqlite3.InterfaceError: Error binding parameter 0 - probably unsupported type.
Why doesn't the code work when it is in a function and how can I make it work?
Answer: As per other answers, it's good style to place classes in module scope. The
real reason for failure in this particular case though, is because of the
`pickle.dumps(obj)` call which tries to pickle a non-module level class.
Try the following code in your `adapter_func`:
def adapter_func(obj):
"""Convert from in-memory to storage representation.
"""
try:
return pickle.dumps(obj)
except Exception, arg:
print 'Failed to pickle object [%s]' % arg
You will see an error such as the following when `MyObj` is declared inside
`stuff`:
Failed to pickle object [Can't pickle <class '__main__.MyObj'>: it's not found as __main__.MyObj]
It is a requirement of `pickle` that classes to be pickled are declared at the
module level as [described in the pickle
documentation](http://docs.python.org/2/library/pickle.html#what-can-be-
pickled-and-unpickled). The sqlite3 module appears to be squashing exceptions
raised in the adapter functions, rather than propagating them through
resulting in a silent failure.
You can declare and register your adapter and converter functions inside
`stuff`. Style issues aside, you could also declare your `MyObj` inside your
function and have it work, as long as you find some other way to
serialise/deserialise your object.
It's the attempt to pickle a class which isn't at the top level which is the
root of this problem.
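To illustrate that last point, here is a sketch that swaps `pickle` for
`json` so the class can stay local to `stuff()` (this assumes the object's
state is just the `arg` attribute; json serializes the data, not the class):

    import json
    import sqlite3

    def stuff():
        class MyObj(object):
            def __init__(self, arg):
                self.arg = arg

        # these never need to locate the class by name, unlike pickle
        sqlite3.register_adapter(MyObj, lambda obj: json.dumps(obj.arg))
        sqlite3.register_converter("MyObj", lambda data: MyObj(json.loads(data)))
        # ... rest of the original function unchanged ...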
|
python finding text and after that text multiple numbers
Question: I have a big xml file and I want to edit it with Notepad++ (Python Script
plugin). I need to replace all numbers in text like this
(x text)="number1;number2;number3;number4;number5;number6"
with each number divided by 2 (integer division, no floats):
(x text)="(number1)/2;(number2)/2;(number3)/2;(number4)/2;(number5)/2;(number6)/2"
My code:
import re
text = editor.getText()
for m in re.finditer( 'x text', text ):
    numbers = [int(n.group(1)) for n in num.finditer(text)]  # here we should make an array of those numbers
    numbers[] = numbers[] / 2  # then divide by 2 (integer)
    map(numbers)  # and return
Answer:
mystr="1;5;7;10;11;20"
mystr=';'.join(map(lambda x:str(int(x)/2),mystr.split(';')))
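Applied to the Notepad++ Python Script context of the question, a sketch (the
regex assumes the values really look like `="1;5;7"`; `editor.getText()` and
`editor.setText()` come from the Python Script plugin):

    import re

    text = editor.getText()

    def halve_all(match):
        # integer-divide every number in the semicolon-separated list by 2
        halved = [str(int(n) // 2) for n in match.group(1).split(';')]
        return '="' + ';'.join(halved) + '"'

    # matches ="num;num;..." - anchor it on your real attribute name if needed
    editor.setText(re.sub(r'="(\d+(?:;\d+)+)"', halve_all, text))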
|
Solr indexing issue with solrpy
Question: Just started learning solr. I am trying to use solrpy as a client. My python
code is:
import solr
# create a connection to a solr server
s = solr.SolrConnection('http://localhost:8983/solr')
# add a document to the index
doc = dict(
id='testid123',
title='Lucene in Action',
author=['Erik Hatcher', 'Otis Gospodneti'],
)
s.add(doc, commit=True)
# do a search
response = s.query('title:lucene')
for hit in response.results:
print hit['title']
This is from the example given [here](https://pypi.python.org/pypi/solrpy/)
My solr schema.xml is the default schema that comes with solr distribution. I
have not made any changes to that. It has a uniqueKey field as "id".
<uniqueKey>id</uniqueKey>
And it is of string type
<field name="id" type="string" indexed="true" stored="true" required="true" multiValued="false" />
Still when I run my code, on my client side I get error:
Traceback (most recent call last):
File "/Users/user1/Documents/workspace/PyDelight/src/Test.py", line 12, in <module>
s.add(doc, commit=True)
File "/Library/Python/2.7/site-packages/solrpy-0.9.5-py2.7.egg/solr/core.py", line 678, in add
return Solr.add_many(self, [fields], commit=_commit)
File "/Library/Python/2.7/site-packages/solrpy-0.9.5-py2.7.egg/solr/core.py", line 326, in wrapper
return self._update(content, query)
File "/Library/Python/2.7/site-packages/solrpy-0.9.5-py2.7.egg/solr/core.py", line 550, in _update
rsp = self._post(selector, request, self.xmlheaders)
File "/Library/Python/2.7/site-packages/solrpy-0.9.5-py2.7.egg/solr/core.py", line 639, in _post
return check_response_status(self.conn.getresponse())
File "/Library/Python/2.7/site-packages/solrpy-0.9.5-py2.7.egg/solr/core.py", line 1097, in check_response_status
raise ex
solr.core.SolrException: HTTP code=400, reason=Bad Request
And on the solr trace side I get error:
843169 [qtp1151734776-20] INFO org.apache.solr.update.processor.LogUpdateProcessor ? [collection1] webapp=/solr path=/update params={commit=true} {} 0 0
843170 [qtp1151734776-20] ERROR org.apache.solr.core.SolrCore ? org.apache.solr.common.SolrException: Document is missing mandatory uniqueKey field: id
schema.xml file is in solr-4.4.0/example/solr/collection1/conf
And I am running solr by simply running start.jar in example directory.
Any idea where I am going wrong?
Answer: I have not used solrpy much (and haven't installed it yet), but from the
initial example it looks like it wants to be called with attribute=value
pairs instead of a dictionary. (I know the example you posted is right from
the online 0.9.2 documentation! But the current source on github has this in
the comments):
add(**params)
Add a document. Pass in all document fields as
keyword parameters:
add(id='foo', notes='bar')
You must "commit" for the addition to be saved.
So try this:
s.add(commit=True, **doc)
and it will probably work. You may need to pull out the commit and do it
separately, I don't know.
I am not a solr expert and have just played around with it a little bit, but
I had better luck using [sunburnt](https://github.com/tow/sunburnt "sunburnt")
than solrpy. Worth a shot, maybe.
Edit: a pointer to that file is here:
<http://code.google.com/p/solrpy/source/browse/solr/core.py>
|
Ninja-IDE virtualenv not importing
Question: I started a new project in the Ninja-IDE and assigned it to a virtualenv
folder in the settings.
The virtualenv works fine in the terminal, but Ninja-IDE doesn't recognize the
packages in it and throws an import error.
How can I access the packages in the virtualenv in Ninja-IDE?
Thanks!
* Ubuntu
* Python 2.7
* Through VirtualBox
Answer: A quick answer so that others may find it useful. I had a very similar
problem a little while ago, and had a lot of trouble finding a quick workable
solution anywhere, not even on Google.
I somehow figured out that the solution was to simply create a virtualenv
folder with the version of numpy I wanted, and then pointed the "virtualenv"
property for NinjaIDE project to that folder. I restarted NinjaIDE and boom,
instantly worked.
To set the virtualenv property for your project via the GUI, go to the Project
menu:
Project > Open Project Properties > Project Execution
and you should see a variable called "Virtualenv Folder". Point that to the
folder for your virtualenv, and it should work. (May need to restart
NinjaIDE.)
> This worked for me, NinjaIDE version 2.2 under Ubuntu 12.04.
Edit by asker: If the above doesn't fix the problem, try PyCharm-- it works
for me.
|
How to override a function call in Python?
Question: I've something like this:
import os
from node import Node
def make_sum(num,done):
for i in range(0,100):
num = num + 1
done(num)
def result(num):
print num
return num
node = Node()
node.register(make_sum(20,result))
result(25)
and `node.py` is this:
import os
class Node():
def __init__(self):
pass
def register(self,obj):
print obj
What I want is for the `make_sum()` call to happen from inside the
`register()` function. But currently it gets called while making the
`register()` call itself.
Is such a thing possible in Python, where you can do a forward declaration of
a function but call it later?
Answer: You can pass `make_sum` function as an argument to `register` method:
node.register(make_sum, 20, result)
then, call it in the method:
class Node():
def __init__(self):
pass
def register(self, f, num, done):
print f(num, done)
Also, you can use `lambda`:
node.register(lambda: make_sum(20, result))
In this case you don't need to pass arguments at all:
class Node():
def __init__(self):
pass
def register(self, f):
print f()
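Another option, equivalent to the lambda, is `functools.partial`, which
bundles the function with its arguments without calling it:

    from functools import partial

    node.register(partial(make_sum, 20, result))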
Hope this is what you wanted.
|
Python - defining global name datetime
Question: I'm trying to create a function where I use various functions from the
datetime module, such as strftime, strptime and timedelta.
I feel like I've tried everything, but every time I am told this:
4 date = '2012.09.07'
5
----> 6 q = net(date)
7 print q
/Users/fb/Documents/Python_files/test_function.pyc in net(date)
1 def net(date):
----> 2 b = datetime.strptime(a, '%Y.%m.%d')
3 c = b.strftime('%d:%m:%y')
4 return c
NameError: global name 'datetime' is not defined
I've read that others probably experience the same problem as I, namely ' It
works in the python interpreter but not in the script'. Can anyone help,
please?
Answer: You need to import the `datetime` object _in your module_ :
from datetime import datetime
at the top of `test_function.py`.
In your interpreter session you probably already imported the object.
Your whole module will then look like:
from datetime import datetime
def net(date):
b = datetime.strptime(date, '%Y.%m.%d')
c = b.strftime('%d:%m:%y')
return c
where I replaced `a` with `date`, since that is the name of the actual
argument to the function.
Note that the `datetime` _module_ contains a `datetime` _class_ , which is the
only thing imported here. If you need access to the `date` and `timedelta`
classes as well, import these explicitly (`from datetime import datetime,
date, timedelta`) or import _just_ the module and refer to the contents as
attributes (`import datetime`, then `datetime.datetime.strptime()` and
`datetime.date.today()`, etc.).
|
A library like Python's collections.Counter library for C# -> Getting the difference of values between two dictionary objects in C#
Question: This is how I would create a Dictionary in C#.
Dictionary<string, int> d = new Dictionary<string, int>()
{
{"cheese", 2},
{"cakes", 1},
{"milk", 0},
{"humans", -1} // This one's for laughs
};
In Python if you have a dictionary like so:
from collections import Counter
my_first_dict = {
"cheese": 1,
"cakes": 2,
"milk": 3,
}
my_second_dict = {
"cheese": 0,
"cakes": 1,
"milk": 4,
}
print Counter(my_first_dict) - Counter(my_second_dict)
>>> Counter({'cheese': 1, 'cakes': 1})
As you can see, `Counter` is very useful when comparing dictionary objects.
Is there a library in C# that will allow me to do something similar to this,
or do I have to code it from scratch?
Answer: You can join the two dictionaries together and then create a new one based on
the given operation with only a few lines of code:
Dictionary<string, int> d1 = new Dictionary<string, int>();
Dictionary<string, int> d2 = new Dictionary<string, int>();
var difference = d1.Join(d2, pair => pair.Key, pair => pair.Key, (a, b) => new
{
Key = a.Key,
Value = a.Value - b.Value,
})
.Where(pair => pair.Value > 0)
.ToDictionary(pair => pair.Key, pair => pair.Value);
There is no system class like the one you've shown that wraps a dictionary
and provides a `-` operator for it, but you can make your own easily enough:
public class Counter<T> : IEnumerable<KeyValuePair<T, int>>
{
private IEnumerable<KeyValuePair<T, int>> sequence;
public Counter(IEnumerable<KeyValuePair<T, int>> sequence)
{
this.sequence = sequence;
}
public static Counter<T> operator -(Counter<T> first, Counter<T> second)
{
return new Counter<T>(first.Join(second
, pair => pair.Key, pair => pair.Key, (a, b) =>
new KeyValuePair<T, int>(a.Key, a.Value - b.Value))
.Where(pair => pair.Value > 0));
}
public IEnumerator<KeyValuePair<T, int>> GetEnumerator()
{
return sequence.GetEnumerator();
}
IEnumerator IEnumerable.GetEnumerator()
{
return GetEnumerator();
}
}
|
python library and example for gmail smtp using oauth2
Question: The [python oauth2 library](https://github.com/simplegeo/python-oauth2) seems
to implement the OAuth 1.0 protocol. The `import oauth2 as oauth` is
misleading; most probably the "2" refers to the 2nd version of the Python lib
implementing OAuth 1.0.
Is there any Python library implementing the OAuth 2.0 protocol, and a sample
showing how to use it?
I tried Google but failed to find any.
Answer: You should really give [rauth](https://github.com/litl/rauth
"rauth") a try. It supports OAuth 1.0/1.0a and 2.0.
|
Installing pycuda-2013.1.1 on windows 7 64 bit
Question: FYI, I have 64 bit version of Python 2.7 and I followed [the pycuda
installation instruction](http://wiki.tiker.net/PyCuda/Installation/Windows)
to install pycuda.
And I don't have any problem running following script.
import pycuda.driver as cuda
import pycuda.autoinit
from pycuda.compiler import SourceModule
import numpy
a = numpy.random.randn(4,4)
a = a.astype(numpy.float32)
a_gpu = cuda.mem_alloc(a.nbytes)
cuda.memcpy_htod(a_gpu,a)
But after that, when executing this statement,
mod = SourceModule("""
__global__ void doublify(float *a)
{
int idx = threadIdx.x + threadIdx.y * 4;
a[idx] *= 2;
}
""")
I got the error messages
> CompileError: nvcc compilation of
> c:\users\xxxx\appdata\local\temp\tmpaoxt97\kernel.cu failed [command: nvcc
> --cubin -arch sm_21 -m64 -Ic:\python27\lib\site-packages\pycuda\cuda
> kernel.cu] [stderr: nvcc : fatal error : nvcc cannot find a supported
> version of Microsoft Visual Studio. Only the versions 2008, 2010, and 2012
> are supported
But I have VS 2008 and VS 2010 installed on the machine and set path and nvcc
profile as instructed. Anybody tell me what's going on?
**UPDATE1** : As cgohike pointed out, running the following statements before
the problematic statement will solve the problem.
import os
os.system("vcvarsamd64.bat")
Answer: Call `"C:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\vcvarsall.bat"
amd64` or `"C:\Program Files (x86)\Microsoft Visual Studio
10.0\VC\vcvarsall.bat" amd64` before `python.exe`. That will set all the
necessary environment variables to use the 64 bit Visual Studio compiler from
Python or the command line.
|
elegant way of convert a numpy array containing datetime.timedelta into seconds in python 2.7
Question: I have a numpy array called `dt`. Each element is of type
`datetime.timedelta`. For example:
>>>dt[0]
datetime.timedelta(0, 1, 36000)
how can I convert `dt` into the array `dt_sec` which contains only seconds
without looping? my current solution (which works, but I don't like it) is:
dt_sec = zeros((len(dt),1))
for i in range(0,len(dt),1):
dt_sec[i] = dt[i].total_seconds()
I tried to use `dt.total_seconds()` but of course it didn't work. Any idea on
how to avoid this loop?
Thanks
Answer:
import numpy as np
helper = np.vectorize(lambda x: x.total_seconds())
dt_sec = helper(dt)
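Note that `np.vectorize` is essentially a Python-level loop under the hood; a
plain list comprehension is just as good and arguably clearer:

    dt_sec = np.array([x.total_seconds() for x in dt])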
|
Analog sampling on the Beaglebone Black only reads noise
Question: I want to write a script in Python that will do data acquisition with the
BeagleBone Black. However, I'm only getting noise when I read in values.
I have a circuit set up to feed in a signal (I was running about a .1-10 Hz
12v square pulse), the voltage regulator limits the voltage to 5v, and the
voltage divider drops it down to a safe voltage (~1v) for the analog pin on
the BBB.
Circuit:

Then using a python script I wanted to log the values into an array and plot
them once the program finished.
In ipython shell:
In [1]: import Adafruit_BBIO.ADC as adc
In [2]: import time
In [3]: AIN0 = "P9_39"
In [4]: T = 0.1 # or some other value
In [5]: adc.setup()
In [6]: a = []
In [7]: while True:
a.append(adc.read(AIN0)*1800) # pin value times 1.8 ref voltage
time.sleep(T)
After running for a while, I crash the script and write `a` to a file to be
`scp`ed to my desktop. But when I plot it, it's only noise. I've hooked up an
o-scope between `AIN0` and ground and the waveform is what I expect. I also
set up a potentiometer like so:

And I am able to read in the correct values through python. So, I figure it
must be something about doing a continuous sampling that's creating problems.
Answer: If you don't connect the power supply ground to your Beaglebone ground it will
not work, and you should indeed see only noise, since `AIN0` will be sampling
the Beaglebone's ground. You have to connect them in order to `AIN0` see the
signal of interest.

See the "missing connection" wire. If you don't have that you have no return
path for the current coming from the power supply (blue arrows), so there
can't be any voltage across the pontentiometer (remember Ohm's law: `voltage =
resistance x current`. If current is zero, the sampled voltage must also be
zero).
As for the script part, you can directly write the sampled data to a file with
this:
    with open('sampled_data.csv', 'w') as f:
        while True:
            f.write('%s,' % (adc.read(AIN0) * 1800))  # one value per write, comma-separated
            time.sleep(T)
When you interrupt the script you'll get the `sampled_data.csv` file, with all
values separated by commas (`,`), which is easily importable into a
spreadsheet or other software you use to plot it.
|
Config file for Python with repeated sections/objects
Question: I would like to store this kind of configuration, with repetition of sections
and the keys within each:
[item]
name = 'name1'
url = 'address1'
whatever_var = 'foo1'
[item]
name = 'name2'
url = 'address2'
whatever_var = 'foo2'
...
What config file format would be adequate for such structure?: ConfigParser,
ConfigObj, JSON, YAML,...? I have never used any (Python newcomer) and would
like to know which one fits best.
Note: I am using Python 2.7 so far.
[EDIT]: Well, I believe it's not just a duplicate question, because I need not
just duplicated keys, but duplicated sections (each with its own keys inside,
which must not be grouped with keys from other sections).
Also, I'm not asking how to do it in ConfigParser, but which file type fits
better in this situation (XML, JSON, a custom file,...). In fact, I think that
what I want cannot be obtained with ConfigParser nor ConfigObj, and I might
try XML, even if it is a bit less human-readable. And, hey, it's never a waste
of time learning to deal with XML files.
Answer: It looks like you are going to store data in files and you are leaving out the
idea of using databases and other types of interactions with remote servers to
keep this stuff. Glad you have that straightened out.
Many file types will support this type of data if the data is small. (if it'll
fit on your machine). If the data gets to be too big or takes too long to
access due to billions of records then you have different problems.
Sometimes the domain of the problem affects which file type to use. If you are
making a webserver then you might want to go with JSON or XML for transmission
purposes.
If you want to use the format that you have spelled out in your post here is
how to read the file.
import configparser
conf = configparser.ConfigParser()
conf.read('FILE.INI')
Here is how to read a value from the file you loaded (assuming your [item] sections have unique titles like [Person1]):
conf['Person1']['name']
And then writing the file is pretty close to normal:
with open('FILE.INI', 'w') as inifile:
conf.write(inifile)
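Given that the real question is which format handles repeated sections
naturally, a format with native lists avoids the unique-title workaround
entirely; a JSON sketch:

    import json

    items = [
        {"name": "name1", "url": "address1", "whatever_var": "foo1"},
        {"name": "name2", "url": "address2", "whatever_var": "foo2"},
    ]

    with open('config.json', 'w') as f:
        json.dump(items, f, indent=2)

    with open('config.json') as f:
        for item in json.load(f):
            print item['name'], item['url']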
|
Beginning word counting program only produces output for the last line in python
Question: I am a beginner programmer attempting to build a simple program. It should
count every word in the file, but as written it only counts the last line of
text.
tm = open('myfile.txt', 'r')
for line in tm:
line = line.replace ('\n', '')
line = line.strip()
line = line.translate(None, '!#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~')
line = line.lower()
line = line.split(' ')
list = line
dict ={}
for word in list:
dict[word]=1
if word in dict:
count = dict[word]
count += 1
dict[word] = count
else:
dict[word]=1
for word,count in dict.iteritems():
print word + ": " + str(count)
My output is this
about: 1
to: 1
subscribe: 1
hear: 1
new: 1
our: 1
newsletter: 1
email: 1
ebooks: 2
for a 500-page document. Any help is appreciated.
Answer: Replace this line in your code:
list = line # that's not how you add elements to a list!
With this one:
list.extend(line)
And it'd be a good idea to rename the `list` variable to `lst`, because `list`
is a built-in and it's a bad idea to overwrite it. Same goes for `dict`: you
should not use that as a variable name.
Another good idea: use a
[`Counter`](http://docs.python.org/2/library/collections.html#collections.Counter)
object to keep track of the word frequency, it's much easier than updating the
dictionary's counter values by hand. The whole block of code where you create
and fill the dictionary can be replaced by this:
from collections import Counter
d = Counter(lst) # notice the suggested variable names
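Putting it all together, a minimal sketch of the whole program (assuming the
same file name as in your code):
from collections import Counter

counts = Counter()
with open('myfile.txt') as tm:
    for line in tm:
        # lowercase, strip punctuation, then split into words
        line = line.strip().lower()
        line = line.translate(None, '!#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~')
        counts.update(line.split())

for word, count in counts.iteritems():
    print word + ": " + str(count)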
|
Python CSV Module Issue With JSON
Question: I am using the python `csv` module to create a csv where some of the values
are `json` strings. However the `csv` module's quoting is totally breaking the
json:
import csv
import json
writer = csv.writer(open('tmp', 'w'))
writer.writerow([json.dumps([{'a' : 'b'}])])
The outputted json is broken, as you can see:
cat tmp
> "[{""a"": ""b""}]"
import json
json.loads("[{""a"": ""b""}]")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python2.7/json/__init__.py", line 326, in loads
return _default_decoder.decode(s)
File "/usr/lib/python2.7/json/decoder.py", line 366, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "/usr/lib/python2.7/json/decoder.py", line 382, in raw_decode
obj, end = self.scan_once(s, idx)
ValueError: Expecting property name: line 1 column 2 (char 2)
And `csv` objects to turning quoting off:
import csv
import json
writer = csv.writer(open('tmp', 'w'), quoting=csv.QUOTE_NONE)
writer.writerow([json.dumps([{u'a' : u'b'}])])
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
_csv.Error: need to escape, but no escapechar set
Has anyone else encountered this? Do `json` and `csv` just not play well
together? (It's not my idea to store `json` strings in `csv` files; it's
something I just need to deal with right now.) Unfortunately, the csvs I am
creating contain hash digests and all sorts of other complicated stuff, so all
the `sed`/`awk`-ish solutions I've tried for fixing the json have either
failed or messed up something else.
Answer: Don't use `"` as your quote character. Use something else:
with open('tmp', 'w') as fout:
    writer = csv.writer(fout, quotechar="'")
Really, this just tidies things up a bit. When you read the data back in,
"unquote" it by reading via `csv.reader` with the same `quotechar`; that
should give you back the strings you put in, which are valid `json`.
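For example, a minimal round trip (same idea as the data in the question, with
a second dict added so the field actually contains a comma and gets quoted):
import csv
import json

with open('tmp', 'w') as fout:
    writer = csv.writer(fout, quotechar="'")
    writer.writerow([json.dumps([{'a': 'b'}, {'c': 'd'}])])

with open('tmp') as fin:
    reader = csv.reader(fin, quotechar="'")
    for row in reader:
        # row[0] is the original JSON string, intact
        print json.loads(row[0])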
|
execute a remote python script, locally with additional arguments
Question: I have a python script that resides on a remote server, under version control,
and I'd like to execute it from my local shell.
I know that `curl https://remote.path/script.py | python` will work ([as confirmed here)](http://stackoverflow.com/questions/12016134/how-to-execute-a-remote-page) when there are no additional parameters.
The problem is, I can't figure out how to pass in additional command line
arguments, e.g. `python script.py arg1 arg2 arg3`?
_I recognize this may not be the most secure practice, but the script is
pretty benign._
Answer: `man python` would have answered your question:
python [ -B ] [ -d ] [ -E ] [ -h ] [ -i ] [ -m module-name ]
       [ -O ] [ -OO ] [ -R ] [ -Q argument ] [ -s ] [ -S ] [ -t ] [ -u ]
       [ -v ] [ -V ] [ -W argument ] [ -x ] [ -3 ] [ -? ]
       [ -c command | script | - ] [ arguments ]
Say:
curl https://remote.path/script.py | python - arg1 arg2 arg3
Example:
$ cat s
import sys
print sys.argv[1:]
$ cat s | python - arg1 arg2 arg3
['arg1', 'arg2', 'arg3']
|
adding admin interface to existing pyramid app
Question: I'm trying to add a nice admin interface to an existing Pyramid project. I
created a test project using `pcreate -s alchemy -s pyramid_fa fa_test` and
then copied all the extra files created into my project and altered them to be
suitable.
Everything looks to be good and dandy until I try to add a formalchemy route:
config.formalchemy_model("/foo", package='bar',
model='bar.models.specific_models.Thingy',
**settings)
Then I get: `ImportError: No module named forms`
**My question is:** How do I fix this? Or what is the correct way to add an
admin interface?
I've googled around a bunch to no avail...
Here's relevant code:
fainit.py:
from bar import models, faforms
import logging

def includeme(config):
    config.include('pyramid_formalchemy')
    config.include('bar.fainit')
    config.include('fa.jquery')
    config.include('pyramid_fanstatic')
    model_view = 'fa.jquery.pyramid.ModelView'
    session_factory = 'bar.models.access.DBSession'
    ## register session and model_view for later use
    settings = {'package': 'bar',
                'view': model_view,
                'session_factory': session_factory,
                }
    config.registry.settings['bar.fa_config'] = settings
    config.formalchemy_admin("/admin", models=models, forms=faforms,
                             **settings)
    # Adding the package specific routes
    config.include('shop.faroutes')
    log.info('formalchemy_admin registered at /admin')
faroutes.py
from bar import models
import logging

log = logging.getLogger(__name__)

def includeme(config):
    settings = config.registry.settings.get('shop.fa_settings}}', {})
    config.formalchemy_model("/alerts", package='shop',
                             model='shop.models.super_models.Alert',
                             **settings)
    log.info('shop.faroutes loaded')
And the traceback:
Starting subprocess with file monitor
Traceback (most recent call last):
File "../bin/pserve", line 9, in <module>
load_entry_point('pyramid==1.5a1', 'console_scripts', 'pserve')()
File "/home/sheena/WORK/tv_guys_env/local/lib/python2.7/site-packages/pyramid-1.5a1-py2.7.egg/pyramid/scripts/pserve.py", line 51, in main
return command.run()
File "/home/sheena/WORK/tv_guys_env/local/lib/python2.7/site-packages/pyramid-1.5a1-py2.7.egg/pyramid/scripts/pserve.py", line 316, in run
global_conf=vars)
File "/home/sheena/WORK/tv_guys_env/local/lib/python2.7/site-packages/pyramid-1.5a1-py2.7.egg/pyramid/scripts/pserve.py", line 340, in loadapp
return loadapp(app_spec, name=name, relative_to=relative_to, **kw)
File "/home/sheena/WORK/tv_guys_env/local/lib/python2.7/site-packages/PasteDeploy-1.5.0-py2.7.egg/paste/deploy/loadwsgi.py", line 247, in loadapp
return loadobj(APP, uri, name=name, **kw)
File "/home/sheena/WORK/tv_guys_env/local/lib/python2.7/site-packages/PasteDeploy-1.5.0-py2.7.egg/paste/deploy/loadwsgi.py", line 272, in loadobj
return context.create()
File "/home/sheena/WORK/tv_guys_env/local/lib/python2.7/site-packages/PasteDeploy-1.5.0-py2.7.egg/paste/deploy/loadwsgi.py", line 710, in create
return self.object_type.invoke(self)
File "/home/sheena/WORK/tv_guys_env/local/lib/python2.7/site-packages/PasteDeploy-1.5.0-py2.7.egg/paste/deploy/loadwsgi.py", line 146, in invoke
return fix_call(context.object, context.global_conf, **context.local_conf)
File "/home/sheena/WORK/tv_guys_env/local/lib/python2.7/site-packages/PasteDeploy-1.5.0-py2.7.egg/paste/deploy/util.py", line 56, in fix_call
val = callable(*args, **kw)
File "/home/sheena/WORK/tv_guys_env/shop/shop/__init__.py", line 30, in main
includeme(config)
File "/home/sheena/WORK/tv_guys_env/shop/shop/fainit.py", line 8, in includeme
config.include('shop.fainit')
File "/home/sheena/WORK/tv_guys_env/local/lib/python2.7/site-packages/pyramid-1.5a1-py2.7.egg/pyramid/config/__init__.py", line 778, in include
c(configurator)
File "/home/sheena/WORK/tv_guys_env/shop/shop/fainit.py", line 24, in includeme
config.include('shop.faroutes')
File "/home/sheena/WORK/tv_guys_env/local/lib/python2.7/site-packages/pyramid-1.5a1-py2.7.egg/pyramid/config/__init__.py", line 778, in include
c(configurator)
File "/home/sheena/WORK/tv_guys_env/shop/shop/faroutes.py", line 12, in includeme
**settings)
File "/home/sheena/WORK/tv_guys_env/local/lib/python2.7/site-packages/pyramid-1.5a1-py2.7.egg/pyramid/util.py", line 507, in wrapper
result = wrapped(self, *arg, **kw)
File "/home/sheena/WORK/tv_guys_env/local/lib/python2.7/site-packages/pyramid_formalchemy-0.4.4-py2.7.egg/pyramid_formalchemy/__init__.py", line 58, in formalchemy_model
view=view, models=[model], model=model, **kwargs)
File "/home/sheena/WORK/tv_guys_env/local/lib/python2.7/site-packages/pyramid_formalchemy-0.4.4-py2.7.egg/pyramid_formalchemy/__init__.py", line 85, in formalchemy_admin
forms = config.maybe_dotted('%s.forms' % package)
File "/home/sheena/WORK/tv_guys_env/local/lib/python2.7/site-packages/pyramid-1.5a1-py2.7.egg/pyramid/config/__init__.py", line 848, in maybe_dotted
return self.name_resolver.maybe_resolve(dotted)
File "/home/sheena/WORK/tv_guys_env/local/lib/python2.7/site-packages/pyramid-1.5a1-py2.7.egg/pyramid/path.py", line 316, in maybe_resolve
return self._resolve(dotted, package)
File "/home/sheena/WORK/tv_guys_env/local/lib/python2.7/site-packages/pyramid-1.5a1-py2.7.egg/pyramid/path.py", line 323, in _resolve
return self._zope_dottedname_style(dotted, package)
File "/home/sheena/WORK/tv_guys_env/local/lib/python2.7/site-packages/pyramid-1.5a1-py2.7.egg/pyramid/path.py", line 372, in _zope_dottedname_style
__import__(used)
ImportError: No module named forms
Answer: From the traceback, `formalchemy_admin` builds the dotted name
`'%s.forms' % package`, and `faroutes.py` passes `package='shop'`, so Pyramid
is trying to import a module named `shop.forms` that does not exist. Either
create that module, or pass your existing forms module explicitly via the
`forms` argument, as `fainit.py` already does with `faforms`. Note also that
`faroutes.py` reads the settings key `'shop.fa_settings}}'` while `fainit.py`
stores them under `'bar.fa_config'`, so `settings` ends up empty and your
`view`/`session_factory` configuration never reaches the route.
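If you go the module route, a minimal placeholder is usually enough to get
past the `ImportError` (hypothetical contents; pyramid_formalchemy looks up
custom `FieldSet`/`Grid` classes in this module and falls back to defaults
when none are defined):
# shop/forms.py
# An essentially empty forms module; add FieldSet subclasses here
# later to customize how individual models are rendered.
from formalchemy import FieldSet, Grid  # noqa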
|
Duplicated results when importing methods from a class of another py file
Question: I am quite new to Python, so I hope you can help me with this silly problem,
as I couldn't find any reason for it to happen. I have a file called
calcoo.py with a class called Calculator that sums and subtracts, and I
inherit from that class in another class called CalculatorChild (located in
another py file in the same directory) that just extends Calculator with
multiply and divide methods. So far it works, but it gives me duplicated
results when summing, as if the rest of the program calcoo.py were considered
part of the class Calculator. So here is my code:
calcoo.py file:
#! /usr/bin/python
# -*- coding: utf-8 -*-
import sys

operator1 = sys.argv[1]
operation = sys.argv[2]
operator2 = sys.argv[3]

try:
    operator1 = float(sys.argv[1])
    operator2 = float(sys.argv[3])
except ValueError:
    sys.exit("Error: Non numerical Parameters")

class Calculator():
    def sumatory(self):
        return float(operator1) + float(operator2)

    def substract(self):
        return float(operator1) - float(operator2)

if operation == "sum":
    print Calculator().sumatory()
elif operation == "substract":
    print Calculator().substract()
else:
    print "Error, operation not supported."
calcoochild.py
#! /usr/bin/python
# -*- coding: utf-8 -*-
import sys

operator1 = sys.argv[1]
operation = sys.argv[2]
operator2 = sys.argv[3]

try:
    operator1 = float(sys.argv[1])
    operator2 = float(sys.argv[3])
except ValueError:
    sys.exit("Error: Non numerical Parameters")

from calcoo import Calculator

class CalculatorChild(Calculator):
    def multiply(self):
        return float(operator1) * float(operator2)

    def divide(self):
        if operator2 == 0:
            print "Division by zero is not allowed."
        else:
            return float(operator1) / float(operator2)

if operation == "sum":
    print CalculatorChild().sumatory()
elif operation == "substract":
    print CalculatorChild().substract()
elif operation == "multiply":
    print CalculatorChild().multiply()
elif operation == "divide":
    print CalculatorChild().divide()
else:
    print "Error, operation not supported."
When I execute calcoo.py everything works fine, but when I execute `python
calcoochild.py 3 sum 2.1`, for example, it prints 5.1 twice; and if I ask it
to multiply it prints:
Error, operation not supported
6.3
So it is as if CalculatorChild inherited not only the methods sumatory and
substract, but also executed the if clause that is outside the class. I have
tried to find a solution but it keeps giving me the same result. I hope
someone can help me; thank you in advance.
Answer: When you import `calcoo`, **all** code at the top level is executed. That
includes parsing `sys.argv` values.
Move whatever should only be executed when running the module as a script to a
block guarded by a test for the module name; if the name is `__main__` then
your code is run as a script, otherwise it was imported as a module:
class Calculator():
    def sumatory(self):
        return float(operator1) + float(operator2)

    def substract(self):
        return float(operator1) - float(operator2)

if __name__ == '__main__':
    import sys

    operator1 = sys.argv[1]
    operation = sys.argv[2]
    operator2 = sys.argv[3]

    try:
        operator1 = float(sys.argv[1])
        operator2 = float(sys.argv[3])
    except ValueError:
        sys.exit("Error: Non numerical Parameters")

    if operation == "sum":
        print Calculator().sumatory()
    elif operation == "substract":
        print Calculator().substract()
    else:
        print "Error, operation not supported."
Now, when you import `calcoo`, **only** the `Calculator` class will be
defined; the rest of the code will not be run.
|
Python Count Certain Character
Question:
def file(char, filename):
    for currentFile in filename:
        print(currentFile.strip())

def string(char, str):
    count = 0
    if char in 'abcdefghijklmnopqrstuvwxyz':
        count += 1
        string(char, str)
    else:
        print("Incorrect Letters")
    print(count)

def main():
    char = input("Enter character: ")
    openFile = input("Enter the filename: ")
    filename = open(openFile)
    file(char, filename)
    string(char, str)

main()
I am trying to count a certain character: for example, if I enter "W" at the
char input prompt, it should count only the letter W. How would I do that? I
am trying to do the recursion in the `string` function.
Thank you.
Answer: Here is a solution without recursion and regular expressions, just using
built-ins.
import sys

char = raw_input("Enter character: ")

# 'isalpha' does the same as your manual check and is more idiomatic
if not char.isalpha():
    print "Incorrect letters"
    # This will terminate the script
    sys.exit()

fname = raw_input("Enter filename: ")

count = 0
# We use a context manager to open the file; this way we don't
# have to close it ourselves when we're done. This is the idiomatic
# way to open files in Python since context managers were introduced.
with open(fname, 'r') as fp:
    # We go through the file line by line
    for line in fp:
        # We can use the built-in 'count' method to count the
        # occurrences of a character in a string.
        # Use 'line.lower().count(char)' if you want to be case-insensitive
        count += line.count(char)

print count
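If you specifically want the recursive approach you were attempting in
`string()`, here is a minimal sketch (my own variant, not part of the original
code; note that Python's default recursion limit of roughly 1000 frames makes
this impractical for anything but short strings):
def count_char(text, char):
    # Base case: an empty string contains no occurrences
    if not text:
        return 0
    # Count the first character (True counts as 1), then recurse on the rest
    return (text[0] == char) + count_char(text[1:], char)

with open(fname, 'r') as fp:
    print count_char(fp.read(), char)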
|
Trying to combine (concatenate) elements from 3 lists into one, new list
Question: I have 3 lists that contain Town, Range and Section (These are also the names
of the lists) information. For example, List Town has 'N14'...'N20', List
Range has 'E4'...'E7' and List Section has '01'...'36'. I want to be able to
put all possible combinations from the three lists into one new list, named
AOI, like this 'N14E401'....'N20E732' (727 possible combinations). This is for
a arcpy script that is already written, and working, that will use raw_input
prompts (the above combinations) that will be then used as the AOI that will
do some geoprocessing (not important as that part of the script works fine). I
just want to make the AOI selection easier as the way I have it set up now,
the user must input the Town, Range and Section information as individual
raw_inputs in three separate steps.
Thanks in advance. I would have put this on the arcpy specific area but it
seems more of a python question than an arcpy question. I am a complete python
noob and have been teaching myself scripting so...be gentle, kind readers.
Answer: What you are trying to achieve is the [Cartesian
product](http://en.wikipedia.org/wiki/Cartesian_product) of 3 lists. This can
easily be achieved by using
[itertools.product](http://docs.python.org/2/library/itertools.html#itertools.product)
Of course you would not get the output exactly as you depicted; instead you
will get a list of tuples, but joining each tuple back into a single string is
trivial. For each of the tuples you need to invoke
[str.join](http://docs.python.org/2/library/stdtypes.html#str.join).
You can either loop through the tuples, joining each one while [incrementally
appending](http://docs.python.org/2/tutorial/datastructures.html) to a result
list, or better, use a [list
comprehension](http://docs.python.org/2/tutorial/datastructures.html#list-
comprehensions).
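A minimal sketch of the whole thing (with shortened example lists; substitute
your full Town/Range/Section lists):
from itertools import product

Town = ['N14', 'N15']
Range = ['E4', 'E5']
Section = ['01', '02']

# product() yields one tuple per combination; ''.join() glues each
# tuple into a single string like 'N14E401'
AOI = [''.join(combo) for combo in product(Town, Range, Section)]
print AOI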
|
Python: calling function from imported file
Question: How do you call a function from an imported file? For example:
Test:
import test2
def aFunction():
    print "hi there"
Test2:
import test
aFunction()
This gives me a NameError saying my function isn't defined. I've also tried:
from test import aFunction
And:
from test import *
I've also tried not importing test2 in test. I'm coming to Python from C++,
so I fear I'm missing something blatantly obvious to veteran Python
programmers...
Answer: You are creating a circular import. `test.py` imports `test2.py` which tries
to import `test.py`.
Don't do this. By the time `test2` imports `test`, that module has not
completed executing all the code; the function is not yet defined:
* `test` is compiled and executed, and an empty module object is added to `sys.modules`.
* The line `import test2` is run.
* `test2` is compiled and executed, and an empty module object is added to `sys.modules`.
* The line `import test` is run.
* `test` is already present as a module in `sys.modules`, this object is returned and bound to the name `test`.
* A next line tries to run `test.aFunction()`. No such name exists in `test`. An exception is raised.
* The lines defining `def aFunction()` are never executed, because an exception was raised.
Remove the `import test2` line, run `test2.py` _directly_, and importing the
function will work fine:
import test
test.aFunction()
|
Python string/list with £ symbols failing writing to file?
Question: I am having issues writing data to a file due to `£` signs being in my
string/list.
For example, in my code below, `x` is created by a series of appends from
various regex searches, matches, subs and generic trims/splits.
# -*- coding: utf-8 -*-
x = [u'Loc ', u'352', '1', '51', '3D2', u'Student Total \xa3540.00', u'Discount \xa235.00', '\n', u'Rec ', u'352', '2', '51', '5S1', u'Student Total \xa3540.00', u'Discount \xa235.00', '\n']
with open('test.txt','w') as dfile:
    dfile.write('\n'.join(x))  # UnicodeEncodeError: 'ascii' codec can't encode character u'\xa3' in position 71: ordinal not in range(128)
    dfile.write(x)  # TypeError: expected a character buffer object
I am trying to write x to file so it appears like:
Loc
352
1
51
3D2
Student Total £3540.00
Discount £235.00
Rec
352
2
51
5S1
Student Total £3540.00
Discount £235.00
Anyone know how I can do what I am trying to achieve?
**EDIT**
I now can't get it to compare, and save only if the content is different...
with open('test.txt','r') as dfile:
    dfiler = dfile.read()

dfiler = dfiler.decode("UTF-8")

if dfiler == x:
    print "same, no need to save"
else:
    with open('test.txt','w') as result_end_datafile:
        dfile.write('\n'.join(x).encode("UTF-8"))
Answer: You need to encode the unicode string before writing:
dfile.write('\n'.join(x).encode("UTF-8"))
Alternatively, you can use
[codecs.`open()`](http://docs.python.org/release/2.6/library/codecs.html#codecs.open)
in Python 2.x, passing the encoding as an argument when opening the file:
import codecs

with codecs.open('test.txt', 'w', encoding="UTF-8") as dfile:
    dfile.write('\n'.join(x))
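Regarding the edit: the comparison never succeeds because you are comparing
the file's contents (one string) against the list `x`; compare against the
joined string instead. There is also a variable mix-up in the write branch
(`dfile` is the already-closed read handle, not `result_end_datafile`). A
minimal sketch of the corrected logic:
joined = '\n'.join(x)

with open('test.txt', 'r') as dfile:
    existing = dfile.read().decode("UTF-8")

if existing == joined:
    print "same, no need to save"
else:
    with open('test.txt', 'w') as outfile:
        outfile.write(joined.encode("UTF-8"))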
* * *
**Related:**
* [UnicodeDecodeError when redirecting to file](http://stackoverflow.com/q/4545661/1679863)
|
Stuck on learnpython.org exercise using JSON
Question: <http://www.learnpython.org/Serialization_using_JSON_and_pickle>
Here are the instructions:
The aim of this exercise is to print out the JSON string with key-value pair
"Me" : 800 added to it.
And below is the starting code, which we should modify.
#Exercise fix this function, so it adds the given name and salary pair to the json it returns
def add_employee(jsonSalaries, name, salary):
    # Add your code here
    return jsonSalaries

#Test code - shouldn't need to be modified
originalJsonSalaries = '{"Alfred" : 300, "Jane" : 301 }'
newJsonSalaries = add_employee(originalJsonSalaries, "Me", 800)
print(newJsonSalaries)
I'm completely lost. The JSON lesson was brief, at best. The issue I seem to
be running into here is that `originalJsonSalaries` is defined as a string
(containing all sorts of symbols, like brackets). In fact, I think that if the
single quotes surrounding its definition were removed,
`originalJsonSalaries` would be a dictionary and this would be a lot easier.
But as it stands, how can I append `"Me"` and `800` to the string and still
maintain the dictionary-like formatting?
And yes, I'm very very new to coding. The only other language I know is tcl.
EDIT:
OK, thanks to the answers, I figured out I was being dense and I wrote this
code:
import json

#Exercise fix this function, so it adds the given name and salary pair to the json it returns
def add_employee(jsonSalaries, name, salary):
    # Add your code here
    jsonSalaries = json.loads(jsonSalaries)
    jsonSalaries["Me"] = 800
    return jsonSalaries

#Test code - shouldn't need to be modified
originalJsonSalaries = '{"Alfred" : 300, "Jane" : 301 }'
newJsonSalaries = add_employee(originalJsonSalaries, "Me", 800)
print(newJsonSalaries)
This does not work. For whatever reason, the original dictionary keys are
formatted as unicode (I don't know where that happened), so when I print out
the dictionary, the "u" flag is shown:
{u'Jane': 301, 'Me': 800, u'Alfred': 300}
I have tried using `dict.pop()` to replace the key ( `dict("Jane") =
dict.pop(u"Jane")` ) but that just brings up `SyntaxError: can't assign to
function call`
Is my original solution incorrect, or is this just an annoying formatting
issue? If so, how do I resolve it?
Answer: The [page you linked
to](http://www.learnpython.org/Serialization_using_JSON_and_pickle) says
exactly how to do this:
> In order to use the json module, it must first be imported:
>
>
> import json
>
>
> [...]
>
> To load JSON back to a data structure, use the "loads" method. This method
> takes a string and turns it back into the json object datastructure:
>
>
> print json.loads(json_string)
>
They gave you a string (`jsonSalaries`). Use `json.loads` to turn it into a
dictionary.
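A minimal sketch of the full round trip (this also addresses the edit: the
`u''` markers are just how Python 2 displays unicode keys when you print a
dict, and they disappear once you dump the dict back to a JSON string; note
the function should use its `name` and `salary` arguments rather than
hard-coding "Me" and 800):
import json

def add_employee(jsonSalaries, name, salary):
    salaries = json.loads(jsonSalaries)   # JSON string -> dict
    salaries[name] = salary               # use the arguments
    return json.dumps(salaries)           # dict -> JSON string again

print(add_employee('{"Alfred" : 300, "Jane" : 301 }', "Me", 800))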
|
PyQt4: Which one of these would be better/more correct? (Notepad++ clone)
Question: I'm making a super simple Notepad++ clone in Python/PyQt4, and I'm wondering
which of these options for storing editor tab data is better:
Qsci.QsciScintilla instance for the current tab, the file path, current
language, etc. These are mapped to tab indexes by a dict.
OPTION 2: Same as OPTION 1, but cut out the class and store everything in a
dict instead (example: `{1: {"scintilla": <blah>, "filepath":
"C:/File/whatevs.py", "language": "python"}}`)
My code comments can explain it better.
from PyQt4 import QtGui, Qsci

class QQCodeEditor(QtGui.QTabWidget):
    def __init__(self, parent=None):
        QtGui.QTabWidget.__init__(self, parent)
        self.new_tab()
        self.new_tab()
        # Option 1: Maps index to tab object
        # Option 2: Maps index to dict of options
        self.tab_info = {}

    def new_tab(self):
        scin = Qsci.QsciScintilla()
        index = self.addTab(scin, "New Tab")

    def get_tab_info(self, index):
        # Returns QQCodeTab object
        return self.tab_info[index]

    def save(self, index):
        # Option 2: Save dialog box and file system stuff goes here
        pass

class QQCodeTab(object):
    def __init__(self, scintilla, editor):
        self.scintilla = scintilla
        self.editor = editor

    def save(self):
        # Option 1: Save dialog box and file system stuff goes here
        pass
Answer: If you're wondering whether to use a class or a dictionary, you probably want
a `namedtuple`. That gives you the simplicity of a `dict` with the attribute
syntax of a class:
from collections import namedtuple
FooBar = namedtuple("FooBar", ["these", "are", "the", "attributes"])
FooBar(123, 324, the=12, attributes=656).these
#>>> 123
|
change CLIENT_FOUND_ROWS flag in django for mysql-python (MySQLdb)?
Question: In a normal Django project I have a problem with MySQL 5.5 `INSERT ... ON
DUPLICATE KEY UPDATE`: the number of rows affected does not match
`cursor.rowcount`.
functions.html):
> For INSERT ... ON DUPLICATE KEY UPDATE statements, the affected-rows value
> per row is 1 if the row is inserted as a new row, 2 if an existing row is
> updated, and 0 if an existing row is set to its current values. **If you
> specify the CLIENT_FOUND_ROWS flag** , the affected-rows value is 1 (not 0)
> if an existing row is set to its current values.
So I tried it out: it seems that in the mysql shell, repeating the `INSERT ...
ON DUPLICATE KEY UPDATE` statement will show
> Query OK, 0 rows affected (0.00 sec)
In mysql-python (import MySQLdb), `cursor.rowcount` will always be 1,
regardless of whether the row was inserted, updated, or left unchanged.
I searched everywhere and cannot find a way to change the `CLIENT_FOUND_ROWS`
flag in Django. Anyone know how?
Answer: Okay, I found out how.
In django/db/backends/mysql/base.py there's
kwargs['client_flag'] = CLIENT.FOUND_ROWS
kwargs.update(settings_dict['OPTIONS'])
Since `OPTIONS` is applied after the default flag is set (the `update` call
runs last), you can override it from your project's settings.py like this:
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.',
        'NAME': '',
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
        'OPTIONS': {
            'client_flag': 0
        }
    }
}
|
Tkinter Label, TypeError: cannot concatenate 'str' and 'instance' objects
Question: I am coding a dice simulator that will roll either a 6-sided or an 8-sided
die. I am using Python 2.7 and Tkinter. Here is the file containing the dice
dictionary:
DICE = dict(
    sixsided={'name': 'Six Sided Dice',
              'side': 6},
    eightsided={'name': 'Eight Sided Dice',
                'side': 8}
)
names = ['Six Sided Dice', 'Eight Sided Dice']
And here is the code from my main file that is causing my problem:
diceroll = random.randrange(1,DICE[selecteddice]["side"])
Label(diceroll, text="You rolled a " + diceroll + " on the " + DICE[selecteddice]["name"])
My problem is the error message that occurs when I run my file:
TypeError: cannot concatenate 'str' and 'instance' objects
Any help is greatly appreciated!! :)
Answer: Hope you were expecting something like this: *(screenshot of the working Label window omitted)*
You have to pass a `Tk()` instance (assuming Tkinter was imported as `from
Tkinter import *`) as the first argument to a Tk widget:
root = Tk()
Label(root, text="You rolled a " + diceroll + " on the " + DICE[selecteddice]["name"])
But now you'll end up with `TypeError: cannot concatenate 'str' and 'int'
objects` so use the `str()` method to convert `diceroll` to a string
Label(root, text="You rolled a " + str(diceroll) + " on the " + DICE[selecteddice]["name"])
The `TypeError: cannot concatenate 'str' and 'instance' objects` error occurs
because an instance is not automatically converted to a string; a class needs
`__repr__`/`__str__` methods (or an explicit `str()` call) before its objects
can take part in string concatenation.
Since you haven't displayed your full code, this is as far as I can help.
# The full working example behind the window described above
import random
from Tkinter import *

selecteddice = 'sixsided'

DICE = dict(
    sixsided={'name': 'Six Sided Dice',
              'side': 6},
    eightsided={'name': 'Eight Sided Dice',
                'side': 8}
)
names = ['Six Sided Dice', 'Eight Sided Dice']

root = Tk()
diceroll = random.randrange(1, DICE[selecteddice]["side"])
Label(root, text="You rolled a " + str(diceroll) + " on the " + DICE[selecteddice]["name"]).pack()
root.mainloop()
|
How to count the number of commas in a string using regular expression in python?
Question: I have a string in the following format:
str ="India,USA,Australia,Japan,Russia"
I want to extract the word after the third comma by counting the number of
commas, using a regular expression in Python.
Desired output: `Japan`
Answer: You can do that with a regular expression with something like
([^,]*,){3}([^,]*)
with the meaning
* `[^,]*` Zero or more chars but no commas
* `,` a comma
* `{3}` the previous group must be repeated three times
* `[^,]*` Zero or more chars but no commas
the second group will be the fourth comma-separated value
import re

text = "India,USA,Australia,Japan,Russia"
m = re.match("([^,]*,){3}([^,]*)", text)
if m:
    print m.group(2)
In this specific case, however, it would be much simpler to just split on
commas and take the fourth value:
print text.split(',')[3]
|
Error in Simple PyGame
Question: I was trying to make a game using Python, so this is the code:
# 1 - Import library
import pygame
from pygame.locals import *

# 2 - Initialize the game
pygame.init()
width, height = 640, 480
screen = pygame.display.set_mode((width, height))

# 3 - Load images
player = pygame.image.load("resources/images/dude.png")
grass = pygame.image.load("resources/images/grass.png")
castle = pygame.image.load("resources/images/castle.png")

# 4 - keep looping through
while 1:
    # 5 - clear the screen before drawing it again
    screen.fill(0)
    # 6 - draw the screen elements
    for x in range(width/grass.get_width()+1):
        for y in range(height/grass.get_height()+1):
            screen.blit(grass, (x*100, y*100))
    screen.blit(castle, (0, 30))
    screen.blit(castle, (0, 135))
    screen.blit(castle, (0, 240))
    screen.blit(castle, (0, 345))
    screen.blit(player, (100, 100))
    # 7 - update the screen
    pygame.display.flip()
    # 8 - loop through the events
    for event in pygame.event.get():
        # check if the event is the X button
        if event.type == pygame.QUIT:
            # if it is quit the game
            pygame.quit()
            exit(0)
This is the error I am getting, but I can't figure out how to solve it:
Traceback (most recent call last):
File "C:/Python32/Game 1/game.py", line 19, in <module>
for x in range(width/grass.get_width()+1):
TypeError: 'float' object cannot be interpreted as an integer
Answer: The `range` function requires integers, not floats or strings. Floats can be
converted to integers with the `int()` function.
So instead of:
for x in range(width/grass.get_width()+1):
use:
for x in range(int(width/grass.get_width()+1)):
e.g.:
>>> int(5.145)
5
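Since the `C:/Python32` path in your traceback suggests Python 3 (where `/`
always returns a float), floor division is an equally clean alternative; this
is just a stylistic choice:
# '//' floors the result and keeps it an integer
for x in range(width // grass.get_width() + 1):
    for y in range(height // grass.get_height() + 1):
        screen.blit(grass, (x * 100, y * 100))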
|
Python: How would you save a simple settings/config file?
Question: I don't care if it's JSON, pickle, YAML, or whatever.
All other implementations I have seen are not forwards compatible, so if I
have a config file, add a new key in the code, then load that config file,
it'll just crash.
Is there any simple way to do this?
Answer: # Configuration files in python
There are several ways to do this depending on the file format required.
## ConfigParser [.ini format]
I would use the standard
[configparser](http://docs.python.org/2/library/configparser.html) approach
unless there were compelling reasons to use a different format.
Write a file like so:
from ConfigParser import SafeConfigParser

config = SafeConfigParser()
# No need to read an existing file first; in fact, add_section()
# would raise DuplicateSectionError if 'main' already existed.
config.add_section('main')
config.set('main', 'key1', 'value1')
config.set('main', 'key2', 'value2')
config.set('main', 'key3', 'value3')

with open('config.ini', 'w') as f:
    config.write(f)
The file format is very simple with sections marked out in square brackets:
[main]
key1 = value1
key2 = value2
key3 = value3
Values can be extracted from the file like so:
from ConfigParser import SafeConfigParser
config = SafeConfigParser()
config.read('config.ini')
print config.get('main', 'key1') # -> "value1"
print config.get('main', 'key2') # -> "value2"
print config.get('main', 'key3') # -> "value3"
# getfloat() raises an exception if the value is not a float
a_float = config.getfloat('main', 'a_float')
# getint() and getboolean() also do this for their respective types
an_int = config.getint('main', 'an_int')
## JSON [.json format]
JSON data can be very complex and has the advantage of being highly portable.
Write data to a file:
import json

config = {'key1': 'value1', 'key2': 'value2'}

with open('config.json', 'w') as f:
    json.dump(config, f)
Read data from a file:
import json

with open('config.json', 'r') as f:
    config = json.load(f)

# edit the data
config['key3'] = 'value3'

# write it back to the file
with open('config.json', 'w') as f:
    json.dump(config, f)
## YAML
A basic YAML example is provided [in this
answer](http://stackoverflow.com/a/1774043/1083707). More details can be found
on [the pyYAML website](http://pyyaml.org/wiki/PyYAMLDocumentation).
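For completeness, a minimal sketch using the third-party PyYAML package
(`pip install pyyaml`; the file name is just a placeholder):
import yaml  # third-party: pip install pyyaml

config = {'key1': 'value1', 'key2': 'value2'}

# write; default_flow_style=False gives the familiar "key: value" layout
with open('config.yaml', 'w') as f:
    yaml.dump(config, f, default_flow_style=False)

# read; safe_load only constructs plain Python objects
with open('config.yaml') as f:
    config = yaml.safe_load(f)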
|