code | signature | docstring | loss_without_docstring | loss_with_docstring | factor
string | string | string | float64 | float64 | float64
---|---|---|---|---|---|
from pygeotools.lib import timelib
driverName = "ESRI Shapefile"
drv = ogr.GetDriverByName(driverName)
if os.path.exists(out_fn):
drv.DeleteDataSource(out_fn)
out_ds = drv.CreateDataSource(out_fn)
out_lyrname = os.path.splitext(os.path.split(out_fn)[1])[0]
geom_srs = geom.GetSpatialReference()
geom_type = geom.GetGeometryType()
out_lyr = out_ds.CreateLayer(out_lyrname, geom_srs, geom_type)
if fields:
field_defn = ogr.FieldDefn("name", ogr.OFTString)
field_defn.SetWidth(128)
out_lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn("path", ogr.OFTString)
field_defn.SetWidth(254)
out_lyr.CreateField(field_defn)
#field_defn = ogr.FieldDefn("date", ogr.OFTString)
#This allows sorting by date
field_defn = ogr.FieldDefn("date", ogr.OFTInteger)
field_defn.SetWidth(32)
out_lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn("decyear", ogr.OFTReal)
field_defn.SetPrecision(8)
field_defn.SetWidth(64)
out_lyr.CreateField(field_defn)
out_feat = ogr.Feature(out_lyr.GetLayerDefn())
out_feat.SetGeometry(geom)
if fields:
#Hack to force output extension to tif, since out_fn is shp
out_path = os.path.splitext(out_fn)[0] + '.tif'
out_feat.SetField("name", os.path.split(out_path)[-1])
out_feat.SetField("path", out_path)
#Try to extract a date from input raster fn
out_feat_date = timelib.fn_getdatetime(out_fn)
if out_feat_date is not None:
datestamp = int(out_feat_date.strftime('%Y%m%d'))
#out_feat_date = int(out_feat_date.strftime('%Y%m%d%H%M'))
out_feat.SetField("date", datestamp)
decyear = timelib.dt2decyear(out_feat_date)
out_feat.SetField("decyear", decyear)
out_lyr.CreateFeature(out_feat)
out_ds = None | def geom2shp(geom, out_fn, fields=False) | Write out a new shapefile for input geometry | 1.959493 | 1.957414 | 1.001062 |
gt = np.array(ds.GetGeoTransform())
from pygeotools.lib import iolib
a = iolib.ds_getma_sub(ds, scale=scale)
#Create empty geometry
geom = ogr.Geometry(ogr.wkbPolygon)
#Check to make sure we have unmasked data
if a.count() != 0:
#Scale the gt for reduced resolution
#The UL coords should remain the same, as any rounding will trim LR
if (scale != 1.0):
gt[1] *= scale
gt[5] *= scale
#Get srs
ds_srs = get_ds_srs(ds)
if t_srs is None:
t_srs = ds_srs
#Find the unmasked edges
#Note: using only axis=0 from notmasked_edges will miss undercuts - see malib.get_edgemask
#Better ways to do this - binary mask, sum (see numpy2stl)
#edges0, edges1, edges = malib.get_edges(a)
px = np.ma.notmasked_edges(a, axis=0)
# coord = []
#Combine edge arrays, reversing order and adding first point to complete polygon
x = np.concatenate((px[0][1][::1], px[1][1][::-1], [px[0][1][0]]))
#x = np.concatenate((edges[0][1][::1], edges[1][1][::-1], [edges[0][1][0]]))
y = np.concatenate((px[0][0][::1], px[1][0][::-1], [px[0][0][0]]))
#y = np.concatenate((edges[0][0][::1], edges[1][0][::-1], [edges[0][0][0]]))
#Use np arrays for computing mapped coords
mx, my = pixelToMap(x, y, gt)
#Create wkt string
geom_wkt = 'POLYGON(({0}))'.format(', '.join(['{0} {1}'.format(*a) for a in zip(mx,my)]))
geom = ogr.CreateGeometryFromWkt(geom_wkt)
if not ds_srs.IsSame(t_srs):
ct = osr.CoordinateTransformation(ds_srs, t_srs)
geom.Transform(ct)
#Make sure geometry has correct srs assigned
geom.AssignSpatialReference(t_srs)
if not geom.IsValid():
tol = gt[1] * 0.1
geom = geom.Simplify(tol)
#Need to get output units and extent for tolerance specification
if simplify:
#2 pixel tolerance
tol = gt[1] * 2
geom = geom.Simplify(tol)
if convex:
geom = geom.ConvexHull()
else:
print("No unmasked values found")
return geom | def get_outline(ds, t_srs=None, scale=1.0, simplify=False, convex=False) | Generate outline of unmasked values in input raster
get_outline is an attempt to reproduce the PostGIS Raster ST_MinConvexHull function
Could potentially do the following: Extract random pts from unmasked elements, get indices, Run scipy convex hull, Convert hull indices to mapped coords
See this: http://stackoverflow.com/questions/3654289/scipy-create-2d-polygon-mask
This generates a wkt polygon outline of valid data for the input raster
Want to limit the dimensions of a, as notmasked_edges is slow: a = iolib.ds_getma_sub(ds, scale=scale) | 4.415265 | 4.077625 | 1.082803 |
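A minimal usage sketch (not part of the source) pairing get_outline with geom2shp above to write a valid-data footprint. It assumes both functions are importable from pygeotools.lib.geolib; 'dem.tif' and 'dem_outline.shp' are placeholder filenames.
from osgeo import gdal
from pygeotools.lib import geolib
ds = gdal.Open('dem.tif')                                      #placeholder input raster
outline = geolib.get_outline(ds, scale=4.0, simplify=True)     #coarsen by 4x to speed up notmasked_edges
geolib.geom2shp(outline, 'dem_outline.shp', fields=True)       #footprint shapefile with name/path/date fields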
#Convert lat/lon to projected srs
ds_srs = get_ds_srs(ds)
#If xy_srs is undefined, assume it is the same as ds_srs
mX = x
mY = y
if xy_srs is not None:
if not ds_srs.IsSame(xy_srs):
mX, mY, mZ = cT_helper(x, y, 0, xy_srs, ds_srs)
return mX, mY | def ds_cT(ds, x, y, xy_srs=wgs_srs) | Convert input point coordinates to map coordinates that match input dataset | 3.805096 | 3.859507 | 0.985902 |
#Extract list of (x,y) tuples at nodes
nodes = geom.GetPoints()
#print "%i nodes" % len(nodes)
#Point spacing in map units
if dl is None:
nsteps=1000
dl = geom.Length()/nsteps
#This only works for equidistant projection!
#l = np.arange(0, geom.Length(), dl)
#Initialize empty lists
l = []
mX = []
mY = []
#Add first point to output lists
l += [0]
x = nodes[0][0]
y = nodes[0][1]
mX += [x]
mY += [y]
#Remainder
rem_l = 0
#Previous length (initially 0)
last_l = l[-1]
#Loop through each line segment in the feature
for i in range(0,len(nodes)-1):
x1, y1 = nodes[i]
x2, y2 = nodes[i+1]
#Total length of segment
tl = np.sqrt((x2-x1)**2 + (y2-y1)**2)
#Number of dl steps we can fit in this segment
#This returns floor
steps = int((tl+rem_l)/dl)
if steps > 0:
dx = ((x2-x1)/tl)*dl
dy = ((y2-y1)/tl)*dl
rem_x = rem_l*(dx/dl)
rem_y = rem_l*(dy/dl)
#Loop through each step and append to lists
for n in range(1, steps+1):
l += [last_l + (dl*n)]
#Remove the existing remainder
x = x1 + (dx*n) - rem_x
y = y1 + (dy*n) - rem_y
mX += [x]
mY += [y]
#Note: could just build up arrays of pX, pY for entire line, then do single z extraction
#Update the remainder
rem_l += tl - (steps * dl)
last_l = l[-1]
else:
rem_l += tl
return l, mX, mY | def line2pts(geom, dl=None) | Given an input line geom, generate points at fixed interval
Useful for extracting profile data from raster | 4.012524 | 3.966546 | 1.011591 |
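A small usage sketch (not from the source): sample points every 10 map units along a line geometry, assuming line2pts is importable as above; the WKT line is made up for illustration.
from osgeo import ogr
line = ogr.CreateGeometryFromWkt('LINESTRING(0 0, 100 0, 100 50)')
l, mX, mY = line2pts(line, dl=10.0)   #distances along the line plus x/y map coords at ~10 unit spacing
#mX/mY can then be converted to pixel coords to extract a raster profile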
if t_srs is None:
t_srs = get_ds_srs(ds_list[0])
res = np.array([get_res(ds, t_srs=t_srs) for ds in ds_list])
#Check that all projections are identical
#gt_array = np.array([ds.GetGeoTransform() for ds in args])
#xres = gt_array[:,1]
#yres = -gt_array[:,5]
#if xres == yres:
#res = np.concatenate((xres, yres))
min = np.min(res)
max = np.max(res)
mean = np.mean(res)
med = np.median(res)
return (min, max, mean, med) | def get_res_stats(ds_list, t_srs=None) | Return resolution stats for an input dataset list | 2.73973 | 2.603361 | 1.052382 |
gt = ds.GetGeoTransform()
ds_srs = get_ds_srs(ds)
#This is Xres, Yres
res = [gt[1], np.abs(gt[5])]
if square:
res = [np.mean(res), np.mean(res)]
if t_srs is not None and not ds_srs.IsSame(t_srs):
if True:
#This diagonal approach is similar to the approach in gdaltransformer.cpp
#Bad news for large extents near the poles
#ullr = get_ullr(ds, t_srs)
#diag = np.sqrt((ullr[0]-ullr[2])**2 + (ullr[1]-ullr[3])**2)
extent = ds_extent(ds, t_srs)
diag = np.sqrt((extent[2]-extent[0])**2 + (extent[3]-extent[1])**2)
res = diag / np.sqrt(ds.RasterXSize**2 + ds.RasterYSize**2)
res = [res, res]
else:
#Compute from center pixel
ct = osr.CoordinateTransformation(ds_srs, t_srs)
pt = get_center(ds)
#Transform center coordinates
pt_ct = ct.TransformPoint(*pt)
#Transform center + single pixel offset coordinates
pt_ct_plus = ct.TransformPoint(pt[0] + gt[1], pt[1] + gt[5])
#Compute resolution in new units
res = [pt_ct_plus[0] - pt_ct[0], np.abs(pt_ct_plus[1] - pt_ct[1])]
return res | def get_res(ds, t_srs=None, square=False) | Get GDAL Dataset raster resolution | 3.323366 | 3.283499 | 1.012142 |
gt = ds.GetGeoTransform()
ds_srs = get_ds_srs(ds)
#Note: this is center of center pixel, not ul corner of center pixel
center = [gt[0] + (gt[1] * ds.RasterXSize/2.0), gt[3] + (gt[5] * ds.RasterYSize/2.0)]
#include t_srs.Validate() and t_srs.Fixup()
if t_srs is not None and not ds_srs.IsSame(t_srs):
ct = osr.CoordinateTransformation(ds_srs, t_srs)
center = list(ct.TransformPoint(*center)[0:2])
return center | def get_center(ds, t_srs=None) | Get center coordinates of GDAL Dataset | 3.647039 | 3.57922 | 1.018948 |
ds_srs = osr.SpatialReference()
ds_srs.ImportFromWkt(ds.GetProjectionRef())
return ds_srs | def get_ds_srs(ds) | Get srs object for GDAL Dataset | 2.044188 | 1.815658 | 1.125866 |
# ds_srs = get_ds_srs(ds)
gt = np.array(ds.GetGeoTransform())
gt_check = ~np.all(gt == np.array((0.0, 1.0, 0.0, 0.0, 0.0, 1.0)))
proj_check = (ds.GetProjection() != '')
#proj_check = ds_srs.IsProjected()
out = False
if gt_check and proj_check:
out = True
return out | def srs_check(ds) | Check validity of Dataset srs
Return True if srs is properly defined | 3.041609 | 3.006744 | 1.011595 |
out = False
b = ds.GetRasterBand(1)
#Looks like this throws:
#ERROR 1: Failed to compute min/max, no valid pixels found in sampling.
#Should just catch this rather than bothering with logic below
try:
mm = b.ComputeRasterMinMax()
if (mm[0] == mm[1]):
ndv = b.GetNoDataValue()
if ndv is None:
out = True
else:
if (mm[0] == ndv):
out = True
except Exception:
out = True
#Check for std of nan
#import math
#stats = b.ComputeStatistics(1)
#for x in stats:
# if math.isnan(x):
# out = True
# break
return out | def ds_IsEmpty(ds) | Check to see if dataset is empty after warp | 5.633258 | 5.583411 | 1.008928 |
ul = [gt[0], gt[3]]
ll = [gt[0], gt[3] + (gt[5] * ny)]
ur = [gt[0] + (gt[1] * nx), gt[3]]
lr = [gt[0] + (gt[1] * nx), gt[3] + (gt[5] * ny)]
return ul, ll, ur, lr | def gt_corners(gt, nx, ny) | Get corner coordinates based on input geotransform and raster dimensions | 1.944151 | 1.579013 | 1.231245 |
def corner_extent(ul, ll, ur, lr):
xmin = min(ul[0], ll[0], ur[0], lr[0])
xmax = max(ul[0], ll[0], ur[0], lr[0])
ymin = min(ul[1], ll[1], ur[1], lr[1])
ymax = max(ul[1], ll[1], ur[1], lr[1])
extent = [xmin, ymin, xmax, ymax]
return extent | Get min/max extent based on corner coord | null | null | null |
|
ul, ll, ur, lr = gt_corners(ds.GetGeoTransform(), ds.RasterXSize, ds.RasterYSize)
ds_srs = get_ds_srs(ds)
if t_srs is not None and not ds_srs.IsSame(t_srs):
ct = osr.CoordinateTransformation(ds_srs, t_srs)
#Check to see if ct creation failed
#if ct == NULL:
#Check to see if transform failed
#if not ct.TransformPoint(extent[0], extent[1]):
#Need to check that transformed coordinates fall within appropriate bounds
ul = ct.TransformPoint(*ul)
ll = ct.TransformPoint(*ll)
ur = ct.TransformPoint(*ur)
lr = ct.TransformPoint(*lr)
extent = corner_extent(ul, ll, ur, lr)
return extent | def ds_extent(ds, t_srs=None) | Return min/max extent of dataset based on corner coordinates
xmin, ymin, xmax, ymax
If t_srs is specified, output will be converted to specified srs | 3.469707 | 3.543218 | 0.979253 |
gt = ds.GetGeoTransform()
ds_srs = get_ds_srs(ds)
if t_srs is None:
t_srs = ds_srs
ns = ds.RasterXSize
nl = ds.RasterYSize
x = np.array([0, ns, ns, 0, 0], dtype=float)
y = np.array([0, 0, nl, nl, 0], dtype=float)
#Note: pixelToMap adds 0.5 to input coords, need to account for this here
x -= 0.5
y -= 0.5
mx, my = pixelToMap(x, y, gt)
geom_wkt = 'POLYGON(({0}))'.format(', '.join(['{0} {1}'.format(*a) for a in zip(mx,my)]))
geom = ogr.CreateGeometryFromWkt(geom_wkt)
geom.AssignSpatialReference(ds_srs)
if not ds_srs.IsSame(t_srs):
geom_transform(geom, t_srs)
return geom | def ds_geom(ds, t_srs=None) | Return dataset bbox envelope as geom | 2.586133 | 2.564305 | 1.008512 |
e = geom.GetEnvelope()
#GetEnvelope returns (xmin, xmax, ymin, ymax)
w = e[1] - e[0]
h = e[3] - e[2]
return w, h | def geom_wh(geom) | Compute width and height of geometry in projected units | 2.86632 | 2.515053 | 1.139666 |
if ds is None:
ds = mem_ds(res, extent, srs=None, dtype=gdal.GDT_Float32)
else:
ds = mem_ds_copy(ds)
b = ds.GetRasterBand(1)
b.WriteArray(ma)
out = gdaldem_mem_ds(ds, processing=processing, returnma=returnma)
return out | def gdaldem_mem_ma(ma, ds=None, res=None, extent=None, srs=None, processing='hillshade', returnma=False, computeEdges=False) | Wrapper to allow gdaldem calculations for arbitrary NumPy masked array input
Untested, work in progress placeholder
Should only need to specify res, can calculate local gt, cartesian srs | 2.699628 | 2.93633 | 0.919388 |
choices = ["hillshade", "slope", "aspect", "color-relief", "TRI", "TPI", "Roughness"]
out = None
scale=1.0
if not get_ds_srs(ds).IsProjected():
scale=111120
if processing in choices:
out = gdal.DEMProcessing('', ds, processing, format='MEM', computeEdges=computeEdges, scale=scale)
else:
print("Invalid processing choice")
print(choices)
#This should be a separate function
if returnma:
from pygeotools.lib import iolib
out = iolib.ds_getma(out)
return out | def gdaldem_mem_ds(ds, processing='hillshade', returnma=False, computeEdges=False) | Wrapper for gdaldem functions
Uses gdaldem API, requires GDAL v2.1+ | 4.689497 | 4.880867 | 0.960792 |
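A usage sketch (not from the source): compute derived products entirely in memory with gdaldem_mem_ds. 'dem.tif' is a placeholder filename and GDAL v2.1+ is assumed, as the docstring notes.
from osgeo import gdal
ds = gdal.Open('dem.tif')                                         #placeholder DEM
hs = gdaldem_mem_ds(ds, processing='hillshade', returnma=True)    #masked array of hillshade values
slope = gdaldem_mem_ds(ds, processing='slope', returnma=False)    #in-memory GDAL dataset instead of an array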
#These gdaldem functions should be able to ingest masked array
#Just write out temporary file, or maybe mem vrt?
valid_opt = ['hillshade', 'hs', 'slope', 'aspect', 'color-relief', 'TRI', 'TPI', 'roughness']
try:
open(fn)
except IOError:
print("Unable to open %s" %fn)
if product not in valid_opt:
print("Invalid gdaldem option specified")
import subprocess
from pygeotools.lib import iolib
bma = None
opts = []
if product == 'hs' or product == 'hillshade':
product = 'hillshade'
#opts = ['-compute_edges',]
out_fn = os.path.splitext(fn)[0]+'_hs_az315.tif'
else:
out_fn = os.path.splitext(fn)[0]+'_%s.tif' % product
if not os.path.exists(out_fn):
cmd = ['gdaldem', product]
cmd.extend(opts)
cmd.extend(iolib.gdal_opt_co)
cmd.extend([fn, out_fn])
if verbose:
print(' '.join(cmd))
cmd_opt = {}
else:
fnull = open(os.devnull, 'w')
cmd_opt = {'stdout':fnull, 'stderr':subprocess.STDOUT}
subprocess.call(cmd, shell=False, **cmd_opt)
if returnma:
ds = gdal.Open(out_fn, gdal.GA_ReadOnly)
bma = iolib.ds_getma(ds, 1)
return bma
else:
return out_fn | def gdaldem_wrapper(fn, product='hs', returnma=True, verbose=True) | Wrapper for gdaldem functions
Note: gdaldem is directly available through API as of GDAL v2.1
https://trac.osgeo.org/gdal/wiki/rfc59.1_utilities_as_a_library
This function is no longer necessary, and will eventually be removed. | 3.760615 | 3.808436 | 0.987443 |
#import malib
#band_array = malib.checkma(band_array)
ndv = band_array.fill_value
ny, nx = band_array.shape
# Half raster cell widths
hx = gt[1]/2.0
hy = gt[5]/2.0
# Calculate raster lower bound indices from point
fx =(px -(gt[0] + hx))/gt[1]
fy =(py -(gt[3] + hy))/gt[5]
ix1 = int(np.floor(fx))
iy1 = int(np.floor(fy))
# Special case where point is on upper bounds
if fx == float(nx - 1):
ix1 -= 1
if fy == float(ny - 1):
iy1 -= 1
# Upper bound indices on raster
ix2 = ix1 + 1
iy2 = iy1 + 1
# Test array bounds to ensure point is within raster midpoints
if(ix1 < 0) or(iy1 < 0) or(ix2 > nx - 1) or(iy2 > ny - 1):
return ndv
# Calculate differences from point to bounding raster midpoints
dx1 = px -(gt[0] + ix1*gt[1] + hx)
dy1 = py -(gt[3] + iy1*gt[5] + hy)
dx2 =(gt[0] + ix2*gt[1] + hx) - px
dy2 =(gt[3] + iy2*gt[5] + hy) - py
# Use the differences to weigh the four raster values
div = gt[1]*gt[5]
return(band_array[iy1,ix1]*dx2*dy2/div +
band_array[iy1,ix2]*dx1*dy2/div +
band_array[iy2,ix1]*dx2*dy1/div +
band_array[iy2,ix2]*dx1*dy1/div) | def bilinear(px, py, band_array, gt) | Bilinear interpolated point at(px, py) on band_array | 2.729992 | 2.723251 | 1.002476 |
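A worked example (not from the source) on a tiny 2x2 masked array, using the standard GDAL geotransform ordering (ulx, xres, 0, uly, 0, -yres); the values below follow from the weighting above.
import numpy as np
a = np.ma.array([[0., 1.], [2., 3.]])          #2x2 grid of values
gt = [0.0, 1.0, 0.0, 2.0, 0.0, -1.0]           #1 m cells, UL corner at (0, 2)
bilinear(0.5, 1.5, a, gt)                      #at the UL cell center -> 0.0
bilinear(1.0, 1.0, a, gt)                      #at the grid center -> 1.5, the mean of all four cells
bilinear(5.0, 5.0, a, gt)                      #outside the cell-center bounds -> fill value (treated as nodata)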
ds_srs = get_ds_srs(ds)
c = get_center(ds)
x, y, z = cT_helper(c[0], c[1], 0.0, ds_srs, geoid_srs)
return z | def get_geoid_offset(ds, geoid_srs=egm08_srs) | Return offset for center of ds
Offset is added to input (presumably WGS84 HAE) to get to geoid
Note: requires vertical offset grids in proj share dir - see earlier note | 5.18644 | 4.509071 | 1.150224 |
pX = np.arange(0, bma.shape[1], stride)
pY = np.arange(0, bma.shape[0], stride)
psamp = np.meshgrid(pX, pY)
#if origmask:
# psamp = np.ma.array(psamp, mask=np.ma.getmaskarray(bma), fill_value=0)
mX, mY = pixelToMap(psamp[0], psamp[1], gt)
mask = None
if origmask:
mask = np.ma.getmaskarray(bma)[::stride]
if newmask is not None:
mask = newmask[::stride]
mX = np.ma.array(mX, mask=mask, fill_value=0)
mY = np.ma.array(mY, mask=mask, fill_value=0)
return mX, mY | def get_xy_ma(bma, gt, stride=1, origmask=True, newmask=None) | Return arrays of x and y map coordinates for input array and geotransform | 2.197553 | 2.106921 | 1.043017 |
gt = ds.GetGeoTransform()
#stride = stride_m/gt[1]
pX = np.arange(0, ds.RasterXSize, stride)
pY = np.arange(0, ds.RasterYSize, stride)
mX, dummy = pixelToMap(pX, pY[0], gt)
dummy, mY = pixelToMap(pX[0], pY, gt)
return mX, mY | def get_xy_1D(ds, stride=1, getval=False) | Return 1D arrays of x and y map coordinates for input GDAL Dataset | 3.629847 | 3.439131 | 1.055455 |
gt = ds.GetGeoTransform()
#stride = stride_m/gt[1]
pX = np.arange(0, ds.RasterXSize, stride)
pY = np.arange(0, ds.RasterYSize, stride)
psamp = np.meshgrid(pX, pY)
mX, mY = pixelToMap(psamp[0], psamp[1], gt)
return mX, mY | def get_xy_grids(ds, stride=1, getval=False) | Return 2D arrays of x and y map coordinates for input GDAL Dataset | 3.562637 | 3.321721 | 1.072527 |
[rows,cols] = XYZ.shape
# Set up constraint equations of the form AB = 0,
# where B is a column vector of the plane coefficients
# in the form b(1)*X + b(2)*Y +b(3)*Z + b(4) = 0.
p = (np.ones((rows,1)))
AB = np.hstack([XYZ,p])
[u, d, v] = np.linalg.svd(AB,0)
# Solution is last column of v.
B = np.array(v[3,:])
coeff = -B[[0, 1, 3]]/B[2]
return coeff | def fitPlaneSVD(XYZ) | Fit a plane to input point data using SVD | 5.023408 | 5.016901 | 1.001297 |
[rows,cols] = XYZ.shape
G = np.ones((rows,3))
G[:,0] = XYZ[:,0] #X
G[:,1] = XYZ[:,1] #Y
Z = XYZ[:,2]
coeff,resid,rank,s = np.linalg.lstsq(G,Z,rcond=None)
return coeff | def fitPlaneLSQ(XYZ) | Fit a plane to input point data using LSQ | 2.452897 | 2.454051 | 0.99953 |
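A quick check (not from the source) on synthetic data: points sampled from z = 2x - 3y + 5 should recover those coefficients with either fit above.
import numpy as np
x, y = np.meshgrid(np.arange(10.), np.arange(10.))
z = 2.0*x - 3.0*y + 5.0
XYZ = np.column_stack((x.ravel(), y.ravel(), z.ravel()))
fitPlaneLSQ(XYZ)    #approximately [ 2., -3.,  5.]
fitPlaneSVD(XYZ)    #same plane, recovered via singular value decomposition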
if gt is None:
gt = [0, 1, 0, 0, 0, -1]
#Filter, can be useful to remove outliers
if perc is not None:
from pygeotools.lib import filtlib
bma_f = filtlib.perc_fltr(bma, perc)
else:
bma_f = bma
#Get indices
x, y = get_xy_ma(bma_f, gt, origmask=origmask)
#Fit only where we have valid data
bma_mask = np.ma.getmaskarray(bma)
coeff = polyfit2d(x[~bma_mask].data, y[~bma_mask].data, bma[~bma_mask].data, order=order)
#For 1D, these are: c, y, x, xy
print(coeff)
#Compute values for all x and y, unless origmask=True
vals = polyval2d(x, y, coeff)
resid = bma - vals
return vals, resid, coeff | def ma_fitpoly(bma, order=1, gt=None, perc=(2,98), origmask=True) | Fit a plane to values in input array | 4.32192 | 4.308144 | 1.003198 |
if gt is None:
gt = [0, 1, 0, 0, 0, -1]
#Filter, can be useful to remove outliers
if perc is not None:
from pygeotools.lib import filtlib
bma_f = filtlib.perc_fltr(bma, perc)
else:
bma_f = bma
#Get indices
x_f, y_f = get_xy_ma(bma_f, gt, origmask=origmask)
#Regardless of desired output (origmask True or False), for fit, need to limit to valid pixels only
bma_f_mask = np.ma.getmaskarray(bma_f)
#Create xyz stack, needed for SVD
xyz = np.vstack((np.ma.array(x_f, mask=bma_f_mask).compressed(), \
np.ma.array(y_f, mask=bma_f_mask).compressed(), bma_f.compressed())).T
#coeff = fitPlaneSVD(xyz)
coeff = fitPlaneLSQ(xyz)
print(coeff)
vals = coeff[0]*x_f + coeff[1]*y_f + coeff[2]
resid = bma_f - vals
return vals, resid, coeff | def ma_fitplane(bma, gt=None, perc=(2,98), origmask=True) | Fit a plane to values in input array | 4.005192 | 3.940899 | 1.016314 |
from pygeotools.lib import iolib
bma = iolib.ds_getma(ds)
gt = ds.GetGeoTransform()
return ma_fitplane(bma, gt) | def ds_fitplane(ds) | Fit a plane to values in GDAL Dataset | 5.219966 | 4.76814 | 1.09476 |
#If geom has srs properly defined, can do this
#geom.TransformTo(wgs_srs)
#Get centroid lat/lon
lon, lat = geom.Centroid().GetPoint_2D()
#Make sure we're -180 to 180
lon180 = (lon+180) - np.floor((lon+180)/360)*360 - 180
zonenum = int(np.floor((lon180 + 180)/6) + 1)
#Determine N/S hemisphere
if lat >= 0:
zonehem = 'N'
else:
zonehem = 'S'
#Deal with special cases
if (lat >= 56.0 and lat < 64.0 and lon180 >= 3.0 and lon180 < 12.0):
zonenum = 32
if (lat >= 72.0 and lat < 84.0):
if (lon180 >= 0.0 and lon180 < 9.0):
zonenum = 31
elif (lon180 >= 9.0 and lon180 < 21.0):
zonenum = 33
elif (lon180 >= 21.0 and lon180 < 33.0):
zonenum = 35
elif (lon180 >= 33.0 and lon180 < 42.0):
zonenum = 37
return str(zonenum)+zonehem | def getUTMzone(geom) | Determine UTM Zone for input geometry | 2.117796 | 2.075212 | 1.02052 |
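An illustrative call (not from the source); the geometry must already be in lon/lat (WGS84), as the comments above note, and the coordinates here are arbitrary.
from osgeo import ogr
pt = ogr.CreateGeometryFromWkt('POINT(-121.7 46.85)')   #lon/lat in the Pacific Northwest
getUTMzone(pt)                                          #'10N'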
out_srs = None
if proj_list is None:
proj_list = gen_proj_list()
#Go through user-defined projection list
for projbox in proj_list:
if projbox.geom.Intersects(geom):
out_srs = projbox.srs
break
#If geom doesn't fall in any of the user projection bbox, use UTM
if out_srs is None:
out_srs = getUTMsrs(geom)
return out_srs | def get_proj(geom, proj_list=None) | Determine best projection for input geometry | 4.584723 | 4.348013 | 1.054441 |
#Eventually, just read this in from a text file
proj_list = []
#Alaska
#Note, this spans -180/180
proj_list.append(ProjBox([-180, -130, 51.35, 71.35], 3338))
#proj_list.append(ProjBox([-130, 172.4, 51.35, 71.35], 3338))
#Transantarctic Mountains
proj_list.append(ProjBox([150, 175, -80, -70], 3294))
#Greenland
proj_list.append(ProjBox([-180, 180, 58, 82], 3413))
#Antarctica
proj_list.append(ProjBox([-180, 180, -90, -58], 3031))
#Arctic
proj_list.append(ProjBox([-180, 180, 60, 90], 3413))
return proj_list | def gen_proj_list() | Create list of projections with cascading preference | 3.616566 | 3.520835 | 1.02719 |
geom_wkt = 'POINT({0} {1})'.format(x, y)
geom = ogr.CreateGeometryFromWkt(geom_wkt)
if t_srs is not None and not wgs_srs.IsSame(t_srs):
ct = osr.CoordinateTransformation(t_srs, wgs_srs)
geom.Transform(ct)
geom.AssignSpatialReference(t_srs)
return geom | def xy2geom(x, y, t_srs=None) | Convert x and y point coordinates to geom | 2.055179 | 2.010672 | 1.022135 |
cmd = ['dem_mosaic',]
if o is None:
o = 'mos'
cmd.extend(['-o', o])
if threads is None:
from pygeotools.lib import iolib
threads = iolib.cpu_count()
cmd.extend(['--threads', threads])
if tr is not None:
cmd.extend(['--tr', tr])
if t_srs is not None:
#cmd.extend(['--t_srs', t_srs.ExportToProj4()])
cmd.extend(['--t_srs', '"%s"' % t_srs.ExportToProj4()])
#cmd.extend(['--t_srs', "%s" % t_srs.ExportToProj4()])
if t_projwin is not None:
cmd.append('--t_projwin')
cmd.extend(t_projwin)
cmd.append('--force-projwin')
if tile is not None:
#Not yet implemented
#cmd.extend(tile_list)
cmd.append('--tile-index')
cmd.append(tile)
if georef_tile_size is not None:
cmd.extend(['--georef-tile-size', georef_tile_size])
if stat is not None:
if stat == 'wmean':
stat = None
else:
cmd.append('--%s' % stat.replace('index',''))
if stat in ['lastindex', 'firstindex', 'medianindex']:
#This will write out the index map to -last.tif by default
cmd.append('--save-index-map')
#Make sure we don't have ndv that conflicts with 0-based DEM indices
cmd.extend(['--output-nodata-value','-9999'])
#else:
# cmd.extend(['--save-dem-weight', o+'_weight'])
#If user provided a file containing list of DEMs to mosaic (useful to avoid long bash command issues)
if fn_list_txt is not None:
if os.path.exists(fn_list_txt):
cmd.append('-l')
cmd.append(fn_list_txt)
else:
print("Could not find input text file containing list of inputs")
else:
cmd.extend(fn_list)
cmd = [str(i) for i in cmd]
#print(cmd)
#return subprocess.call(cmd)
return cmd | def get_dem_mosaic_cmd(fn_list, o, fn_list_txt=None, tr=None, t_srs=None, t_projwin=None, georef_tile_size=None, threads=None, tile=None, stat=None) | Create ASP dem_mosaic command
Useful for spawning many single-threaded mosaicing processes | 3.093686 | 3.09931 | 0.998186 |
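A usage sketch (not from the source): build the command for a median mosaic and hand it to subprocess. The filenames are placeholders, and NASA Ames Stereo Pipeline's dem_mosaic must be on the PATH.
import subprocess
cmd = get_dem_mosaic_cmd(['dem1.tif', 'dem2.tif'], o='mos_median', tr=2.0, stat='median')
subprocess.call(cmd)    #runs ASP dem_mosaic; output naming follows dem_mosaic's own conventions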
driver = iolib.mem_drv
return warp(src_ds, res, extent, t_srs, r, driver=driver, dst_ndv=dst_ndv, verbose=verbose) | def memwarp(src_ds, res=None, extent=None, t_srs=None, r=None, outdir=None, dst_ndv=0, verbose=True) | Helper function that calls warp for single input Dataset with output to memory (GDAL Memory Driver) | 3.928166 | 3.550283 | 1.106438 |
if dst_fn is None:
dst_fn = os.path.splitext(src_ds.GetFileList()[0])[0]+'_warp.tif'
if outdir is not None:
dst_fn = os.path.join(outdir, os.path.basename(dst_fn))
driver = iolib.gtif_drv
dst_ds = warp(src_ds, res, extent, t_srs, r, driver, dst_fn, dst_ndv=dst_ndv, verbose=verbose)
#Write out
dst_ds = None
#Now reopen ds from disk
dst_ds = gdal.Open(dst_fn)
return dst_ds | def diskwarp(src_ds, res=None, extent=None, t_srs=None, r='cubic', outdir=None, dst_fn=None, dst_ndv=None, verbose=True) | Helper function that calls warp for single input Dataset with output to disk (GDAL GeoTiff Driver) | 2.719085 | 2.649607 | 1.026222 |
#Note:GRA_CubicSpline created huge block artifacts for the St. Helen's compute_dh WV cases
#Stick with CubicSpline for both upsampling/downsampling for now
if r == 'near':
#Note: Nearest respects nodata when downsampling
gra = gdal.GRA_NearestNeighbour
elif r == 'bilinear':
gra = gdal.GRA_Bilinear
elif r == 'cubic':
gra = gdal.GRA_Cubic
elif r == 'cubicspline':
gra = gdal.GRA_CubicSpline
elif r == 'average':
gra = gdal.GRA_Average
elif r == 'lanczos':
gra = gdal.GRA_Lanczos
elif r == 'mode':
#Note: Mode respects nodata when downsampling, but very slow
gra = gdal.GRA_Mode
else:
gra = None
sys.exit("Invalid resampling method")
return gra | def parse_rs_alg(r) | Parse resampling algorithm | 4.881787 | 4.522833 | 1.079365 |
if t_srs is None and src_ds_list is None:
print("Input t_srs and src_ds_list are both None")
else:
if t_srs is None:
t_srs = 'first'
if t_srs == 'first' and src_ds_list is not None:
t_srs = geolib.get_ds_srs(src_ds_list[0])
elif t_srs == 'last' and src_ds_list is not None:
t_srs = geolib.get_ds_srs(src_ds_list[-1])
#elif t_srs == 'source':
# t_srs = None
elif isinstance(t_srs, osr.SpatialReference):
pass
elif isinstance(t_srs, gdal.Dataset):
t_srs = geolib.get_ds_srs(t_srs)
elif isinstance(t_srs, str) and os.path.exists(t_srs):
t_srs = geolib.get_ds_srs(gdal.Open(t_srs))
elif isinstance(t_srs, str):
temp = osr.SpatialReference()
if 'EPSG' in t_srs.upper():
epsgcode = int(t_srs.split(':')[-1])
temp.ImportFromEPSG(epsgcode)
elif 'proj' in t_srs:
temp.ImportFromProj4(t_srs)
else:
#Assume the user knows what they are doing
temp.ImportFromWkt(t_srs)
t_srs = temp
else:
t_srs = None
return t_srs | def parse_srs(t_srs, src_ds_list=None) | Parse arbitrary input t_srs
Parameters
----------
t_srs : str or gdal.Dataset or filename
Arbitrary input t_srs
src_ds_list : list of gdal.Dataset objects, optional
Needed if specifying 'first' or 'last'
Returns
-------
t_srs : osr.SpatialReference() object
Output spatial reference system | 1.930158 | 1.83982 | 1.049102 |
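Illustrative calls (not from the source) showing the accepted input forms, assuming parse_srs is importable alongside the warp helpers; 'ref.tif' is a placeholder filename.
from osgeo import gdal
parse_srs('EPSG:32610')                                   #EPSG code string
parse_srs('+proj=stere +lat_0=90 +datum=WGS84')           #proj4 string
parse_srs('first', src_ds_list=[gdal.Open('ref.tif')])    #take srs from the first dataset
parse_srs(gdal.Open('ref.tif'))                           #or pass a GDAL Dataset directly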
#Default to using first t_srs for res calculations
#Assumes src_ds_list is not None
t_srs = parse_srs(t_srs, src_ds_list)
#Valid options for res
res_str_list = ['first', 'last', 'min', 'max', 'mean', 'med', 'common_scale_factor']
#Compute output resolution in t_srs
if res in res_str_list and src_ds_list is not None:
#Returns min, max, mean, med
res_stats = geolib.get_res_stats(src_ds_list, t_srs=t_srs)
if res == 'first':
res = geolib.get_res(src_ds_list[0], t_srs=t_srs, square=True)[0]
elif res == 'last':
res = geolib.get_res(src_ds_list[-1], t_srs=t_srs, square=True)[0]
elif res == 'min':
res = res_stats[0]
elif res == 'max':
res = res_stats[1]
elif res == 'mean':
res = res_stats[2]
elif res == 'med':
res = res_stats[3]
elif res == 'common_scale_factor':
#Determine res to upsample min and downsample max by constant factor
res = np.sqrt(res_stats[1]/res_stats[0]) * res_stats[0]
elif res == 'source':
res = None
elif isinstance(res, gdal.Dataset):
res = geolib.get_res(res, t_srs=t_srs, square=True)[0]
elif isinstance(res, str) and os.path.exists(res):
res = geolib.get_res(gdal.Open(res), t_srs=t_srs, square=True)[0]
else:
res = float(res)
return res | def parse_res(res, src_ds_list=None, t_srs=None) | Parse arbitrary input res
Parameters
----------
res : str or gdal.Dataset or filename or float
Arbitrary input res
src_ds_list : list of gdal.Dataset objects, optional
Needed if specifying 'first' or 'last'
t_srs : osr.SpatialReference() object
Projection for res calculations, optional
Returns
-------
res : float
Output resolution
None if source resolution should be preserved | 2.524462 | 2.384012 | 1.058914 |
#Default to using first t_srs for extent calculations
if t_srs is not None:
t_srs = parse_srs(t_srs, src_ds_list)
#Valid strings
extent_str_list = ['first', 'last', 'intersection', 'union']
if extent in extent_str_list and src_ds_list is not None:
if len(src_ds_list) == 1 and (extent == 'intersection' or extent == 'union'):
extent = None
elif extent == 'first':
extent = geolib.ds_geom_extent(src_ds_list[0], t_srs=t_srs)
#extent = geolib.ds_extent(src_ds_list[0], t_srs=t_srs)
elif extent == 'last':
extent = geolib.ds_geom_extent(src_ds_list[-1], t_srs=t_srs)
#extent = geolib.ds_extent(src_ds_list[-1], t_srs=t_srs)
elif extent == 'intersection':
#By default, compute_intersection takes ref_srs from ref_ds
extent = geolib.ds_geom_intersection_extent(src_ds_list, t_srs=t_srs)
if len(src_ds_list) > 1 and extent is None:
sys.exit("Input images do not intersect")
elif extent == 'union':
#Need to clean up union t_srs handling
extent = geolib.ds_geom_union_extent(src_ds_list, t_srs=t_srs)
elif extent == 'source':
extent = None
elif isinstance(extent, gdal.Dataset):
extent = geolib.ds_geom_extent(extent, t_srs=t_srs)
elif isinstance(extent, str) and os.path.exists(extent):
extent = geolib.ds_geom_extent(gdal.Open(extent), t_srs=t_srs)
elif isinstance(extent, (list, tuple, np.ndarray)):
extent = list(extent)
else:
extent = [float(i) for i in extent.split(' ')]
return extent | def parse_extent(extent, src_ds_list=None, t_srs=None) | Parse arbitrary input extent
Parameters
----------
extent : str or gdal.Dataset or filename or list of float
Arbitrary input extent
src_ds_list : list of gdal.Dataset objects, optional
Needed if specifying 'first', 'last', 'intersection', or 'union'
t_srs : osr.SpatialReference() object, optional
Projection for res calculations
Returns
-------
extent : list of float
Output extent [xmin, ymin, xmax, ymax]
None if source extent should be preserved | 2.319716 | 2.216416 | 1.046607 |
#Type cast arguments as str for evaluation
#Avoid path errors
#res = str(res)
#extent = str(extent)
#t_srs = str(t_srs)
#Parse the input
t_srs = parse_srs(t_srs, src_ds_list)
res = parse_res(res, src_ds_list, t_srs)
extent = parse_extent(extent, src_ds_list, t_srs)
if verbose:
print("\nWarping all inputs to the following:")
print("Resolution: %s" % res)
print("Extent: %s" % str(extent))
print("Projection: '%s'" % t_srs.ExportToProj4())
print("Resampling alg: %s\n" % r)
out_ds_list = []
for i, ds in enumerate(src_ds_list):
fn_list = ds.GetFileList()
fn = '[memory]'
if fn_list is not None:
fn = fn_list[0]
if verbose:
print("%i of %i: %s" % (i+1, len(src_ds_list), fn))
#If input srs are different, must warp
ds_t_srs = geolib.get_ds_srs(ds)
srscheck = bool(t_srs.IsSame(ds_t_srs))
if debug:
print('\n%s' % ds_t_srs.ExportToWkt())
print('%s\n' % t_srs.ExportToWkt())
print('srscheck: %s\n' % srscheck)
rescheck = False
extentcheck = False
#if srscheck:
#Extract info from ds to see if warp is necessary
ds_res = geolib.get_res(ds, square=True)[0]
ds_extent = geolib.ds_extent(ds)
#Note: these checks necessary to handle rounding and precision issues
#Round extent and res to nearest mm
precision = 1E-3
#Or if t_srs has units of degrees
if ds_t_srs.IsGeographic():
precision = 1E-8
rescheck = (res is None) or geolib.res_compare(res, ds_res, precision=precision)
extentcheck = (extent is None) or geolib.extent_compare(extent, ds_extent, precision=precision)
if debug:
print('\n%s, %s\n' % (ds_res, res))
print('%s' % ds_extent)
print('%s\n' % extent)
print('rescheck: %s' % rescheck)
print('extentcheck: %s\n' % extentcheck)
#If the ds passes all three, it is identical to desired output, short circuit
if rescheck and extentcheck and srscheck:
out_ds_list.append(ds)
else:
dst_ds = warptype(ds, res, extent, t_srs, r, outdir, dst_ndv=dst_ndv, verbose=verbose)
out_ds_list.append(dst_ds)
return out_ds_list | def warp_multi(src_ds_list, res='first', extent='intersection', t_srs='first', r='cubic', warptype=memwarp, outdir=None, dst_ndv=None, verbose=True, debug=False) | This parses and checks inputs, then calls desired warp function with appropriate arguments for each input ds
Parameters
----------
src_ds_list : list of gdal.Dataset objects
List of original datasets to be warped
res : arbitrary type
Desired output resolution
extent : arbitrary type
Desired output extent
t_srs : arbitrary type
Desired output spatial reference
r : str
Desired resampling algorithm
warptype : function
Desired warp type (write to memory or disk)
outdir : str
Desired output directory (for disk warp)
dst_ndv : float
Desired output NoData Value
verbose : bool
Print warp parameters
debug : bool
Print extra information for debugging purposes
Returns
-------
out_ds_list : list of gdal.Dataset objects
List of warped datasets (either in memory or on disk) | 3.12833 | 3.043874 | 1.027746 |
return warp_multi(src_ds_list, res, extent, t_srs, r, warptype=memwarp, verbose=verbose, dst_ndv=dst_ndv) | def memwarp_multi(src_ds_list, res='first', extent='intersection', t_srs='first', r='cubic', verbose=True, dst_ndv=0) | Helper function for memwarp of multiple input GDAL Datasets | 2.587506 | 2.738175 | 0.944975 |
#Should implement proper error handling here
if not iolib.fn_list_check(src_fn_list):
sys.exit('Missing input file(s)')
src_ds_list = [gdal.Open(fn, gdal.GA_ReadOnly) for fn in src_fn_list]
return memwarp_multi(src_ds_list, res, extent, t_srs, r, verbose=verbose, dst_ndv=dst_ndv) | def memwarp_multi_fn(src_fn_list, res='first', extent='intersection', t_srs='first', r='cubic', verbose=True, dst_ndv=0) | Helper function for memwarp of multiple input filenames | 2.899681 | 2.986248 | 0.971011 |
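A usage sketch (not from the source): warp two rasters to a common in-memory grid and pull out masked arrays for differencing. The filenames are placeholders; iolib.ds_getma is assumed available, as used elsewhere in these functions.
from pygeotools.lib import iolib
ds_list = memwarp_multi_fn(['dem_2008.tif', 'dem_2015.tif'], res='min', extent='intersection', t_srs='first', r='cubic')
dem1, dem2 = [iolib.ds_getma(ds) for ds in ds_list]   #co-registered masked arrays on a common grid
dh = dem2 - dem1                                      #elevation difference map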
return warp_multi(src_ds_list, res, extent, t_srs, r, verbose=verbose, warptype=diskwarp, outdir=outdir, dst_ndv=dst_ndv) | def diskwarp_multi(src_ds_list, res='first', extent='intersection', t_srs='first', r='cubic', verbose=True, outdir=None, dst_ndv=None) | Helper function for diskwarp of multiple input GDAL Datasets | 2.52721 | 2.793467 | 0.904686 |
#Should implement proper error handling here
if not iolib.fn_list_check(src_fn_list):
sys.exit('Missing input file(s)')
src_ds_list = [gdal.Open(fn, gdal.GA_ReadOnly) for fn in src_fn_list]
return diskwarp_multi(src_ds_list, res, extent, t_srs, r, verbose=verbose, outdir=outdir, dst_ndv=dst_ndv) | def diskwarp_multi_fn(src_fn_list, res='first', extent='intersection', t_srs='first', r='cubic', verbose=True, outdir=None, dst_ndv=None) | Helper function for diskwarp of multiple input filenames | 2.798541 | 2.865287 | 0.976705 |
print("Writing out %s" % outfn)
#Use outfn extension to get driver
#This may have issues if outfn already exists and the mem ds has different dimensions/res
out_ds = iolib.gtif_drv.CreateCopy(outfn, ds, 0, options=iolib.gdal_opt)
out_ds = None | def writeout(ds, outfn) | Write ds to disk
Note: Deprecated function - use diskwarp functions when writing to disk to avoid unnecessary CreateCopy | 11.327472 | 11.483715 | 0.986394 |
#Need to fix for Python 2.x and 3.X support
import urllib.request, urllib.error, urllib.parse
import xml.etree.ElementTree as ET
#http://api.askgeo.com/v1/918/aa8292ec06199d1207ccc15be3180213c984832707f0cbf3d3859db279b4b324/query.xml?points=37.78%2C-122.42%3B40.71%2C-74.01&databases=Point%2CTimeZone%2CAstronomy%2CNaturalEarthCountry%2CUsState2010%2CUsCounty2010%2CUsCountySubdivision2010%2CUsTract2010%2CUsBlockGroup2010%2CUsPlace2010%2CUsZcta2010
req = "http://api.askgeo.com/v1/918/aa8292ec06199d1207ccc15be3180213c984832707f0cbf3d3859db279b4b324/query.xml?points="+str(lat)+"%2C"+str(lon)+"&databases=TimeZone"
opener = urllib.request.build_opener()
f = opener.open(req)
tree = ET.parse(f)
root = tree.getroot()
#Check response
tzid = None
if root.attrib['code'] == '0':
tz = list(root.iter('TimeZone'))[0]
#shortname = tz.attrib['ShortName']
tzid = tz.attrib['TimeZoneId']
return tzid | def getTimeZone(lat, lon) | Get timezone for a given lat/lon | 3.790954 | 3.765949 | 1.00664 |
import pytz
local_tz = pytz.timezone(tz)
local_dt = utc_dt.replace(tzinfo=pytz.utc).astimezone(local_tz)
return local_dt | def getLocalTime(utc_dt, tz) | Return local timezone time | 1.739506 | 1.644874 | 1.057531 |
import ephem
o = ephem.Observer()
o.date = utc_dt
o.lat = str(lat)
o.lon = str(lon)
sun = ephem.Sun()
sun.compute(o)
hour_angle = o.sidereal_time() - sun.ra
rad = str(ephem.hours(hour_angle + ephem.hours('12:00')).norm)
t = datetime.strptime(rad, '%H:%M:%S.%f')
solar_dt = datetime.combine(utc_dt.date(), t.time())
return solar_dt | def solarTime(utc_dt, lat, lon) | Compute local solar time for given (lat, lon) | 2.826509 | 2.855835 | 0.989731 |
import dateutil.parser
dt = dateutil.parser.parse(str(s), fuzzy=True)
return dt | def strptime_fuzzy(s) | Fuzzy date string parsing
Note: this returns current date if not found. If only year is provided, will return current month, day | 4.00698 | 5.100555 | 0.785597 |
t_factor = None
if t1 is not None and t2 is not None and t1 != t2:
dt = t2 - t1
year = timedelta(days=365.25)
t_factor = abs(dt.total_seconds() / year.total_seconds())
return t_factor | def get_t_factor(t1, t2) | Time difference between two datetimes, expressed as decimal year | 2.724833 | 2.518503 | 1.081925 |
dt_list = get_dt_list(fn_list)
fn_list_sort = [fn for (dt,fn) in sorted(zip(dt_list,fn_list))]
return fn_list_sort | def sort_fn_list(fn_list) | Sort input filename list by datetime | 2.647094 | 2.135641 | 1.239485 |
idx = (np.diff(dt_list) == timedelta(0))
while np.any(idx):
dt_list[idx.nonzero()[0] + 1] += timedelta(seconds=offset_s)
idx = (np.diff(dt_list) == timedelta(0))
return dt_list | def fix_repeat_dt(dt_list, offset_s=0.001) | Add some small offset to remove duplicate times
Needed for xarray interp, which expects monotonically increasing times | 2.289439 | 2.188384 | 1.046178 |
dt_list = np.array([fn_getdatetime(fn) for fn in fn_list])
return dt_list | def get_dt_list(fn_list) | Get list of datetime objects, extracted from a filename | 3.686066 | 3.404141 | 1.082818 |
from pygeotools.lib import malib
dt_list = malib.checkma(dt_list, fix=False)
dt_diff = np.abs(dt - dt_list)
return dt_diff.argmin() | def get_closest_dt_idx(dt, dt_list) | Get indices of dt_list that is closest to input dt | 5.076056 | 4.514499 | 1.12439 |
#If pad is in decimal days
if not isinstance(pad, timedelta):
pad = timedelta(days=pad)
from pygeotools.lib import malib
dt_list = malib.checkma(dt_list, fix=False)
dt_diff = np.abs(dt - dt_list)
valid_idx = (dt_diff.data < pad).nonzero()[0]
return valid_idx | def get_closest_dt_padded_idx(dt, dt_list, pad=timedelta(days=30)) | Get indices of dt_list that is closest to input dt +/- pad days | 5.297233 | 4.585123 | 1.155309 |
dt_list = np.array(dt_list)
years = get_unique_years(dt_list)
from collections import OrderedDict
out = OrderedDict()
for year in years:
#If within the same year
if min_rel_dt[0] < max_rel_dt[1]:
dt1 = datetime(year, min_rel_dt[0], min_rel_dt[1])
dt2 = datetime(year, max_rel_dt[0], max_rel_dt[1])
#Or if our relative values include Jan 1
else:
dt1 = datetime(year, min_rel_dt[0], min_rel_dt[1])
dt2 = datetime(year+1, max_rel_dt[0], max_rel_dt[1])
idx = np.logical_and((dt_list >= dt1), (dt_list <= dt2))
if np.any(idx):
out[year] = idx
return out | def dt_filter_rel_annual_idx(dt_list, min_rel_dt=(1,1), max_rel_dt=(12,31)) | Return dictionary containing indices of timestamps that fall within relative month/day bounds of each year | 2.289599 | 2.1995 | 1.040964 |
dt_list_sort = sorted(dt_list)
dt_list_sort_rel = [dt - dt_list_sort[0] for dt in dt_list_sort]
avg_timedelta = sum(dt_list_sort_rel, timedelta())/len(dt_list_sort_rel)
return dt_list_sort[0] + avg_timedelta | def mean_date(dt_list) | Calculate mean datetime from datetime list | 2.286164 | 2.167304 | 1.054842 |
#dt_list_sort = sorted(dt_list)
#Integer division so the result can be used as a list index (Python 3 safe)
idx = len(dt_list)//2
if len(dt_list) % 2 == 0:
md = mean_date([dt_list[idx-1], dt_list[idx]])
else:
md = dt_list[idx]
return md | def median_date(dt_list) | Calculate median datetime from datetime list | 2.426596 | 2.465815 | 0.984095 |
if not isinstance(dt_list[0], float):
o_list = dt2o(dt_list)
else:
o_list = dt_list
o_list_sort = np.sort(o_list)
o_list_sort_idx = np.argsort(o_list)
d = np.diff(o_list_sort)
#These are indices of breaks
#Add one so each b starts a cluster
b = np.nonzero(d > dt_thresh)[0] + 1
#Add one to shape so we include final index
b = np.hstack((0, b, d.shape[0] + 1))
f_list = []
for i in range(len(b)-1):
#Need to subtract 1 here to give cluster bounds
b_idx = [b[i], b[i+1]-1]
b_dt = o_list_sort[b_idx]
#These should be identical if input is already sorted
b_idx_orig = o_list_sort_idx[b_idx]
all_idx = np.arange(b_idx[0], b_idx[1])
all_sort = o_list_sort[all_idx]
#These should be identical if input is already sorted
all_idx_orig = o_list_sort_idx[all_idx]
dict = {}
dict['break_indices'] = b_idx_orig
dict['break_ts_o'] = b_dt
dict['break_ts_dt'] = o2dt(b_dt)
dict['all_indices'] = all_idx_orig
dict['all_ts_o'] = all_sort
dict['all_ts_dt'] = o2dt(all_sort)
f_list.append(dict)
return f_list | def dt_cluster(dt_list, dt_thresh=16.0) | Find clusters of similar datetimes within datetime list | 2.783893 | 2.772494 | 1.004111 |
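A small example (not from the source): four timestamps split into two clusters because the January and June groups are separated by more than the 16-day threshold. It assumes dt_cluster is importable from the timelib module above.
from datetime import datetime
dt_list = [datetime(2015, 1, 1), datetime(2015, 1, 5), datetime(2015, 6, 1), datetime(2015, 6, 2)]
clusters = dt_cluster(dt_list, dt_thresh=16.0)
len(clusters)               #2
clusters[0]['break_ts_dt']  #datetimes bounding the first (January) cluster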
year = dt.year
startOfThisYear = datetime(year=year, month=1, day=1)
startOfNextYear = datetime(year=year+1, month=1, day=1)
yearElapsed = sinceEpoch(dt) - sinceEpoch(startOfThisYear)
yearDuration = sinceEpoch(startOfNextYear) - sinceEpoch(startOfThisYear)
fraction = yearElapsed/yearDuration
return year + fraction | def dt2decyear(dt) | Convert datetime to decimal year | 2.31444 | 2.310117 | 1.001872 |
year = int(t)
rem = t - year
base = datetime(year, 1, 1)
dt = base + timedelta(seconds=(base.replace(year=base.year+1) - base).total_seconds() * rem)
#This works for np array input
#year = t.astype(int)
#rem = t - year
#base = np.array([datetime(y, 1, 1) for y in year])
return dt | def decyear2dt(t) | Convert decimal year to datetime | 3.420547 | 3.26522 | 1.04757 |
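A round-trip sketch (not from the source) for the two conversions above, assuming both are importable from the timelib module.
from datetime import datetime
dt2decyear(datetime(2015, 7, 2))    #roughly 2015.5 (mid-year)
decyear2dt(2015.5)                  #datetime near 2015-07-02, the midpoint of the year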
a = (14 - dt.month)//12
y = dt.year + 4800 - a
m = dt.month + 12*a - 3
return dt.day + ((153*m + 2)//5) + 365*y + y//4 - y//100 + y//400 - 32045 | def dt2jd(dt) | Convert datetime to julian date | 1.639699 | 1.570656 | 1.043959 |
n = int(round(float(jd)))
a = n + 32044
b = (4*a + 3)//146097
c = a - (146097*b)//4
d = (4*c + 3)//1461
e = c - (1461*d)//4
m = (5*e + 2)//153
day = e + 1 - (153*m + 2)//5
month = m + 3 - 12*(m//10)
#Integer division so datetime() below receives an int year
year = 100*b + d - 4800 + m//10
tfrac = 0.5 + float(jd) - n
tfrac_s = 86400.0 * tfrac
minfrac, hours = np.modf(tfrac_s / 3600.)
secfrac, minutes = np.modf(minfrac * 60.)
microsec, seconds = np.modf(secfrac * 60.)
return datetime(year, month, day, int(hours), int(minutes), int(seconds), int(microsec*1E6)) | def jd2dt(jd) | Convert julian date to datetime | 2.33507 | 2.275323 | 1.026259 |
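A worked round trip (not from the source): the Julian day number of 2000-01-01 is 2451545, and converting back lands at noon because Julian days are referenced to 12:00.
from datetime import datetime
dt2jd(datetime(2000, 1, 1))    #2451545
jd2dt(2451545)                 #datetime(2000, 1, 1, 12, 0)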
gps_epoch = datetime(1980,1,6,0,0,0)
gps_week_s = timedelta(seconds=gps_week*7*24*60*60)
gps_ms_s = timedelta(milliseconds=gps_ms)
return gps_epoch + gps_week_s + gps_ms_s | def gps2dt(gps_week, gps_ms) | Convert GPS week and ms to a datetime | 2.035038 | 2.003211 | 1.015888 |
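An illustrative conversion (not from the source): GPS week 1000 began on 1999-03-07, 1000 weeks after the 1980-01-06 GPS epoch.
gps2dt(1000, 0)              #datetime(1999, 3, 7, 0, 0)
gps2dt(1000, 3600*1000)      #one hour into that week: datetime(1999, 3, 7, 1, 0)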
is_repeated = ndb_key_prop._repeated
name = ndb_key_prop._code_name
if name.endswith('_key') or name.endswith('_keys'):
# Case #1 - name is of form 'store_key' or 'store_keys'
string_prop_name = rreplace(name, '_key', '_id', 1)
resolved_prop_name = name[:-4] if name.endswith('_key') else p.plural(name[:-5])
else:
# Case #2 - name is of form 'store'
singular_name = p.singular_noun(name) if p.singular_noun(name) else name
string_prop_name = singular_name + '_ids' if is_repeated else singular_name + '_id'
resolved_prop_name = name
return [
ConversionResult(name=string_prop_name, field=DynamicNdbKeyStringField(ndb_key_prop, registry=registry)),
ConversionResult(name=resolved_prop_name, field=DynamicNdbKeyReferenceField(ndb_key_prop, registry=registry))
] | def convert_ndb_key_propety(ndb_key_prop, registry=None) | Two conventions for handling KeyProperties:
#1.
Given:
store_key = ndb.KeyProperty(...)
Result is 2 fields:
store_id = graphene.String() -> resolves to store_key.urlsafe()
store = NdbKeyField() -> resolves to entity
#2.
Given:
store = ndb.KeyProperty(...)
Result is 2 fields:
store_id = graphene.String() -> resolves to store_key.urlsafe()
store = NdbKeyField() -> resolves to entity | 3.162918 | 3.127346 | 1.011375 |
'''
A simple function that accepts an ndb Query and uses the ndb QueryIterator object (https://cloud.google.com/appengine/docs/python/ndb/queries#iterators)
to return a connection object for use in GraphQL.
It uses array offsets as pagination,
so pagination will only work if the array is static.
'''
args = args or {}
connection_type = connection_type or Connection
edge_type = edge_type or Edge
pageinfo_type = pageinfo_type or PageInfo
full_args = dict(args, **kwargs)
first = full_args.get('first')
after = full_args.get('after')
has_previous_page = bool(after)
keys_only = full_args.get('keys_only', False)
batch_size = full_args.get('batch_size', 20)
page_size = first if first else full_args.get('page_size', 20)
start_cursor = ndb.Cursor(urlsafe=after) if after else None
ndb_iter = query.iter(produce_cursors=True, start_cursor=start_cursor, batch_size=batch_size, keys_only=keys_only, projection=query.projection)
edges = []
while len(edges) < page_size:
missing_edges_count = page_size - len(edges)
edges_page = generate_edges_page(ndb_iter, missing_edges_count, keys_only, edge_type)
edges.extend(transform_edges(edges_page, args, context) if transform_edges else edges_page)
if len(edges_page) < missing_edges_count:
break
try:
end_cursor = ndb_iter.cursor_after().urlsafe()
except BadArgumentError:
end_cursor = None
# Construct the connection
return connection_type(
edges=edges,
page_info=pageinfo_type(
start_cursor=start_cursor.urlsafe() if start_cursor else '',
end_cursor=end_cursor,
has_previous_page=has_previous_page,
has_next_page=ndb_iter.has_next()
)
) | def connection_from_ndb_query(query, args=None, connection_type=None, edge_type=None, pageinfo_type=None,
transform_edges=None, context=None, **kwargs) | A simple function that accepts an ndb Query and used ndb QueryIterator object(https://cloud.google.com/appengine/docs/python/ndb/queries#iterators)
to returns a connection object for use in GraphQL.
It uses array offsets as pagination,
so pagination will only work if the array is static. | 2.711318 | 2.002617 | 1.353887 |
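A hypothetical resolver sketch (not from the source): Article, ArticleConnection, and the resolver signature are illustrative placeholders; the cursor handling itself is done by the function above.
#Hypothetical GraphQL resolver paginating an ndb query with connection_from_ndb_query
def resolve_articles(self, info, **args):
    query = Article.query().order(-Article.created)      #Article/created are placeholder model names
    return connection_from_ndb_query(query, args=args, connection_type=ArticleConnection)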
assert iscode(co)
show_module_header(bytecode_version, co, timestamp, out,
is_pypy, magic_int, source_size, header,
show_filename=False)
# store final output stream for case of error
real_out = out or sys.stdout
if co.co_filename and not asm_format:
real_out.write(format_code_info(co, bytecode_version) + "\n")
pass
opc = get_opcode(bytecode_version, is_pypy)
if asm_format:
disco_loop_asm_format(opc, bytecode_version, co, real_out,
{}, set([]))
else:
queue = deque([co])
disco_loop(opc, bytecode_version, queue, real_out,
show_bytes=show_bytes) | def disco(bytecode_version, co, timestamp, out=sys.stdout,
is_pypy=False, magic_int=None, source_size=None,
header=True, asm_format=False, show_bytes=False,
dup_lines=False) | disassembles and deparses a given code block 'co' | 4.877581 | 4.949931 | 0.985384 |
while len(queue) > 0:
co = queue.popleft()
if co.co_name not in ('<module>', '?'):
real_out.write("\n" + format_code_info(co, version) + "\n")
bytecode = Bytecode(co, opc, dup_lines=dup_lines)
real_out.write(bytecode.dis(show_bytes=show_bytes) + "\n")
for c in co.co_consts:
if iscode(c):
queue.append(c)
pass
pass | def disco_loop(opc, version, queue, real_out, dup_lines=False,
show_bytes=False) | Disassembles a queue of code objects. If we discover
another code object which will be found in co_consts, we add
the new code to the queue. Note that code objects are discovered in
the order they are first encountered, which is not amenable to
the format used by a disassembler, where code objects should
be defined before they are used in other functions.
However, this is not recursive and will overall lead to less
memory consumption at run time. | 3.448295 | 3.143339 | 1.097016 |
if version < 3.0:
co = code2compat(co)
else:
co = code3compat(co)
co_name = co.co_name
mapped_name = fn_name_map.get(co_name, co_name)
new_consts = []
for c in co.co_consts:
if iscode(c):
if version < 3.0:
c_compat = code2compat(c)
else:
c_compat = code3compat(c)
disco_loop_asm_format(opc, version, c_compat, real_out,
fn_name_map, all_fns)
m = re.match(".* object <(.+)> at", str(c))
if m:
basename = m.group(1)
if basename != 'module':
mapped_name = code_uniquify(basename, c.co_code)
c_compat.co_name = mapped_name
c_compat.freeze()
new_consts.append(c_compat)
else:
new_consts.append(c)
pass
co.co_consts = new_consts
m = re.match("^<(.+)>$", co.co_name)
if m or co_name in all_fns:
if co_name in all_fns:
basename = co_name
else:
basename = m.group(1)
if basename != 'module':
mapped_name = code_uniquify(basename, co.co_code)
co_name = mapped_name
assert mapped_name not in fn_name_map
fn_name_map[mapped_name] = basename
co.co_name = mapped_name
pass
elif co_name in fn_name_map:
# FIXME: better would be a hash of the co_code
mapped_name = code_uniquify(co_name, co.co_code)
fn_name_map[mapped_name] = co_name
co.co_name = mapped_name
pass
co = co.freeze()
all_fns.add(co_name)
if co.co_name != '<module>' or co.co_filename:
real_out.write("\n" + format_code_info(co, version, mapped_name) + "\n")
bytecode = Bytecode(co, opc, dup_lines=True)
real_out.write(bytecode.dis(asm_format=True) + "\n") | def disco_loop_asm_format(opc, version, co, real_out,
fn_name_map, all_fns) | Produces disassembly in a format more conducive to
automatic assembly by producing inner modules before they are
used by outer ones. Since this is recursive, we'll
use more stack space at runtime. | 2.665036 | 2.674846 | 0.996332 |
filename = check_object_path(filename)
version, timestamp, magic_int, co, is_pypy, source_size = load_module(filename)
if header:
show_module_header(version, co, timestamp, outstream,
is_pypy, magic_int, source_size, show_filename=True)
else:
disco(version, co, timestamp, outstream, is_pypy, magic_int, source_size,
asm_format=asm_format, show_bytes=show_bytes)
# print co.co_filename
return filename, co, version, timestamp, magic_int | def disassemble_file(filename, outstream=sys.stdout,
asm_format=False, header=False, show_bytes=False) | disassemble Python byte-code file (.pyc)
If given a Python source (".py") file, we'll
try to find the corresponding compiled object. | 5.130252 | 5.079196 | 1.010052 |
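A usage sketch (not from the source): disassemble a bytecode file to stdout in the assembler-friendly format; the .pyc path is a placeholder.
import sys
filename, co, version, timestamp, magic_int = disassemble_file(
    'mymodule.cpython-36.pyc', sys.stdout, asm_format=True)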
fp = open(filename, 'rb')
try:
source = fp.read()
try:
if PYTHON_VERSION < 2.6:
co = compile(source, filename, 'exec')
else:
co = compile(source, filename, 'exec', dont_inherit=True)
except SyntaxError:
out.write('>>Syntax error in %s\n' % filename)
raise
finally:
fp.close()
return co | def load_file(filename, out=sys.stdout) | load a Python source file and compile it to byte-code
_load_file(filename: string): code_object
filename: name of file containing Python source code
(normally a .py)
code_object: code_object compiled from this source code
This function does NOT write any file! | 2.87515 | 2.762865 | 1.040641 |
# Some sanity checks
if not osp.exists(filename):
raise ImportError("File name: '%s' doesn't exist" % filename)
elif not osp.isfile(filename):
raise ImportError("File name: '%s' isn't a file" % filename)
elif osp.getsize(filename) < 50:
raise ImportError("File name: '%s (%d bytes)' is too short to be a valid pyc file" % (filename, osp.getsize(filename)))
with open(filename, 'rb') as fp:
return load_module_from_file_object(fp, filename=filename, code_objects=code_objects,
fast_load=fast_load, get_code=get_code) | def load_module(filename, code_objects=None, fast_load=False,
get_code=True) | load a module without importing it.
load_module(filename: string): version, magic_int, code_object
filename: name of file containing Python byte-code object
(normally a .pyc)
code_object: code_object from this file
version: Python major/minor value e.g. 2.7. or 3.4
magic_int: more specific than version. The actual byte code version of the
code object
Parsing the code object takes a bit of parsing time, but
sometimes all you want is the module info, time string, code size,
python version, etc. For that, set get_code=False. | 2.394638 | 2.44345 | 0.980024 |
fp = open(bytecode_path, 'wb')
try:
if PYTHON3:
fp.write(pack('<Hcc', magic_int, b'\r', b'\n'))
else:
fp.write(pack('<Hcc', magic_int, '\r', '\n'))
fp.write(pack('<I', int(time.time())))
if (3000 <= magic_int < 20121):
# In Python 3 you need to write out the size mod 2**32 here
fp.write(pack('<I', filesize))
fp.write(marshal.dumps(code))
finally:
fp.close() | def write_bytecode_file(bytecode_path, code, magic_int, filesize=0) | Write bytecode file _bytecode_path_, with code for having Python
magic_int (i.e. bytecode associated with some version of Python) | 3.693341 | 3.740797 | 0.987314 |
Usage_short = % (program, program)
if not (2.5 <= PYTHON_VERSION <= 3.8):
sys.stderr.write("This works on Python version 2.5..3.8; have %s\n" % PYTHON_VERSION)
if not len(files):
sys.stderr.write("No file(s) given..\n")
print(Usage_short, file=sys.stderr)
sys.exit(1)
for path in files:
# Some sanity checks
if not osp.exists(path):
sys.stderr.write("File name: '%s' doesn't exist\n" % path)
continue
elif not osp.isfile(path):
sys.stderr.write("File name: '%s' isn't a file\n" % path)
continue
elif osp.getsize(path) < 50:
sys.stderr.write("File name: '%s (%d bytes)' is too short to be a valid pyc file\n" % (path, osp.getsize(path)))
continue
disassemble_file(path, sys.stdout, asm, header, show_bytes)
return | def main(asm, show_bytes, header, files) | Disassembles a Python bytecode file.
We handle bytecode for virtually every release of Python and some releases of PyPy.
The version of Python in the bytecode doesn't have to be the same version as
the Python interpreter used to run this program. For example, you can disassemble Python 3.6.1
bytecode from Python 2.7.13 and vice versa. | 3.708082 | 3.642883 | 1.017898 |
if PYTHON3:
f.write(bytes([x & 0xff]))
f.write(bytes([(x >> 8) & 0xff]))
f.write(bytes([(x >> 16) & 0xff]))
f.write(bytes([(x >> 24) & 0xff]))
else:
f.write(chr( x & 0xff))
f.write(chr((x >> 8) & 0xff))
f.write(chr((x >> 16) & 0xff))
f.write(chr((x >> 24) & 0xff)) | def wr_long(f, x) | Internal; write a 32-bit int to a file in little-endian order. | 1.487176 | 1.485291 | 1.00127 |
# Atomically write the pyc/pyo file. Issue #13146.
# id() is used to generate a pseudo-random filename.
path_tmp = '%s.%s' % (filename, id(filename))
fc = None
try:
fc = open(path_tmp, 'wb')
if PYTHON3:
fc.write(bytes([0, 0, 0, 0]))
else:
fc.write('\0\0\0\0')
wr_long(fc, timestamp)
marshal.dump(codeobject, fc)
fc.flush()
fc.seek(0, 0)
fc.write(magic)
fc.close()
os.rename(path_tmp, filename)
except OSError:
try:
os.unlink(path_tmp)
except OSError:
pass
raise
finally:
if fc: fc.close() | def dump_compile(codeobject, filename, timestamp, magic) | Write code object as a byte-compiled file
Arguments:
codeobject: code object
filename: bytecode file to write
timestamp: timestamp to put in file
magic: Python bytecode magic | 3.100104 | 3.289735 | 0.942357 |
tempdir = tempfile.gettempdir()
source_filename = os.path.join(tempdir, "testing.py")
if not os.path.exists(real_source_filename):
return
try:
f = open(real_source_filename, 'U')
except:
return
codestring = f.read()
f.close()
codeobject1 = compile(codestring, source_filename,'exec')
(version, timestamp, magic_int, codeobject2, is_pypy,
source_size) = load_module(real_bytecode_filename)
# A hack for PyPy 3.2
if magic_int == 3180+7:
magic_int = 48
assert MAGIC == magics.int2magic(magic_int), \
("magic_int %d vs %d in %s/%s" %
(magic_int, magics.magic2int(MAGIC), os.getcwd(), real_bytecode_filename))
bytecode_filename1 = os.path.join(tempdir, "testing1.pyc")
dump_compile(codeobject1, bytecode_filename1, timestamp, MAGIC)
(version, timestamp, magic_int, codeobject3, is_pypy,
source_size) = load_module(real_bytecode_filename, fast_load=not is_pypy)
# compare_code(codeobject1, codeobject2)
# compare_code(codeobject2, codeobject3)
bytecode_filename2 = os.path.join(tempdir, "testing2.pyc")
dump_compile(codeobject1, bytecode_filename2, timestamp, magics.int2magic(magic_int))
compare_bytecode_files(bytecode_filename1, bytecode_filename2)
return | def verify_file(real_source_filename, real_bytecode_filename) | Compile *real_source_filename* using
the running Python interpreter. Then
write bytecode out to a new place again using
Python's routines.
Next load it in using two of our routines.
Compare that the code objects are equal.
Next write out the bytecode (using the same Python
bytecode writing routine as in step 1).
Finally compare the bytecode files. | 3.277309 | 3.269371 | 1.002428 |
if (sys.version_info >= (3, 0)):
return struct.pack('<Hcc', magic_int, bytes('\r', 'utf-8'), bytes('\n', 'utf-8'))
else:
return struct.pack('<Hcc', magic_int, '\r', '\n') | def int2magic(magic_int) | Given a magic int like 62211, compute the corresponding magic byte string
b'\x03\xf3\r\n'.
See also the dictionary magic2nt2version which has these values precomputed
for known magic_int's. | 2.88246 | 2.983057 | 0.966277 |
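The docstring's own example, checked with the standard library: packing 62211 as an unsigned short followed by CR LF gives the byte string quoted above, and the first two bytes unpack back to the magic int.
import struct
magic = struct.pack('<Hcc', 62211, b'\r', b'\n')
assert magic == b'\x03\xf3\r\n'                    # as described above
assert struct.unpack('<H', magic[:2])[0] == 62211  # and back to the magic int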
if version.endswith('pypy'):
version = version[:-len('pypy')]
if version in magics:
magic = magics[version]
for v, m in list(magics.items()):
if m == magic:
try:
return float(canonic_python_version[v])
except:
try:
m = re.match(r'^(\d\.)(\d+)\.(\d+)$', v)
if m:
return float(m.group(1)+m.group(2))
except:
pass
pass
pass
pass
raise RuntimeError("Can't find a valid Python version for version %s"
% version)
return | def py_str2float(version) | Convert a Python version into a two-digit 'canonic' floating-point number,
e.g. 2.5, 3.6.
A runtime error is raised if "version" is not found.
Note that there can be several strings that map to a single floating-
point number. For example 3.2a1, 3.2.0, 3.2.2, 3.2.6 among others all map to
3.2. | 3.87094 | 3.915064 | 0.98873 |
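The table lookup itself needs the magics dictionaries, but the regex fallback in the code above can be illustrated on its own; a small sketch of how a micro version collapses to the two-digit float:
import re
m = re.match(r'^(\d\.)(\d+)\.(\d+)$', '3.2.6')
if m:
    canonic = float(m.group(1) + m.group(2))
    assert canonic == 3.2   # 3.2a1, 3.2.0, 3.2.2, 3.2.6 all map here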
vers_str = '.'.join([str(v) for v in version_info[0:3]])
if version_info[3] != 'final':
vers_str += '.' + ''.join([str(i) for i in version_info[3:]])
if IS_PYPY:
vers_str += 'pypy'
else:
try:
import platform
platform = platform.python_implementation()
if platform in ('Jython', 'Pyston'):
vers_str += platform
pass
except ImportError:
# Python may be too old, e.g. < 2.6 or implementation may
# just not have platform
pass
except AttributeError:
pass
return py_str2float(vers_str) | def sysinfo2float(version_info=sys.version_info) | Convert a sys.version_info-compatible list into a 'canonic'
floating-point number that can then be used to look up a
magic number. Note that this can only be used for released versions
of C Python, not interim development versions, since we can't
represent that as a floating-point number.
For handling PyPy, Pyston, Jython, etc. and interim versions of
C Python, use sysinfo2magic. | 3.690041 | 3.691404 | 0.999631 |
# FIXME: DRY with sysinfo2float()
vers_str = '.'.join([str(v) for v in version_info[0:3]])
if version_info[3] != 'final':
vers_str += ''.join([str(v) for v in version_info[3:]])
if IS_PYPY:
vers_str += 'pypy'
else:
try:
import platform
platform = platform.python_implementation()
if platform in ('Jython', 'Pyston'):
vers_str += platform
pass
except ImportError:
# Python may be too old, e.g. < 2.6 or implementation may
# just not have platform
pass
return magics[vers_str] | def sysinfo2magic(version_info=sys.version_info) | Convert a sys.version_info-compatible list into a 'canonic'
version string and look up the corresponding bytecode magic value.
Note that this can raise an exception. | 4.014473 | 3.832009 | 1.047616 |
if version:
l['python_version'] = version
l['is_pypy'] = is_pypy
l['cmp_op'] = cmp_op
l['HAVE_ARGUMENT'] = HAVE_ARGUMENT
if version <= 3.5:
l['findlinestarts'] = findlinestarts
l['findlabels'] = findlabels
l['get_jump_targets'] = get_jump_targets
l['get_jump_target_maps'] = get_jump_target_maps
else:
l['findlinestarts'] = wordcode.findlinestarts
l['findlabels'] = wordcode.findlabels
l['get_jump_targets'] = wordcode.get_jump_targets
l['get_jump_target_maps'] = wordcode.get_jump_target_maps
l['opmap'] = deepcopy(from_mod.opmap)
l['opname'] = deepcopy(from_mod.opname)
for field in fields2copy:
l[field] = list(getattr(from_mod, field)) | def init_opdata(l, from_mod, version=None, is_pypy=False) | Sets up a number of the structures found in Python's
opcode.py. Python opcode.py routines assign attributes to modules.
In order to do this in a modular way here, the local dictionary
for the module is passed. | 2.527539 | 2.432622 | 1.039018 |
# opname is an array, so we need to keep the position in there.
l['opname'][op] = '<%s>' % op
if op in l['hasconst']:
l['hasconst'].remove(op)
if op in l['hascompare']:
l['hascompare'].remove(op)
if op in l['hascondition']:
l['hascondition'].remove(op)
if op in l['hasfree']:
l['hasfree'].remove(op)
if op in l['hasjabs']:
l['hasjabs'].remove(op)
if op in l['hasname']:
l['hasname'].remove(op)
if op in l['hasjrel']:
l['hasjrel'].remove(op)
if op in l['haslocal']:
l['haslocal'].remove(op)
if op in l['hasname']:
l['hasname'].remove(op)
if op in l['hasnargs']:
l['hasnargs'].remove(op)
if op in l['hasvargs']:
l['hasvargs'].remove(op)
if op in l['nofollow']:
l['nofollow'].remove(op)
assert l['opmap'][name] == op
del l['opmap'][name] | def rm_op(l, name, op) | Remove an opcode. This is used when basing a new Python release off
of another one, and there is an opcode that is in the old release
that was removed in the new release.
We are pretty aggressive about removing traces of the op. | 2.122535 | 2.024889 | 1.048223 |
return dict([(k.replace('+', '_'), v)
for (k, v) in opmap.items()]) | def fix_opcode_names(opmap) | Python stupidly named some OPCODES with a + which prevents using opcode name
directly as an attribute, e.g. SLICE+3. So we turn that into SLICE_3 so we
can then use opcode_23.SLICE_3. Later Pythons fix this. | 5.223275 | 3.304771 | 1.580526 |
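A tiny illustration of the renaming; the opcode numbers below are illustrative rather than taken from any particular opcode table:
opmap = {'SLICE+3': 33, 'LOAD_CONST': 100}
fixed = dict([(k.replace('+', '_'), v) for (k, v) in opmap.items()])
assert fixed == {'SLICE_3': 33, 'LOAD_CONST': 100}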
# Python 2.6 reports 2.6000000000000001
if (abs(PYTHON_VERSION - l['python_version']) <= 0.01
and IS_PYPY == l['is_pypy']):
try:
import dis
opmap = fix_opcode_names(dis.opmap)
# print(set(opmap.items()) - set(l['opmap'].items()))
# print(set(l['opmap'].items()) - set(opmap.items()))
assert all(item in opmap.items() for item in l['opmap'].items())
assert all(item in l['opmap'].items() for item in opmap.items())
except:
import sys | def opcode_check(l) | When the version of Python we are running happens
to have the same opcode set as the opcode module we are
importing, we perform checks to make sure our opcode
set matches exactly. | 3.524179 | 3.236821 | 1.088778 |
op2name = {}
for k in opmap.keys():
op2name[opmap[k]] = k
for i in sorted(op2name.keys()):
print("%-3s %s" % (str(i), op2name[i])) | def dump_opcodes(opmap) | Utility for dumping opcodes | 2.65519 | 2.644285 | 1.004124 |
global internStrings, internObjects
internStrings = []
internObjects = []
seek_pos = fp.tell()
# Do a sanity check. Is this a code type?
b = ord(fp.read(1))
if (b & 0x80):
b = b & 0x7f
c = chr(b)
if c != 'c':
raise TypeError("File %s doesn't smell like Python bytecode:\n"
"expecting code indicator 'c'; got '%s'"
% (fp.name, c))
fp.seek(seek_pos)
return load_code_internal(fp, magic_int, code_objects=code_objects) | def load_code(fp, magic_int, code_objects={}) | marshal.load() written in Python. When the Python bytecode magic loaded is the
same as the magic of the running Python interpreter, we can simply use the
Python-supplied marshal.load().
However we need to use this when versions are different since the internal
code structures are different. Sigh. | 5.187189 | 5.339616 | 0.971454 |
names = []
result = "0x%08x" % flags
for i in range(32):
flag = 1 << i
if flags & flag:
names.append(COMPILER_FLAG_NAMES.get(flag, hex(flag)))
flags ^= flag
if not flags:
break
else:
names.append(hex(flags))
names.reverse()
return "%s (%s)" % (result, " | ".join(names)) | def pretty_flags(flags) | Return pretty representation of code flags. | 3.138873 | 2.93161 | 1.070699 |
try:
c = compile(source, name, 'eval')
except SyntaxError:
c = compile(source, name, 'exec')
return c | def _try_compile(source, name) | Attempts to compile the given source, first as an expression and
then as a statement if the first approach fails.
Utility function to accept strings in functions that otherwise
expect code objects. | 2.682008 | 3.119057 | 0.859878 |
if hasattr(x, '__func__'): # Method
x = x.__func__
if hasattr(x, '__code__'): # Function
x = x.__code__
if hasattr(x, 'gi_code'): # Generator
x = x.gi_code
if isinstance(x, str): # Source code
x = _try_compile(x, "<disassembly>")
if hasattr(x, 'co_code'): # Code object
return x
raise TypeError("don't know how to disassemble %s objects" %
type(x).__name__) | def get_code_object(x) | Helper to handle methods, functions, generators, strings and raw code objects | 2.729027 | 2.451177 | 1.113353 |
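The attribute chain the helper walks can be checked directly on the running interpreter; a minimal sketch:
class C:
    def m(self):
        return 42
def g():
    yield 1
assert C().m.__func__.__code__ is C.m.__code__   # method -> function -> code object
assert g().gi_code is g.__code__                 # generator -> code object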
if file is None:
print(code_info(co, version))
else:
file.write(code_info(co, version) + '\n') | def show_code(co, version, file=None) | Print details of methods, functions, or code to *file*.
If *file* is not provided, the output is printed on stdout. | 2.980903 | 3.27473 | 0.910274 |
return inspect.iscode(obj) or isinstance(obj, Code3) or isinstance(obj, Code2) | def iscode(obj) | A replacement for inspect.iscode() which we can't use because we may be
using a different version of Python than the version of Python used
in creating the byte-compiled objects. Here, the code types may mismatch. | 6.152288 | 5.485279 | 1.1216 |
if not isinstance(code.co_lnotab, str):
byte_increments = list(code.co_lnotab[0::2])
line_increments = list(code.co_lnotab[1::2])
else:
byte_increments = [ord(c) for c in code.co_lnotab[0::2]]
line_increments = [ord(c) for c in code.co_lnotab[1::2]]
lastlineno = None
lineno = code.co_firstlineno
offset = 0
for byte_incr, line_incr in zip(byte_increments, line_increments):
if byte_incr:
if (lineno != lastlineno or
(dup_lines and 0 < byte_incr < 255)):
yield (offset, lineno)
lastlineno = lineno
pass
offset += byte_incr
pass
lineno += line_incr
if (lineno != lastlineno or
(dup_lines and 0 < byte_incr < 255)):
yield (offset, lineno) | def findlinestarts(code, dup_lines=False) | Find the offsets in a byte code which are the starts of lines in the source.
Generate pairs (offset, lineno) as described in Python/compile.c. | 2.055186 | 1.983878 | 1.035944 |
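For bytecode produced by the running interpreter, the standard library provides the same generator, which is handy for sanity-checking the cross-version routine above (exact offsets and lines vary by version, and CPython 3.10 and later derive them from co_linetable rather than co_lnotab):
import dis
def sample(a):
    b = a + 1
    return b
print(list(dis.findlinestarts(sample.__code__)))   # e.g. [(0, <first body line>), ...]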
if len(linestarts) == 0 or offset < linestarts[0][0]:
return 0
low = 0
high = len(linestarts) - 1
mid = (low + high + 1) // 2
while low <= high:
if linestarts[mid][0] > offset:
high = mid - 1
elif linestarts[mid][0] < offset:
low = mid + 1
else:
return linestarts[mid][1]
mid = (low + high + 1) // 2
pass
# Not found. Return closest position below
if mid >= len(linestarts):
return linestarts[len(linestarts)-1][1]
return linestarts[high][1] | def offset2line(offset, linestarts) | linestarts is expected to be a *list* of (offset, line number)
where both offset and line number are in increasing order.
Return the closest line number at or below the offset.
If offset is less than the first offset given in linestarts,
return line number 0. | 1.927262 | 1.907046 | 1.010601 |
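The same "closest offset at or below" lookup can be expressed with bisect; a small self-contained sketch using made-up linestarts data:
from bisect import bisect_right
linestarts = [(0, 1), (6, 2), (14, 4)]   # (offset, line number), both increasing
def lookup(offset):
    i = bisect_right([off for off, _ in linestarts], offset) - 1
    return linestarts[i][1] if i >= 0 else 0
assert lookup(7) == 2    # closest start at or below offset 7
assert lookup(-1) == 0   # before the first entry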
offsets = []
for offset, op, arg in unpack_opargs_bytecode(code, opc):
if arg is not None:
jump_offset = -1
if op in opc.JREL_OPS:
op_len = op_size(op, opc)
jump_offset = offset + op_len + arg
elif op in opc.JABS_OPS:
jump_offset = arg
if jump_offset >= 0:
if jump_offset not in offsets:
offsets.append(jump_offset)
return offsets | def get_jump_targets(code, opc) | Returns a list of instruction offsets in the supplied bytecode
which are the targets of some sort of jump instruction. | 3.352132 | 3.129688 | 1.071075 |
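When the bytecode comes from the running interpreter, the standard library can report the same information; the routine above is what you need when the bytecode was produced by a different Python version. A hedged sketch:
import dis
def f(x):
    if x:
        return 1
    return 2
targets = [ins.offset for ins in dis.get_instructions(f) if ins.is_jump_target]
print(targets)   # offsets that some jump instruction targets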
offset2prev = {}
prev_offset = -1
for offset, op, arg in unpack_opargs_bytecode(code, opc):
if prev_offset >= 0:
prev_list = offset2prev.get(offset, [])
prev_list.append(prev_offset)
offset2prev[offset] = prev_list
if op in opc.NOFOLLOW:
prev_offset = -1
else:
prev_offset = offset
if arg is not None:
jump_offset = -1
if op in opc.JREL_OPS:
op_len = op_size(op, opc)
jump_offset = offset + op_len + arg
elif op in opc.JABS_OPS:
jump_offset = arg
if jump_offset >= 0:
prev_list = offset2prev.get(jump_offset, [])
prev_list.append(offset)
offset2prev[jump_offset] = prev_list
return offset2prev | def get_jump_target_maps(code, opc) | Returns a dictionary where the key is an offset and the values are
a list of instruction offsets that can be executed just before that
instruction. This includes jump instructions as well as non-jump
instructions. Therefore, the keys of the dictionary are reachable
instructions. The values of the dictionary may be useful in control-flow
analysis. | 2.552095 | 2.425209 | 1.05232 |
argval = const_index
if const_list is not None:
argval = const_list[const_index]
# float values nan and inf are not directly representable in Python at least
# before 3.5 and even there it is via a library constant.
# So we will canonicalize their representation as float('nan') and float('inf')
if isinstance(argval, float) and str(argval) in frozenset(['nan', '-nan', 'inf', '-inf']):
return argval, "float('%s')" % argval
return argval, repr(argval) | def _get_const_info(const_index, const_list) | Helper to get optional details about const references
Returns the dereferenced constant and its repr if the constant
list is defined.
Otherwise returns the constant index and its repr(). | 6.234581 | 6.236559 | 0.999683 |
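A quick illustration of why nan and inf are canonicalized: their repr is not a valid Python literal, while the wrapped form always evaluates back to the value:
val = float('nan')
print(repr(val))             # 'nan', which is not a valid Python literal
print("float('%s')" % val)   # "float('nan')", the canonical form used above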
argval = name_index
if (name_list is not None
# PyPy seems to "optimize" out constant names,
# so we need to check for that:
and name_index < len(name_list)):
argval = name_list[name_index]
argrepr = argval
else:
argrepr = repr(argval)
return argval, argrepr | def _get_name_info(name_index, name_list) | Helper to get optional details about named references
Returns the dereferenced name as both value and repr if the name
list is defined.
Otherwise returns the name index and its repr(). | 6.079601 | 5.814693 | 1.045558 |
if op < opc.HAVE_ARGUMENT:
return 2 if opc.version >= 3.6 else 1
else:
return 2 if opc.version >= 3.6 else 3 | def instruction_size(op, opc) | For a given opcode, `op`, in opcode module `opc`,
return the size, in bytes, of an `op` instruction.
This is the size of the opcode (1 byte) and any operand it has. In
Python before version 3.6 this will be either 1 or 3 bytes. In
Python 3.6 or later, it is 2 bytes or a "word". | 4.252058 | 3.53591 | 1.202536 |
bc = []
for i, opcodes in enumerate(l):
opname = opcodes[0]
operands = opcodes[1:]
if opname not in opc.opname:
raise TypeError(
"error at item %d [%s, %s], opcode not valid" %
(i, opname, operands))
opcode = opc.opmap[opname]
bc.append(opcode)
print(opname, operands)
gen = (j for j in operands if operands)
for j in gen:
k = (consts if opcode in opc.CONST_OPS else varnames).index(j)
if k == -1:
raise TypeError(
"operand %s [%s, %s], not found in names" %
(i, opname, operands))
else:
bc += num2code(k)
pass
pass
pass
if opc.python_version < 3.0:
return reduce(lambda a, b: a + chr(b), bc, '')
else:
if PYTHON3:
return bytes(bc)
else:
return bytes(bytearray(bc)) | def list2bytecode(l, opc, varnames, consts) | Convert a list/tuple of lists/tuples to bytecode
_names_ contains a list of name objects | 3.969908 | 3.913597 | 1.014389 |