code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
iv = derive_key(hashlib.sha1, PURPOSE_IV_MATERIAL, password, salt, iteration_count, 16) key = derive_key(hashlib.sha1, PURPOSE_KEY_MATERIAL, password, salt, iteration_count, 256//8) encrypted_data = bytearray(encrypted_data) encrypted_data_len = len(encrypted_data) if encrypted_data_len % 16 != 0: raise BadDataLengthException("encrypted data length is not a multiple of 16 bytes") plaintext = bytearray() # slow and dirty CBC decrypt from twofish import Twofish cipher = Twofish(key) last_cipher_block = bytearray(iv) for block_offset in range(0, encrypted_data_len, 16): cipher_block = encrypted_data[block_offset:block_offset+16] plaintext_block = xor_bytearrays(bytearray(cipher.decrypt(bytes(cipher_block))), last_cipher_block) plaintext.extend(plaintext_block) last_cipher_block = cipher_block plaintext = strip_pkcs7_padding(plaintext, 16) return bytes(plaintext)
def decrypt_PBEWithSHAAndTwofishCBC(encrypted_data, password, salt, iteration_count)
Decrypts PBEWithSHAAndTwofishCBC, assuming PKCS#12-generated PBE parameters. (Not explicitly defined as an algorithm in RFC 7292, but defined here nevertheless because of the assumption of PKCS#12 parameters).
2.798462
2.975789
0.94041
iv = derive_key(hashlib.sha1, PURPOSE_IV_MATERIAL, password, salt, iteration_count, 16) key = derive_key(hashlib.sha1, PURPOSE_KEY_MATERIAL, password, salt, iteration_count, 256//8) plaintext_data = add_pkcs7_padding(plaintext_data, 16) plaintext_data = bytearray(plaintext_data) plaintext_len = len(plaintext_data) assert plaintext_len % 16 == 0 ciphertext = bytearray() from twofish import Twofish cipher = Twofish(key) last_cipher_block = bytearray(iv) for block_offset in range(0, plaintext_len, 16): plaintext_block = plaintext_data[block_offset:block_offset+16] cipher_block = bytearray(cipher.encrypt(bytes(xor_bytearrays(plaintext_block, last_cipher_block)))) ciphertext.extend(cipher_block) last_cipher_block = cipher_block return bytes(ciphertext)
def encrypt_PBEWithSHAAndTwofishCBC(plaintext_data, password, salt, iteration_count)
Encrypts a value with PBEWithSHAAndTwofishCBC, assuming PKCS#12-generated PBE parameters. (Not explicitly defined as an algorithm in RFC 7292, but defined here nevertheless because of the assumption of PKCS#12 parameters).
2.506671
2.705597
0.926476
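The two entries above are inverses of each other. A minimal round-trip sketch, assuming both functions are importable from the pyjks rfc7292 module (module path is an assumption) and that the third-party 'twofish' package is installed; password encoding is assumed to be handled inside the module's derive_key:

    import os
    from jks import rfc7292  # assumed module path for the two functions above

    password = "s3cr3t"
    salt = os.urandom(20)
    iteration_count = 2048

    # Encrypt then decrypt; the plaintext should survive the round trip.
    ct = rfc7292.encrypt_PBEWithSHAAndTwofishCBC(b"key material", password, salt, iteration_count)
    assert rfc7292.decrypt_PBEWithSHAAndTwofishCBC(ct, password, salt, iteration_count) == b"key material"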
password_bytes = password_str.encode('utf-16be') # Java chars are UTF-16BE code units iv = os.urandom(20) key = bytearray(key) xoring = zip(key, _jks_keystream(iv, password_bytes)) data = bytearray([d^k for d,k in xoring]) check = hashlib.sha1(bytes(password_bytes + key)).digest() return bytes(iv + data + check)
def jks_pkey_encrypt(key, password_str)
Encrypts the private key with the password protection algorithm used by JKS keystores.
5.438236
5.311666
1.023829
password_bytes = password_str.encode('utf-16be') # Java chars are UTF-16BE code units data = bytearray(data) iv, data, check = data[:20], data[20:-20], data[-20:] xoring = zip(data, _jks_keystream(iv, password_bytes)) key = bytearray([d^k for d,k in xoring]) if hashlib.sha1(bytes(password_bytes + key)).digest() != check: raise BadHashCheckException("Bad hash check on private key; wrong password?") key = bytes(key) return key
def jks_pkey_decrypt(data, password_str)
Decrypts a private key protected with the password protection algorithm used by JKS keystores. The JDK sources state that 'the password is expected to be in printable ASCII', though this does not appear to be enforced; the password is converted into bytes simply by taking each individual Java char and appending its raw 2-byte representation. See sun/security/provider/KeyProtector.java in the JDK sources.
5.800383
5.600222
1.035742
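A hedged round-trip of the two JKS key-protection entries above; the module path jks.sun_crypto is an assumption, and the key bytes are a stand-in:

    from jks import sun_crypto  # assumed location of jks_pkey_encrypt / jks_pkey_decrypt

    plain_key = b"\x30\x82\x01\x00" + b"\x00" * 32   # stand-in for DER-encoded key bytes
    protected = sun_crypto.jks_pkey_encrypt(plain_key, "changeit")
    assert sun_crypto.jks_pkey_decrypt(protected, "changeit") == plain_key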
cur = iv while 1: xhash = hashlib.sha1(bytes(password + cur)) # hashlib.sha1 in python 2.6 does not accept a bytearray argument cur = bytearray(xhash.digest()) # make sure we iterate over ints in both Py2 and Py3 for byte in cur: yield byte
def _jks_keystream(iv, password)
Helper keystream generator for jks_pkey_decrypt
8.740587
8.335293
1.048624
key, iv = _jce_pbe_derive_key_and_iv(password, salt, iteration_count) from Cryptodome.Cipher import DES3 des3 = DES3.new(key, DES3.MODE_CBC, IV=iv) padded = des3.decrypt(data) result = strip_pkcs5_padding(padded) return result
def jce_pbe_decrypt(data, password, salt, iteration_count)
Decrypts Sun's custom PBEWithMD5AndTripleDES password-based encryption scheme. It is based on password-based encryption as defined by the PKCS #5 standard, except that it uses triple DES instead of DES. Here's how this algorithm works: 1. Create random salt and split it in two halves. If the two halves are identical, invert(*) the first half. 2. Concatenate password with each of the halves. 3. Digest each concatenation with c iterations, where c is the iterationCount. Concatenate the output from each digest round with the password, and use the result as the input to the next digest operation. The digest algorithm is MD5. 4. After c iterations, use the 2 resulting digests as follows: The 16 bytes of the first digest and the 1st 8 bytes of the 2nd digest form the triple DES key, and the last 8 bytes of the 2nd digest form the IV. (*) Not actually an inversion operation due to an implementation bug in com.sun.crypto.provider.PBECipherCore. See _jce_invert_salt_half for details. See http://grepcode.com/file/repository.grepcode.com/java/root/jdk/openjdk/6-b27/com/sun/crypto/provider/PBECipherCore.java#PBECipherCore.deriveCipherKey%28java.security.Key%29
2.543794
3.013757
0.844061
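The jce_pbe_decrypt entry above relies on a _jce_pbe_derive_key_and_iv helper that is not shown. The following is only a sketch of what that helper might look like, reconstructed from the derivation steps listed in the docstring; the real signature, salt handling, and password encoding in the module may differ:

    import hashlib

    def _jce_pbe_derive_key_and_iv(password, salt, iteration_count):
        # Sketch only. salt is assumed to be 8 bytes; password is assumed to
        # already be encoded to bytes.
        salt = bytearray(salt)
        halves = [bytes(salt[:4]), bytes(salt[4:])]
        # If the two halves are identical, "invert" the first half
        # (see the _jce_invert_salt_half entry below for the actual, buggy operation).
        if halves[0] == halves[1]:
            halves[0] = _jce_invert_salt_half(halves[0])

        derived = b""
        for half in halves:
            # Iterated MD5: round 1 digests (salt_half + password); each later
            # round digests (previous_digest + password).
            digest = half
            for _ in range(iteration_count):
                digest = hashlib.md5(digest + password).digest()
            derived += digest

        key = derived[:24]    # 16 bytes of digest 1 + first 8 bytes of digest 2 -> 3DES key
        iv = derived[24:32]   # last 8 bytes of digest 2 -> IV
        return key, iv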
salt = bytearray(salt_half) salt[2] = salt[1] salt[1] = salt[0] salt[0] = salt[3] return bytes(salt)
def _jce_invert_salt_half(salt_half)
JCE's proprietary PBEWithMD5AndTripleDES algorithm as described in the OpenJDK sources calls for inverting the first salt half if the two halves are equal. However, there appears to be a bug in the original JCE implementation of com.sun.crypto.provider.PBECipherCore causing it to perform a different operation: for (i=0; i<2; i++) { byte tmp = salt[i]; salt[i] = salt[3-i]; salt[3-1] = tmp; // <-- typo '1' instead of 'i' } The result is transforming [a,b,c,d] into [d,a,b,d] instead of [d,c,b,a] (verified going back to the original JCE 1.2.2 release for JDK 1.2). See source (or bytecode) of com.sun.crypto.provider.PBECipherCore (JRE <= 7) and com.sun.crypto.provider.PBES1Core (JRE 8+):
2.832021
3.25985
0.868758
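A tiny worked example of the buggy swap described above: the first salt half [a, b, c, d] becomes [d, a, b, d] rather than the true reverse [d, c, b, a] (using the function from the entry above):

    # [a, b, c, d] -> [d, a, b, d]
    assert _jce_invert_salt_half(b"abcd") == b"dabd"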
bitlist = list(bitstr) bits_missing = (8 - len(bitlist) % 8) % 8 bitlist = [0]*bits_missing + bitlist # pad with 0 bits to a multiple of 8 result = bytearray() for i in range(0, len(bitlist), 8): byte = 0 for j in range(8): byte = (byte << 1) | bitlist[i+j] result.append(byte) return bytes(result)
def bitstring_to_bytes(bitstr)
Converts a pyasn1 univ.BitString instance to byte sequence of type 'bytes'. The bit string is interpreted big-endian and is left-padded with 0 bits to form a multiple of 8.
2.169989
2.081795
1.042364
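A small example of the conversion above, using a plain tuple of bits in place of a pyasn1 univ.BitString (the function only iterates over the bit values, so a tuple stands in fine for illustration):

    bits = (1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1)   # 12 bits
    # Left-padded with four 0 bits -> 0000 1010 1111 0001 -> b"\x0a\xf1"
    assert bitstring_to_bytes(bits) == b"\x0a\xf1"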
if len(m) < block_size or len(m) % block_size != 0: raise BadPaddingException("Unable to strip padding: invalid message length") m = bytearray(m) # py2/3 compatibility: always returns individual indexed elements as ints last_byte = m[-1] # the <last_byte> bytes of m must all have value <last_byte>, otherwise something's wrong if (last_byte <= 0 or last_byte > block_size) or (m[-last_byte:] != bytearray([last_byte])*last_byte): raise BadPaddingException("Unable to strip padding: invalid padding found") return bytes(m[:-last_byte])
def strip_pkcs7_padding(m, block_size)
Same as PKCS#5 padding, except generalized to block sizes other than 8.
4.544801
4.520961
1.005273
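PKCS#7 pads a message with N trailing bytes that each have the value N; the entry above validates and removes them. A small example with a 16-byte block size, using the function from the entry above:

    padded = b"hello world" + b"\x05" * 5   # 11 data bytes + 5 padding bytes of value 0x05
    assert strip_pkcs7_padding(padded, 16) == b"hello world"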
with open(filename, 'rb') as file: input_bytes = file.read() ret = cls.loads(input_bytes, store_password, try_decrypt_keys=try_decrypt_keys) return ret
def load(cls, filename, store_password, try_decrypt_keys=True)
Convenience wrapper function; reads the contents of the given file and passes it through to :func:`loads`. See :func:`loads`.
2.828402
2.675895
1.056993
with open(filename, 'wb') as file: keystore_bytes = self.saves(store_password) file.write(keystore_bytes)
def save(self, filename, store_password)
Convenience wrapper function; calls :func:`saves` and writes the resulting content to a file.
4.42176
3.55075
1.245303
size = b2.unpack_from(data, pos)[0] pos += 2 try: return data[pos:pos+size].decode('utf-8'), pos+size except (UnicodeEncodeError, UnicodeDecodeError) as e: raise BadKeystoreFormatException(("Failed to read %s, contains bad UTF-8 data: %s" % (kind, str(e))) if kind else \ ("Encountered bad UTF-8 data: %s" % str(e)))
def _read_utf(cls, data, pos, kind=None)
:param kind: Optional; a human-friendly identifier for the kind of UTF-8 data we're loading (e.g. is it a keystore alias? an algorithm identifier? something else?). Used to construct more informative exception messages when a decoding error occurs.
4.243786
3.890789
1.090726
return dict([(a, e) for a, e in self.entries.items() if isinstance(e, BksSealedKeyEntry)])
def sealed_keys(self)
A subset of the :attr:`entries` dictionary, filtered down to only those entries of type :class:`BksSealedKeyEntry`.
10.217973
3.942284
2.591892
return dict([(a, e) for a, e in self.entries.items() if isinstance(e, BksKeyEntry)])
def plain_keys(self)
A subset of the :attr:`entries` dictionary, filtered down to only those entries of type :class:`BksKeyEntry`.
10.875808
4.151372
2.61981
try: pos = 0 version = b4.unpack_from(data, pos)[0]; pos += 4 if version not in [1,2]: raise UnsupportedKeystoreVersionException("Unsupported BKS keystore version; only V1 and V2 supported, found v"+repr(version)) salt, pos = cls._read_data(data, pos) iteration_count = b4.unpack_from(data, pos)[0]; pos += 4 store_type = "bks" entries, size = cls._load_bks_entries(data[pos:], store_type, store_password, try_decrypt_keys=try_decrypt_keys) hmac_fn = hashlib.sha1 hmac_digest_size = hmac_fn().digest_size hmac_key_size = hmac_digest_size*8 if version != 1 else hmac_digest_size hmac_key = rfc7292.derive_key(hmac_fn, rfc7292.PURPOSE_MAC_MATERIAL, store_password, salt, iteration_count, hmac_key_size//8) store_data = data[pos:pos+size] store_hmac = data[pos+size:pos+size+hmac_digest_size] if len(store_hmac) != hmac_digest_size: raise BadKeystoreFormatException("Bad HMAC size; found %d bytes, expected %d bytes" % (len(store_hmac), hmac_digest_size)) hmac = HMAC.new(hmac_key, digestmod=SHA) hmac.update(store_data) computed_hmac = hmac.digest() if store_hmac != computed_hmac: raise KeystoreSignatureException("Hash mismatch; incorrect keystore password?") return cls(store_type, entries, version=version) except struct.error as e: raise BadKeystoreFormatException(e)
def loads(cls, data, store_password, try_decrypt_keys=True)
See :meth:`jks.jks.KeyStore.loads`. :param bytes data: Byte string representation of the keystore to be loaded. :param str password: Keystore password string :param bool try_decrypt_keys: Whether to automatically try to decrypt any encountered key entries using the same password as the keystore password. :returns: A loaded :class:`BksKeyStore` instance, if the keystore could be successfully parsed and the supplied store password is correct. If the ``try_decrypt_keys`` parameter was set to ``True``, any keys that could be successfully decrypted using the store password have already been decrypted; otherwise, no attempt to decrypt any key entries is made. :raises BadKeystoreFormatException: If the keystore is malformed in some way :raises UnsupportedKeystoreVersionException: If the keystore contains an unknown format version number :raises KeystoreSignatureException: If the keystore signature could not be verified using the supplied store password :raises DuplicateAliasException: If the keystore contains duplicate aliases
3.266485
3.074871
1.062316
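A hypothetical usage sketch of the loads entry above; the module path jks.bks and the file name are assumptions, and the entries attribute is the one described in the earlier property entries:

    from jks.bks import BksKeyStore   # assumed module path

    with open("example.bks", "rb") as f:   # placeholder file name
        store = BksKeyStore.loads(f.read(), "storepass")
    for alias, entry in store.entries.items():
        print(alias, type(entry).__name__)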
key_type = b1.unpack_from(data, pos)[0]; pos += 1 key_format, pos = BksKeyStore._read_utf(data, pos, kind="key format") key_algorithm, pos = BksKeyStore._read_utf(data, pos, kind="key algorithm") key_enc, pos = BksKeyStore._read_data(data, pos) entry = BksKeyEntry(key_type, key_format, key_algorithm, key_enc, store_type=store_type) return entry, pos
def _read_bks_key(cls, data, pos, store_type)
Given a data stream, attempt to parse a stored BKS key entry at the given position, and return it as a BksKeyEntry.
3.207921
2.90851
1.102943
# Uber keystores contain the same entry data as BKS keystores, except they wrap it differently: # BKS = BKS_store || HMAC-SHA1(BKS_store) # UBER = PBEWithSHAAndTwofish-CBC(BKS_store || SHA1(BKS_store)) # # where BKS_store represents the entry format shared by both keystore types. # # The Twofish key size is 256 bits, the PBE key derivation scheme is that as outlined by PKCS#12 (RFC 7292), # and the padding scheme for the Twofish cipher is PKCS#7. try: pos = 0 version = b4.unpack_from(data, pos)[0]; pos += 4 if version != 1: raise UnsupportedKeystoreVersionException('Unsupported UBER keystore version; only v1 supported, found v'+repr(version)) salt, pos = cls._read_data(data, pos) iteration_count = b4.unpack_from(data, pos)[0]; pos += 4 encrypted_bks_store = data[pos:] try: decrypted = rfc7292.decrypt_PBEWithSHAAndTwofishCBC(encrypted_bks_store, store_password, salt, iteration_count) except BadDataLengthException as e: raise BadKeystoreFormatException("Bad UBER keystore format: %s" % str(e)) except BadPaddingException as e: raise DecryptionFailureException("Failed to decrypt UBER keystore: bad password?") # Note: we can assume that the hash must be present at the last 20 bytes of the decrypted data (i.e. without first # parsing through to see where the entry data actually ends), because valid UBER keystores generators should not put # any trailing bytes after the hash prior to encrypting. hash_fn = hashlib.sha1 hash_digest_size = hash_fn().digest_size bks_store = decrypted[:-hash_digest_size] bks_hash = decrypted[-hash_digest_size:] if len(bks_hash) != hash_digest_size: raise BadKeystoreFormatException("Insufficient signature bytes; found %d bytes, expected %d bytes" % (len(bks_hash), hash_digest_size)) if hash_fn(bks_store).digest() != bks_hash: raise KeystoreSignatureException("Hash mismatch; incorrect keystore password?") store_type = "uber" entries, size = cls._load_bks_entries(bks_store, store_type, store_password, try_decrypt_keys=try_decrypt_keys) return cls(store_type, entries, version=version) except struct.error as e: raise BadKeystoreFormatException(e)
def loads(cls, data, store_password, try_decrypt_keys=True)
See :meth:`jks.jks.KeyStore.loads`. :param bytes data: Byte string representation of the keystore to be loaded. :param str password: Keystore password string :param bool try_decrypt_keys: Whether to automatically try to decrypt any encountered key entries using the same password as the keystore password. :returns: A loaded :class:`UberKeyStore` instance, if the keystore could be successfully parsed and the supplied store password is correct. If the ``try_decrypt_keys`` parameter was set to ``True``, any keys that could be successfully decrypted using the store password have already been decrypted; otherwise, no attempt to decrypt any key entries is made. :raises BadKeystoreFormatException: If the keystore is malformed in some way :raises UnsupportedKeystoreVersionException: If the keystore contains an unknown format version number :raises KeystoreSignatureException: If the keystore signature could not be verified using the supplied store password :raises DecryptionFailureException: If the keystore contents could not be decrypted using the supplied store password :raises DuplicateAliasException: If the keystore contains duplicate aliases
4.528519
4.357716
1.039196
num_columns = int(ceil(sqrt(n))) num_rows = int(ceil(n / float(num_columns))) return (num_columns, num_rows)
def calc_columns_rows(n)
Calculate the number of columns and rows required to divide an image into ``n`` parts. Return a tuple of integers in the format (num_columns, num_rows)
2.188852
2.170128
1.008628
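For example, 50 tiles give ceil(sqrt(50)) = 8 columns and ceil(50/8) = 7 rows (using the function from the entry above):

    assert calc_columns_rows(50) == (8, 7)
    assert calc_columns_rows(4) == (2, 2)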
# TODO: Refactor calculating layout to avoid repetition. columns, rows = calc_columns_rows(len(tiles)) tile_size = tiles[0].image.size return (tile_size[0] * columns, tile_size[1] * rows)
def get_combined_size(tiles)
Calculate combined size of tiles.
5.139082
4.598386
1.117584
TILE_LIMIT = 99 * 99 try: number_tiles = int(number_tiles) except: raise ValueError('number_tiles could not be cast to integer.') if number_tiles > TILE_LIMIT or number_tiles < 2: raise ValueError('Number of tiles must be between 2 and {} (you \ asked for {}).'.format(TILE_LIMIT, number_tiles))
def validate_image(image, number_tiles)
Basic sanity checks prior to performing a split.
3.636979
3.474274
1.046831
SPLIT_LIMIT = 99 try: col = int(col) row = int(row) except: raise ValueError('columns and rows values could not be cast to integer.') if col < 2: raise ValueError('Number of columns must be between 2 and {} (you \ asked for {}).'.format(SPLIT_LIMIT, col)) if row < 2 : raise ValueError('Number of rows must be between 2 and {} (you \ asked for {}).'.format(SPLIT_LIMIT, row))
def validate_image_col_row(image, col, row)
Basic checks for columns and rows values
3.477171
3.150446
1.103708
im = Image.open(filename) im_w, im_h = im.size columns = 0 rows = 0 if number_tiles is not None: validate_image(im, number_tiles) columns, rows = calc_columns_rows(number_tiles) extras = (columns * rows) - number_tiles else: validate_image_col_row(im, col, row) columns = col rows = row extras = 0 tile_w, tile_h = int(floor(im_w / columns)), int(floor(im_h / rows)) tiles = [] number = 1 for pos_y in range(0, im_h - rows, tile_h): # -rows for rounding error. for pos_x in range(0, im_w - columns, tile_w): # as above. area = (pos_x, pos_y, pos_x + tile_w, pos_y + tile_h) image = im.crop(area) position = (int(floor(pos_x / tile_w)) + 1, int(floor(pos_y / tile_h)) + 1) coords = (pos_x, pos_y) tile = Tile(image, number, position, coords) tiles.append(tile) number += 1 if save: save_tiles(tiles, prefix=get_basename(filename), directory=os.path.dirname(filename)) return tuple(tiles)
def slice(filename, number_tiles=None, col=None, row=None, save=True)
Split an image into a specified number of tiles. Args: filename (str): The filename of the image to split. number_tiles (int): The number of tiles required. Kwargs: save (bool): Whether or not to save tiles to disk. Returns: Tuple of :class:`Tile` instances.
2.48473
2.516388
0.987419
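A hypothetical call of the slice entry above; the file name is a placeholder, and save=False keeps the tiles in memory instead of writing them out via save_tiles:

    tiles = slice("cake.png", number_tiles=36, save=False)
    print(len(tiles), tiles[0].position)   # roughly a 6x6 grid of Tile objects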
# Causes problems in CLI script. # if not os.path.exists(directory): # os.makedirs(directory) for tile in tiles: tile.save(filename=tile.generate_filename(prefix=prefix, directory=directory, format=format), format=format) return tuple(tiles)
def save_tiles(tiles, prefix='', directory=os.getcwd(), format='png')
Write image files to disk. Create specified folder(s) if they don't exist. Return a tuple of :class:`Tile` instances. Args: tiles (list): List, tuple or set of :class:`Tile` objects to save. prefix (str): Filename prefix of saved tiles. Kwargs: directory (str): Directory to save tiles. Created if non-existent. Returns: Tuple of :class:`Tile` instances.
4.257836
4.692545
0.907362
filename = prefix + '_{col:02d}_{row:02d}.{ext}'.format( col=self.column, row=self.row, ext=format.lower().replace('jpeg', 'jpg')) if not path: return filename return os.path.join(directory, filename)
def generate_filename(self, directory=os.getcwd(), prefix='tile', format='png', path=True)
Construct and return a filename for this tile.
2.603615
2.464796
1.056321
tiles = [] for filename in filenames: column, row = os.path.splitext(filename)[0][-5:].split('_') tiles.append((int(row), int(column))) rows = [pos[0] for pos in tiles]; columns = [pos[1] for pos in tiles] num_rows = max(rows); num_columns = max(columns) return (num_columns, num_rows)
def get_columns_rows(filenames)
Derive number of columns and rows from filenames.
2.716922
2.576337
1.054568
initial_state.inspect.b('mem_read', when=angr.BP_AFTER, action=_read_consolidate) initial_state.inspect.b('reg_read', when=angr.BP_AFTER, action=_read_consolidate)
def consolidate_reverse_exprs(initial_state)
Tries to simplify the Reverse(Extract(Reverse())) pattern in expressions. NOTE: Experimental! Maybe not working correctly, use it with care!
5.785494
6.02926
0.95957
size = expr.size() umin = umax = smin = smax = None if not sat_zero(se, expr): try: umin = se.min(expr, extra_constraints=[claripy.Extract(size-1,size-1,expr) == 0]) umax = se.max(expr, extra_constraints=[claripy.Extract(size-1,size-1,expr) == 0]) return (umin, umax) except: pass try: smin = -(1 << size) + se.min(expr, extra_constraints=[claripy.Extract(size-1,size-1,expr) == 1]) smax = -(1 << size) + se.max(expr, extra_constraints=[claripy.Extract(size-1,size-1,expr) == 1]) return (smin, smax) except: pass return None else: try: umax = se.max(expr, extra_constraints=[claripy.Extract(size-1,size-1,expr) == 0]) smin = 0 try: smin = -(1 << size) + se.min(expr, extra_constraints=[claripy.Extract(size-1,size-1,expr) == 1]) except: pass return (smin, umax) except: pass return None
def get_signed_range(se, expr)
Calculate the range of the expression with signed boundaries
1.959705
1.961081
0.999298
status = True if not os.path.isfile(fn): status = False else: try: open(fn) except IOError: status = False return status
def fn_check_full(fn)
Check for file existence Avoids race condition, but slower than os.path.exists. Parameters ---------- fn : str Input filename string. Returns ------- status True if file exists, False otherwise.
3.858099
4.34154
0.888648
ds = None if fn_check(fn): ds = gdal.Open(fn, gdal.GA_ReadOnly) else: print("Unable to find %s" % fn) return ds
def fn_getds(fn)
Wrapper around gdal.Open()
3.055671
2.92826
1.043511
#Add check for filename existence ds = fn_getds(fn) out = ds_getma(ds, bnum=bnum) if return_ds: out = (out, ds) return out
def fn_getma(fn, bnum=1, return_ds=False)
Get masked array from input filename Parameters ---------- fn : str Input filename string bnum : int, optional Band number Returns ------- np.ma.array Masked array containing raster values
4.275695
4.857206
0.880279
b_ndv = get_ndv_b(b) #bma = np.ma.masked_equal(b.ReadAsArray(), b_ndv) #This is more appropriate for float, handles precision issues bma = np.ma.masked_values(b.ReadAsArray(), b_ndv) return bma
def b_getma(b)
Get masked array from input GDAL Band Parameters ---------- b : gdal.Band Input GDAL Band Returns ------- np.ma.array Masked array containing raster values
5.499128
4.761898
1.154818
ns = src_ds.RasterXSize nl = src_ds.RasterYSize maxdim = float(maxdim) if scale is None: scale_ns = ns/maxdim scale_nl = nl/maxdim scale = max(scale_ns, scale_nl) #Need to check to make sure scale is positive real if scale > 1: ns = int(round(ns/scale)) nl = int(round(nl/scale)) return ns, nl, scale
def get_sub_dim(src_ds, scale=None, maxdim=1024)
Compute dimensions of subsampled dataset Parameters ---------- ds : gdal.Dataset Input GDAL Dataset scale : int, optional Scaling factor maxdim : int, optional Maximum dimension along either axis, in pixels Returns ------- ns Number of samples in subsampled output nl Number of lines in subsampled output scale Final scaling factor
2.867009
2.925652
0.979956
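The arithmetic of the entry above, worked for a 4000 x 3000 raster with the default maxdim of 1024 (src_ds is normally a gdal.Dataset; only the scale computation is sketched here):

    ns, nl, maxdim = 4000, 3000, 1024.0
    scale = max(ns / maxdim, nl / maxdim)                 # 3.90625
    ns_sub, nl_sub = int(round(ns / scale)), int(round(nl / scale))
    print(ns_sub, nl_sub, scale)                          # 1024 768 3.90625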
#print src_ds.GetFileList()[0] b = src_ds.GetRasterBand(bnum) b_ndv = get_ndv_b(b) ns, nl, scale = get_sub_dim(src_ds, scale, maxdim) #The buf_size parameters determine the final array dimensions b_array = b.ReadAsArray(buf_xsize=ns, buf_ysize=nl) bma = np.ma.masked_values(b_array, b_ndv) out = bma if return_ds: dtype = src_ds.GetRasterBand(1).DataType src_ds_sub = gdal.GetDriverByName('MEM').Create('', ns, nl, 1, dtype) gt = np.array(src_ds.GetGeoTransform()) gt[[1,5]] = gt[[1,5]]*scale src_ds_sub.SetGeoTransform(list(gt)) src_ds_sub.SetProjection(src_ds.GetProjection()) b = src_ds_sub.GetRasterBand(1) b.WriteArray(bma) b.SetNoDataValue(b_ndv) out = (bma, src_ds_sub) return out
def ds_getma_sub(src_ds, bnum=1, scale=None, maxdim=1024., return_ds=False)
Load a subsampled array, rather than full resolution This is useful when working with large rasters Uses buf_xsize and buf_ysize options from GDAL ReadAsArray method. Parameters ---------- ds : gdal.Dataset Input GDAL Dataset bnum : int, optional Band number scale : int, optional Scaling factor maxdim : int, optional Maximum dimension along either axis, in pixels Returns ------- np.ma.array Masked array containing raster values
2.241666
2.512696
0.892136
out_vrt = os.path.splitext(out_csv)[0]+'.vrt' out_csv = os.path.split(out_csv)[-1] f = open(out_vrt, 'w') f.write('<OGRVRTDataSource>\n') f.write(' <OGRVRTLayer name="%s">\n' % os.path.splitext(out_csv)[0]) f.write(' <SrcDataSource>%s</SrcDataSource>\n' % out_csv) f.write(' <GeometryType>wkbPoint</GeometryType>\n') f.write(' <LayerSRS>%s</LayerSRS>\n' % srs) f.write(' <GeometryField encoding="PointFromColumns" x="%s" y="%s"/>\n' % (x, y)) f.write(' </OGRVRTLayer>\n') f.write('</OGRVRTDataSource>\n') f.close()
def writevrt(out_csv,srs='EPSG:4326',x='field_1',y='field_2')
Write out a vrt to accompany a csv of points
1.37793
1.36945
1.006192
dt_dict = gdal_array.codes if isinstance(d, (np.ndarray, np.generic)): d = d.dtype #This creates dtype from another built-in type #d = np.dtype(d) if isinstance(d, np.dtype): if d.name == 'int8': gdal_dt = 1 elif d.name == 'bool': #Write out as Byte gdal_dt = 1 else: gdal_dt = list(dt_dict.keys())[list(dt_dict.values()).index(d)] else: print("Input must be NumPy array or NumPy dtype") gdal_dt = None return gdal_dt
def np2gdal_dtype(d)
Get GDAL RasterBand datatype that corresponds with NumPy datatype Input should be numpy array or numpy dtype
4.049691
3.71173
1.091052
dt_dict = gdal_array.codes if isinstance(b, str): b = gdal.Open(b) if isinstance(b, gdal.Dataset): b = b.GetRasterBand(1) if isinstance(b, gdal.Band): b = b.DataType if isinstance(b, int): np_dtype = dt_dict[b] else: np_dtype = None print("Input must be GDAL Dataset or RasterBand object") return np_dtype
def gdal2np_dtype(b)
Get NumPy datatype that corresponds with GDAL RasterBand datatype Input can be filename, GDAL Dataset, GDAL RasterBand, or GDAL integer dtype
2.792116
2.318365
1.204347
b_ndv = b.GetNoDataValue() if b_ndv is None: #Check ul pixel for ndv ns = b.XSize nl = b.YSize ul = float(b.ReadAsArray(0, 0, 1, 1)) #ur = float(b.ReadAsArray(ns-1, 0, 1, 1)) lr = float(b.ReadAsArray(ns-1, nl-1, 1, 1)) #ll = float(b.ReadAsArray(0, nl-1, 1, 1)) #Probably better to use 3/4 corner criterion #if ul == ur == lr == ll: if np.isnan(ul) or ul == lr: b_ndv = ul else: #Assume ndv is 0 b_ndv = 0 elif np.isnan(b_ndv): b_dt = gdal.GetDataTypeName(b.DataType) if 'Float' in b_dt: b_ndv = np.nan else: b_ndv = 0 return b_ndv
def get_ndv_b(b)
Get NoData value for GDAL band. If NoDataValue is not set in the band, extract upper left and lower right pixel values. Otherwise assume NoDataValue is 0. Parameters ---------- b : GDALRasterBand object This is the input band. Returns ------- b_ndv : float NoData value
2.843138
2.579488
1.10221
if logical: from multiprocessing import cpu_count ncpu=cpu_count() else: import psutil ncpu=psutil.cpu_count(logical=False) return ncpu
def cpu_count(logical=True)
Return system CPU count
2.532453
2.394395
1.057659
fn = os.path.split(url)[-1] if outdir is not None: fn = os.path.join(outdir, fn) if not os.path.exists(fn): #Find appropriate urlretrieve for Python 2 and 3 try: from urllib.request import urlretrieve except ImportError: from urllib import urlretrieve print("Retrieving: %s" % url) #Add progress bar urlretrieve(url, fn) return fn
def getfile(url, outdir=None)
Function to fetch files using urllib Works with ftp
3.10568
3.31171
0.937788
import requests print("Retrieving: %s" % url) fn = os.path.split(url)[-1] if outdir is not None: fn = os.path.join(outdir, fn) if auth is not None: r = requests.get(url, stream=True, auth=auth) else: r = requests.get(url, stream=True) chunk_size = 1000000 with open(fn, 'wb') as fd: for chunk in r.iter_content(chunk_size): fd.write(chunk)
def getfile2(url, auth=None, outdir=None)
Function to fetch files using requests Works with https authentication
1.748382
1.854755
0.942648
import getpass from requests.auth import HTTPDigestAuth #This binds raw_input to input for Python 2 input_func = input try: input_func = raw_input except NameError: pass uname = input_func("MODSCAG Username:") pw = getpass.getpass("MODSCAG Password:") auth = HTTPDigestAuth(uname, pw) #wget -A'h8v4*snow_fraction.tif' --user=uname --password=pw return auth
def get_auth()
Get authorization token for https
7.766786
7.535324
1.030717
import csv #Check first line for header with open(fn, 'r') as f: reader = csv.DictReader(f) hdr = reader.fieldnames #Assume there is a header on first line, check skiprows = 1 if all(field.isdigit() for field in hdr): hdr = None skiprows = 0 #Check header for lat/lon/z or x/y/z tags #Should probably do genfromtxt here if header exists and dtype of cols is variable pts = np.loadtxt(fn, delimiter=',', skiprows=skiprows, dtype=None) return pts
def readcsv(fn)
Wrapper to read arbitrary csv, check for header Needs some work to be more robust, quickly added for demcoreg sampling
6.372752
6.050655
1.053233
print('Excluding values outside of range: {0:f} to {1:f}'.format(*rangelim)) out = np.ma.masked_outside(dem, *rangelim) out.set_fill_value(dem.fill_value) return out
def range_fltr(dem, rangelim)
Range filter (helper function)
2.982636
2.968512
1.004758
out = range_fltr(np.ma.abs(dem), rangelim) #Apply mask to original input out = np.ma.array(dem, mask=np.ma.getmaskarray(out)) out.set_fill_value(dem.fill_value) return out
def absrange_fltr(dem, rangelim)
Absolute range filter
3.932856
3.817033
1.030344
rangelim = malib.calcperc(dem, perc) print('Excluding values outside of percentile range: {0:0.2f} to {1:0.2f}'.format(*perc)) out = range_fltr(dem, rangelim) return out
def perc_fltr(dem, perc=(1.0, 99.0))
Percentile filter
5.725267
5.827129
0.982519
std = dem.std() u = dem.mean() print('Excluding values outside of range: {1:0.2f} +/- {0}*{2:0.2f}'.format(n, u, std)) rangelim = (u - n*std, u + n*std) out = range_fltr(dem, rangelim) return out
def sigma_fltr(dem, n=3)
sigma * factor filter Useful for outlier removal These are min/max percentile ranges for different sigma values: 1: 15.865, 84.135 2: 2.275, 97.725 3: 0.135, 99.865
5.300261
5.462986
0.970213
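For roughly normal data, the 3-sigma filter above keeps about the same pixels as a (0.135, 99.865) percentile filter, per the ranges listed in the docstring. A quick check; importing both filters from pygeotools.lib.filtlib is an assumption about the module layout:

    import numpy as np
    from pygeotools.lib.filtlib import sigma_fltr, perc_fltr   # assumed module path

    dem = np.ma.masked_invalid(np.random.normal(100.0, 5.0, (512, 512)))
    out_sigma = sigma_fltr(dem, n=3)
    out_perc = perc_fltr(dem, perc=(0.135, 99.865))
    print(out_sigma.count(), out_perc.count())   # counts should be very close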
mad, med = malib.mad(dem, return_med=True) print('Excluding values outside of range: {1:0.3f} +/- {0}*{2:0.3f}'.format(n, med, mad)) rangelim = (med - n*mad, med + n*mad) out = range_fltr(dem, rangelim) return out
def mad_fltr(dem, n=3)
Median absolute deviation * factor filter Robust outlier removal
4.85589
5.330053
0.91104
#import astropy.nddata import astropy.convolution dem = malib.checkma(dem) #Generate 2D gaussian kernel for input sigma and size #Default size is 8*sigma in x and y directions #kernel = astropy.nddata.make_kernel([size, size], sigma, 'gaussian') #Size must be odd if size is not None: size = int(np.floor(size/2)*2 + 1) size = max(size, 3) #Truncate the filter at this many standard deviations. Default is 4.0 truncate = 3.0 if size is not None and sigma is None: sigma = (size - 1) / (2*truncate) elif size is None and sigma is not None: #Round up to nearest odd int size = int(np.ceil((sigma * (2*truncate) + 1)/2)*2 - 1) elif size is None and sigma is None: #Use default parameters sigma = 1 size = int(np.ceil((sigma * (2*truncate) + 1)/2)*2 - 1) size = max(size, 3) kernel = astropy.convolution.Gaussian2DKernel(sigma, x_size=size, y_size=size, mode='oversample') print("Applying gaussian smoothing filter with size %i and sigma %0.3f (sum %0.3f)" % \ (size, sigma, kernel.array.sum())) #This will fill holes #np.nan is float #dem_filt_gauss = astropy.nddata.convolve(dem.astype(float).filled(np.nan), kernel, boundary='fill', fill_value=np.nan) #dem_filt_gauss = astropy.convolution.convolve(dem.astype(float).filled(np.nan), kernel, boundary='fill', fill_value=np.nan) #Added normalization to ensure filtered values are not brightened/darkened if kernelsum != 1 dem_filt_gauss = astropy.convolution.convolve(dem.astype(float).filled(np.nan), kernel, boundary='fill', fill_value=np.nan, normalize_kernel=True) #This will preserve original ndv pixels, applying original mask after filtering if origmask: print("Applying original mask") #Allow filling of interior holes, but use original outer edge if fill_interior: mask = malib.maskfill(dem) else: mask = dem.mask dem_filt_gauss = np.ma.array(dem_filt_gauss, mask=mask, fill_value=dem.fill_value) out = np.ma.fix_invalid(dem_filt_gauss, copy=False, fill_value=dem.fill_value) out.set_fill_value(dem.fill_value.astype(dem.dtype)) return out.astype(dem.dtype)
def gauss_fltr_astropy(dem, size=None, sigma=None, origmask=False, fill_interior=False)
Astropy gaussian filter properly handles convolution with NaN http://stackoverflow.com/questions/23832852/by-which-measures-should-i-set-the-size-of-my-gaussian-filter-in-matlab width1 = 3; sigma1 = (width1-1) / 6; Specify width for smallest feature of interest and determine sigma appropriately sigma is width of 1 std in pixels (not multiplier) scipy and astropy both use cutoff of 4*sigma on either side of kernel - 99.994% 3*sigma on either side of kernel - 99.7% If sigma is specified, filter width will be a multiple of 8 times sigma Alternatively, specify filter size, then compute sigma: sigma = (size - 1) / 8. If size is < the required width for 6-8 sigma, need to use different mode to create kernel mode 'oversample' and 'center' are essentially identical for sigma 1, but very different for sigma 0.3 The sigma/size calculations below should work for non-integer sigma
3.440786
3.461982
0.993878
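The size/sigma bookkeeping in the entry above, worked for one case with truncate = 3.0 (the kernel covers +/- 3 sigma): specifying sigma = 1.5 yields a 9 px kernel, and a 9 px kernel maps back to sigma of about 1.33.

    import numpy as np

    truncate = 3.0
    sigma = 1.5
    size = int(np.ceil((sigma * (2 * truncate) + 1) / 2) * 2 - 1)   # round up to nearest odd int
    print(size)                                                     # 9

    size = 9
    sigma = (size - 1) / (2 * truncate)
    print(sigma)                                                    # 1.333...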
dem = malib.checkma(dem) levels = int(np.floor(np.log2(size))) #print levels dim = np.floor(np.array(dem.shape)/float(2**levels) + 1)*(2**levels) #print dem.shape #print dim #Can do something with np.pad here #np.pad(a_fp.filled(), 1, mode='constant', constant_values=(a_fp.fill_value,)) dem2 = np.full(dim, dem.fill_value) offset = (dim - np.array(dem.shape))/2.0 #print offset #dem2[0:dem.shape[0],0:dem.shape[1]] = dem.data dem2[offset[0]:dem.shape[0]+offset[0],offset[1]:dem.shape[1]+offset[1]] = dem.data dem2 = np.ma.masked_equal(dem2, dem.fill_value) #dem2 = dem for n in range(levels): print(dem2.shape) dim = (np.floor(np.array(dem2.shape)/2.0 + 1)*2).astype(int) #dem2 = gauss_fltr_astropy(dem2, size=5, origmask=origmask) #dem2 = gauss_fltr_astropy(dem2, size=5) dem2 = gauss_fltr_astropy(dem2, size=5) #Note: Should use zoom with same bilinear interpolation here for consistency #However, this doesn't respect nan #dem2 = zoom(dem2, 0.5, order=1, prefilter=False, cval=dem.fill_value) dem2 = dem2[::2,::2] if full: print("Resizing to original input dimensions") from scipy.ndimage import zoom for n in range(levels): print(dem2.shape) #Note: order 1 is bilinear dem2 = zoom(dem2, 2, order=1, prefilter=False, cval=dem.fill_value) #dem2 = zoom(dem2, 2**levels, order=1, prefilter=False, cval=dem2.fill_value) print(dem2.shape) #This was for power of 2 offset #offset = (2**levels)/2 #print offset #dem2 = dem2[offset:dem.shape[0]+offset,offset:dem.shape[1]+offset] #Use original offset dem2 = dem2[offset[0]:dem.shape[0]+offset[0],offset[1]:dem.shape[1]+offset[1]] if origmask: print("Applying original mask") #Allow filling of interior holes, but use original outer edge maskfill = malib.maskfill(dem) #dem2 = np.ma.array(dem2, mask=np.ma.getmaskarray(dem)) dem2 = np.ma.array(dem2, mask=maskfill, fill_value=dem.fill_value) return dem2
def gauss_fltr_pyramid(dem, size=None, full=False, origmask=False)
Pyramidal downsampling approach for gaussian smoothing Avoids the need for large kernels, very fast Needs testing
2.932641
2.944227
0.996065
import cv2 dem = malib.checkma(dem) dem_cv = cv2.GaussianBlur(dem.filled(np.nan), (size, size), sigma) out = np.ma.fix_invalid(dem_cv) out.set_fill_value(dem.fill_value) return out
def gauss_fltr_opencv(dem, size=3, sigma=1)
OpenCV Gaussian filter Still propagates NaN values
3.765524
3.367332
1.118252
smooth = gauss_fltr_astropy(dem, size=size) smooth[~dem.mask] = dem[~dem.mask] if newmask is not None: smooth = np.ma.array(smooth, mask=newmask) return smooth
def gaussfill(dem, size=3, newmask=None)
Gaussian filter with filling
3.707384
3.668993
1.010464
print("Applying median filter with size %s" % fsize) from scipy.ndimage.filters import median_filter dem_filt_med = median_filter(dem.filled(np.nan), fsize) #Now mask all nans out = np.ma.fix_invalid(dem_filt_med, copy=False, fill_value=dem.fill_value) if origmask: out = np.ma.array(out, mask=dem.mask, fill_value=dem.fill_value) out.set_fill_value(dem.fill_value) return out
def median_fltr(dem, fsize=7, origmask=False)
Scipy.ndimage median filter Does not properly handle NaN
2.560878
2.656474
0.964014
import cv2 dem = malib.checkma(dem) if size > 5: print("Need to implement iteration") n = 0 out = dem while n <= iterations: dem_cv = cv2.medianBlur(out.astype(np.float32).filled(np.nan), size) out = np.ma.fix_invalid(dem_cv) out.set_fill_value(dem.fill_value) n += 1 return out
def median_fltr_opencv(dem, size=3, iterations=1)
OpenCV median filter
4.550635
4.288352
1.061162
r = size/2 c = (r,r) y,x = np.ogrid[-c[0]:size-c[0], -c[1]:size-c[1]] mask = ~(x*x + y*y <= r*r) return mask
def circular_mask(size)
Create a circular mask for an array Useful when sampling rasters for a laser shot
2.739406
2.471936
1.108203
print("Applying rolling filter: %s with size %s" % (f.__name__, size)) dem = malib.checkma(dem) #Convert to float32 so we can fill with nan dem = dem.astype(np.float32) newshp = (dem.size, size*size) #Force a step size of 1 t = malib.sliding_window_padded(dem.filled(np.nan), (size, size), (1, 1)) if circular: if size > 3: mask = circular_mask(size) t[:,mask] = np.nan t = t.reshape(newshp) out = f(t, axis=1).reshape(dem.shape) out = np.ma.fix_invalid(out).astype(dem.dtype) out.set_fill_value(dem.fill_value) if origmask: out = np.ma.array(out, mask=np.ma.getmaskarray(dem)) return out
def rolling_fltr(dem, f=np.nanmedian, size=3, circular=True, origmask=False)
General rolling filter (default operator is median filter) Can input any function f Efficient for smaller arrays, correctly handles NaN, fills gaps
3.527629
3.659809
0.963883
#Note, ndimage doesn't properly handle ma - convert to nan dem = malib.checkma(dem) dem = dem.astype(np.float64) #Mask islands if erode > 0: print("Eroding islands smaller than %s pixels" % (erode * 2)) dem = malib.mask_islands(dem, iterations=erode) print("Applying median filter with radius %s" % radius) #Note: this funcitonality was present in scikit-image 0.9.3 import skimage.filter dem_filt_med = skimage.filter.median_filter(dem, radius, mask=~dem.mask) #Starting in version 0.10.0, this is the new filter #This is the new filter, but only supports uint8 or unit16 #import skimage.filters #import skimage.morphology #dem_filt_med = skimage.filters.rank.median(dem, disk(radius), mask=~dem.mask) #dem_filt_med = skimage.filters.median(dem, skimage.morphology.disk(radius), mask=~dem.mask) #Now mask all nans #skimage assigns the minimum value as nodata #CHECK THIS, seems pretty hacky #Also, looks like some valid values are masked at this stage, even though they should be above min ndv = np.min(dem_filt_med) #ndv = dem_filt_med.min() + 0.001 out = np.ma.masked_less_equal(dem_filt_med, ndv) #Should probably replace the ndv with original ndv out.set_fill_value(dem.fill_value) if origmask: print("Applying original mask") #Allow filling of interior holes, but use original outer edge #maskfill = malib.maskfill(dem, iterations=radius) maskfill = malib.maskfill(dem) #dem_filt_gauss = np.ma.array(dem_filt_gauss, mask=dem.mask, fill_value=dem.fill_value) out = np.ma.array(out, mask=maskfill, fill_value=dem.fill_value) return out
def median_fltr_skimage(dem, radius=3, erode=1, origmask=False)
Older skimage.filter.median_filter This smooths, removes noise and fills in nodata areas with median of valid pixels! Effectively an inpainting routine
4.843401
4.824182
1.003984
print("Applying uniform filter with size %s" % fsize) #Note, ndimage doesn't properly handle ma - convert to nan from scipy.ndimage.filters import unifiform_filter dem_filt_med = uniform_filter(dem.filled(np.nan), fsize) #Now mask all nans out = np.ma.fix_invalid(dem_filt_med, copy=False, fill_value=dem.fill_value) out.set_fill_value(dem.fill_value) return out
def uniform_fltr(dem, fsize=7)
Uniform (mean) filter Note: suffers from significant ringing
5.7554
5.923364
0.971644
try: open(refdem_fn) except IOError: sys.exit('Unable to open reference DEM: %s' % refdem_fn) from pygeotools.lib import warplib dem_ds, refdem_ds = warplib.memwarp_multi_fn([dem_fn, refdem_fn], res='first', extent='first', t_srs='first') dem = iolib.ds_getma(dem_ds) refdem = iolib.ds_getma(refdem_ds) out = dz_fltr_ma(dem, refdem, perc, rangelim, smooth) return out
def dz_fltr(dem_fn, refdem_fn, perc=None, rangelim=(0,30), smooth=False)
Absolute elevation difference range filter using values from a source raster file and a reference raster file
2.878839
2.869643
1.003204
if smooth: refdem = gauss_fltr_astropy(refdem) dem = gauss_fltr_astropy(dem) dz = refdem - dem #This is True for invalid values in DEM, and should be masked demmask = np.ma.getmaskarray(dem) if perc: dz_perc = malib.calcperc(dz, perc) print("Applying dz percentile filter (%s%%, %s%%): (%0.1f, %0.1f)" % (perc[0], perc[1], dz_perc[0], dz_perc[1])) #This is True for invalid values perc_mask = ((dz < dz_perc[0]) | (dz > dz_perc[1])).filled(False) demmask = (demmask | perc_mask) if rangelim: #This is True for invalid values range_mask = ((np.abs(dz) < rangelim[0]) | (np.abs(dz) > rangelim[1])).filled(False) if False: cutoff = 150 rangelim = (0, 80) low = (refdem < cutoff).data range_mask[low] = ((np.abs(dz) < rangelim[0]) | (np.abs(dz) > rangelim[1])).filled(False)[low] demmask = (demmask | range_mask) out = np.ma.array(dem, mask=demmask, fill_value=dem.fill_value) return out
def dz_fltr_ma(dem, refdem, perc=None, rangelim=(0,30), smooth=False)
Absolute elevation difference range filter using values from a source array and a reference array
2.91877
2.942251
0.992019
import scipy.ndimage as ndimage print('Eroding pixels near nodata: %i iterations' % iterations) mask = np.ma.getmaskarray(dem) mask_dilate = ndimage.morphology.binary_dilation(mask, iterations=iterations) out = np.ma.array(dem, mask=mask_dilate) return out
def erode_edge(dem, iterations=1)
Erode pixels near nodata
3.670286
2.882302
1.273387
import scipy.signal import matplotlib.pyplot as plt #dt is 300 s, 5 min dt_diff = np.diff(dt_list) dt_diff = np.array([dt.total_seconds() for dt in dt_diff]) dt = malib.fast_median(dt_diff) #f is 0.00333 Hz #288 samples/day fs = 1./dt nyq = fs/2. if False: #psd, f = psd(z_msl, fs) sp_f, sp_psd = scipy.signal.periodogram(val, fs, detrend='linear') #sp_f, sp_psd = scipy.signal.welch(z_msl, fs, nperseg=2048) sp_f_days = 1./sp_f/86400. plt.figure() plt.plot(sp_f, sp_psd) plt.plot(sp_f_days, sp_psd) plt.semilogy(sp_f_days, sp_psd) plt.xlabel('Frequency') plt.ylabel('Power') print("Filtering tidal signal") #Define bandpass filter #f_min = dt/(86400*0.25) f_max = (1./(86400*0.1)) / nyq f_min = (1./(86400*1.8)) / nyq order = 6 b, a = scipy.signal.butter(order, f_min, btype='highpass') #b, a = sp.signal.butter(order, (f_min, f_max), btype='bandpass') w, h = scipy.signal.freqz(b, a, worN=2000) w_f = (nyq/np.pi)*w w_f_days = 1/w_f/86400. #plt.figure() #plt.plot(w_f_days, np.abs(h)) val_f_tide = scipy.signal.filtfilt(b, a, val) b, a = scipy.signal.butter(order, f_max, btype='lowpass') #b, a = sp.signal.butter(order, (f_min, f_max), btype='bandstop') w, h = scipy.signal.freqz(b, a, worN=2000) w_f = (nyq/np.pi)*w w_f_days = 1/w_f/86400. #plt.plot(w_f_days, np.abs(h)) val_f_tide_denoise = scipy.signal.filtfilt(b, a, val_f_tide) #val_f_notide = sp.signal.filtfilt(b, a, val) val_f_notide = val - val_f_tide
def butter(dt_list, val, lowpass=1.0)
This is framework for a butterworth bandpass for 1D data Needs to be cleaned up and generalized
2.485751
2.470677
1.006101
#Fill ndv with random data bf = malib.randomfill(bma) import scipy.fftpack f = scipy.fftpack.fft2(bf) ff = scipy.fftpack.fftshift(f) #Ben suggested a Hahn filter here, remove the low frequency, high amplitude information #Then do a second fft? #np.log(np.abs(ff)) #perc = malib.calcperc(np.real(ff), perc=(80, 95)) #malib.iv(numpy.real(ff), clim=perc) #See http://scipy-lectures.github.io/advanced/image_processing/ #Starting at a,b, compute argmax along vertical axis for restricted range #Fit line to the x and y argmax values #Mask [argmax[y]-1:argmax[y]+1] #Create radial mask ff_dim = np.array(ff.shape) a,b = ff_dim/2 n = ff_dim.max() y,x = np.ogrid[-a:n-a, -b:n-b] r1 = 40 r2 = 60 ff_mask = np.ma.make_mask(ff) radial_mask = (r1**2 <= x**2 + y**2) & (x**2 + y**2 < r2**2) #Note issues with rounding indices here #Hacked in +1 for testing ff_mask[:] = radial_mask[a-ff_dim[0]/2:a+ff_dim[0], b-ff_dim[1]/2:b+1+ff_dim[1]/2] #Combine radial and line mask #Convert mask to 0-1, then feather fm = ff * ff_mask #Inverse fft bf_filt = scipy.fftpack.ifft2(scipy.fftpack.ifftshift(fm)) #Apply original mask bf_filt = np.ma.masked_array(bf_filt, bma.mask)
def freq_filt(bma)
This is a framework for 2D FFT filtering. It has not be tested or finished - might be a dead end See separate utility freq_analysis.py
6.916913
6.875042
1.00609
from copy import deepcopy from pygeotools.lib import filtlib print("Copying original DEMStack") s = deepcopy(s_orig) s.stack_fn = os.path.splitext(s_orig.stack_fn)[0]+'_smooth%ipx.npz' % size #Loop through each array and smooth print("Smoothing all arrays in stack with %i px gaussian filter" % size) for i in range(s.ma_stack.shape[0]): print('%i of %i' % (i+1, s.ma_stack.shape[0])) s.ma_stack[i] = filtlib.gauss_fltr_astropy(s.ma_stack[i], size=size) if s.stats: s.compute_stats() if save: s.write_stats() #Update datestack if s.datestack and s.date_list_o.count() > 1: s.compute_dt_stats() if save: s.write_datestack() #Update trend if s.trend: s.compute_trend() if save: s.write_trend() if save: s.savestack() return s
def stack_smooth(s_orig, size=7, save=False)
Run Gaussian smoothing filter on existing stack object
4.213803
4.022777
1.047486
#Should check for valid extent #This is not memory efficient, but is much simpler #To be safe, if we are saving out, create a copy to avoid overwriting if copy or save: from copy import deepcopy print("Copying original DEMStack") s = deepcopy(s_orig) else: #Want to be very careful here, as we could overwrite the original file s = s_orig from pygeotools.lib import geolib gt = s.gt s_shape = s.ma_stack.shape[1:3] #Compute pixel bounds for input extent min_x_px, max_y_px = geolib.mapToPixel(extent[0], extent[1], gt) max_x_px, min_y_px = geolib.mapToPixel(extent[2], extent[3], gt) #Clip to stack extent and round to whole integers min_x_px = int(max(0, min_x_px)+0.5) max_x_px = int(min(s_shape[1], max_x_px)+0.5) min_y_px = int(max(0, min_y_px)+0.5) max_y_px = int(min(s_shape[0], max_y_px)+0.5) #Clip the stack x_slice = slice(min_x_px,max_x_px) y_slice = slice(min_y_px,max_y_px) s.ma_stack = s.ma_stack[:, y_slice, x_slice] #Now update geospatial info #This returns the pixel center in map coordinates #Want to remove 0.5 px offset for upper left corner in gt out_ul = geolib.pixelToMap(min_x_px - 0.5, min_y_px - 0.5, gt) #Update stack geotransform s.gt[0] = out_ul[0] s.gt[3] = out_ul[1] #Update new stack extent s.get_extent() #Check for and discard emtpy arrays #Might be faster to reshape then np.ma.count(s.ma_stack, axis=1) count_list = np.array([i.count() for i in s.ma_stack]) idx = count_list > 0 #Output subset with valid data in next extent #fn_list, source, error, error_dict_list, date_list, date_list_o #Note: no need to copy again s_sub = get_stack_subset(s, idx, out_stack_fn=out_stack_fn, copy=False, save=False) print("Orig filename:", s_orig.stack_fn) print("Orig extent:", s_orig.extent) print("Orig dimensions:", s_orig.ma_stack.shape) print("Input extent:", extent) print("New filename:", s_sub.stack_fn) print("New extent:", s_sub.extent) print("New dimensions:", s_sub.ma_stack.shape) if save: if os.path.abspath(s_orig.stack_fn) == os.path.abspath(s_sub.stack_fn): print("Original stack would be overwritten!") print("Skipping save") else: s_sub.save = True s_sub.savestack() #The following should be unchanged by clip - it is more efficient to clip thes, but easier to regenerate #if s.stats: #stack_count, stack_mean, stack_min, stack_max, stack_std #s.stack_min = s.stack_min[y_slice, x_slice] #if s.datestack: #dt_ptp, dt_min, dt_max, dt_center #if s.med: #stack_med #if s.trend: #trend, intercept, detrended_std #Recompute stats/etc return s_sub
def stack_clip(s_orig, extent, out_stack_fn=None, copy=True, save=False)
Create a new stack object with limited extent from an existing stack object
4.115426
4.125713
0.997507
#This must be a numpy boolean array idx = np.array(idx) if np.any(idx): #This is not memory efficient, but is much simpler #To be safe, if we are saving out, create a copy to avoid overwriting if copy or save: from copy import deepcopy print("Copying original DEMStack") s = deepcopy(s_orig) else: #Want to be very careful here, as we could overwrite the original file s = s_orig #Update fn_list #Note: need to change fn_list to np.array - object array, allows longer strings #s.fn_list = s.fn_list[idx] print("Original stack: %i" % len(s_orig.fn_list)) s.fn_list = (np.array(s.fn_list)[idx]).tolist() print("Filtered stack: %i" % len(s.fn_list)) #Update date_lists s.date_list = s.date_list[idx] s.date_list_o = s.date_list_o[idx] #Update ma s.ma_stack = s.ma_stack[idx] #Update source/error #s.source = s.source[idx] s.source = (np.array(s.source)[idx]).tolist() s.error = s.error[idx] s.error_dict_list = np.array(s.error_dict_list)[idx] #Update stack_fn #out_stack_fn should be full path, with npz if out_stack_fn is None: s.stack_fn = None s.get_stack_fn() else: s.stack_fn = out_stack_fn #Check to make sure we are not going to overwrite if os.path.abspath(s_orig.stack_fn) == os.path.abspath(s.stack_fn): print("Warning: new stack has identical filename: %s" % s.stack_fn) print("As a precaution, new stack will not be saved") save = False s.save = save #Update stats if s.stats: s.compute_stats() if save: s.write_stats() #Update datestack if s.datestack and s.date_list_o.count() > 1: s.compute_dt_stats() if save: s.write_datestack() #Update trend if s.trend: s.compute_trend() if save: s.write_trend() if save: s.savestack() else: print("No valid entries for input index array") s = None return s
def get_stack_subset(s_orig, idx, out_stack_fn=None, copy=True, save=False)
Create a new stack object as a subset of an existing stack object
3.526267
3.539918
0.996144
from pygeotools.lib import geolib from copy import deepcopy #Assumes input stacks have identical extent, resolution, and projection if s1.ma_stack.shape[1:3] != s2.ma_stack.shape[1:3]: print(s1.ma_stack.shape) print(s2.ma_stack.shape) sys.exit('Input stacks must have identical array dimensions') if not geolib.extent_compare(s1.extent, s2.extent): print(s1.extent) print(s2.extent) sys.exit('Input stacks must have identical extent') if not geolib.res_compare(s1.res, s2.res): print(s1.res) print(s2.res) sys.exit('Input stacks must have identical res') print("\nCombining fn_list and ma_stack") fn_list = np.array(s1.fn_list + s2.fn_list) if sort: #Sort based on filenames (should be datesort) sort_idx = np.argsort([os.path.split(x)[-1] for x in fn_list]) else: sort_idx = Ellipsis #Now pull out final, sorted order fn_list = fn_list[sort_idx] ma_stack = np.ma.vstack((s1.ma_stack, s2.ma_stack))[sort_idx] #date_list = np.ma.dstack(s1.date_list, s2.date_list) #date_list_o = np.ma.dstack(s1.date_list_o, s2.date_list_o) source = np.array(s1.source + s2.source)[sort_idx] error = np.ma.concatenate([s1.error, s2.error])[sort_idx] #These are object arrays error_dict_list = np.concatenate([s1.error_dict_list, s2.error_dict_list])[sort_idx] print("Creating copy for new stack") s = deepcopy(s1) s.fn_list = list(fn_list) s.ma_stack = ma_stack s.source = list(source) s.error = error s.error_dict_list = error_dict_list #This will use original stack outdir if not out_stack_fn: s.get_stack_fn() else: s.stack_fn = out_stack_fn s.get_date_list() #These will preserve trend from one stack if present in only one stack #Useful when combining surface topo and bed topo if s1.datestack and s2.datestack: s.compute_dt_stats() if save and s1.datestack: s.write_datestack() if s1.stats and s2.stats: s.compute_stats() if save and s1.stats: s.write_stats() if s1.trend and s2.trend: s.compute_trend() if save and s1.trend: s.write_trend() if save: s.savestack() return s
def stack_merge(s1, s2, out_stack_fn=None, sort=True, save=False)
Merge two stack objects
2.862119
2.832797
1.010351
a = checkma(a) #For data that have already been normalized, #This provides a proper normal distribution with mean=0 and std=1 #a = (a - a.mean()) / a.std() #noise = a.mask * (np.random.randn(*a.shape)) noise = a.mask * np.random.normal(a.mean(), a.std(), a.shape) #Add the noise b = a.filled(0) + noise return b
def randomfill(a)
Fill masked areas with random noise This is needed for any fft-based operations
5.143744
4.931283
1.043084
a = checkma(a) ndv = a.fill_value #Note: The following fails for arrays that are not float (np.nan is float) b = f_a(a.filled(np.nan), *args, **kwargs) #the fix_invalid fill_value parameter doesn't seem to work out = np.ma.fix_invalid(b, copy=False) out.set_fill_value(ndv) return out
def nanfill(a, f_a, *args, **kwargs)
Fill masked areas with np.nan Wrapper for functions that can't handle ma (e.g. scipy.ndimage) This will force filters to ignore nan, but causes adjacent pixels to be set to nan as well: http://projects.scipy.org/scipy/ticket/1155
5.881074
5.864095
1.002895
a = checkma(a) #return scoreatpercentile(a.compressed(), 50) if a.count() > 0: out = np.percentile(a.compressed(), 50) else: out = np.ma.masked return out
def fast_median(a)
Fast median operation for masked array using 50th-percentile
4.558294
3.793472
1.201615
a = checkma(a) #return np.ma.median(np.fabs(a - np.ma.median(a))) / c if a.count() > 0: if axis is None: med = fast_median(a) out = fast_median(np.fabs(a - med)) * c else: med = np.ma.median(a, axis=axis) #This is necessary for broadcasting med = np.expand_dims(med, axis=axis) out = np.ma.median(np.ma.fabs(a - med), axis=axis) * c else: out = np.ma.masked if return_med: out = (out, med) return out
def mad(a, axis=None, c=1.4826, return_med=False)
Compute normalized median absolute deviation (NMAD) Can also return median array, as this can be expensive, and often we want both med and nmad Note: 1.4826 = 1/0.6745
2.684329
2.736522
0.980927
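Why NMAD is used for robust spread in the entries above (and in mad_fltr earlier): a handful of gross outliers inflates the standard deviation but barely moves the NMAD. A sketch; importing mad from pygeotools.lib.malib is an assumption about the module layout:

    import numpy as np
    from pygeotools.lib.malib import mad   # assumed module path

    a = np.ma.array(np.random.normal(0.0, 1.0, 10000))
    a[:50] = 1000.0                         # inject 0.5% gross outliers
    print(a.std(), mad(a))                  # std is strongly inflated; NMAD stays near 1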
b = checkma(b) if b.count() > 0: #low = scoreatpercentile(b.compressed(), perc[0]) #high = scoreatpercentile(b.compressed(), perc[1]) low = np.percentile(b.compressed(), perc[0]) high = np.percentile(b.compressed(), perc[1]) else: low = 0 high = 0 #low = scipy.stats.mstats.scoreatpercentile(b, perc[0]) #high = scipy.stats.mstats.scoreatpercentile(b, perc[1]) #This approach can be used for unmasked array, but values less than 0 are problematic #bma_low = b.min() #bma_high = b.max() #low = scipy.stats.scoreatpercentile(b.data.flatten(), perc[0], (bma_low, bma_high)) #high = scipy.stats.scoreatpercentile(b.data.flatten(), perc[1], (bma_low, bma_high)) return low, high
def calcperc(b, perc=(0.1,99.9))
Calculate values at specified percentiles
2.502589
2.380762
1.051172
clim = np.max(np.abs(calcperc(b, perc))) #clim = (-clim, clim) return -clim, clim
def calcperc_sym(b, perc=(0.1,99.9))
Get symmetrical percentile values Useful for determining clim centered on 0 for difference maps
6.909949
5.32976
1.296484
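A small example of the symmetric limits above, handy for a diverging colormap centered on zero; importing calcperc_sym from pygeotools.lib.malib is an assumption about the module layout:

    import numpy as np
    from pygeotools.lib.malib import calcperc_sym   # assumed module path

    dz = np.ma.array([-12.0, -3.0, 0.5, 2.0, 8.0])
    print(calcperc_sym(dz, perc=(0, 100)))           # (-12.0, 12.0)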
b = checkma(b) low, high = calcperc(b, perc) return low, high, high - low
def iqr(b, perc=(25, 75))
Inter-quartile range
9.264363
8.208624
1.128613
from scipy.stats.mstats import mode a = checkma(a) thresh = 4E6 if full or a.count() < thresh: q = (iqr(a)) p16, p84, spread = robust_spread(a) #There has to be a better way to compute the mode for a ma #mstats.mode returns tuple of (array[mode], array[count]) a_mode = float(mode(a, axis=None)[0]) stats = (a.count(), a.min(), a.max(), a.mean(dtype='float64'), a.std(dtype='float64'), \ fast_median(a), mad(a), q[0], q[1], q[2], a_mode, p16, p84, spread) else: ac = a.compressed() stride = int(np.around(ac.size / thresh)) ac = np.ma.array(ac[::stride]) #idx = np.random.permutation(ac.size) #Note: need the ma cast here b/c of a.count() below #ac = np.ma.array(ac[idx[::stride]]) q = (iqr(ac)) p16, p84, spread = robust_spread(ac) ac_mode = float(mode(ac, axis=None)[0]) stats = (a.count(), a.min(), a.max(), a.mean(dtype='float64'), a.std(dtype='float64'), \ fast_median(ac), mad(ac), q[0], q[1], q[2], ac_mode, p16, p84, spread) return stats
def get_stats(a, full=False)
Compute statistics for input array. Needs to be cleaned up to return a stats object
3.723031
3.6597
1.017305
d = {}
a = checkma(a_in)
d['count'] = a.count()
thresh = 4E6
if not full and d['count'] > thresh:
    a = a.compressed()
    stride = int(np.around(a.size / thresh))
    #a = np.ma.array(a[::stride])
    a = a[::stride]
d['min'] = a.min()
d['max'] = a.max()
d['ptp'] = d['max'] - d['min']
d['mean'] = a.mean(dtype='float64')
d['std'] = a.std(dtype='float64')
d['nmad'], d['med'] = mad(a, return_med=True)
d['median'] = d['med']
d['p16'], d['p84'], d['spread'] = robust_spread(a)
from scipy.stats.mstats import mode
d['mode'] = mode(a, axis=None)[0]
for i in d:
    d[i] = float(d[i])
d['count'] = int(d['count'])
return d
def get_stats_dict(a_in, full=True)
Compute statistics for input array, returned as a dictionary
3.184406
3.154944
1.009338
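A usage sketch for get_stats_dict (assumes the helpers above, including the robust_spread function referenced but not shown here):

import numpy as np

a = np.ma.masked_invalid(np.random.normal(10, 2, (500, 500)))
s = get_stats_dict(a)
#All values are cast to float, count to int
print(s['count'], s['mean'], s['med'], s['nmad'])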
import matplotlib.pyplot as plt
import imview.imviewer as imview
b = checkma(b)
#if hasattr(kwargs,'imshow_kwargs'):
#    kwargs['imshow_kwargs']['interpolation'] = 'bicubic'
#else:
#    kwargs['imshow_kwargs'] = {'interpolation': 'bicubic'}
#bma_fig(fig, bma, cmap='gist_rainbow_r', clim=None, bg=None, n_subplt=1, subplt=1, label=None, **imshow_kwargs)
fig = plt.figure()
imview.bma_fig(fig, b, **kwargs)
plt.show()
return fig
def iv(b, **kwargs)
Quick access to imview for interactive sessions
5.003089
4.519887
1.106906
from numpy.lib.stride_tricks import as_strided as ast
# simple shape and strides computations may seem at first strange
# unless one is able to recognize the 'tuple additions' involved ;-)
# Note: integer division keeps the shape entries ints under Python 3
shape = (A.shape[0]//block[0], A.shape[1]//block[1]) + block
strides = (block[0]*A.strides[0], block[1]*A.strides[1]) + A.strides
return ast(A, shape=shape, strides=strides)
def block_view(A, block=(3, 3))
Provide a 2D block view to 2D array. No error checking made. Therefore meaningful (as implemented) only for blocks strictly compatible with the shape of A.
6.044739
5.970509
1.012433
from numpy.lib.stride_tricks import as_strided as ast
'''
Return a sliding window over a in any number of dimensions
Parameters:
    a  - an n-dimensional numpy array
    ws - an int (a is 1D) or tuple (a is 2D or greater) representing the size
         of each dimension of the window
    ss - an int (a is 1D) or tuple (a is 2D or greater) representing the
         amount to slide the window in each dimension. If not specified, it
         defaults to ws.
    flatten - if True, all slices are flattened, otherwise, there is an
         extra dimension for each dimension of the input.
Returns
    an array containing each n-dimensional window from a
'''
if None is ss:
    # ss was not provided. the windows will not overlap in any direction.
    ss = ws
ws = norm_shape(ws)
ss = norm_shape(ss)
# convert ws, ss, and a.shape to numpy arrays so that we can do math in every
# dimension at once.
ws = np.array(ws)
ss = np.array(ss)
shape = np.array(a.shape)
# ensure that ws, ss, and a.shape all have the same number of dimensions
ls = [len(shape), len(ws), len(ss)]
if 1 != len(set(ls)):
    raise ValueError('a.shape, ws and ss must all have the same length. They were %s' % str(ls))
# ensure that ws is smaller than a in every dimension
if np.any(ws > shape):
    raise ValueError('ws cannot be larger than a in any dimension. a.shape was %s and ws was %s' % (str(a.shape), str(ws)))
# how many slices will there be in each dimension?
newshape = norm_shape(((shape - ws) // ss) + 1)
# the shape of the strided array will be the number of slices in each dimension
# plus the shape of the window (tuple addition)
newshape += norm_shape(ws)
# the strides tuple will be the array's strides multiplied by step size, plus
# the array's strides (tuple addition)
newstrides = norm_shape(np.array(a.strides) * ss) + a.strides
strided = ast(a, shape=newshape, strides=newstrides)
if not flatten:
    return strided
# Collapse strided so that it has one more dimension than the window. I.e.,
# the new array is a flat list of slices.
meat = len(ws) if ws.shape else 0
firstdim = (np.product(newshape[:-meat]),) if ws.shape else ()
dim = firstdim + (newshape[-meat:])
# remove any dimensions with size 1
dim = [i for i in dim if i != 1]
return strided.reshape(dim)
def sliding_window(a, ws, ss=None, flatten=True)
Return a sliding window over a in any number of dimensions Parameters: a - an n-dimensional numpy array ws - an int (a is 1D) or tuple (a is 2D or greater) representing the size of each dimension of the window ss - an int (a is 1D) or tuple (a is 2D or greater) representing the amount to slide the window in each dimension. If not specified, it defaults to ws. flatten - if True, all slices are flattened, otherwise, there is an extra dimension for each dimension of the input. Returns an array containing each n-dimensional window from a
3.49118
2.72681
1.280317
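A small sketch for sliding_window (assuming it and its norm_shape helper, shown in the next row, are defined); with the default ss=ws the 3x3 windows do not overlap, and flatten=True stacks them along the first axis:

import numpy as np

a = np.arange(36).reshape(6, 6)
windows = sliding_window(a, ws=(3, 3), flatten=True)
#For a 6x6 input and non-overlapping 3x3 windows, expect shape (4, 3, 3)
print(windows.shape)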
'''
Normalize numpy array shapes so they're always expressed as a tuple,
even for one-dimensional shapes.
Parameters
    shape - an int, or a tuple of ints
Returns
    a shape tuple
'''
try:
    i = int(shape)
    return (i,)
except TypeError:
    # shape was not a number
    pass
try:
    t = tuple(shape)
    return t
except TypeError:
    # shape was not iterable
    pass
raise TypeError('shape must be an int, or a tuple of ints')
def norm_shape(shape)
Normalize numpy array shapes so they're always expressed as a tuple, even for one-dimensional shapes. Parameters shape - an int, or a tuple of ints Returns a shape tuple
5.135561
2.338919
2.195699
local_srs = osr.SpatialReference()
local_proj = '+proj=ortho +lat_0=%0.7f +lon_0=%0.7f +datum=WGS84 +units=m +no_defs ' % (lat, lon)
local_srs.ImportFromProj4(local_proj)
return local_srs
def localortho(lon, lat)
Create srs for local orthographic projection centered at lat, lon
2.19595
1.923092
1.141885
cx, cy = geom.Centroid().GetPoint_2D()
lon, lat, z = cT_helper(cx, cy, 0, geom.GetSpatialReference(), wgs_srs)
local_srs = localortho(lon, lat)
local_geom = geom_dup(geom)
geom_transform(local_geom, local_srs)
return local_geom
def geom2localortho(geom)
Convert existing geom to local orthographic projection. Useful for local cartesian distance/area calculations
7.265897
7.491981
0.969823
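A hypothetical sketch for geom2localortho, computing an approximate planar area for a small lon/lat polygon; it assumes the module-level wgs_srs and cT_helper that geom2localortho relies on (not shown here) plus the GDAL/OGR Python bindings:

from osgeo import ogr, osr

wgs_srs = osr.SpatialReference()
wgs_srs.ImportFromEPSG(4326)
wkt = 'POLYGON((-106.9 38.9,-106.8 38.9,-106.8 39.0,-106.9 39.0,-106.9 38.9))'
geom = ogr.CreateGeometryFromWkt(wkt)
geom.AssignSpatialReference(wgs_srs)
#Area in m^2 in the local orthographic projection centered on the polygon centroid
print(geom2localortho(geom).GetArea())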
lat = np.array(lat)
if np.any(lat > 0):
    m70_t70 = 1.9332279
    #Hack to deal with pole
    lat[lat >= 90.0] = 89.999999999
else:
    # for 71 deg, southern PS -- checked BS 5/2012
    m70_t70 = 1.93903005
    lat[lat <= -90.0] = -89.999999999
#for WGS84, a=6378137, 1/f = 298.257223563 -> 1-sqrt(1-e^2) = f
#-> 1-(1-f)^2 = e2 = 0.006694379990141
#e2 = 0.006693883
e2 = 0.006694379990141  # BS calculated from WGS84 parameters 5/2012
e = np.sqrt(e2)
lat = np.abs(np.deg2rad(lat))
slat = np.sin(lat)
clat = np.cos(lat)
m = clat/np.sqrt(1. - e2*slat**2)
t = np.tan(np.pi/4 - lat/2)/((1. - e*slat)/(1. + e*slat))**(e/2)
k = m70_t70*t/m
scale = (1./k)
return scale
def scale_ps(lat)
This function calculates the scaling factor for a polar stereographic projection (ie. SSM/I grid) to correct area calculations. The scaling factor is defined (from Snyder, 1982, Map Projections used by the U.S. Geological Survey) as: k = (mc/m)*(t/tc), where: m = cos(lat)/sqrt(1 - e2*sin(lat)^2) t = tan(Pi/4 - lat/2)/((1 - e*sin(lat))/(1 + e*sin(lat)))^(e/2) e2 = 0.006693883 is the earth eccentricity (Hughes ellipsoid) e = sqrt(e2) mc = m at the reference latitude (70 degrees) tc = t at the reference latitude (70 degrees) The ratio mc/tc is precalculated and stored in the variable m70_t70. From Ben Smith PS scale m file (7/12/12)
4.241032
3.442945
1.231803
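A quick sanity-check sketch for scale_ps (assuming the function above): at the 70 degree reference latitude the scale factor should be ~1.0:

import numpy as np

lat = np.array([60.0, 70.0, 80.0, 90.0])
#Second value (70 deg) should be ~1.0; 90 deg is clamped internally to avoid the pole
print(scale_ps(lat))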
if np.any(lon > 360.0) or np.any(lon < 0.0):
    print("Warning: lon outside expected range")
    lon = wraplon(lon)
#lon[lon > 180.0] -= 360.0
#lon180 = (lon+180) - np.floor((lon+180)/360)*360 - 180
#Note: integer (floor) division, so this also behaves correctly under Python 3
lon = lon - (lon.astype(int)//180)*360.0
return lon
def lon360to180(lon)
Convert longitude from (0, 360) to (-180, 180)
2.847189
2.803282
1.015663
if np.any(lon > 180.0) or np.any(lon < -180.0):
    print("Warning: lon outside expected range")
    lon = lon360to180(lon)
#lon[lon < 0.0] += 360.0
lon = (lon + 360.0) % 360.0
return lon
def lon180to360(lon)
Convert longitude from (-180, 180) to (0, 360)
2.47852
2.363821
1.048523
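A round-trip sketch for the two longitude-convention helpers above (note that exactly 180 maps to -180 with this formulation):

import numpy as np

lon360 = np.array([0.0, 90.0, 180.0, 270.0, 359.5])
lon180 = lon360to180(lon360)
print(lon180)                 #expect [0., 90., -180., -90., -0.5]
print(lon180to360(lon180))    #expect [0., 90., 180., 270., 359.5]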
n = dd < 0
dd = abs(dd)
m, s = divmod(dd*3600, 60)
d, m = divmod(m, 60)
if n:
    d = -d
return d, m, s
def dd2dms(dd)
Convert decimal degrees to degrees, minutes, seconds
2.301019
2.266172
1.015377
if d < 0:
    sign = -1
else:
    sign = 1
dd = sign * (int(abs(d)) + float(m) / 60 + float(s) / 3600)
return dd
def dms2dd(d,m,s)
Convert degrees, minutes, seconds to decimal degrees
2.074818
2.105901
0.98524
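A round-trip sketch for dd2dms and dms2dd as defined above:

dd = -122.3321
d, m, s = dd2dms(dd)
print(d, m, s)           #expect roughly (-122.0, 19.0, 55.56)
print(dms2dd(d, m, s))   #should recover ~-122.3321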
d, m, s = dd2dms(dd)
#Fold seconds into decimal minutes (60 seconds per minute, not 3600)
m = m + float(s)/60
return d, m, s
def dd2dm(dd)
Convert decimal to degrees, decimal minutes
4.008577
3.880249
1.033072
mX = np.asarray(mX)
mY = np.asarray(mY)
if geoTransform[2] + geoTransform[4] == 0:
    pX = ((mX - geoTransform[0]) / geoTransform[1]) - 0.5
    pY = ((mY - geoTransform[3]) / geoTransform[5]) - 0.5
else:
    pX, pY = applyGeoTransform(mX, mY, invertGeoTransform(geoTransform))
#return int(pX), int(pY)
return pX, pY
def mapToPixel(mX, mY, geoTransform)
Convert map coordinates to pixel coordinates based on geotransform. Accepts float or NumPy arrays. GDAL model used here - upper left corner of upper left pixel for mX, mY (and in GeoTransform)
2.26269
2.379038
0.951094
pX = np.asarray(pX, dtype=float)
pY = np.asarray(pY, dtype=float)
pX += 0.5
pY += 0.5
mX, mY = applyGeoTransform(pX, pY, geoTransform)
return mX, mY
def pixelToMap(pX, pY, geoTransform)
Convert pixel coordinates to map coordinates based on geotransform. Accepts float or NumPy arrays. GDAL model used here - upper left corner of upper left pixel for mX, mY (and in GeoTransform)
2.039641
2.369252
0.86088
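A sketch of the map/pixel round trip under the GDAL geotransform convention (assumes mapToPixel/pixelToMap above and the applyGeoTransform helper that pixelToMap relies on, not shown here):

import numpy as np

#Upper-left corner at (500000, 4100000), 30 m pixels, north-up
gt = [500000.0, 30.0, 0.0, 4100000.0, 0.0, -30.0]
mX, mY = np.array([500015.0]), np.array([4099985.0])   #center of pixel (0, 0)
pX, pY = mapToPixel(mX, mY, gt)
print(pX, pY)                   #expect ~(0, 0)
print(pixelToMap(pX, pY, gt))   #expect (500015.0, 4099985.0)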
import scipy.stats as stats
extent = ds_extent(ds)
#[[xmin, xmax], [ymin, ymax]]
range = [[extent[0], extent[2]], [extent[1], extent[3]]]
if bins is None:
    bins = (ds.RasterXSize, ds.RasterYSize)
if stat == 'max':
    stat = np.max
elif stat == 'min':
    stat = np.min
#block_count, xedges, yedges, bin = stats.binned_statistic_2d(x,y,z,'count',bins,range)
block_stat, xedges, yedges, bin = stats.binned_statistic_2d(x, y, z, stat, bins, range)
#Get valid blocks
#if (stat == 'median') or (stat == 'mean'):
if stat in ('median', 'mean', np.max, np.min):
    idx = ~np.isnan(block_stat)
else:
    idx = (block_stat != 0)
idx_idx = idx.nonzero()
#Cell centers
res = [(xedges[1] - xedges[0]), (yedges[1] - yedges[0])]
out_x = xedges[:-1] + res[0]/2.0
out_y = yedges[:-1] + res[1]/2.0
out_x = out_x[idx_idx[0]]
out_y = out_y[idx_idx[1]]
out_z = block_stat[idx]
return out_x, out_y, out_z
def block_stats(x,y,z,ds,stat='median',bins=None)
Compute points on a regular grid (matching input GDAL Dataset) from scattered point data using specified statistic. Wrapper for scipy.stats.binned_statistic_2d. Note: this is very fast for mean, std, count, but significantly slower for median
2.577686
2.453766
1.050502
mx, my, mz = block_stats(x, y, z, ds, stat)
gt = ds.GetGeoTransform()
pX, pY = mapToPixel(mx, my, gt)
shape = (ds.RasterYSize, ds.RasterXSize)
ndv = -9999.0
a = np.full(shape, ndv)
a[pY.astype('int'), pX.astype('int')] = mz
return np.ma.masked_equal(a, ndv)
def block_stats_grid(x,y,z,ds,stat='median')
Fill regular grid (matching input GDAL Dataset) from scattered point data using specified statistic
2.977822
2.981023
0.998926
#These round down to int
#dst_ns = int((extent[2] - extent[0])/res)
#dst_nl = int((extent[3] - extent[1])/res)
#This should pad by 1 pixel, but not if extent and res were calculated together to give whole int
dst_ns = int((extent[2] - extent[0])/res + 0.99)
dst_nl = int((extent[3] - extent[1])/res + 0.99)
m_ds = gdal.GetDriverByName('MEM').Create('', dst_ns, dst_nl, 1, dtype)
m_gt = [extent[0], res, 0, extent[3], 0, -res]
m_ds.SetGeoTransform(m_gt)
if srs is not None:
    m_ds.SetProjection(srs.ExportToWkt())
return m_ds
def mem_ds(res, extent, srs=None, dtype=gdal.GDT_Float32)
Create a new GDAL Dataset in memory. Useful for various applications that require a Dataset
2.678838
2.785623
0.961665
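A sketch tying mem_ds and block_stats_grid together (assumes the functions above plus the helpers they reference, e.g. ds_extent, and the GDAL Python bindings): create an in-memory target grid, then bin scattered points onto it:

import numpy as np
from osgeo import gdal, osr

#Hypothetical scattered points in a projected (meters) coordinate system
x = np.random.uniform(0, 1000, 5000)
y = np.random.uniform(0, 1000, 5000)
z = np.random.normal(0, 1, 5000)

srs = osr.SpatialReference()
srs.ImportFromEPSG(32610)
#extent is (xmin, ymin, xmax, ymax); 100 m cells give a 10x10 grid
ds = mem_ds(100, (0, 0, 1000, 1000), srs=srs, dtype=gdal.GDT_Float32)
grid = block_stats_grid(x, y, z, ds, stat='median')
print(grid.shape, grid.count())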
src_ds = gdal.Open(src_fn, gdal.GA_ReadOnly)
dst_ds = gdal.Open(dst_fn, gdal.GA_Update)
dst_ds.SetProjection(src_ds.GetProjection())
if gt:
    src_gt = np.array(src_ds.GetGeoTransform())
    src_dim = np.array([src_ds.RasterXSize, src_ds.RasterYSize])
    dst_dim = np.array([dst_ds.RasterXSize, dst_ds.RasterYSize])
    #This preserves dst_fn resolution
    if np.any(src_dim != dst_dim):
        res_factor = src_dim/dst_dim.astype(float)
        src_gt[[1, 5]] *= max(res_factor)
        #src_gt[[1, 5]] *= min(res_factor)
        #src_gt[[1, 5]] *= res_factor
    dst_ds.SetGeoTransform(src_gt)
src_ds = None
dst_ds = None
def copyproj(src_fn, dst_fn, gt=True)
Copy projection and geotransform from one raster file to another
2.076875
2.072164
1.002274
g = ogr.CreateGeometryFromWkt(geom.ExportToWkt())
g.AssignSpatialReference(geom.GetSpatialReference())
return g
def geom_dup(geom)
Create duplicate geometry. Needed to avoid segfault when passing geom around. See: http://trac.osgeo.org/gdal/wiki/PythonGotchas
2.586447
2.101253
1.230907
s_srs = geom.GetSpatialReference()
if not s_srs.IsSame(t_srs):
    ct = osr.CoordinateTransformation(s_srs, t_srs)
    geom.Transform(ct)
    geom.AssignSpatialReference(t_srs)
def geom_transform(geom, t_srs)
Transform a geometry in place
2.034426
2.044029
0.995302
from pygeotools.lib import timelib
ds = ogr.Open(shp_fn)
lyr = ds.GetLayer()
nfeat = lyr.GetFeatureCount()
print('%i input features\n' % nfeat)
if fields is None:
    fields = shp_fieldnames(lyr)
d_list = []
for n, feat in enumerate(lyr):
    d = {}
    if geom:
        geom = feat.GetGeometryRef()
        d['geom'] = geom
    for f_name in fields:
        i = str(feat.GetField(f_name))
        if 'date' in f_name:
            # date_f = f_name
            #If d is float, clear off decimal
            i = i.rsplit('.')[0]
            i = timelib.strptime_fuzzy(str(i))
        d[f_name] = i
    d_list.append(d)
#d_list_sort = sorted(d_list, key=lambda k: k[date_f])
return d_list
def shp_dict(shp_fn, fields=None, geom=True)
Get a dictionary for all features in a shapefile. Optionally, specify fields
3.227973
3.358415
0.961159
#Need to check t_srs
s_srs = lyr.GetSpatialRef()
cT = osr.CoordinateTransformation(s_srs, t_srs)
#Do everything in memory
drv = ogr.GetDriverByName('Memory')
#Might want to save clipped, warped shp to disk?
# create the output layer
#drv = ogr.GetDriverByName('ESRI Shapefile')
#out_fn = '/tmp/temp.shp'
#if os.path.exists(out_fn):
#    driver.DeleteDataSource(out_fn)
#out_ds = driver.CreateDataSource(out_fn)
out_ds = drv.CreateDataSource('out')
outlyr = out_ds.CreateLayer('out', srs=t_srs, geom_type=lyr.GetGeomType())
if preserve_fields:
    # add fields
    inLayerDefn = lyr.GetLayerDefn()
    for i in range(0, inLayerDefn.GetFieldCount()):
        fieldDefn = inLayerDefn.GetFieldDefn(i)
        outlyr.CreateField(fieldDefn)
# get the output layer's feature definition
outLayerDefn = outlyr.GetLayerDefn()
# loop through the input features
inFeature = lyr.GetNextFeature()
while inFeature:
    # get the input geometry
    geom = inFeature.GetGeometryRef()
    # reproject the geometry
    geom.Transform(cT)
    # create a new feature
    outFeature = ogr.Feature(outLayerDefn)
    # set the geometry and attribute
    outFeature.SetGeometry(geom)
    if preserve_fields:
        for i in range(0, outLayerDefn.GetFieldCount()):
            outFeature.SetField(outLayerDefn.GetFieldDefn(i).GetNameRef(), inFeature.GetField(i))
    # add the feature to the shapefile
    outlyr.CreateFeature(outFeature)
    # destroy the features and get the next input feature
    inFeature = lyr.GetNextFeature()
#NOTE: have to operate on ds here rather than lyr, otherwise segfault
return out_ds
def lyr_proj(lyr, t_srs, preserve_fields=True)
Reproject an OGR layer
2.430857
2.420385
1.004327
shp_ds = ogr.Open(shp_fn)
lyr = shp_ds.GetLayer()
#This returns xmin, ymin, xmax, ymax
shp_extent = lyr_extent(lyr)
shp_srs = lyr.GetSpatialRef()
# dst_dt = gdal.GDT_Byte
ndv = 0
if r_ds is not None:
    r_extent = ds_extent(r_ds)
    res = get_res(r_ds, square=True)[0]
    if extent is None:
        extent = r_extent
    r_srs = get_ds_srs(r_ds)
    r_geom = ds_geom(r_ds)
    # dst_ns = r_ds.RasterXSize
    # dst_nl = r_ds.RasterYSize
    #Convert raster extent to shp_srs
    cT = osr.CoordinateTransformation(r_srs, shp_srs)
    r_geom_reproj = geom_dup(r_geom)
    r_geom_reproj.Transform(cT)
    r_geom_reproj.AssignSpatialReference(t_srs)
    lyr.SetSpatialFilter(r_geom_reproj)
    #lyr.SetSpatialFilter(ogr.CreateGeometryFromWkt(wkt))
else:
    #TODO: clean this up
    if res is None:
        sys.exit("Must specify input res")
    if extent is None:
        print("Using input shp extent")
        extent = shp_extent
if t_srs is None:
    t_srs = r_srs
if not shp_srs.IsSame(t_srs):
    print("Input shp srs: %s" % shp_srs.ExportToProj4())
    print("Specified output srs: %s" % t_srs.ExportToProj4())
    out_ds = lyr_proj(lyr, t_srs)
    outlyr = out_ds.GetLayer()
else:
    outlyr = lyr
#outlyr.SetSpatialFilter(r_geom)
m_ds = mem_ds(res, extent, srs=t_srs, dtype=gdal.GDT_Byte)
b = m_ds.GetRasterBand(1)
b.SetNoDataValue(ndv)
gdal.RasterizeLayer(m_ds, [1], outlyr, burn_values=[1])
a = b.ReadAsArray()
#Invert so that burned (inside polygon) pixels are False in the output mask
a = ~(a.astype('bool'))
return a
def shp2array(shp_fn, r_ds=None, res=None, extent=None, t_srs=None)
Rasterize input shapefile to match existing raster Dataset (or specified res/extent/t_srs)
2.807976
2.76982
1.013776
from pygeotools.lib import iolib
from pygeotools.lib import warplib
r_ds = iolib.fn_getds(r_fn)
r_srs = get_ds_srs(r_ds)
r_extent = ds_extent(r_ds)
r_extent_geom = bbox2geom(r_extent)
#NOTE: want to add spatial filter here to avoid reprojecting global RGI polygons, for example
shp_ds = ogr.Open(shp_fn)
lyr = shp_ds.GetLayer()
shp_srs = lyr.GetSpatialRef()
if not r_srs.IsSame(shp_srs):
    shp_ds = lyr_proj(lyr, r_srs)
    lyr = shp_ds.GetLayer()
#This returns xmin, ymin, xmax, ymax
shp_extent = lyr_extent(lyr)
shp_extent_geom = bbox2geom(shp_extent)
#Define the output - can set to either raster or shp
#Could accept as cl arg
out_srs = r_srs
if extent == 'raster':
    out_extent = r_extent
elif extent == 'shp':
    out_extent = shp_extent
elif extent == 'intersection':
    out_extent = geom_intersection([r_extent_geom, shp_extent_geom])
elif extent == 'union':
    out_extent = geom_union([r_extent_geom, shp_extent_geom])
else:
    print("Unexpected extent specification, reverting to input raster extent")
    out_extent = r_extent
#Add padding around shp_extent
#Should implement buffer here
if pad is not None:
    out_extent = pad_extent(out_extent, width=pad)
print("Raster to clip: %s\nShapefile used to clip: %s" % (r_fn, shp_fn))
if verbose:
    print(shp_extent)
    print(r_extent)
    print(out_extent)
r_ds = warplib.memwarp(r_ds, extent=out_extent, t_srs=out_srs, r='cubic')
r = iolib.ds_getma(r_ds)
#If bbox, return without clipping, otherwise, clip to polygons
if not bbox:
    #Create binary mask from shp
    mask = shp2array(shp_fn, r_ds)
    if invert:
        mask = ~(mask)
    #Now apply the mask
    r = np.ma.array(r, mask=mask)
#Return both the array and the dataset, needed for writing out
#Should probably just write r to r_ds and return r_ds
return r, r_ds
def raster_shpclip(r_fn, shp_fn, extent='raster', bbox=False, pad=None, invert=False, verbose=False)
Clip an input raster by input polygon shapefile for given extent
3.5508
3.554106
0.99907
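A usage sketch for raster_shpclip; the file names are placeholders and the call assumes the pygeotools helpers referenced above (iolib, warplib, shp2array, etc.) are available:

#Clip a DEM to glacier polygons, masking pixels outside the polygons
dem_clip, dem_clip_ds = raster_shpclip('dem.tif', 'glaciers.shp', extent='raster')
print(dem_clip.count(), 'unmasked pixels after clipping')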
ds = ogr.Open(shp_fn)
lyr = ds.GetLayer()
srs = lyr.GetSpatialRef()
lyr.ResetReading()
geom_list = []
for feat in lyr:
    geom = feat.GetGeometryRef()
    geom.AssignSpatialReference(srs)
    #Duplicate the geometry, or segfault
    #See: http://trac.osgeo.org/gdal/wiki/PythonGotchas
    #g = ogr.CreateGeometryFromWkt(geom.ExportToWkt())
    #g.AssignSpatialReference(srs)
    g = geom_dup(geom)
    geom_list.append(g)
#geom = ogr.ForceToPolygon(' '.join(geom_list))
#Dissolve should convert multipolygon to single polygon
#return geom_list[0]
ds = None
return geom_list
def shp2geom(shp_fn)
Extract geometries from input shapefile. Need to handle multi-part geom: http://osgeo-org.1560.x6.nabble.com/Multipart-to-singlepart-td3746767.html
3.14789
3.079349
1.022258
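A closing sketch combining shp2geom and geom_transform from above (shapefile name and EPSG code are placeholders):

from osgeo import osr

geom_list = shp2geom('glaciers.shp')
t_srs = osr.SpatialReference()
t_srs.ImportFromEPSG(3413)
#Reproject each geometry in place
for g in geom_list:
    geom_transform(g, t_srs)
print(len(geom_list), 'geometries reprojected')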