code
string | signature
string | docstring
string | loss_without_docstring
float64 | loss_with_docstring
float64 | factor
float64 |
---|---|---|---|---|---|
'''
byte-like -> bytes
'''
b2 = hashlib.blake2b(**kwargs)
b2.update(data)
return b2.digest() | def blake2b(data=b'', **kwargs) | byte-like -> bytes | 4.403479 | 2.954104 | 1.490631 |
'''
bytes, bool, bool -> str
cashaddrs are preferred where possible
but cashaddr is ignored in most cases
is there a better way to structure this?
'''
addr_bytes = bytearray()
if riemann.network.CASHADDR_P2SH is not None and cashaddr:
addr_bytes.extend(riemann.network.CASHADDR_P2SH)
addr_bytes.extend(script_hash)
return riemann.network.CASHADDR_ENCODER.encode(addr_bytes)
if witness:
addr_bytes.extend(riemann.network.P2WSH_PREFIX)
addr_bytes.extend(script_hash)
return riemann.network.SEGWIT_ENCODER.encode(addr_bytes)
else:
addr_bytes.extend(riemann.network.P2SH_PREFIX)
addr_bytes.extend(script_hash)
return riemann.network.LEGACY_ENCODER.encode(addr_bytes) | def _hash_to_sh_address(script_hash, witness=False, cashaddr=True) | bytes, bool, bool -> str
cashaddrs are preferred where possible
but cashaddr is ignored in most cases
is there a better way to structure this? | 2.845916 | 1.983373 | 1.434887 |
'''
makes an p2sh address from a serialized script
'''
if witness:
script_hash = utils.sha256(script_bytes)
else:
script_hash = utils.hash160(script_bytes)
return _hash_to_sh_address(
script_hash=script_hash,
witness=witness,
cashaddr=cashaddr) | def _ser_script_to_sh_address(script_bytes, witness=False, cashaddr=True) | makes an p2sh address from a serialized script | 3.218331 | 2.806647 | 1.146682 |
'''
str, bool, bool -> str
'''
script_bytes = script_ser.serialize(script_string)
return _ser_script_to_sh_address(
script_bytes=script_bytes,
witness=witness,
cashaddr=cashaddr) | def make_sh_address(script_string, witness=False, cashaddr=True) | str, bool, bool -> str | 4.693208 | 3.851299 | 1.218604 |
'''
bytes, bool -> str
'''
addr_bytes = bytearray()
if riemann.network.CASHADDR_P2PKH is not None and cashaddr:
addr_bytes.extend(riemann.network.CASHADDR_P2PKH)
addr_bytes.extend(pubkey_hash)
return riemann.network.CASHADDR_ENCODER.encode(addr_bytes)
if witness:
addr_bytes.extend(riemann.network.P2WPKH_PREFIX)
addr_bytes.extend(pubkey_hash)
return riemann.network.SEGWIT_ENCODER.encode(addr_bytes)
else:
addr_bytes.extend(riemann.network.P2PKH_PREFIX)
addr_bytes.extend(pubkey_hash)
return riemann.network.LEGACY_ENCODER.encode(addr_bytes) | def _make_pkh_address(pubkey_hash, witness=False, cashaddr=True) | bytes, bool -> str | 2.157604 | 1.991014 | 1.083671 |
'''
bytes, bool -> str
'''
pubkey_hash = utils.hash160(pubkey)
return _make_pkh_address(pubkey_hash=pubkey_hash,
witness=witness,
cashaddr=cashaddr) | def make_pkh_address(pubkey, witness=False, cashaddr=True) | bytes, bool -> str | 3.904132 | 2.958241 | 1.319748 |
'''
str -> bytes
There's probably a better way to do this
'''
parsed = parse(address)
parsed_hash = b''
try:
if (parsed.find(riemann.network.P2WPKH_PREFIX) == 0
and len(parsed) == 22):
return parsed
except TypeError:
pass
try:
if (parsed.find(riemann.network.P2WSH_PREFIX) == 0
and len(parsed) == 34):
return parsed
except TypeError:
pass
try:
if (parsed.find(riemann.network.CASHADDR_P2SH) == 0
and len(parsed) == len(riemann.network.CASHADDR_P2SH) + 20):
prefix = b'\xa9\x14' # OP_HASH160 PUSH14
parsed_hash = parsed[len(riemann.network.P2SH_PREFIX):]
suffix = b'\x87' # OP_EQUAL
except TypeError:
pass
try:
if (parsed.find(riemann.network.CASHADDR_P2PKH) == 0
and len(parsed) == len(riemann.network.CASHADDR_P2PKH) + 20):
prefix = b'\x76\xa9\x14' # OP_DUP OP_HASH160 PUSH14
parsed_hash = parsed[len(riemann.network.P2PKH_PREFIX):]
suffix = b'\x88\xac' # OP_EQUALVERIFY OP_CHECKSIG
except TypeError:
pass
if (parsed.find(riemann.network.P2PKH_PREFIX) == 0
and len(parsed) == len(riemann.network.P2PKH_PREFIX) + 20):
prefix = b'\x76\xa9\x14' # OP_DUP OP_HASH160 PUSH14
parsed_hash = parsed[len(riemann.network.P2PKH_PREFIX):]
suffix = b'\x88\xac' # OP_EQUALVERIFY OP_CHECKSIG
if (parsed.find(riemann.network.P2SH_PREFIX) == 0
and len(parsed) == len(riemann.network.P2SH_PREFIX) + 20):
prefix = b'\xa9\x14' # OP_HASH160 PUSH14
parsed_hash = parsed[len(riemann.network.P2SH_PREFIX):]
suffix = b'\x87' # OP_EQUAL
if parsed_hash == b'':
raise ValueError('Cannot parse output script from address.')
output_script = prefix + parsed_hash + suffix
return output_script | def to_output_script(address) | str -> bytes
There's probably a better way to do this | 1.732574 | 1.69067 | 1.024785 |
'''
bytes -> str
Convert output script (the on-chain format) to an address
There's probably a better way to do this
'''
try:
if (len(output_script) == len(riemann.network.P2WSH_PREFIX) + 32
and output_script.find(riemann.network.P2WSH_PREFIX) == 0):
# Script hash is the last 32 bytes
return _hash_to_sh_address(
output_script[-32:], witness=True, cashaddr=cashaddr)
except TypeError:
pass
try:
if (len(output_script) == len(riemann.network.P2WPKH_PREFIX) + 20
and output_script.find(riemann.network.P2WPKH_PREFIX) == 0):
# PKH is the last 20 bytes
return _make_pkh_address(
output_script[-20:], witness=True, cashaddr=cashaddr)
except TypeError:
pass
if len(output_script) == 25 and output_script.find(b'\x76\xa9\x14') == 0:
return _make_pkh_address(
output_script[3:23], witness=False, cashaddr=cashaddr)
elif len(output_script) == 23 and output_script.find(b'\xa9\x14') == 0:
return _hash_to_sh_address(
output_script[2:22], witness=False, cashaddr=cashaddr)
raise ValueError('Cannot parse address from script.') | def from_output_script(output_script, cashaddr=True) | bytes -> str
Convert output script (the on-chain format) to an address
There's probably a better way to do this | 2.345321 | 2.087776 | 1.123358 |
'''
str -> bytes
There's probably a better way to do this.
'''
raw = parse(address)
# Cash addresses
try:
if address.find(riemann.network.CASHADDR_PREFIX) == 0:
if raw.find(riemann.network.CASHADDR_P2SH) == 0:
return raw[len(riemann.network.CASHADDR_P2SH):]
if raw.find(riemann.network.CASHADDR_P2PKH) == 0:
return raw[len(riemann.network.CASHADDR_P2PKH):]
except TypeError:
pass
# Segwit addresses
try:
if address.find(riemann.network.BECH32_HRP) == 0:
if raw.find(riemann.network.P2WSH_PREFIX) == 0:
return raw[len(riemann.network.P2WSH_PREFIX):]
if raw.find(riemann.network.P2WPKH_PREFIX) == 0:
return raw[len(riemann.network.P2WPKH_PREFIX):]
except TypeError:
pass
# Legacy Addresses
if raw.find(riemann.network.P2SH_PREFIX) == 0:
return raw[len(riemann.network.P2SH_PREFIX):]
if raw.find(riemann.network.P2PKH_PREFIX) == 0:
return raw[len(riemann.network.P2PKH_PREFIX):] | def parse_hash(address) | str -> bytes
There's probably a better way to do this. | 1.875236 | 1.774303 | 1.056886 |
'''
str -> int
Bitcoin uses tx version 2 for nSequence signaling.
Zcash uses tx version 2 for joinsplits.
We want to signal nSequence if we're using OP_CSV.
Unless we're in zcash.
'''
n = riemann.get_current_network_name()
if 'sprout' in n:
return 1
if 'overwinter' in n:
return 3
if 'sapling' in n:
return 4
try:
script_array = redeem_script.split()
script_array.index('OP_CHECKSEQUENCEVERIFY')
return 2
except ValueError:
return 1 | def guess_version(redeem_script) | str -> int
Bitcoin uses tx version 2 for nSequence signaling.
Zcash uses tx version 2 for joinsplits.
We want to signal nSequence if we're using OP_CSV.
Unless we're in zcash. | 8.712403 | 3.062074 | 2.845262 |
'''
str -> int
If OP_CSV is used, guess an appropriate sequence
Otherwise, disable RBF, but leave lock_time on.
Fails if there's not a constant before OP_CSV
'''
try:
script_array = redeem_script.split()
loc = script_array.index('OP_CHECKSEQUENCEVERIFY')
return int(script_array[loc - 1], 16)
except ValueError:
return 0xFFFFFFFE | def guess_sequence(redeem_script) | str -> int
If OP_CSV is used, guess an appropriate sequence
Otherwise, disable RBF, but leave lock_time on.
Fails if there's not a constant before OP_CSV | 9.714251 | 2.600513 | 3.735513 |
'''
str -> int
If OP_CLTV is used, guess an appropriate lock_time
Otherwise return 0 (no lock time)
Fails if there's not a constant before OP_CLTV
'''
try:
script_array = redeem_script.split()
loc = script_array.index('OP_CHECKLOCKTIMEVERIFY')
return int(script_array[loc - 1], 16)
except ValueError:
return 0 | def guess_locktime(redeem_script) | str -> int
If OP_CLTV is used, guess an appropriate lock_time
Otherwise return 0 (no lock time)
Fails if there's not a constant before OP_CLTV | 6.412394 | 2.430151 | 2.638681 |
'''
int, str -> TxOut
accepts base58 or bech32 addresses
'''
script = addr.to_output_script(address)
value = utils.i2le_padded(value, 8)
return tb._make_output(value, script) | def output(value, address) | int, str -> TxOut
accepts base58 or bech32 addresses | 11.88005 | 6.129171 | 1.93828 |
'''
hex_str, int, int -> Outpoint
accepts block explorer txid string
'''
tx_id_le = bytes.fromhex(tx_id)[::-1]
return tb.make_outpoint(tx_id_le, index, tree) | def outpoint(tx_id, index, tree=None) | hex_str, int, int -> Outpoint
accepts block explorer txid string | 10.776366 | 5.169725 | 2.084514 |
'''
Outpoint, byte-like, int -> TxIn
'''
if redeem_script is not None and sequence is None:
sequence = guess_sequence(redeem_script)
if sequence is None:
sequence = 0xFFFFFFFE
return tb.make_legacy_input(
outpoint=outpoint,
stack_script=b'',
redeem_script=b'',
sequence=sequence) | def unsigned_input(outpoint, redeem_script=None, sequence=None) | Outpoint, byte-like, int -> TxIn | 5.256197 | 3.695839 | 1.422193 |
'''
OutPoint, hex_string, hex_string, int -> TxIn
Create a signed legacy TxIn from a p2pkh prevout
'''
stack_script = '{sig} {pk}'.format(sig=sig, pk=pubkey)
stack_script = script_ser.serialize(stack_script)
return tb.make_legacy_input(outpoint, stack_script, b'', sequence) | def p2pkh_input(outpoint, sig, pubkey, sequence=0xFFFFFFFE) | OutPoint, hex_string, hex_string, int -> TxIn
Create a signed legacy TxIn from a p2pkh prevout | 9.821783 | 5.010481 | 1.960248 |
'''
OutPoint, hex_string, hex_string, int -> (TxIn, InputWitness)
Create a signed legacy TxIn from a p2pkh prevout
Create an empty InputWitness for it
Useful for transactions spending some witness and some legacy prevouts
'''
stack_script = '{sig} {pk}'.format(sig=sig, pk=pubkey)
return tb.make_legacy_input_and_empty_witness(
outpoint=outpoint,
stack_script=script_ser.serialize(stack_script),
redeem_script=b'',
sequence=sequence) | def p2pkh_input_and_witness(outpoint, sig, pubkey, sequence=0xFFFFFFFE) | OutPoint, hex_string, hex_string, int -> (TxIn, InputWitness)
Create a signed legacy TxIn from a p2pkh prevout
Create an empty InputWitness for it
Useful for transactions spending some witness and some legacy prevouts | 9.620491 | 3.28641 | 2.927356 |
'''
OutPoint, str, str, int -> TxIn
Create a signed legacy TxIn from a p2pkh prevout
'''
if sequence is None:
sequence = guess_sequence(redeem_script)
stack_script = script_ser.serialize(stack_script)
redeem_script = script_ser.hex_serialize(redeem_script)
redeem_script = script_ser.serialize(redeem_script)
return tb.make_legacy_input(
outpoint=outpoint,
stack_script=stack_script,
redeem_script=redeem_script,
sequence=sequence) | def p2sh_input(outpoint, stack_script, redeem_script, sequence=None) | OutPoint, str, str, int -> TxIn
Create a signed legacy TxIn from a p2pkh prevout | 4.093661 | 2.886264 | 1.418325 |
'''
OutPoint, str, str, int -> (TxIn, InputWitness)
Create a signed legacy TxIn from a p2pkh prevout
Create an empty InputWitness for it
Useful for transactions spending some witness and some legacy prevouts
'''
if sequence is None:
sequence = guess_sequence(redeem_script)
stack_script = script_ser.serialize(stack_script)
redeem_script = script_ser.hex_serialize(redeem_script)
redeem_script = script_ser.serialize(redeem_script)
return tb.make_legacy_input_and_empty_witness(
outpoint=outpoint,
stack_script=stack_script,
redeem_script=redeem_script,
sequence=sequence) | def p2sh_input_and_witness(outpoint, stack_script,
redeem_script, sequence=None) | OutPoint, str, str, int -> (TxIn, InputWitness)
Create a signed legacy TxIn from a p2pkh prevout
Create an empty InputWitness for it
Useful for transactions spending some witness and some legacy prevouts | 5.393066 | 2.392793 | 2.253879 |
'''
Outpoint, hex_string, hex_string, int -> (TxIn, InputWitness)
Create a signed witness TxIn and InputWitness from a p2wpkh prevout
'''
return tb.make_witness_input_and_witness(
outpoint=outpoint,
sequence=sequence,
stack=[bytes.fromhex(sig), bytes.fromhex(pubkey)]) | def p2wpkh_input_and_witness(outpoint, sig, pubkey, sequence=0xFFFFFFFE) | Outpoint, hex_string, hex_string, int -> (TxIn, InputWitness)
Create a signed witness TxIn and InputWitness from a p2wpkh prevout | 6.667355 | 3.038169 | 2.194531 |
'''
Outpoint, str, str, int -> (TxIn, InputWitness)
Create a signed witness TxIn and InputWitness from a p2wsh prevout
'''
if sequence is None:
sequence = guess_sequence(witness_script)
stack = list(map(
lambda x: b'' if x == 'NONE' else bytes.fromhex(x), stack.split()))
stack.append(script_ser.serialize(witness_script))
return tb.make_witness_input_and_witness(outpoint, sequence, stack) | def p2wsh_input_and_witness(outpoint, stack, witness_script, sequence=None) | Outpoint, str, str, int -> (TxIn, InputWitness)
Create a signed witness TxIn and InputWitness from a p2wsh prevout | 5.841707 | 3.785647 | 1.54312 |
'''Create an unsigned transaction
Use this to generate sighashes for unsigned TxIns
Gotcha: it requires you to know the timelock and version
it will _not_ guess them
becuase it may not have acess to all scripts
Hint: set version to 2 if using sequence number relative time locks
Args:
tx_ins list(TxIn instances): list of transaction inputs
tx_outs list(TxOut instances): list of transaction outputs
**kwargs:
version (int): transaction version number
locktime (hex): transaction locktime
expiry (int): overwinter expiry time
tx_joinsplits (list): list of joinsplits transactions
joinsplit_pubkey (bytes): joinsplit public key
joinsplit_sig (bytes): joinsplit signature
Returns:
(Tx instance): unsigned transaction
'''
return tb.make_tx(
version=kwargs['version'] if 'version' in kwargs else 1,
tx_ins=tx_ins,
tx_outs=tx_outs,
lock_time=kwargs['lock_time'] if 'lock_time' in kwargs else 0,
expiry=kwargs['expiry'] if 'expiry' in kwargs else 0,
tx_joinsplits=(kwargs['tx_joinsplits']
if 'tx_joinsplits' in kwargs else None),
joinsplit_pubkey=(kwargs['joinsplit_pubkey']
if 'joinsplit_pubkey' in kwargs
else None),
joinsplit_sig=(kwargs['joinsplit_sig']
if 'joinsplit_sig' in kwargs else None)) | def unsigned_legacy_tx(tx_ins, tx_outs, **kwargs) | Create an unsigned transaction
Use this to generate sighashes for unsigned TxIns
Gotcha: it requires you to know the timelock and version
it will _not_ guess them
becuase it may not have acess to all scripts
Hint: set version to 2 if using sequence number relative time locks
Args:
tx_ins list(TxIn instances): list of transaction inputs
tx_outs list(TxOut instances): list of transaction outputs
**kwargs:
version (int): transaction version number
locktime (hex): transaction locktime
expiry (int): overwinter expiry time
tx_joinsplits (list): list of joinsplits transactions
joinsplit_pubkey (bytes): joinsplit public key
joinsplit_sig (bytes): joinsplit signature
Returns:
(Tx instance): unsigned transaction | 4.384819 | 1.360342 | 3.223322 |
'''Create an unsigned segwit transaction
Create an unsigned segwit transaction
Use this to generate sighashes for unsigned TxIns
Gotcha: it requires you to know the timelock and version
it will _not_ guess them
becuase it may not have acess to all scripts
Hint: set version to 2 if using sequence number relative time locks
Args:
tx_ins list(TxIn instances): list of transaction inputs
tx_outs list(TxOut instances): list of transaction outputs
**kwargs:
version (int): transaction version number
locktime (hex): transaction locktime
Returns:
(Tx instance): unsigned transaction with empty witness
'''
return tb.make_tx(
version=kwargs['version'] if 'version' in kwargs else 1,
tx_ins=tx_ins,
tx_outs=tx_outs,
lock_time=kwargs['lock_time'] if 'lock_time' in kwargs else 0,
tx_witnesses=[tb.make_empty_witness() for _ in tx_ins]) | def unsigned_witness_tx(tx_ins, tx_outs, **kwargs) | Create an unsigned segwit transaction
Create an unsigned segwit transaction
Use this to generate sighashes for unsigned TxIns
Gotcha: it requires you to know the timelock and version
it will _not_ guess them
becuase it may not have acess to all scripts
Hint: set version to 2 if using sequence number relative time locks
Args:
tx_ins list(TxIn instances): list of transaction inputs
tx_outs list(TxOut instances): list of transaction outputs
**kwargs:
version (int): transaction version number
locktime (hex): transaction locktime
Returns:
(Tx instance): unsigned transaction with empty witness | 6.795793 | 1.579196 | 4.303323 |
'''
Construct a fully-signed legacy transaction
Args:
tx_ins list(TxIn instances): list of transaction inputs
tx_outs list(TxOut instances): list of transaction outputs
**kwargs:
version (int): transaction version number
locktime (hex): transaction locktime
expiry (int): overwinter expiry time
tx_joinsplits (list): list of joinsplits transactions
joinsplit_pubkey (bytes): joinsplit public key
joinsplit_sig (bytes): joinsplit signature
Returns:
(Tx instance): signed transaction with empty witness
'''
# Look at each input to guess lock_time and version
deser = [script_ser.deserialize(tx_in.redeem_script)
for tx_in in tx_ins if tx_in.redeem_script is not None]
version = max([guess_version(d) for d in deser])
lock_time = max([guess_locktime(d) for d in deser])
return tb.make_tx(
version=version,
tx_ins=tx_ins,
tx_outs=tx_outs,
lock_time=lock_time,
tx_witnesses=None,
expiry=kwargs['expiry'] if 'expiry' in kwargs else 0,
tx_joinsplits=(kwargs['tx_joinsplits']
if 'tx_joinsplits' in kwargs else None),
joinsplit_pubkey=(kwargs['joinsplit_pubkey']
if 'joinsplit_pubkey' in kwargs else None),
joinsplit_sig=(kwargs['joinsplit_sig']
if 'joinsplit_sig' in kwargs else None)) | def legacy_tx(tx_ins, tx_outs, **kwargs) | Construct a fully-signed legacy transaction
Args:
tx_ins list(TxIn instances): list of transaction inputs
tx_outs list(TxOut instances): list of transaction outputs
**kwargs:
version (int): transaction version number
locktime (hex): transaction locktime
expiry (int): overwinter expiry time
tx_joinsplits (list): list of joinsplits transactions
joinsplit_pubkey (bytes): joinsplit public key
joinsplit_sig (bytes): joinsplit signature
Returns:
(Tx instance): signed transaction with empty witness | 3.214211 | 1.943351 | 1.653952 |
'''
Construct a fully-signed segwit transaction
Args:
tx_ins list(TxIn instances): list of transaction inputs
tx_outs list(TxOut instances): list of transaction outputs
tx_witnesses list(TxWitness instances): list of transaction witnsses
**kwargs:
version (int): transaction version number
locktime (hex): transaction locktime
Returns:
(Tx instance): signed transaction with witnesses
'''
# Parse legacy scripts AND witness scripts for OP_CLTV
deser = [script_ser.deserialize(tx_in.redeem_script) for tx_in in tx_ins
if tx_in is not None]
for w in tx_witnesses:
try:
deser.append(script_ser.deserialize(w.stack[-1].item))
except (NotImplementedError, ValueError):
pass
version = max([guess_version(d) for d in deser])
if 'lock_time' in kwargs:
lock_time = kwargs['lock_time']
else:
lock_time = max([guess_locktime(d) for d in deser])
return tb.make_tx(
version=version,
tx_ins=tx_ins,
tx_outs=tx_outs,
lock_time=lock_time,
tx_witnesses=tx_witnesses) | def witness_tx(tx_ins, tx_outs, tx_witnesses, **kwargs) | Construct a fully-signed segwit transaction
Args:
tx_ins list(TxIn instances): list of transaction inputs
tx_outs list(TxOut instances): list of transaction outputs
tx_witnesses list(TxWitness instances): list of transaction witnsses
**kwargs:
version (int): transaction version number
locktime (hex): transaction locktime
Returns:
(Tx instance): signed transaction with witnesses | 3.779319 | 2.549104 | 1.482607 |
'''
OverwinterTx, ... -> OverwinterTx
Makes a copy. Allows over-writing specific pieces.
'''
return OverwinterTx(
tx_ins=tx_ins if tx_ins is not None else self.tx_ins,
tx_outs=tx_outs if tx_outs is not None else self.tx_outs,
lock_time=(lock_time if lock_time is not None
else self.lock_time),
expiry_height=(expiry_height if expiry_height is not None
else self.expiry_height),
tx_joinsplits=(tx_joinsplits if tx_joinsplits is not None
else self.tx_joinsplits),
joinsplit_pubkey=(joinsplit_pubkey if joinsplit_pubkey is not None
else self.joinsplit_pubkey),
joinsplit_sig=(joinsplit_sig if joinsplit_sig is not None
else self.joinsplit_sig)) | def copy(self, tx_ins=None, tx_outs=None, lock_time=None,
expiry_height=None, tx_joinsplits=None, joinsplit_pubkey=None,
joinsplit_sig=None) | OverwinterTx, ... -> OverwinterTx
Makes a copy. Allows over-writing specific pieces. | 1.984322 | 1.466172 | 1.353403 |
'''
byte-like -> OverwinterTx
'''
header = byte_string[0:4]
group_id = byte_string[4:8]
if header != b'\x03\x00\x00\x80' or group_id != b'\x70\x82\xc4\x03':
raise ValueError(
'Bad header or group ID. Expected {} and {}. Got: {} and {}'
.format(b'\x03\x00\x00\x80'.hex(),
b'\x70\x82\xc4\x03'.hex(),
header.hex(),
group_id.hex()))
tx_ins = []
tx_ins_num = shared.VarInt.from_bytes(byte_string[8:])
current = 8 + len(tx_ins_num)
for _ in range(tx_ins_num.number):
tx_in = TxIn.from_bytes(byte_string[current:])
current += len(tx_in)
tx_ins.append(tx_in)
tx_outs = []
tx_outs_num = shared.VarInt.from_bytes(byte_string[current:])
current += len(tx_outs_num)
for _ in range(tx_outs_num.number):
tx_out = TxOut.from_bytes(byte_string[current:])
current += len(tx_out)
tx_outs.append(tx_out)
lock_time = byte_string[current:current + 4]
current += 4
expiry_height = byte_string[current:current + 4]
current += 4
if current == len(byte_string):
# No joinsplits
tx_joinsplits = tuple()
joinsplit_pubkey = None
joinsplit_sig = None
else:
tx_joinsplits = []
tx_joinsplits_num = shared.VarInt.from_bytes(byte_string[current:])
current += len(tx_outs_num)
for _ in range(tx_joinsplits_num.number):
tx_joinsplit = z.SproutJoinsplit.from_bytes(
byte_string[current:])
current += len(tx_joinsplit)
tx_joinsplits.append(tx_joinsplit)
joinsplit_pubkey = byte_string[current:current + 32]
current += 32
joinsplit_sig = byte_string[current:current + 64]
return OverwinterTx(
tx_ins=tx_ins,
tx_outs=tx_outs,
lock_time=lock_time,
expiry_height=expiry_height,
tx_joinsplits=tx_joinsplits,
joinsplit_pubkey=joinsplit_pubkey,
joinsplit_sig=joinsplit_sig) | def from_bytes(OverwinterTx, byte_string) | byte-like -> OverwinterTx | 1.815448 | 1.791582 | 1.013321 |
dir_path = os.path.join(os.path.expanduser('~'), '.keep')
if os.path.exists(dir_path):
if click.confirm('[CRITICAL] Remove everything inside ~/.keep ?', abort=True):
shutil.rmtree(dir_path)
utils.first_time_use(ctx) | def cli(ctx) | Initializes the CLI environment. | 5.110957 | 4.814704 | 1.061531 |
matches = utils.grep_commands(pattern)
if matches:
selected = utils.select_command(matches)
if selected >= 0:
cmd, desc = matches[selected]
pcmd = utils.create_pcmd(cmd)
raw_params, params, defaults = utils.get_params_in_pcmd(pcmd)
arguments = list(arguments)
kargs = {}
for r, p, d in zip(raw_params, params, defaults):
if arguments:
val = arguments.pop(0)
click.echo("{}: {}".format(p, val))
kargs[r] = val
elif safe:
if d:
kargs[r] = d
else:
p_default = d if d else None
val = click.prompt("Enter value for '{}'".format(p), default=p_default)
kargs[r] = val
click.echo("\n")
final_cmd = utils.substitute_pcmd(pcmd, kargs, safe)
command = "$ {} :: {}".format(final_cmd, desc)
if click.confirm("Execute\n\t{}\n\n?".format(command), default=True):
os.system(final_cmd)
elif matches == []:
click.echo('No saved commands matches the pattern {}'.format(pattern))
else:
click.echo("No commands to run, Add one by 'keep new'. ") | def cli(ctx, pattern, arguments, safe) | Executes a saved command. | 4.179084 | 3.984142 | 1.048929 |
cmd = click.prompt('Command')
desc = click.prompt('Description ')
alias = click.prompt('Alias (optional)', default='')
utils.save_command(cmd, desc, alias)
utils.log(ctx, 'Saved the new command - {} - with the description - {}.'.format(cmd, desc)) | def cli(ctx) | Saves a new command | 6.198548 | 4.752772 | 1.304196 |
if args:
msg %= args
click.echo(msg, file=sys.stderr) | def log(self, msg, *args) | Logs a message to stderr. | 4.626146 | 3.65086 | 1.267139 |
if self.verbose:
self.log(msg, *args) | def vlog(self, msg, *args) | Logs a message to stderr only if verbose is enabled. | 4.102341 | 3.854191 | 1.064385 |
utils.check_update(ctx, forced=True)
click.secho("Keep is at its latest version v{}".format(about.__version__), fg='green') | def cli(ctx) | Check for an update of Keep. | 19.043541 | 11.152538 | 1.707552 |
matches = utils.grep_commands(pattern)
if matches:
for cmd, desc in matches:
click.secho("$ {} :: {}".format(cmd, desc), fg='green')
elif matches == []:
click.echo('No saved commands matches the pattern {}'.format(pattern))
else:
click.echo('No commands to show. Add one by `keep new`.') | def cli(ctx, pattern) | Searches for a saved command. | 6.428598 | 5.342097 | 1.203385 |
json_path = os.path.join(os.path.expanduser('~'), '.keep', 'commands.json')
if not os.path.exists(json_path):
click.echo('No commands to show. Add one by `keep new`.')
else:
utils.list_commands(ctx) | def cli(ctx) | Shows the saved commands. | 4.666041 | 3.972244 | 1.174661 |
credentials_path = os.path.join(os.path.expanduser('~'), '.keep', '.credentials')
if not os.path.exists(credentials_path):
click.echo('You are not registered.')
utils.register()
else:
utils.pull(ctx, overwrite) | def cli(ctx, overwrite) | Updates the local database with remote. | 4.541011 | 4.392464 | 1.033819 |
dir_path = os.path.join(os.path.expanduser('~'), '.keep', '.credentials')
if os.path.exists(dir_path):
if click.confirm('[CRITICAL] Reset credentials saved in ~/.keep/.credentials ?', abort=True):
os.remove(dir_path)
utils.register() | def cli(ctx) | Register user over server. | 5.866469 | 5.256611 | 1.116017 |
try:
if ctx.update_checked and not forced:
return
except AttributeError:
update_check_file = os.path.join(dir_path, 'update_check.txt')
today = datetime.date.today().strftime("%m/%d/%Y")
if os.path.exists(update_check_file):
date = open(update_check_file, 'r').read()
else:
date = []
if forced or today != date:
ctx.update_checked = True
date = today
with open(update_check_file, 'w') as f:
f.write(date)
r = requests.get("https://pypi.org/pypi/keep/json").json()
version = r['info']['version']
curr_version = about.__version__
if version > curr_version:
click.secho("Keep seems to be outdated. Current version = "
"{}, Latest version = {}".format(curr_version, version) +
"\n\nPlease update with ", bold=True, fg='red')
click.secho("\tpip3 --no-cache-dir install -U keep==" + str(version), fg='green')
click.secho("\n\n") | def check_update(ctx, forced=False) | Check for update on pypi. Limit to 1 check per day if not forced | 3.194234 | 3.054002 | 1.045917 |
commands = utils.read_commands()
if commands is []:
click.echo("No commands to edit, Add one by 'keep new'. ")
else:
edit_header = "# Unchanged file will abort the operation\n"
new_commands = utils.edit_commands(commands, editor, edit_header)
if new_commands and new_commands != commands:
click.echo("Replace:\n")
click.secho("\t{}".format('\n\t'.join(utils.format_commands(commands))),
fg="green")
click.echo("With:\n\t")
click.secho("\t{}".format('\n\t'.join(utils.format_commands(new_commands))),
fg="green")
if click.confirm("", default=False):
utils.write_commands(new_commands) | def cli(ctx, editor) | Edit saved commands. | 4.22801 | 3.906273 | 1.082364 |
matches = utils.grep_commands(pattern)
if matches:
selected = utils.select_command(matches)
if selected >= 0:
cmd, desc = matches[selected]
command = "$ {} :: {}".format(cmd, desc)
if click.confirm("Remove\n\t{}\n\n?".format(command), default=True):
utils.remove_command(cmd)
click.echo('Command successfully removed!')
elif matches == []:
click.echo('No saved commands matches the pattern {}'.format(pattern))
else:
click.echo("No commands to remove, Add one by 'keep new'. ") | def cli(ctx, pattern) | Deletes a saved command. | 5.650498 | 5.076806 | 1.113003 |
r
lim_max = sp.amax(mesh.verts, axis=0)
lim_min = sp.amin(mesh.verts, axis=0)
# Display resulting triangular mesh using Matplotlib.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Fancy indexing: `verts[faces]` to generate a collection of triangles
mesh = Poly3DCollection(mesh.verts[mesh.faces])
mesh.set_edgecolor('k')
ax.add_collection3d(mesh)
ax.set_xlabel("x-axis")
ax.set_ylabel("y-axis")
ax.set_zlabel("z-axis")
ax.set_xlim(lim_min[0], lim_max[0])
ax.set_ylim(lim_min[1], lim_max[1])
ax.set_zlim(lim_min[2], lim_max[2])
return fig | def show_mesh(mesh) | r"""
Visualizes the mesh of a region as obtained by ``get_mesh`` function in
the ``metrics`` submodule.
Parameters
----------
mesh : tuple
A mesh returned by ``skimage.measure.marching_cubes``
Returns
-------
fig : Matplotlib figure
A handle to a matplotlib 3D axis | 1.993032 | 2.112861 | 0.943286 |
r
im_temp = sp.zeros_like(im)
crds = sp.array(sp.rand(npoints, im.ndim)*im.shape, dtype=int)
pads = sp.array(sp.rand(npoints)*sp.amin(im.shape)/2+10, dtype=int)
im_temp[tuple(crds.T)] = True
labels, N = spim.label(input=im_temp)
slices = spim.find_objects(input=labels)
porosity = sp.zeros(shape=(N,), dtype=float)
volume = sp.zeros(shape=(N,), dtype=int)
for i in tqdm(sp.arange(0, N)):
s = slices[i]
p = pads[i]
new_s = extend_slice(s, shape=im.shape, pad=p)
temp = im[new_s]
Vp = sp.sum(temp)
Vt = sp.size(temp)
porosity[i] = Vp/Vt
volume[i] = Vt
profile = namedtuple('profile', ('volume', 'porosity'))
profile.volume = volume
profile.porosity = porosity
return profile | def representative_elementary_volume(im, npoints=1000) | r"""
Calculates the porosity of the image as a function subdomain size. This
function extracts a specified number of subdomains of random size, then
finds their porosity.
Parameters
----------
im : ND-array
The image of the porous material
npoints : int
The number of randomly located and sized boxes to sample. The default
is 1000.
Returns
-------
result : named_tuple
A tuple containing the *volume* and *porosity* of each subdomain
tested in arrays ``npoints`` long. They can be accessed as
attributes of the tuple. They can be conveniently plotted
by passing the tuple to matplotlib's ``plot`` function using the
\* notation: ``plt.plot(*result, 'b.')``. The resulting plot is
similar to the sketch given by Bachmat and Bear [1]
Notes
-----
This function is frustratingly slow. Profiling indicates that all the time
is spent on scipy's ``sum`` function which is needed to sum the number of
void voxels (1's) in each subdomain.
Also, this function is a prime target for parallelization since the
``npoints`` are calculated independenlty.
References
----------
[1] Bachmat and Bear. On the Concept and Size of a Representative
Elementary Volume (Rev), Advances in Transport Phenomena in Porous Media
(1987) | 3.424069 | 3.344758 | 1.023712 |
r
if axis >= im.ndim:
raise Exception('axis out of range')
im = np.atleast_3d(im)
a = set(range(im.ndim)).difference(set([axis]))
a1, a2 = a
prof = np.sum(np.sum(im, axis=a2), axis=a1)/(im.shape[a2]*im.shape[a1])
return prof*100 | def porosity_profile(im, axis) | r"""
Returns a porosity profile along the specified axis
Parameters
----------
im : ND-array
The volumetric image for which to calculate the porosity profile
axis : int
The axis (0, 1, or 2) along which to calculate the profile. For
instance, if `axis` is 0, then the porosity in each YZ plane is
calculated and returned as 1D array with 1 value for each X position.
Returns
-------
result : 1D-array
A 1D-array of porosity along the specified axis | 3.471178 | 3.96981 | 0.874394 |
r
if im.dtype == bool:
im = spim.distance_transform_edt(im)
mask = find_dt_artifacts(im) == 0
im[mask] = 0
x = im[im > 0].flatten()
h = sp.histogram(x, bins=bins, density=True)
h = _parse_histogram(h=h, voxel_size=voxel_size)
rdf = namedtuple('radial_density_function',
('R', 'pdf', 'cdf', 'bin_centers', 'bin_edges',
'bin_widths'))
return rdf(h.bin_centers, h.pdf, h.cdf, h.bin_centers, h.bin_edges,
h.bin_widths) | def radial_density(im, bins=10, voxel_size=1) | r"""
Computes radial density function by analyzing the histogram of voxel
values in the distance transform. This function is defined by
Torquato [1] as:
.. math::
\int_0^\infty P(r)dr = 1.0
where *P(r)dr* is the probability of finding a voxel at a lying at a radial
distance between *r* and *dr* from the solid interface. This is equivalent
to a probability density function (*pdf*)
The cumulative distribution is defined as:
.. math::
F(r) = \int_r^\infty P(r)dr
which gives the fraction of pore-space with a radius larger than *r*. This
is equivalent to the cumulative distribution function (*cdf*).
Parameters
----------
im : ND-array
Either a binary image of the pore space with ``True`` indicating the
pore phase (or phase of interest), or a pre-calculated distance
transform which can save time.
bins : int or array_like
This number of bins (if int) or the location of the bins (if array).
This argument is passed directly to Scipy's ``histogram`` function so
see that docstring for more information. The default is 10 bins, which
reduces produces a relatively smooth distribution.
voxel_size : scalar
The size of a voxel side in preferred units. The default is 1, so the
user can apply the scaling to the returned results after the fact.
Returns
-------
result : named_tuple
A named-tuple containing several 1D arrays:
*R* - radius, equivalent to ``bin_centers``
*pdf* - probability density function
*cdf* - cumulative density function
*bin_centers* - the center point of each bin
*bin_edges* - locations of bin divisions, including 1 more value than
the number of bins
*bin_widths* - useful for passing to the ``width`` argument of
``matplotlib.pyplot.bar``
Notes
-----
This function should not be taken as a pore size distribution in the
explict sense, but rather an indicator of the sizes in the image. The
distance transform contains a very skewed number of voxels with small
values near the solid walls. Nonetheless, it does provide a useful
indicator and it's mathematical formalism is handy.
Torquato refers to this as the *pore-size density function*, and mentions
that it is also known as the *pore-size distribution function*. These
terms are avoided here since they have specific connotations in porous
media analysis.
References
----------
[1] Torquato, S. Random Heterogeneous Materials: Mircostructure and
Macroscopic Properties. Springer, New York (2002) - See page 48 & 292 | 3.919281 | 3.584329 | 1.093449 |
r
im = sp.array(im, dtype=int)
Vp = sp.sum(im == 1)
Vs = sp.sum(im == 0)
e = Vp/(Vs + Vp)
return e | def porosity(im) | r"""
Calculates the porosity of an image assuming 1's are void space and 0's are
solid phase.
All other values are ignored, so this can also return the relative
fraction of a phase of interest.
Parameters
----------
im : ND-array
Image of the void space with 1's indicating void space (or True) and
0's indicating the solid phase (or False).
Returns
-------
porosity : float
Calculated as the sum of all 1's divided by the sum of all 1's and 0's.
See Also
--------
phase_fraction
Notes
-----
This function assumes void is represented by 1 and solid by 0, and all
other values are ignored. This is useful, for example, for images of
cylindrical cores, where all voxels outside the core are labelled with 2.
Alternatively, images can be processed with ``find_disconnected_voxels``
to get an image of only blind pores. This can then be added to the orignal
image such that blind pores have a value of 2, thus allowing the
calculation of accessible porosity, rather than overall porosity. | 4.933902 | 5.579087 | 0.884356 |
r
if im.ndim != im.squeeze().ndim:
warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
' Reduce dimensionality with np.squeeze(im) to avoid' +
' unexpected behavior.')
if im.ndim == 2:
pts = sp.meshgrid(range(0, im.shape[0], spacing),
range(0, im.shape[1], spacing))
crds = sp.vstack([pts[0].flatten(),
pts[1].flatten()]).T
elif im.ndim == 3:
pts = sp.meshgrid(range(0, im.shape[0], spacing),
range(0, im.shape[1], spacing),
range(0, im.shape[2], spacing))
crds = sp.vstack([pts[0].flatten(),
pts[1].flatten(),
pts[2].flatten()]).T
dmat = sptl.distance.cdist(XA=crds, XB=crds)
hits = im[tuple(pts)].flatten()
dmat = dmat[hits, :]
h1 = sp.histogram(dmat, bins=range(0, int(sp.amin(im.shape)/2), spacing))
dmat = dmat[:, hits]
h2 = sp.histogram(dmat, bins=h1[1])
tpcf = namedtuple('two_point_correlation_function',
('distance', 'probability'))
return tpcf(h2[1][:-1], h2[0]/h1[0]) | def two_point_correlation_bf(im, spacing=10) | r"""
Calculates the two-point correlation function using brute-force (see Notes)
Parameters
----------
im : ND-array
The image of the void space on which the 2-point correlation is desired
spacing : int
The space between points on the regular grid that is used to generate
the correlation (see Notes)
Returns
-------
result : named_tuple
A tuple containing the x and y data for plotting the two-point
correlation function, using the *args feature of matplotlib's plot
function. The x array is the distances between points and the y array
is corresponding probabilities that points of a given distance both
lie in the void space. The distance values are binned as follows:
``bins = range(start=0, stop=sp.amin(im.shape)/2, stride=spacing)``
Notes
-----
The brute-force approach means overlaying a grid of equally spaced points
onto the image, calculating the distance between each and every pair of
points, then counting the instances where both pairs lie in the void space.
This approach uses a distance matrix so can consume memory very quickly for
large 3D images and/or close spacing. | 2.985049 | 2.610238 | 1.143593 |
r
if len(autocorr.shape) == 2:
adj = sp.reshape(autocorr.shape, [2, 1, 1])
inds = sp.indices(autocorr.shape) - adj/2
dt = sp.sqrt(inds[0]**2 + inds[1]**2)
elif len(autocorr.shape) == 3:
adj = sp.reshape(autocorr.shape, [3, 1, 1, 1])
inds = sp.indices(autocorr.shape) - adj/2
dt = sp.sqrt(inds[0]**2 + inds[1]**2 + inds[2]**2)
else:
raise Exception('Image dimensions must be 2 or 3')
bin_size = np.int(np.ceil(r_max/nbins))
bins = np.arange(bin_size, r_max, step=bin_size)
radial_sum = np.zeros_like(bins)
for i, r in enumerate(bins):
# Generate Radial Mask from dt using bins
mask = (dt <= r) * (dt > (r-bin_size))
radial_sum[i] = np.sum(autocorr[mask])/np.sum(mask)
# Return normalized bin and radially summed autoc
norm_autoc_radial = radial_sum/np.max(autocorr)
tpcf = namedtuple('two_point_correlation_function',
('distance', 'probability'))
return tpcf(bins, norm_autoc_radial) | def _radial_profile(autocorr, r_max, nbins=100) | r"""
Helper functions to calculate the radial profile of the autocorrelation
Masks the image in radial segments from the center and averages the values
The distance values are normalized and 100 bins are used as default.
Parameters
----------
autocorr : ND-array
The image of autocorrelation produced by FFT
r_max : int or float
The maximum radius in pixels to sum the image over
Returns
-------
result : named_tuple
A named tupling containing an array of ``bins`` of radial position
and an array of ``counts`` in each bin. | 3.032122 | 2.878514 | 1.053364 |
r
# Calculate half lengths of the image
hls = (np.ceil(np.shape(im))/2).astype(int)
# Fourier Transform and shift image
F = sp_ft.ifftshift(sp_ft.fftn(sp_ft.fftshift(im)))
# Compute Power Spectrum
P = sp.absolute(F**2)
# Auto-correlation is inverse of Power Spectrum
autoc = sp.absolute(sp_ft.ifftshift(sp_ft.ifftn(sp_ft.fftshift(P))))
tpcf = _radial_profile(autoc, r_max=np.min(hls))
return tpcf | def two_point_correlation_fft(im) | r"""
Calculates the two-point correlation function using fourier transforms
Parameters
----------
im : ND-array
The image of the void space on which the 2-point correlation is desired
Returns
-------
result : named_tuple
A tuple containing the x and y data for plotting the two-point
correlation function, using the *args feature of matplotlib's plot
function. The x array is the distances between points and the y array
is corresponding probabilities that points of a given distance both
lie in the void space.
Notes
-----
The fourier transform approach utilizes the fact that the autocorrelation
function is the inverse FT of the power spectrum density.
For background read the Scipy fftpack docs and for a good explanation see:
http://www.ucl.ac.uk/~ucapikr/projects/KamilaSuankulova_BSc_Project.pdf | 5.769037 | 5.535493 | 1.04219 |
r
im = im.flatten()
vals = im[im > 0]*voxel_size
if log:
vals = sp.log10(vals)
h = _parse_histogram(sp.histogram(vals, bins=bins, density=True))
psd = namedtuple('pore_size_distribution',
(log*'log' + 'R', 'pdf', 'cdf', 'satn',
'bin_centers', 'bin_edges', 'bin_widths'))
return psd(h.bin_centers, h.pdf, h.cdf, h.relfreq,
h.bin_centers, h.bin_edges, h.bin_widths) | def pore_size_distribution(im, bins=10, log=True, voxel_size=1) | r"""
Calculate a pore-size distribution based on the image produced by the
``porosimetry`` or ``local_thickness`` functions.
Parameters
----------
im : ND-array
The array of containing the sizes of the largest sphere that overlaps
each voxel. Obtained from either ``porosimetry`` or
``local_thickness``.
bins : scalar or array_like
Either an array of bin sizes to use, or the number of bins that should
be automatically generated that span the data range.
log : boolean
If ``True`` (default) the size data is converted to log (base-10)
values before processing. This can help
voxel_size : scalar
The size of a voxel side in preferred units. The default is 1, so the
user can apply the scaling to the returned results after the fact.
Returns
-------
result : named_tuple
A named-tuple containing several values:
*R* or *logR* - radius, equivalent to ``bin_centers``
*pdf* - probability density function
*cdf* - cumulative density function
*satn* - phase saturation in differential form. For the cumulative
saturation, just use *cfd* which is already normalized to 1.
*bin_centers* - the center point of each bin
*bin_edges* - locations of bin divisions, including 1 more value than
the number of bins
*bin_widths* - useful for passing to the ``width`` argument of
``matplotlib.pyplot.bar``
Notes
-----
(1) To ensure the returned values represent actual sizes be sure to scale
the distance transform by the voxel size first (``dt *= voxel_size``)
plt.bar(psd.R, psd.satn, width=psd.bin_widths, edgecolor='k') | 4.698265 | 3.374897 | 1.392121 |
r
labels, N = spim.label(im > 0)
props = regionprops(labels, coordinates='xy')
chord_lens = sp.array([i.filled_area for i in props])
return chord_lens | def chord_counts(im) | r"""
Finds the length of each chord in the supplied image and returns a list
of their individual sizes
Parameters
----------
im : ND-array
An image containing chords drawn in the void space.
Returns
-------
result : 1D-array
A 1D array with one element for each chord, containing its length.
Notes
----
The returned array can be passed to ``plt.hist`` to plot the histogram,
or to ``sp.histogram`` to get the histogram data directly. Another useful
function is ``sp.bincount`` which gives the number of chords of each
length in a format suitable for ``plt.plot``. | 6.130771 | 9.632071 | 0.636496 |
r
x = im[im > 0]
h = list(sp.histogram(x, bins=bins, density=True))
h = _parse_histogram(h=h, voxel_size=voxel_size)
cld = namedtuple('linear_density_function',
('L', 'pdf', 'cdf', 'relfreq',
'bin_centers', 'bin_edges', 'bin_widths'))
return cld(h.bin_centers, h.pdf, h.cdf, h.relfreq,
h.bin_centers, h.bin_edges, h.bin_widths) | def linear_density(im, bins=25, voxel_size=1, log=False) | r"""
Determines the probability that a point lies within a certain distance
of the opposite phase *along a specified direction*
This relates directly the radial density function defined by Torquato [1],
but instead of reporting the probability of lying within a stated distance
to the nearest solid in any direciton, it considers only linear distances
along orthogonal directions.The benefit of this is that anisotropy can be
detected in materials by performing the analysis in multiple orthogonal
directions.
Parameters
----------
im : ND-array
An image with each voxel containing the distance to the nearest solid
along a linear path, as produced by ``distance_transform_lin``.
bins : int or array_like
The number of bins or a list of specific bins to use
voxel_size : scalar
The side length of a voxel. This is used to scale the chord lengths
into real units. Note this is applied *after* the binning, so
``bins``, if supplied, should be in terms of voxels, not length units.
Returns
-------
result : named_tuple
References
----------
[1] Torquato, S. Random Heterogeneous Materials: Mircostructure and
Macroscopic Properties. Springer, New York (2002) | 4.077096 | 4.41737 | 0.922969 |
r
x = chord_counts(im)
if bins is None:
bins = sp.array(range(0, x.max()+2))*voxel_size
x = x*voxel_size
if log:
x = sp.log10(x)
if normalization == 'length':
h = list(sp.histogram(x, bins=bins, density=False))
h[0] = h[0]*(h[1][1:]+h[1][:-1])/2 # Scale bin heigths by length
h[0] = h[0]/h[0].sum()/(h[1][1:]-h[1][:-1]) # Normalize h[0] manually
elif normalization in ['number', 'count']:
h = sp.histogram(x, bins=bins, density=True)
else:
raise Exception('Unsupported normalization:', normalization)
h = _parse_histogram(h)
cld = namedtuple('chord_length_distribution',
(log*'log' + 'L', 'pdf', 'cdf', 'relfreq',
'bin_centers', 'bin_edges', 'bin_widths'))
return cld(h.bin_centers, h.pdf, h.cdf, h.relfreq,
h.bin_centers, h.bin_edges, h.bin_widths) | def chord_length_distribution(im, bins=None, log=False, voxel_size=1,
normalization='count') | r"""
Determines the distribution of chord lengths in an image containing chords.
Parameters
----------
im : ND-image
An image with chords drawn in the pore space, as produced by
``apply_chords`` or ``apply_chords_3d``.
``im`` can be either boolean, in which case each chord will be
identified using ``scipy.ndimage.label``, or numerical values in which
case it is assumed that chords have already been identifed and labeled.
In both cases, the size of each chord will be computed as the number
of voxels belonging to each labelled region.
bins : scalar or array_like
If a scalar is given it is interpreted as the number of bins to use,
and if an array is given they are used as the bins directly.
log : Boolean
If true, the logarithm of the chord lengths will be used, which can
make the data more clear.
normalization : string
Indicates how to normalize the bin heights. Options are:
*'count' or 'number'* - (default) This simply counts the number of
chords in each bin in the normal sense of a histogram. This is the
rigorous definition according to Torquato [1].
*'length'* - This multiplies the number of chords in each bin by the
chord length (i.e. bin size). The normalization scheme accounts for
the fact that long chords are less frequent than shorert chords,
thus giving a more balanced distribution.
voxel_size : scalar
The size of a voxel side in preferred units. The default is 1, so the
user can apply the scaling to the returned results after the fact.
Returns
-------
result : named_tuple
A tuple containing the following elements, which can be retrieved by
attribute name:
*L* or *logL* - chord length, equivalent to ``bin_centers``
*pdf* - probability density function
*cdf* - cumulative density function
*relfreq* - relative frequency chords in each bin. The sum of all bin
heights is 1.0. For the cumulative relativce, use *cdf* which is
already normalized to 1.
*bin_centers* - the center point of each bin
*bin_edges* - locations of bin divisions, including 1 more value than
the number of bins
*bin_widths* - useful for passing to the ``width`` argument of
``matplotlib.pyplot.bar``
References
----------
[1] Torquato, S. Random Heterogeneous Materials: Mircostructure and
Macroscopic Properties. Springer, New York (2002) - See page 45 & 292 | 3.477675 | 3.119134 | 1.114949 |
r
print('_'*60)
print('Finding interfacial areas between each region')
from skimage.morphology import disk, square, ball, cube
im = regions.copy()
if im.ndim != im.squeeze().ndim:
warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
' Reduce dimensionality with np.squeeze(im) to avoid' +
' unexpected behavior.')
if im.ndim == 2:
cube = square
ball = disk
# Get 'slices' into im for each region
slices = spim.find_objects(im)
# Initialize arrays
Ps = sp.arange(1, sp.amax(im)+1)
sa = sp.zeros_like(Ps, dtype=float)
sa_combined = [] # Difficult to preallocate since number of conns unknown
cn = []
# Start extracting area from im
for i in tqdm(Ps):
reg = i - 1
if slices[reg] is not None:
s = extend_slice(slices[reg], im.shape)
sub_im = im[s]
mask_im = sub_im == i
sa[reg] = areas[reg]
im_w_throats = spim.binary_dilation(input=mask_im,
structure=ball(1))
im_w_throats = im_w_throats*sub_im
Pn = sp.unique(im_w_throats)[1:] - 1
for j in Pn:
if j > reg:
cn.append([reg, j])
merged_region = im[(min(slices[reg][0].start,
slices[j][0].start)):
max(slices[reg][0].stop,
slices[j][0].stop),
(min(slices[reg][1].start,
slices[j][1].start)):
max(slices[reg][1].stop,
slices[j][1].stop)]
merged_region = ((merged_region == reg + 1) +
(merged_region == j + 1))
mesh = mesh_region(region=merged_region, strel=strel)
sa_combined.append(mesh_surface_area(mesh))
# Interfacial area calculation
cn = sp.array(cn)
ia = 0.5 * (sa[cn[:, 0]] + sa[cn[:, 1]] - sa_combined)
ia[ia <= 0] = 1
result = namedtuple('interfacial_areas', ('conns', 'area'))
result.conns = cn
result.area = ia * voxel_size**2
return result | def region_interface_areas(regions, areas, voxel_size=1, strel=None) | r"""
Calculates the interfacial area between all pairs of adjecent regions
Parameters
----------
regions : ND-array
An image of the pore space partitioned into individual pore regions.
Note that zeros in the image will not be considered for area
calculation.
areas : array_like
A list containing the areas of each regions, as determined by
``region_surface_area``. Note that the region number and list index
are offset by 1, such that the area for region 1 is stored in
``areas[0]``.
voxel_size : scalar
The resolution of the image, expressed as the length of one side of a
voxel, so the volume of a voxel would be **voxel_size**-cubed. The
default is 1.
strel : array_like
The structuring element used to blur the region. If not provided,
then a spherical element (or disk) with radius 1 is used. See the
docstring for ``mesh_region`` for more details, as this argument is
passed to there.
Returns
-------
result : named_tuple
A named-tuple containing 2 arrays. ``conns`` holds the connectivity
information and ``area`` holds the result for each pair. ``conns`` is
a N-regions by 2 array with each row containing the region number of an
adjacent pair of regions. For instance, if ``conns[0, 0]`` is 0 and
``conns[0, 1]`` is 5, then row 0 of ``area`` contains the interfacial
area shared by regions 0 and 5. | 4.201116 | 3.83711 | 1.094865 |
r
print('_'*60)
print('Finding surface area of each region')
im = regions.copy()
# Get 'slices' into im for each pore region
slices = spim.find_objects(im)
# Initialize arrays
Ps = sp.arange(1, sp.amax(im)+1)
sa = sp.zeros_like(Ps, dtype=float)
# Start extracting marching cube area from im
for i in tqdm(Ps):
reg = i - 1
if slices[reg] is not None:
s = extend_slice(slices[reg], im.shape)
sub_im = im[s]
mask_im = sub_im == i
mesh = mesh_region(region=mask_im, strel=strel)
sa[reg] = mesh_surface_area(mesh)
result = sa * voxel_size**2
return result | def region_surface_areas(regions, voxel_size=1, strel=None) | r"""
Extracts the surface area of each region in a labeled image.
Optionally, it can also find the the interfacial area between all
adjoining regions.
Parameters
----------
regions : ND-array
An image of the pore space partitioned into individual pore regions.
Note that zeros in the image will not be considered for area
calculation.
voxel_size : scalar
The resolution of the image, expressed as the length of one side of a
voxel, so the volume of a voxel would be **voxel_size**-cubed. The
default is 1.
strel : array_like
The structuring element used to blur the region. If not provided,
then a spherical element (or disk) with radius 1 is used. See the
docstring for ``mesh_region`` for more details, as this argument is
passed to there.
Returns
-------
result : list
A list containing the surface area of each region, offset by 1, such
that the surface area of region 1 is stored in element 0 of the list. | 6.078142 | 5.577309 | 1.089798 |
r
if mesh:
verts = mesh.verts
faces = mesh.faces
else:
if (verts is None) or (faces is None):
raise Exception('Either mesh or verts and faces must be given')
surface_area = measure.mesh_surface_area(verts, faces)
return surface_area | def mesh_surface_area(mesh=None, verts=None, faces=None) | r"""
Calculates the surface area of a meshed region
Parameters
----------
mesh : tuple
The tuple returned from the ``mesh_region`` function
verts : array
An N-by-ND array containing the coordinates of each mesh vertex
faces : array
An N-by-ND array indicating which elements in ``verts`` form a mesh
element.
Returns
-------
surface_area : float
The surface area of the mesh, calculated by
``skimage.measure.mesh_surface_area``
Notes
-----
This function simply calls ``scikit-image.measure.mesh_surface_area``, but
it allows for the passing of the ``mesh`` tuple returned by the
``mesh_region`` function, entirely for convenience. | 3.151628 | 3.63508 | 0.867004 |
r
if im.dtype == bool:
im = im.astype(int)
elif im.dtype != int:
raise Exception('Image must contain integer values for each phase')
labels = sp.arange(0, sp.amax(im)+1)
results = sp.zeros_like(labels)
for i in labels:
results[i] = sp.sum(im == i)
if normed:
results = results/im.size
return results | def phase_fraction(im, normed=True) | r"""
Calculates the number (or fraction) of each phase in an image
Parameters
----------
im : ND-array
An ND-array containing integer values
normed : Boolean
If ``True`` (default) the returned values are normalized by the total
number of voxels in image, otherwise the voxel count of each phase is
returned.
Returns
-------
result : 1D-array
A array of length max(im) with each element containing the number of
voxels found with the corresponding label.
See Also
--------
porosity | 3.857807 | 4.162828 | 0.926727 |
r
if sp.squeeze(im.ndim) < 3:
raise Exception('This view is only necessary for 3D images')
x, y, z = (sp.array(im.shape)/2).astype(int)
im_xy = im[:, :, z]
im_xz = im[:, y, :]
im_yz = sp.rot90(im[x, :, :])
new_x = im_xy.shape[0] + im_yz.shape[0] + 10
new_y = im_xy.shape[1] + im_xz.shape[1] + 10
new_im = sp.zeros([new_x + 20, new_y + 20], dtype=im.dtype)
# Add xy image to upper left corner
new_im[10:im_xy.shape[0]+10,
10:im_xy.shape[1]+10] = im_xy
# Add xz image to lower left coner
x_off = im_xy.shape[0]+20
y_off = im_xy.shape[1]+20
new_im[10:10 + im_xz.shape[0],
y_off:y_off + im_xz.shape[1]] = im_xz
new_im[x_off:x_off + im_yz.shape[0],
10:10 + im_yz.shape[1]] = im_yz
return new_im | def show_planes(im) | r"""
Create a quick montage showing a 3D image in all three directions
Parameters
----------
im : ND-array
A 3D image of the porous material
Returns
-------
image : ND-array
A 2D array containing the views. This single image can be viewed using
``matplotlib.pyplot.imshow``. | 2.412392 | 2.382642 | 1.012486 |
r
im = sp.array(~im, dtype=int)
if direction in ['Y', 'y']:
im = sp.transpose(im, axes=[1, 0, 2])
if direction in ['Z', 'z']:
im = sp.transpose(im, axes=[2, 1, 0])
t = im.shape[0]
depth = sp.reshape(sp.arange(0, t), [t, 1, 1])
im = im*depth
im = sp.amax(im, axis=0)
return im | def sem(im, direction='X') | r"""
Simulates an SEM photograph looking into the porous material in the
specified direction. Features are colored according to their depth into
the image, so darker features are further away.
Parameters
----------
im : array_like
ND-image of the porous material with the solid phase marked as 1 or
True
direction : string
Specify the axis along which the camera will point. Options are
'X', 'Y', and 'Z'.
Returns
-------
image : 2D-array
A 2D greyscale image suitable for use in matplotlib\'s ```imshow```
function. | 2.928183 | 3.260145 | 0.898176 |
r
im = sp.array(~im, dtype=int)
if direction in ['Y', 'y']:
im = sp.transpose(im, axes=[1, 0, 2])
if direction in ['Z', 'z']:
im = sp.transpose(im, axes=[2, 1, 0])
im = sp.sum(im, axis=0)
return im | def xray(im, direction='X') | r"""
Simulates an X-ray radiograph looking through the porouls material in the
specfied direction. The resulting image is colored according to the amount
of attenuation an X-ray would experience, so regions with more solid will
appear darker.
Parameters
----------
im : array_like
ND-image of the porous material with the solid phase marked as 1 or
True
direction : string
Specify the axis along which the camera will point. Options are
'X', 'Y', and 'Z'.
Returns
-------
image : 2D-array
A 2D greyscale image suitable for use in matplotlib\'s ```imshow```
function. | 2.684821 | 3.250916 | 0.825866 |
r
# Parse the regionprops list and pull out all props with scalar values
metrics = []
reg = regionprops[0]
for item in reg.__dir__():
if not item.startswith('_'):
try:
if sp.shape(getattr(reg, item)) == ():
metrics.append(item)
except (TypeError, NotImplementedError, AttributeError):
pass
# Create a dictionary of all metrics that are simple scalar propertie
d = {}
for k in metrics:
try:
d[k] = sp.array([r[k] for r in regionprops])
except ValueError:
print('Error encountered evaluating ' + k + ' so skipping it')
# Create pandas data frame an return
df = DataFrame(d)
return df | def props_to_DataFrame(regionprops) | r"""
Returns a Pandas DataFrame containing all the scalar metrics for each
region, such as volume, sphericity, and so on, calculated by
``regionprops_3D``.
Parameters
----------
regionprops : list
This is a list of properties for each region that is computed
by ``regionprops_3D``. Because ``regionprops_3D`` returns data in
the same ``list`` format as the ``regionprops`` function in **Skimage**
you can pass in either.
Returns
-------
DataFrame : Pandas DataFrame
A Pandas DataFrame with each region corresponding to a row and each
column corresponding to a key metric. All the values for a given
property (e.g. 'sphericity') can be obtained as
``val = df['sphericity']``. Conversely, all the key metrics for a
given region can be found with ``df.iloc[1]``.
See Also
--------
props_to_image
regionprops_3d | 5.896379 | 5.966064 | 0.98832 |
r
im = sp.zeros(shape=shape)
for r in regionprops:
if prop == 'convex':
mask = r.convex_image
else:
mask = r.image
temp = mask * r[prop]
s = bbox_to_slices(r.bbox)
im[s] += temp
return im | def props_to_image(regionprops, shape, prop) | r"""
Creates an image with each region colored according the specified ``prop``,
as obtained by ``regionprops_3d``.
Parameters
----------
regionprops : list
This is a list of properties for each region that is computed
by PoreSpy's ``regionprops_3D`` or Skimage's ``regionsprops``.
shape : array_like
The shape of the original image for which ``regionprops`` was obtained.
prop : string
The region property of interest. Can be a scalar item such as 'volume'
in which case the the regions will be colored by their respective
volumes, or can be an image-type property such as 'border' or
'convex_image', which will return an image composed of the sub-images.
Returns
-------
image : ND-array
An ND-image the same size as the original image, with each region
represented by the values specified in ``prop``.
See Also
--------
props_to_DataFrame
regionprops_3d | 4.602747 | 5.643798 | 0.815541 |
r
vs = voxel_size
for entry in data:
if data[entry].dtype == bool:
data[entry] = data[entry].astype(np.int8)
if data[entry].flags['C_CONTIGUOUS']:
data[entry] = np.ascontiguousarray(data[entry])
imageToVTK(path, cellData=data, spacing=(vs, vs, vs), origin=origin) | def dict_to_vtk(data, path='./dictvtk', voxel_size=1, origin=(0, 0, 0)) | r"""
Accepts multiple images as a dictionary and compiles them into a vtk file
Parameters
----------
data : dict
A dictionary of *key: value* pairs, where the *key* is the name of the
scalar property stored in each voxel of the array stored in the
corresponding *value*.
path : string
Path to output file
voxel_size : int
The side length of the voxels (voxels are cubic)
origin : float
data origin (according to selected voxel size)
Notes
-----
Outputs a vtk, vtp or vti file that can opened in ParaView | 3.57763 | 4.128487 | 0.866572 |
r
from openpnm.network import GenericNetwork
# Convert net dict to an openpnm Network
pn = GenericNetwork()
pn.update(net)
pn.project.save_project(filename)
ws = pn.project.workspace
ws.close_project(pn.project) | def to_openpnm(net, filename) | r"""
Save the result of the `snow` network extraction function in a format
suitable for opening in OpenPNM.
Parameters
----------
net : dict
The dictionary object produced by the network extraction functions
filename : string or path object
The name and location to save the file, which will have `.net` file
extension. | 5.876146 | 7.205894 | 0.815464 |
r
if len(im.shape) == 2:
im = im[:, :, np.newaxis]
if im.dtype == bool:
vox = True
if vox:
im = im.astype(np.int8)
vs = voxel_size
if divide:
split = np.round(im.shape[2]/2).astype(np.int)
im1 = im[:, :, 0:split]
im2 = im[:, :, split:]
imageToVTK(path+'1', cellData={'im': np.ascontiguousarray(im1)},
spacing=(vs, vs, vs))
imageToVTK(path+'2', origin=(0.0, 0.0, split*vs),
cellData={'im': np.ascontiguousarray(im2)},
spacing=(vs, vs, vs))
elif downsample:
im = spim.interpolation.zoom(im, zoom=0.5, order=0, mode='reflect')
imageToVTK(path, cellData={'im': np.ascontiguousarray(im)},
spacing=(2*vs, 2*vs, 2*vs))
else:
imageToVTK(path, cellData={'im': np.ascontiguousarray(im)},
spacing=(vs, vs, vs)) | def to_vtk(im, path='./voxvtk', divide=False, downsample=False, voxel_size=1,
vox=False) | r"""
Converts an array to a vtk file.
Parameters
----------
im : 3D image
The image of the porous material
path : string
Path to output file
divide : bool
vtk files can get very large, this option allows you for two output
files, divided at z = half. This allows for large data sets to be
imaged without loss of information
downsample : bool
very large images acan be downsampled to half the size in each
dimension, this doubles the effective voxel size
voxel_size : int
The side length of the voxels (voxels are cubic)
vox : bool
For an image that is binary (1's and 0's) this reduces the file size by
using int8 format (can also be used to reduce file size when accuracy
is not necessary ie: just visulization)
Notes
-----
Outputs a vtk, vtp or vti file that can opened in paraview | 2.203917 | 2.206624 | 0.998773 |
r
# Create binary image for fluid and solid phases
bin_im = im == solid
# Transform to integer for distance transform
bin_im = bin_im.astype(int)
# Distance Transform computes Euclidean distance in lattice units to
# Nearest fluid for every solid voxel
dt = nd.distance_transform_edt(bin_im)
dt[dt > np.sqrt(2)] = 2
dt[(dt > 0)*(dt <= np.sqrt(2))] = 1
dt = dt.astype(int)
# Write out data
with open(filename, 'w') as f:
out_data = dt.flatten().tolist()
f.write('\n'.join(map(repr, out_data))) | def to_palabos(im, filename, solid=0) | r"""
Converts an ND-array image to a text file that Palabos can read in as a
geometry for Lattice Boltzmann simulations. Uses a Euclidean distance
transform to identify solid voxels neighboring fluid voxels and labels
them as the interface.
Parameters
----------
im : ND-array
The image of the porous material
filename : string
Path to output file
solid : int
The value of the solid voxels in the image used to convert image to
binary with all other voxels assumed to be fluid.
Notes
-----
File produced contains 3 values: 2 = Solid, 1 = Interface, 0 = Pore
Palabos will run the simulation applying the specified pressure drop from
x = 0 to x = -1. | 5.66145 | 4.71982 | 1.199505 |
r
return generate_voxel_image(network, pore_shape=pore_shape,
throat_shape=throat_shape, max_dim=max_dim,
verbose=verbose, rtol=rtol) | def openpnm_to_im(network, pore_shape="sphere", throat_shape="cylinder",
max_dim=None, verbose=1, rtol=0.1) | r"""
Generates voxel image from an OpenPNM network object.
Parameters
----------
network : OpenPNM GenericNetwork
Network from which voxel image is to be generated
pore_shape : str
Shape of pores in the network, valid choices are "sphere", "cube"
throat_shape : str
Shape of throats in the network, valid choices are "cylinder", "cuboid"
max_dim : int
Number of voxels in the largest dimension of the network
rtol : float
Stopping criteria for finding the smallest voxel image such that
further increasing the number of voxels in each dimension by 25% would
improve the predicted porosity of the image by less that ``rtol``
Returns
-------
im : ND-array
Voxelated image corresponding to the given pore network model
Notes
-----
(1) The generated voxelated image is labeled with 0s, 1s and 2s signifying
solid phase, pores, and throats respectively.
(2) If max_dim is not provided, the method calculates it such that the
further increasing it doesn't change porosity by much. | 2.723652 | 3.016832 | 0.902819 |
r
if im.ndim != im.squeeze().ndim:
warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
' Reduce dimensionality with np.squeeze(im) to avoid' +
' unexpected behavior.')
if mode in ['backward', 'reverse']:
im = sp.flip(im, axis)
im = distance_transform_lin(im=im, axis=axis, mode='forward')
im = sp.flip(im, axis)
return im
elif mode in ['both']:
im_f = distance_transform_lin(im=im, axis=axis, mode='forward')
im_b = distance_transform_lin(im=im, axis=axis, mode='backward')
return sp.minimum(im_f, im_b)
else:
b = sp.cumsum(im > 0, axis=axis)
c = sp.diff(b*(im == 0), axis=axis)
d = sp.minimum.accumulate(c, axis=axis)
if im.ndim == 1:
e = sp.pad(d, pad_width=[1, 0], mode='constant', constant_values=0)
elif im.ndim == 2:
ax = [[[1, 0], [0, 0]], [[0, 0], [1, 0]]]
e = sp.pad(d, pad_width=ax[axis], mode='constant', constant_values=0)
elif im.ndim == 3:
ax = [[[1, 0], [0, 0], [0, 0]],
[[0, 0], [1, 0], [0, 0]],
[[0, 0], [0, 0], [1, 0]]]
e = sp.pad(d, pad_width=ax[axis], mode='constant', constant_values=0)
f = im*(b + e)
return f | def distance_transform_lin(im, axis=0, mode='both') | r"""
Replaces each void voxel with the linear distance to the nearest solid
voxel along the specified axis.
Parameters
----------
im : ND-array
The image of the porous material with ``True`` values indicating the
void phase (or phase of interest)
axis : int
The direction along which the distance should be measured, the default
is 0 (i.e. along the x-direction)
mode : string
Controls how the distance is measured. Options are:
'forward' - Distances are measured in the increasing direction along
the specified axis
'reverse' - Distances are measured in the reverse direction.
*'backward'* is also accepted.
'both' - Distances are calculated in both directions (by recursively
calling itself), then reporting the minimum value of the two results.
Returns
-------
image : ND-array
A copy of ``im`` with each foreground voxel containing the distance to
the nearest background along the specified axis. | 2.223412 | 2.179204 | 1.020286 |
r
im = dt > 0
if im.ndim != im.squeeze().ndim:
warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
' Reduce dimensionality with np.squeeze(im) to avoid' +
' unexpected behavior.')
if footprint is None:
if im.ndim == 2:
footprint = disk
elif im.ndim == 3:
footprint = ball
else:
raise Exception("only 2-d and 3-d images are supported")
mx = spim.maximum_filter(dt + 2*(~im), footprint=footprint(r_max))
peaks = (dt == mx)*im
return peaks | def find_peaks(dt, r_max=4, footprint=None) | r"""
Returns all local maxima in the distance transform
Parameters
----------
dt : ND-array
The distance transform of the pore space. This may be calculated and
filtered using any means desired.
r_max : scalar
The size of the structuring element used in the maximum filter. This
controls the localness of any maxima. The default is 4 voxels.
footprint : ND-array
Specifies the shape of the structuring element used to define the
neighborhood when looking for peaks. If none is specified then a
spherical shape is used (or circular in 2D).
Returns
-------
image : ND-array
An array of booleans with ``True`` values at the location of any
local maxima.
Notes
-----
It is also possible ot the ``peak_local_max`` function from the
``skimage.feature`` module as follows:
``peaks = peak_local_max(image=dt, min_distance=r, exclude_border=0,
indices=False)``
This automatically uses a square structuring element which is significantly
faster than using a circular or spherical element. | 4.666939 | 5.447739 | 0.856675 |
r
if peaks.ndim == 2:
strel = square
else:
strel = cube
markers, N = spim.label(input=peaks, structure=strel(3))
inds = spim.measurements.center_of_mass(input=peaks,
labels=markers,
index=sp.arange(1, N+1))
inds = sp.floor(inds).astype(int)
# Centroid may not be on old pixel, so create a new peaks image
peaks_new = sp.zeros_like(peaks, dtype=bool)
peaks_new[tuple(inds.T)] = True
return peaks_new | def reduce_peaks(peaks) | r"""
Any peaks that are broad or elongated are replaced with a single voxel
that is located at the center of mass of the original voxels.
Parameters
----------
peaks : ND-image
An image containing True values indicating peaks in the distance
transform
Returns
-------
image : ND-array
An array with the same number of isolated peaks as the original image,
but fewer total voxels.
Notes
-----
The center of mass of a group of voxels is used as the new single voxel, so
if the group has an odd shape (like a horse shoe), the new voxel may *not*
lie on top of the original set. | 4.172719 | 3.945646 | 1.05755 |
r
peaks = sp.copy(peaks)
if dt.ndim == 2:
from skimage.morphology import square as cube
else:
from skimage.morphology import cube
labels, N = spim.label(peaks)
slices = spim.find_objects(labels)
for i in range(N):
s = extend_slice(s=slices[i], shape=peaks.shape, pad=10)
peaks_i = labels[s] == i+1
dt_i = dt[s]
im_i = dt_i > 0
iters = 0
peaks_dil = sp.copy(peaks_i)
while iters < max_iters:
iters += 1
peaks_dil = spim.binary_dilation(input=peaks_dil,
structure=cube(3))
peaks_max = peaks_dil*sp.amax(dt_i*peaks_dil)
peaks_extended = (peaks_max == dt_i)*im_i
if sp.all(peaks_extended == peaks_i):
break # Found a true peak
elif sp.sum(peaks_extended*peaks_i) == 0:
peaks_i = False
break # Found a saddle point
peaks[s] = peaks_i
if iters >= max_iters:
print('Maximum number of iterations reached, consider'
+ 'running again with a larger value of max_iters')
return peaks | def trim_saddle_points(peaks, dt, max_iters=10) | r"""
Removes peaks that were mistakenly identified because they lied on a
saddle or ridge in the distance transform that was not actually a true
local peak.
Parameters
----------
peaks : ND-array
A boolean image containing True values to mark peaks in the distance
transform (``dt``)
dt : ND-array
The distance transform of the pore space for which the true peaks are
sought.
max_iters : int
The maximum number of iterations to run while eroding the saddle
points. The default is 10, which is usually not reached; however,
a warning is issued if the loop ends prior to removing all saddle
points.
Returns
-------
image : ND-array
An image with fewer peaks than the input image
References
----------
[1] Gostick, J. "A versatile and efficient network extraction algorithm
using marker-based watershed segmenation". Physical Review E. (2017) | 3.45021 | 3.518896 | 0.980481 |
r
peaks = sp.copy(peaks)
if dt.ndim == 2:
from skimage.morphology import square as cube
else:
from skimage.morphology import cube
peaks, N = spim.label(peaks, structure=cube(3))
crds = spim.measurements.center_of_mass(peaks, labels=peaks,
index=sp.arange(1, N+1))
crds = sp.vstack(crds).astype(int) # Convert to numpy array of ints
# Get distance between each peak as a distance map
tree = sptl.cKDTree(data=crds)
temp = tree.query(x=crds, k=2)
nearest_neighbor = temp[1][:, 1]
dist_to_neighbor = temp[0][:, 1]
del temp, tree # Free-up memory
dist_to_solid = dt[tuple(crds.T)] # Get distance to solid for each peak
hits = sp.where(dist_to_neighbor < dist_to_solid)[0]
# Drop peak that is closer to the solid than it's neighbor
drop_peaks = []
for peak in hits:
if dist_to_solid[peak] < dist_to_solid[nearest_neighbor[peak]]:
drop_peaks.append(peak)
else:
drop_peaks.append(nearest_neighbor[peak])
drop_peaks = sp.unique(drop_peaks)
# Remove peaks from image
slices = spim.find_objects(input=peaks)
for s in drop_peaks:
peaks[slices[s]] = 0
return (peaks > 0) | def trim_nearby_peaks(peaks, dt) | r"""
Finds pairs of peaks that are nearer to each other than to the solid phase,
and removes the peak that is closer to the solid.
Parameters
----------
peaks : ND-array
A boolean image containing True values to mark peaks in the distance
transform (``dt``)
dt : ND-array
The distance transform of the pore space for which the true peaks are
sought.
Returns
-------
image : ND-array
An array the same size as ``peaks`` containing a subset of the peaks
in the original image.
Notes
-----
Each pair of peaks is considered simultaneously, so for a triplet of peaks
each pair is considered. This ensures that only the single peak that is
furthest from the solid is kept. No iteration is required.
References
----------
[1] Gostick, J. "A versatile and efficient network extraction algorithm
using marker-based watershed segmenation". Physical Review E. (2017) | 3.48268 | 3.508142 | 0.992742 |
r
if im.ndim != im.squeeze().ndim:
warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
' Reduce dimensionality with np.squeeze(im) to avoid' +
' unexpected behavior.')
if im.ndim == 2:
if conn == 4:
strel = disk(1)
elif conn in [None, 8]:
strel = square(3)
elif im.ndim == 3:
if conn == 6:
strel = ball(1)
elif conn in [None, 26]:
strel = cube(3)
labels, N = spim.label(input=im, structure=strel)
holes = clear_border(labels=labels) > 0
return holes | def find_disconnected_voxels(im, conn=None) | r"""
This identifies all pore (or solid) voxels that are not connected to the
edge of the image. This can be used to find blind pores, or remove
artifacts such as solid phase voxels that are floating in space.
Parameters
----------
im : ND-image
A Boolean image, with True values indicating the phase for which
disconnected voxels are sought.
conn : int
For 2D the options are 4 and 8 for square and diagonal neighbors, while
for the 3D the options are 6 and 26, similarily for square and diagonal
neighbors. The default is max
Returns
-------
image : ND-array
An ND-image the same size as ``im``, with True values indicating
voxels of the phase of interest (i.e. True values in the original
image) that are not connected to the outer edges.
Notes
-----
image : ND-array
The returned array (e.g. ``holes``) be used to trim blind pores from
``im`` using: ``im[holes] = False`` | 4.145908 | 3.873052 | 1.07045 |
r
im = sp.copy(im)
holes = find_disconnected_voxels(im)
im[holes] = False
return im | def fill_blind_pores(im) | r"""
Fills all pores that are not connected to the edges of the image.
Parameters
----------
im : ND-array
The image of the porous material
Returns
-------
image : ND-array
A version of ``im`` but with all the disconnected pores removed.
See Also
--------
find_disconnected_voxels | 10.683798 | 9.751775 | 1.095575 |
r
im = sp.copy(im)
holes = find_disconnected_voxels(~im)
im[holes] = True
return im | def trim_floating_solid(im) | r"""
Removes all solid that that is not attached to the edges of the image.
Parameters
----------
im : ND-array
The image of the porous material
Returns
-------
image : ND-array
A version of ``im`` but with all the disconnected solid removed.
See Also
--------
find_disconnected_voxels | 14.497808 | 12.600683 | 1.150557 |
r
if im.ndim != im.squeeze().ndim:
warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
' Reduce dimensionality with np.squeeze(im) to avoid' +
' unexpected behavior.')
im = trim_floating_solid(~im)
labels = spim.label(~im)[0]
inlet = sp.zeros_like(im, dtype=int)
outlet = sp.zeros_like(im, dtype=int)
if im.ndim == 3:
if inlet_axis == 0:
inlet[0, :, :] = 1
elif inlet_axis == 1:
inlet[:, 0, :] = 1
elif inlet_axis == 2:
inlet[:, :, 0] = 1
if outlet_axis == 0:
outlet[-1, :, :] = 1
elif outlet_axis == 1:
outlet[:, -1, :] = 1
elif outlet_axis == 2:
outlet[:, :, -1] = 1
if im.ndim == 2:
if inlet_axis == 0:
inlet[0, :] = 1
elif inlet_axis == 1:
inlet[:, 0] = 1
if outlet_axis == 0:
outlet[-1, :] = 1
elif outlet_axis == 1:
outlet[:, -1] = 1
IN = sp.unique(labels*inlet)
OUT = sp.unique(labels*outlet)
new_im = sp.isin(labels, list(set(IN) ^ set(OUT)), invert=True)
im[new_im == 0] = True
return ~im | def trim_nonpercolating_paths(im, inlet_axis=0, outlet_axis=0) | r"""
Removes all nonpercolating paths between specified edges
This function is essential when performing transport simulations on an
image, since image regions that do not span between the desired inlet and
outlet do not contribute to the transport.
Parameters
----------
im : ND-array
The image of the porous material with ```True`` values indicating the
phase of interest
inlet_axis : int
Inlet axis of boundary condition. For three dimensional image the
number ranges from 0 to 2. For two dimensional image the range is
between 0 to 1.
outlet_axis : int
Outlet axis of boundary condition. For three dimensional image the
number ranges from 0 to 2. For two dimensional image the range is
between 0 to 1.
Returns
-------
image : ND-array
A copy of ``im`` with all the nonpercolating paths removed
See Also
--------
find_disconnected_voxels
trim_floating_solid
trim_blind_pores | 2.286455 | 2.21765 | 1.031026 |
r
result = im
if mode in ['maxima', 'extrema']:
result = reconstruction(seed=im - h, mask=im, method='dilation')
elif mode in ['minima', 'extrema']:
result = reconstruction(seed=im + h, mask=im, method='erosion')
return result | def trim_extrema(im, h, mode='maxima') | r"""
Trims local extrema in greyscale values by a specified amount.
This essentially decapitates peaks and/or floods valleys.
Parameters
----------
im : ND-array
The image whose extrema are to be removed
h : float
The height to remove from each peak or fill in each valley
mode : string {'maxima' | 'minima' | 'extrema'}
Specifies whether to remove maxima or minima or both
Returns
-------
image : ND-array
A copy of the input image with all the peaks and/or valleys removed.
Notes
-----
This function is referred to as **imhmax** or **imhmin** in Matlab. | 4.234086 | 4.72434 | 0.896228 |
r
mask = im > 0
if regions is None:
labels, N = spim.label(mask)
else:
labels = sp.copy(regions)
N = labels.max()
I = im.flatten()
L = labels.flatten()
if mode.startswith('max'):
V = sp.zeros(shape=N+1, dtype=float)
for i in range(len(L)):
if V[L[i]] < I[i]:
V[L[i]] = I[i]
elif mode.startswith('min'):
V = sp.ones(shape=N+1, dtype=float)*sp.inf
for i in range(len(L)):
if V[L[i]] > I[i]:
V[L[i]] = I[i]
elif mode.startswith('size'):
V = sp.zeros(shape=N+1, dtype=int)
for i in range(len(L)):
V[L[i]] += 1
im_flooded = sp.reshape(V[labels], newshape=im.shape)
im_flooded = im_flooded*mask
return im_flooded | def flood(im, regions=None, mode='max') | r"""
Floods/fills each region in an image with a single value based on the
specific values in that region. The ``mode`` argument is used to
determine how the value is calculated.
Parameters
----------
im : array_like
An ND image with isolated regions containing 0's elsewhere.
regions : array_like
An array the same shape as ``im`` with each region labeled. If None is
supplied (default) then ``scipy.ndimage.label`` is used with its
default arguments.
mode : string
Specifies how to determine which value should be used to flood each
region. Options are:
'max' - Floods each region with the local maximum in that region
'min' - Floods each region the local minimum in that region
'size' - Floods each region with the size of that region
Returns
-------
image : ND-array
A copy of ``im`` with new values placed in each forground voxel based
on the ``mode``.
See Also
--------
props_to_image | 2.178948 | 2.413377 | 0.902863 |
r
temp = sp.ones(shape=dt.shape)*sp.inf
for ax in range(dt.ndim):
dt_lin = distance_transform_lin(sp.ones_like(temp, dtype=bool),
axis=ax, mode='both')
temp = sp.minimum(temp, dt_lin)
result = sp.clip(dt - temp, a_min=0, a_max=sp.inf)
return result | def find_dt_artifacts(dt) | r"""
Finds points in a distance transform that are closer to wall than solid.
These points could *potentially* be erroneously high since their distance
values do not reflect the possibility that solid may have been present
beyond the border of the image but lost by trimming.
Parameters
----------
dt : ND-array
The distance transform of the phase of interest
Returns
-------
image : ND-array
An ND-array the same shape as ``dt`` with numerical values indicating
the maximum amount of error in each volxel, which is found by
subtracting the distance to nearest edge of image from the distance
transform value. In other words, this is the error that would be found
if there were a solid voxel lurking just beyond the nearest edge of
the image. Obviously, voxels with a value of zero have no error. | 5.371768 | 5.442273 | 0.987045 |
r
if im.dtype == bool:
im = spim.label(im)[0]
counts = sp.bincount(im.flatten())
counts[0] = 0
chords = counts[im]
return chords | def region_size(im) | r"""
Replace each voxel with size of region to which it belongs
Parameters
----------
im : ND-array
Either a boolean image wtih ``True`` indicating the features of
interest, in which case ``scipy.ndimage.label`` will be applied to
find regions, or a greyscale image with integer values indicating
regions.
Returns
-------
image : ND-array
A copy of ``im`` with each voxel value indicating the size of the
region to which it belongs. This is particularly useful for finding
chord sizes on the image produced by ``apply_chords``. | 7.792514 | 6.65112 | 1.171609 |
r
if im.ndim != im.squeeze().ndim:
warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
' Reduce dimensionality with np.squeeze(im) to avoid' +
' unexpected behavior.')
if spacing < 0:
raise Exception('Spacing cannot be less than 0')
if spacing == 0:
label = True
result = sp.zeros(im.shape, dtype=int) # Will receive chords at end
slxyz = [slice(None, None, spacing*(axis != i) + 1) for i in [0, 1, 2]]
slices = tuple(slxyz[:im.ndim])
s = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] # Straight-line structuring element
if im.ndim == 3: # Make structuring element 3D if necessary
s = sp.pad(sp.atleast_3d(s), pad_width=((0, 0), (0, 0), (1, 1)),
mode='constant', constant_values=0)
im = im[slices]
s = sp.swapaxes(s, 0, axis)
chords = spim.label(im, structure=s)[0]
if trim_edges: # Label on border chords will be set to 0
chords = clear_border(chords)
result[slices] = chords # Place chords into empty image created at top
if label is False: # Remove label if not requested
result = result > 0
return result | def apply_chords(im, spacing=1, axis=0, trim_edges=True, label=False) | r"""
Adds chords to the void space in the specified direction. The chords are
separated by 1 voxel plus the provided spacing.
Parameters
----------
im : ND-array
An image of the porous material with void marked as ``True``.
spacing : int
Separation between chords. The default is 1 voxel. This can be
decreased to 0, meaning that the chords all touch each other, which
automatically sets to the ``label`` argument to ``True``.
axis : int (default = 0)
The axis along which the chords are drawn.
trim_edges : bool (default = ``True``)
Whether or not to remove chords that touch the edges of the image.
These chords are artifically shortened, so skew the chord length
distribution.
label : bool (default is ``False``)
If ``True`` the chords in the returned image are each given a unique
label, such that all voxels lying on the same chord have the same
value. This is automatically set to ``True`` if spacing is 0, but is
``False`` otherwise.
Returns
-------
image : ND-array
A copy of ``im`` with non-zero values indicating the chords.
See Also
--------
apply_chords_3D | 4.297064 | 4.388328 | 0.979203 |
r
if im.ndim != im.squeeze().ndim:
warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
' Reduce dimensionality with np.squeeze(im) to avoid' +
' unexpected behavior.')
if im.ndim < 3:
raise Exception('Must be a 3D image to use this function')
if spacing < 0:
raise Exception('Spacing cannot be less than 0')
ch = sp.zeros_like(im, dtype=int)
ch[:, ::4+2*spacing, ::4+2*spacing] = 1 # X-direction
ch[::4+2*spacing, :, 2::4+2*spacing] = 2 # Y-direction
ch[2::4+2*spacing, 2::4+2*spacing, :] = 3 # Z-direction
chords = ch*im
if trim_edges:
temp = clear_border(spim.label(chords > 0)[0]) > 0
chords = temp*chords
return chords | def apply_chords_3D(im, spacing=0, trim_edges=True) | r"""
Adds chords to the void space in all three principle directions. The
chords are seprated by 1 voxel plus the provided spacing. Chords in the X,
Y and Z directions are labelled 1, 2 and 3 resepctively.
Parameters
----------
im : ND-array
A 3D image of the porous material with void space marked as True.
spacing : int (default = 0)
Chords are automatically separed by 1 voxel on all sides, and this
argument increases the separation.
trim_edges : bool (default is ``True``)
Whether or not to remove chords that touch the edges of the image.
These chords are artifically shortened, so skew the chord length
distribution
Returns
-------
image : ND-array
A copy of ``im`` with values of 1 indicating x-direction chords,
2 indicating y-direction chords, and 3 indicating z-direction chords.
Notes
-----
The chords are separated by a spacing of at least 1 voxel so that tools
that search for connected components, such as ``scipy.ndimage.label`` can
detect individual chords.
See Also
--------
apply_chords | 3.976173 | 3.977237 | 0.999733 |
r
im_new = porosimetry(im=im, sizes=sizes, access_limited=False, mode=mode)
return im_new | def local_thickness(im, sizes=25, mode='hybrid') | r"""
For each voxel, this functions calculates the radius of the largest sphere
that both engulfs the voxel and fits entirely within the foreground. This
is not the same as a simple distance transform, which finds the largest
sphere that could be *centered* on each voxel.
Parameters
----------
im : array_like
A binary image with the phase of interest set to True
sizes : array_like or scalar
The sizes to invade. If a list of values of provided they are used
directly. If a scalar is provided then that number of points spanning
the min and max of the distance transform are used.
mode : string
Controls with method is used to compute the result. Options are:
'hybrid' - (default) Performs a distance tranform of the void space,
thresholds to find voxels larger than ``sizes[i]``, trims the resulting
mask if ``access_limitations`` is ``True``, then dilates it using the
efficient fft-method to obtain the non-wetting fluid configuration.
'dt' - Same as 'hybrid', except uses a second distance transform,
relative to the thresholded mask, to find the invading fluid
configuration. The choice of 'dt' or 'hybrid' depends on speed, which
is system and installation specific.
'mio' - Using a single morphological image opening step to obtain the
invading fluid confirguration directly, *then* trims if
``access_limitations`` is ``True``. This method is not ideal and is
included mostly for comparison purposes.
Returns
-------
image : ND-array
A copy of ``im`` with the pore size values in each voxel
See Also
--------
porosimetry
Notes
-----
The term *foreground* is used since this function can be applied to both
pore space or the solid, whichever is set to ``True``.
This function is identical to ``porosimetry`` with ``access_limited`` set
to ``False``.
The way local thickness is found in PoreSpy differs from the traditional
method (i.e. `used in ImageJ <https://imagej.net/Local_Thickness>`_).
Our approach is probably slower, but it allows for the same code to be
used for ``local_thickness`` and ``porosimetry``, since we can 'trim'
invaded regions that are not connected to the inlets in the ``porosimetry``
function. This is not needed in ``local_thickness`` however. | 12.109858 | 6.906686 | 1.753353 |
r
temp = sp.zeros_like(im)
temp[inlets] = True
labels, N = spim.label(im + temp)
im = im ^ (clear_border(labels=labels) > 0)
return im | def trim_disconnected_blobs(im, inlets) | r"""
Removes foreground voxels not connected to specified inlets
Parameters
----------
im : ND-array
The array to be trimmed
inlets : ND-array of tuple of indices
The locations of the inlets. Any voxels *not* connected directly to
the inlets will be trimmed
Returns
-------
image : ND-array
An array of the same shape as ``im``, but with all foreground
voxels not connected to the ``inlets`` removed. | 9.965244 | 10.481935 | 0.950707 |
r'''
Helper function to generate the axial shifts that will be performed on
the image to identify bordering pixels/voxels
'''
if ndim == 2:
if include_diagonals:
neighbors = square(3)
else:
neighbors = diamond(1)
neighbors[1, 1] = 0
x, y = np.where(neighbors)
x -= 1
y -= 1
return np.vstack((x, y)).T
else:
if include_diagonals:
neighbors = cube(3)
else:
neighbors = octahedron(1)
neighbors[1, 1, 1] = 0
x, y, z = np.where(neighbors)
x -= 1
y -= 1
z -= 1
return np.vstack((x, y, z)).T | def _get_axial_shifts(ndim=2, include_diagonals=False) | r'''
Helper function to generate the axial shifts that will be performed on
the image to identify bordering pixels/voxels | 2.467544 | 1.909195 | 1.292452 |
r'''
Creates a stack of images with one extra dimension to the input image
with length equal to the number of borders to search + 1.
Image is rolled along the axial shifts so that the border pixel is
overlapping the original pixel. First image in stack is the original.
Stacking makes direct vectorized array comparisons possible.
'''
ndim = len(np.shape(im))
axial_shift = _get_axial_shifts(ndim, include_diagonals)
if ndim == 2:
stack = np.zeros([np.shape(im)[0],
np.shape(im)[1],
len(axial_shift)+1])
stack[:, :, 0] = im
for i in range(len(axial_shift)):
ax0, ax1 = axial_shift[i]
temp = np.roll(np.roll(im, ax0, 0), ax1, 1)
stack[:, :, i+1] = temp
return stack
elif ndim == 3:
stack = np.zeros([np.shape(im)[0],
np.shape(im)[1],
np.shape(im)[2],
len(axial_shift)+1])
stack[:, :, :, 0] = im
for i in range(len(axial_shift)):
ax0, ax1, ax2 = axial_shift[i]
temp = np.roll(np.roll(np.roll(im, ax0, 0), ax1, 1), ax2, 2)
stack[:, :, :, i+1] = temp
return stack | def _make_stack(im, include_diagonals=False) | r'''
Creates a stack of images with one extra dimension to the input image
with length equal to the number of borders to search + 1.
Image is rolled along the axial shifts so that the border pixel is
overlapping the original pixel. First image in stack is the original.
Stacking makes direct vectorized array comparisons possible. | 2.496206 | 1.427046 | 1.749212 |
r'''
Identifies the voxels in regions that border *N* other regions.
Useful for finding triple-phase boundaries.
Parameters
----------
im : ND-array
An ND image of the porous material containing discrete values in the
pore space identifying different regions. e.g. the result of a
snow-partition
include_diagonals : boolean
When identifying bordering pixels (2D) and voxels (3D) include those
shifted along more than one axis
Returns
-------
image : ND-array
A copy of ``im`` with voxel values equal to the number of uniquely
different bordering values
'''
if im.ndim != im.squeeze().ndim:
warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
' Reduce dimensionality with np.squeeze(im) to avoid' +
' unexpected behavior.')
# Get dimension of image
ndim = len(np.shape(im))
if ndim not in [2, 3]:
raise NotImplementedError("Function only works for 2d and 3d images")
# Pad image to handle edges
im = np.pad(im, pad_width=1, mode='edge')
# Stack rolled images for each neighbor to be inspected
stack = _make_stack(im, include_diagonals)
# Sort the stack along the last axis
stack.sort()
out = np.ones_like(im)
# Run through stack recording when neighbor id changes
# Number of changes is number of unique bordering regions
for k in range(np.shape(stack)[ndim])[1:]:
if ndim == 2:
mask = stack[:, :, k] != stack[:, :, k-1]
elif ndim == 3:
mask = stack[:, :, :, k] != stack[:, :, :, k-1]
out += mask
# Un-pad
if ndim == 2:
return out[1:-1, 1:-1].copy()
else:
return out[1:-1, 1:-1, 1:-1].copy() | def nphase_border(im, include_diagonals=False) | r'''
Identifies the voxels in regions that border *N* other regions.
Useful for finding triple-phase boundaries.
Parameters
----------
im : ND-array
An ND image of the porous material containing discrete values in the
pore space identifying different regions. e.g. the result of a
snow-partition
include_diagonals : boolean
When identifying bordering pixels (2D) and voxels (3D) include those
shifted along more than one axis
Returns
-------
image : ND-array
A copy of ``im`` with voxel values equal to the number of uniquely
different bordering values | 5.408484 | 2.642735 | 2.046548 |
r
values = sp.array(values).flatten()
if sp.size(values) != regions.max() + 1:
raise Exception('Number of values does not match number of regions')
im = sp.zeros_like(regions)
im = values[regions]
return im | def map_to_regions(regions, values) | r"""
Maps pore values from a network onto the image from which it was extracted
This function assumes that the pore numbering in the network has remained
unchanged from the region labels in the partitioned image.
Parameters
----------
regions : ND-array
An image of the pore space partitioned into regions and labeled
values : array_like
An array containing the numerical values to insert into each region.
The value at location *n* will be inserted into the image where
``regions`` is *n+1*. This mis-match is caused by the fact that 0's
in the ``regions`` image is assumed to be the backgroung phase, while
pore index 0 is valid.
Notes
-----
This function assumes that the array of pore values are indexed starting
at location 0, while in the region image 0's indicate background phase and
the region indexing starts at 1. That is, region 1 corresponds to pore 0. | 4.827604 | 5.660773 | 0.852817 |
r
print("\n" + "-" * 44, flush=True)
print("| Generating voxel image from pore network |", flush=True)
print("-" * 44, flush=True)
# If max_dim is provided, generate voxel image using max_dim
if max_dim is not None:
return _generate_voxel_image(network, pore_shape, throat_shape,
max_dim=max_dim, verbose=verbose)
else:
max_dim = 200
# If max_dim is not provided, find best max_dim that predicts porosity
eps_old = 200
err = 100 # percent
while err > rtol:
im = _generate_voxel_image(network, pore_shape, throat_shape,
max_dim=max_dim, verbose=verbose)
eps = im.astype(bool).sum() / sp.prod(im.shape)
err = abs(1 - eps / eps_old)
eps_old = eps
max_dim = int(max_dim * 1.25)
if verbose:
print("\nConverged at max_dim = {max_dim} voxels.\n")
return im | def generate_voxel_image(network, pore_shape="sphere", throat_shape="cylinder",
max_dim=None, verbose=1, rtol=0.1) | r"""
Generates voxel image from an OpenPNM network object.
Parameters
----------
network : OpenPNM GenericNetwork
Network from which voxel image is to be generated
pore_shape : str
Shape of pores in the network, valid choices are "sphere", "cube"
throat_shape : str
Shape of throats in the network, valid choices are "cylinder", "cuboid"
max_dim : int
Number of voxels in the largest dimension of the network
rtol : float
Stopping criteria for finding the smallest voxel image such that
further increasing the number of voxels in each dimension by 25% would
improve the predicted porosity of the image by less that ``rtol``
Returns
-------
im : ND-array
Voxelated image corresponding to the given pore network model
Notes
-----
(1) The generated voxelated image is labeled with 0s, 1s and 2s signifying
solid phase, pores, and throats respectively.
(2) If max_dim is not provided, the method calculates it such that the
further increasing it doesn't change porosity by much. | 3.19212 | 2.989171 | 1.067895 |
r
f = boundary_faces
if f is not None:
coords = network['pore.coords']
condition = coords[~network['pore.boundary']]
dic = {'left': 0, 'right': 0, 'front': 1, 'back': 1,
'top': 2, 'bottom': 2}
if all(coords[:, 2] == 0):
dic['top'] = 1
dic['bottom'] = 1
for i in f:
if i in ['left', 'front', 'bottom']:
network['pore.{}'.format(i)] = (coords[:, dic[i]] <
min(condition[:, dic[i]]))
elif i in ['right', 'back', 'top']:
network['pore.{}'.format(i)] = (coords[:, dic[i]] >
max(condition[:, dic[i]]))
return network | def label_boundary_cells(network=None, boundary_faces=None) | r"""
Takes 2D or 3D network and assign labels to boundary pores
Parameters
----------
network : dictionary
A dictionary as produced by the SNOW network extraction algorithms
containing edge/vertex, site/bond, node/link information.
boundary_faces : list of strings
The user can choose ‘left’, ‘right’, ‘top’, ‘bottom’, ‘front’ and
‘back’ face labels to assign boundary nodes. If no label is
assigned then all six faces will be selected as boundary nodes
automatically which can be trimmed later on based on user requirements.
Returns
-------
The same dictionar s pass ing, but containing boundary nodes labels. For
example network['pore.left'], network['pore.right'], network['pore.top'],
network['pore.bottom'] etc.
Notes
-----
The dictionary names use the OpenPNM convention so it may be converted
directly to an OpenPNM network object using the ``update`` command. | 2.851108 | 2.887429 | 0.987421 |
r
im = im.copy()
if im.ndim != element.ndim:
raise Exception('Image shape ' + str(im.shape)
+ ' and element shape ' + str(element.shape)
+ ' do not match')
s_im = []
s_el = []
if (center is not None) and (corner is None):
for dim in range(im.ndim):
r, d = sp.divmod(element.shape[dim], 2)
if d == 0:
raise Exception('Cannot specify center point when element ' +
'has one or more even dimension')
lower_im = sp.amax((center[dim] - r, 0))
upper_im = sp.amin((center[dim] + r + 1, im.shape[dim]))
s_im.append(slice(lower_im, upper_im))
lower_el = sp.amax((lower_im - center[dim] + r, 0))
upper_el = sp.amin((upper_im - center[dim] + r,
element.shape[dim]))
s_el.append(slice(lower_el, upper_el))
elif (corner is not None) and (center is None):
for dim in range(im.ndim):
L = int(element.shape[dim])
lower_im = sp.amax((corner[dim], 0))
upper_im = sp.amin((corner[dim] + L, im.shape[dim]))
s_im.append(slice(lower_im, upper_im))
lower_el = sp.amax((lower_im - corner[dim], 0))
upper_el = sp.amin((upper_im - corner[dim],
element.shape[dim]))
s_el.append(slice(min(lower_el, upper_el), upper_el))
else:
raise Exception('Cannot specify both corner and center')
if mode == 'overlay':
im[tuple(s_im)] = im[tuple(s_im)] + element[tuple(s_el)]*value
elif mode == 'overwrite':
im[tuple(s_im)] = element[tuple(s_el)]*value
else:
raise Exception('Invalid mode ' + mode)
return im | def insert_shape(im, element, center=None, corner=None, value=1,
mode='overwrite') | r"""
Inserts sub-image into a larger image at the specified location.
If the inserted image extends beyond the boundaries of the image it will
be cropped accordingly.
Parameters
----------
im : ND-array
The image into which the sub-image will be inserted
element : ND-array
The sub-image to insert
center : tuple
Coordinates indicating the position in the main image where the
inserted imaged will be centered. If ``center`` is given then
``corner`` cannot be specified. Note that ``center`` can only be
used if all dimensions of ``element`` are odd, otherwise the meaning
of center is not defined.
corner : tuple
Coordinates indicating the position in the main image where the
lower corner (i.e. [0, 0, 0]) of the inserted image should be anchored.
If ``corner`` is given then ``corner`` cannot be specified.
value : scalar
A scalar value to apply to the sub-image. The default is 1.
mode : string
If 'overwrite' (default) the inserted image replaces the values in the
main image. If 'overlay' the inserted image is added to the main
image. In both cases the inserted image is multiplied by ``value``
first.
Returns
-------
im : ND-array
A copy of ``im`` with the supplied element inserted. | 1.903284 | 1.911039 | 0.995942 |
r
shape = sp.array(shape)
if sp.size(shape) == 1:
shape = sp.full((3, ), int(shape))
if sp.size(shape) == 2:
shape = sp.hstack((shape, [1]))
temp = sp.zeros(shape=shape[:2])
Xi = sp.ceil(sp.linspace(spacing/2,
shape[0]-(spacing/2)-1,
int(shape[0]/spacing)))
Xi = sp.array(Xi, dtype=int)
Yi = sp.ceil(sp.linspace(spacing/2,
shape[1]-(spacing/2)-1,
int(shape[1]/spacing)))
Yi = sp.array(Yi, dtype=int)
temp[tuple(sp.meshgrid(Xi, Yi))] = 1
inds = sp.where(temp)
for i in range(len(inds[0])):
r = sp.random.randint(1, (spacing/2))
try:
s1 = slice(inds[0][i]-r, inds[0][i]+r+1)
s2 = slice(inds[1][i]-r, inds[1][i]+r+1)
temp[s1, s2] = ps_disk(r)
except ValueError:
odd_shape = sp.shape(temp[s1, s2])
temp[s1, s2] = ps_disk(r)[:odd_shape[0], :odd_shape[1]]
im = sp.broadcast_to(array=sp.atleast_3d(temp), shape=shape)
return im | def bundle_of_tubes(shape: List[int], spacing: int) | r"""
Create a 3D image of a bundle of tubes, in the form of a rectangular
plate with randomly sized holes through it.
Parameters
----------
shape : list
The size the image, with the 3rd dimension indicating the plate
thickness. If the 3rd dimension is not given then a thickness of
1 voxel is assumed.
spacing : scalar
The center to center distance of the holes. The hole sizes will be
randomly distributed between this values down to 3 voxels.
Returns
-------
image : ND-array
A boolean array with ``True`` values denoting the pore space | 2.515056 | 2.537033 | 0.991337 |
r
shape = sp.array(shape)
if sp.size(shape) == 1:
shape = sp.full((3, ), int(shape))
Rs = dist.interval(sp.linspace(0.05, 0.95, nbins))
Rs = sp.vstack(Rs).T
Rs = (Rs[:-1] + Rs[1:])/2
Rs = sp.clip(Rs.flatten(), a_min=r_min, a_max=None)
phi_desired = 1 - (1 - porosity)/(len(Rs))
im = sp.ones(shape, dtype=bool)
for r in Rs:
phi_im = im.sum() / sp.prod(shape)
phi_corrected = 1 - (1 - phi_desired) / phi_im
temp = overlapping_spheres(shape=shape, radius=r, porosity=phi_corrected)
im = im * temp
return im | def polydisperse_spheres(shape: List[int], porosity: float, dist,
nbins: int = 5, r_min: int = 5) | r"""
Create an image of randomly place, overlapping spheres with a distribution
of radii.
Parameters
----------
shape : list
The size of the image to generate in [Nx, Ny, Nz] where Ni is the
number of voxels in each direction. If shape is only 2D, then an
image of polydisperse disks is returns
porosity : scalar
The porosity of the image, defined as the number of void voxels
divided by the number of voxels in the image. The specified value
is only matched approximately, so it's suggested to check this value
after the image is generated.
dist : scipy.stats distribution object
This should be an initialized distribution chosen from the large number
of options in the ``scipy.stats`` submodule. For instance, a normal
distribution with a mean of 20 and a standard deviation of 10 can be
obtained with ``dist = scipy.stats.norm(loc=20, scale=10)``
nbins : scalar
The number of discrete sphere sizes that will be used to generate the
image. This function generates ``nbins`` images of monodisperse
spheres that span 0.05 and 0.95 of the possible values produced by the
provided distribution, then overlays them to get polydispersivity.
Returns
-------
image : ND-array
A boolean array with ``True`` values denoting the pore space | 4.044414 | 4.074635 | 0.992583 |
r
print(78*'―')
print('voronoi_edges: Generating', ncells, 'cells')
shape = sp.array(shape)
if sp.size(shape) == 1:
shape = sp.full((3, ), int(shape))
im = sp.zeros(shape, dtype=bool)
base_pts = sp.rand(ncells, 3)*shape
if flat_faces:
# Reflect base points
Nx, Ny, Nz = shape
orig_pts = base_pts
base_pts = sp.vstack((base_pts,
[-1, 1, 1] * orig_pts + [2.0*Nx, 0, 0]))
base_pts = sp.vstack((base_pts,
[1, -1, 1] * orig_pts + [0, 2.0*Ny, 0]))
base_pts = sp.vstack((base_pts,
[1, 1, -1] * orig_pts + [0, 0, 2.0*Nz]))
base_pts = sp.vstack((base_pts, [-1, 1, 1] * orig_pts))
base_pts = sp.vstack((base_pts, [1, -1, 1] * orig_pts))
base_pts = sp.vstack((base_pts, [1, 1, -1] * orig_pts))
vor = sptl.Voronoi(points=base_pts)
vor.vertices = sp.around(vor.vertices)
vor.vertices *= (sp.array(im.shape)-1) / sp.array(im.shape)
vor.edges = _get_Voronoi_edges(vor)
for row in vor.edges:
pts = vor.vertices[row].astype(int)
if sp.all(pts >= 0) and sp.all(pts < im.shape):
line_pts = line_segment(pts[0], pts[1])
im[tuple(line_pts)] = True
im = spim.distance_transform_edt(~im) > radius
return im | def voronoi_edges(shape: List[int], radius: int, ncells: int,
flat_faces: bool = True) | r"""
Create an image of the edges in a Voronoi tessellation
Parameters
----------
shape : array_like
The size of the image to generate in [Nx, Ny, Nz] where Ni is the
number of voxels in each direction.
radius : scalar
The radius to which Voronoi edges should be dilated in the final image.
ncells : scalar
The number of Voronoi cells to include in the tesselation.
flat_faces : Boolean
Whether the Voronoi edges should lie on the boundary of the
image (True), or if edges outside the image should be removed (False).
Returns
-------
image : ND-array
A boolean array with ``True`` values denoting the pore space | 2.614955 | 2.552265 | 1.024562 |
r
edges = [[], []]
for facet in vor.ridge_vertices:
# Create a closed cycle of vertices that define the facet
edges[0].extend(facet[:-1]+[facet[-1]])
edges[1].extend(facet[1:]+[facet[0]])
edges = sp.vstack(edges).T # Convert to scipy-friendly format
mask = sp.any(edges == -1, axis=1) # Identify edges at infinity
edges = edges[~mask] # Remove edges at infinity
edges = sp.sort(edges, axis=1) # Move all points to upper triangle
# Remove duplicate pairs
edges = edges[:, 0] + 1j*edges[:, 1] # Convert to imaginary
edges = sp.unique(edges) # Remove duplicates
edges = sp.vstack((sp.real(edges), sp.imag(edges))).T # Back to real
edges = sp.array(edges, dtype=int)
return edges | def _get_Voronoi_edges(vor) | r"""
Given a Voronoi object as produced by the scipy.spatial.Voronoi class,
this function calculates the start and end points of eeach edge in the
Voronoi diagram, in terms of the vertex indices used by the received
Voronoi object.
Parameters
----------
vor : scipy.spatial.Voronoi object
Returns
-------
A 2-by-N array of vertex indices, indicating the start and end points of
each vertex in the Voronoi diagram. These vertex indices can be used to
index straight into the ``vor.vertices`` array to get spatial positions. | 3.54039 | 3.951932 | 0.895863 |
r
shape = sp.array(shape)
if sp.size(shape) == 1:
shape = sp.full((3, ), int(shape))
ndim = (shape != 1).sum()
s_vol = ps_disk(radius).sum() if ndim == 2 else ps_ball(radius).sum()
bulk_vol = sp.prod(shape)
N = int(sp.ceil((1 - porosity)*bulk_vol/s_vol))
im = sp.random.random(size=shape)
# Helper functions for calculating porosity: phi = g(f(N))
f = lambda N: spim.distance_transform_edt(im > N/bulk_vol) < radius
g = lambda im: 1 - im.sum() / sp.prod(shape)
# # Newton's method for getting image porosity match the given
# w = 1.0 # Damping factor
# dN = 5 if ndim == 2 else 25 # Perturbation
# for i in range(iter_max):
# err = g(f(N)) - porosity
# d_err = (g(f(N+dN)) - g(f(N))) / dN
# if d_err == 0:
# break
# if abs(err) <= tol:
# break
# N2 = N - int(err/d_err) # xnew = xold - f/df
# N = w * N2 + (1-w) * N
# Bisection search: N is always undershoot (bc. of overlaps)
N_low, N_high = N, 4*N
for i in range(iter_max):
N = sp.mean([N_high, N_low], dtype=int)
err = g(f(N)) - porosity
if err > 0:
N_low = N
else:
N_high = N
if abs(err) <= tol:
break
return ~f(N) | def overlapping_spheres(shape: List[int], radius: int, porosity: float,
iter_max: int = 10, tol: float = 0.01) | r"""
Generate a packing of overlapping mono-disperse spheres
Parameters
----------
shape : list
The size of the image to generate in [Nx, Ny, Nz] where Ni is the
number of voxels in the i-th direction.
radius : scalar
The radius of spheres in the packing.
porosity : scalar
The porosity of the final image, accurate to the given tolerance.
iter_max : int
Maximum number of iterations for the iterative algorithm that improves
the porosity of the final image to match the given value.
tol : float
Tolerance for porosity of the final image compared to the given value.
Returns
-------
image : ND-array
A boolean array with ``True`` values denoting the pore space
Notes
-----
This method can also be used to generate a dispersion of hollows by
treating ``porosity`` as solid volume fraction and inverting the
returned image. | 4.355009 | 4.320169 | 1.008065 |
r
try:
import noise
except ModuleNotFoundError:
raise Exception("The noise package must be installed")
shape = sp.array(shape)
if sp.size(shape) == 1:
Lx, Ly, Lz = sp.full((3, ), int(shape))
elif len(shape) == 2:
Lx, Ly = shape
Lz = 1
elif len(shape) == 3:
Lx, Ly, Lz = shape
if mode == 'simplex':
f = noise.snoise3
else:
f = noise.pnoise3
frequency = sp.atleast_1d(frequency)
if frequency.size == 1:
freq = sp.full(shape=[3, ], fill_value=frequency[0])
elif frequency.size == 2:
freq = sp.concatenate((frequency, [1]))
else:
freq = sp.array(frequency)
im = sp.zeros(shape=[Lx, Ly, Lz], dtype=float)
for x in range(Lx):
for y in range(Ly):
for z in range(Lz):
im[x, y, z] = f(x=x/freq[0], y=y/freq[1], z=z/freq[2],
octaves=octaves)
im = im.squeeze()
if porosity:
im = norm_to_uniform(im, scale=[0, 1])
im = im < porosity
return im | def generate_noise(shape: List[int], porosity=None, octaves: int = 3,
frequency: int = 32, mode: str = 'simplex') | r"""
Generate a field of spatially correlated random noise using the Perlin
noise algorithm, or the updated Simplex noise algorithm.
Parameters
----------
shape : array_like
The size of the image to generate in [Nx, Ny, Nz] where N is the
number of voxels.
porosity : float
If specified, this will threshold the image to the specified value
prior to returning. If no value is given (the default), then the
scalar noise field is returned.
octaves : int
Controls the *texture* of the noise, with higher octaves giving more
complex features over larger length scales.
frequency : array_like
Controls the relative sizes of the features, with higher frequencies
giving larger features. A scalar value will apply the same frequency
in all directions, given an isotropic field; a vector value will
apply the specified values along each axis to create anisotropy.
mode : string
Which noise algorithm to use, either ``'simplex'`` (default) or
``'perlin'``.
Returns
-------
image : ND-array
If porosity is given, then a boolean array with ``True`` values
denoting the pore space is returned. If not, then normally
distributed and spatially correlated randomly noise is returned.
Notes
-----
This method depends the a package called 'noise' which must be
compiled. It is included in the Anaconda distribution, or a platform
specific binary can be downloaded.
See Also
--------
porespy.tools.norm_to_uniform | 2.511693 | 2.270801 | 1.106083 |
blobiness = sp.array(blobiness)
shape = sp.array(shape)
if sp.size(shape) == 1:
shape = sp.full((3, ), int(shape))
sigma = sp.mean(shape)/(40*blobiness)
im = sp.random.random(shape)
im = spim.gaussian_filter(im, sigma=sigma)
im = norm_to_uniform(im, scale=[0, 1])
if porosity:
im = im < porosity
return im | def blobs(shape: List[int], porosity: float = 0.5, blobiness: int = 1) | Generates an image containing amorphous blobs
Parameters
----------
shape : list
The size of the image to generate in [Nx, Ny, Nz] where N is the
number of voxels
porosity : float
If specified, this will threshold the image to the specified value
prior to returning. If ``None`` is specified, then the scalar noise
field is converted to a uniform distribution and returned without
thresholding.
blobiness : int or list of ints(default = 1)
Controls the morphology of the blobs. A higher number results in
a larger number of small blobs. If a list is supplied then the blobs
are anisotropic.
Returns
-------
image : ND-array
A boolean array with ``True`` values denoting the pore space
See Also
--------
norm_to_uniform | 4.909578 | 4.765621 | 1.030208 |
r
shape = sp.array(shape)
if sp.size(shape) == 1:
shape = sp.full((3, ), int(shape))
elif sp.size(shape) == 2:
raise Exception("2D cylinders don't make sense")
R = sp.sqrt(sp.sum(sp.square(shape))).astype(int)
im = sp.zeros(shape)
# Adjust max angles to be between 0 and 90
if (phi_max > 90) or (phi_max < 0):
raise Exception('phi_max must be betwen 0 and 90')
if (theta_max > 90) or (theta_max < 0):
raise Exception('theta_max must be betwen 0 and 90')
n = 0
while n < ncylinders:
# Choose a random starting point in domain
x = sp.rand(3)*shape
# Chose a random phi and theta within given ranges
phi = (sp.pi/2 - sp.pi*sp.rand())*phi_max/90
theta = (sp.pi/2 - sp.pi*sp.rand())*theta_max/90
X0 = R*sp.array([sp.cos(phi)*sp.cos(theta),
sp.cos(phi)*sp.sin(theta),
sp.sin(phi)])
[X0, X1] = [x + X0, x - X0]
crds = line_segment(X0, X1)
lower = ~sp.any(sp.vstack(crds).T < [0, 0, 0], axis=1)
upper = ~sp.any(sp.vstack(crds).T >= shape, axis=1)
valid = upper*lower
if sp.any(valid):
im[crds[0][valid], crds[1][valid], crds[2][valid]] = 1
n += 1
im = sp.array(im, dtype=bool)
dt = spim.distance_transform_edt(~im) < radius
return ~dt | def cylinders(shape: List[int], radius: int, ncylinders: int,
phi_max: float = 0, theta_max: float = 90) | r"""
Generates a binary image of overlapping cylinders. This is a good
approximation of a fibrous mat.
Parameters
----------
shape : list
The size of the image to generate in [Nx, Ny, Nz] where N is the
number of voxels. 2D images are not permitted.
radius : scalar
The radius of the cylinders in voxels
ncylinders : scalar
The number of cylinders to add to the domain. Adjust this value to
control the final porosity, which is not easily specified since
cylinders overlap and intersect different fractions of the domain.
theta_max : scalar
A value between 0 and 90 that controls the amount of rotation *in the*
XY plane, with 0 meaning all fibers point in the X-direction, and
90 meaning they are randomly rotated about the Z axis by as much
as +/- 90 degrees.
phi_max : scalar
A value between 0 and 90 that controls the amount that the fibers
lie *out of* the XY plane, with 0 meaning all fibers lie in the XY
plane, and 90 meaning that fibers are randomly oriented out of the
plane by as much as +/- 90 degrees.
Returns
-------
image : ND-array
A boolean array with ``True`` values denoting the pore space | 2.685014 | 2.739018 | 0.980284 |
r
X0 = sp.around(X0).astype(int)
X1 = sp.around(X1).astype(int)
if len(X0) == 3:
L = sp.amax(sp.absolute([[X1[0]-X0[0]], [X1[1]-X0[1]], [X1[2]-X0[2]]])) + 1
x = sp.rint(sp.linspace(X0[0], X1[0], L)).astype(int)
y = sp.rint(sp.linspace(X0[1], X1[1], L)).astype(int)
z = sp.rint(sp.linspace(X0[2], X1[2], L)).astype(int)
return [x, y, z]
else:
L = sp.amax(sp.absolute([[X1[0]-X0[0]], [X1[1]-X0[1]]])) + 1
x = sp.rint(sp.linspace(X0[0], X1[0], L)).astype(int)
y = sp.rint(sp.linspace(X0[1], X1[1], L)).astype(int)
return [x, y] | def line_segment(X0, X1) | r"""
Calculate the voxel coordinates of a straight line between the two given
end points
Parameters
----------
X0 and X1 : array_like
The [x, y] or [x, y, z] coordinates of the start and end points of
the line.
Returns
-------
coords : list of lists
A list of lists containing the X, Y, and Z coordinates of all voxels
that should be drawn between the start and end points to create a solid
line. | 1.606739 | 1.629899 | 0.985791 |
r
elem = strel.copy()
x_dim, y_dim = im.shape
x_min = x-r
x_max = x+r+1
y_min = y-r
y_max = y+r+1
if x_min < 0:
x_adj = -x_min
elem = elem[x_adj:, :]
x_min = 0
elif x_max > x_dim:
x_adj = x_max - x_dim
elem = elem[:-x_adj, :]
if y_min < 0:
y_adj = -y_min
elem = elem[:, y_adj:]
y_min = 0
elif y_max > y_dim:
y_adj = y_max - y_dim
elem = elem[:, :-y_adj]
ex, ey = elem.shape
im[x_min:x_min+ex, y_min:y_min+ey] += elem
return im | def _fit_strel_to_im_2d(im, strel, r, x, y) | r"""
Helper function to add a structuring element to a 2D image.
Used by RSA. Makes sure if center is less than r pixels from edge of image
that the strel is sliced to fit. | 1.756916 | 1.650097 | 1.064735 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.