code
stringlengths 11
173k
| docstring
stringlengths 2
593k
| func_name
stringlengths 2
189
| language
stringclasses 1
value | repo
stringclasses 844
values | path
stringlengths 11
294
| url
stringlengths 60
339
| license
stringclasses 4
values |
---|---|---|---|---|---|---|---|
public static void storeStackTrace(StackTraceElement[] stackTrace) {
if(stackTrace == null) {
return;
}
for (StackTraceElement stackTraceElement : stackTrace) {
if(stackTrace != null)
storeStackTraceElement(stackTraceElement);
}
} |
Store a stack trace in the cache
@param stackTrace the stack trace to store
| StackTraceElementCache::storeStackTrace | java | deeplearning4j/deeplearning4j | nd4j/nd4j-common/src/main/java/org/nd4j/linalg/profiler/data/stacktrace/StackTraceElementCache.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-common/src/main/java/org/nd4j/linalg/profiler/data/stacktrace/StackTraceElementCache.java | Apache-2.0 |
public static void storeStackTraceElement(StackTraceElement stackTraceElement) {
if(stackTraceElement == null) {
return;
}
StackTraceLookupKey key = StackTraceLookupKey.builder()
.className(stackTraceElement.getClassName())
.methodName(stackTraceElement.getMethodName())
.lineNumber(stackTraceElement.getLineNumber()).build();
cache.put(key,stackTraceElement);
} |
Store a stack trace element in the cache
@param stackTraceElement the stack trace element to store
| StackTraceElementCache::storeStackTraceElement | java | deeplearning4j/deeplearning4j | nd4j/nd4j-common/src/main/java/org/nd4j/linalg/profiler/data/stacktrace/StackTraceElementCache.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-common/src/main/java/org/nd4j/linalg/profiler/data/stacktrace/StackTraceElementCache.java | Apache-2.0 |
public static boolean containsKey(String className,String methodName,int lineNumber) {
StackTraceLookupKey key = StackTraceLookupKey.builder().className(className).methodName(methodName).lineNumber(lineNumber).build();
return cache.containsKey(key);
} |
Check if the cache contains a stack trace element
@param className the class name to check
@param methodName the method name to check
@param lineNumber the line number to check
@return
| StackTraceElementCache::containsKey | java | deeplearning4j/deeplearning4j | nd4j/nd4j-common/src/main/java/org/nd4j/linalg/profiler/data/stacktrace/StackTraceElementCache.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-common/src/main/java/org/nd4j/linalg/profiler/data/stacktrace/StackTraceElementCache.java | Apache-2.0 |
public static StackTraceElement lookup(String className,String methodName,int lineNumber) {
StackTraceLookupKey key = StackTraceLookupKey.builder().className(className).methodName(methodName).lineNumber(lineNumber).build();
return cache.get(key);
} |
Lookup a stack trace element by class name, method name, and line number
@param className the class name to check
@param methodName the method name to check
@param lineNumber the line number to check
@return the stack trace element if it exists, or null if it does not exist
| StackTraceElementCache::lookup | java | deeplearning4j/deeplearning4j | nd4j/nd4j-common/src/main/java/org/nd4j/linalg/profiler/data/stacktrace/StackTraceElementCache.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-common/src/main/java/org/nd4j/linalg/profiler/data/stacktrace/StackTraceElementCache.java | Apache-2.0 |
/**
 * Convert an arrow {@link Tensor} to an {@link INDArray}.
 * The element size is deduced from the serialized buffer length divided by the
 * number of elements, and arrow's byte-based strides are converted to nd4j's
 * element-based strides.
 *
 * @param tensor the input tensor
 * @return the equivalent {@link INDArray}
 * @throws ND4JIllegalStateException if the tensor carries no data buffer
 */
public static INDArray fromTensor(Tensor tensor) {
byte b = tensor.typeType();
int[] shape = new int[tensor.shapeLength()];
int[] stride = new int[tensor.stridesLength()];
for(int i = 0; i < shape.length; i++) {
// shape entries are TensorDim structs; size() is the extent of that dimension
shape[i] = (int) tensor.shape(i).size();
stride[i] = (int) tensor.strides(i);
}
int length = ArrayUtil.prod(shape);
Buffer buffer = tensor.data();
if(buffer == null) {
throw new ND4JIllegalStateException("Buffer was not serialized properly.");
}
//deduce element size
int elementSize = (int) buffer.length() / length;
//nd4j strides aren't based on element size
for(int i = 0; i < stride.length; i++) {
stride[i] /= elementSize;
}
// element size disambiguates e.g. Type.Int into INT (4 bytes) vs LONG (8 bytes)
DataType type = typeFromTensorType(b,elementSize);
DataBuffer dataBuffer = DataBufferStruct.createFromByteBuffer(tensor.getByteBuffer(),(int) tensor.data().offset(),type,length);
INDArray arr = Nd4j.create(dataBuffer,shape);
arr.setShapeAndStride(shape,stride);
return arr;
}
Convert a {@link Tensor}
to an {@link INDArray}
@param tensor the input tensor
@return the equivalent {@link INDArray}
| ArrowSerde::fromTensor | java | deeplearning4j/deeplearning4j | nd4j/nd4j-serde/nd4j-arrow/src/main/java/org/nd4j/arrow/ArrowSerde.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-serde/nd4j-arrow/src/main/java/org/nd4j/arrow/ArrowSerde.java | Apache-2.0 |
/**
 * Convert an {@link INDArray} to an arrow {@link Tensor}.
 * Shape, byte-based strides and the raw data buffer are serialized into a
 * fresh flatbuffer; the offsets for shape/strides/data are created before or
 * during the Tensor table build as the flatbuffers API requires.
 *
 * @param arr the array to convert
 * @return the equivalent {@link Tensor}
 */
public static Tensor toTensor(INDArray arr) {
FlatBufferBuilder bufferBuilder = new FlatBufferBuilder(1024);
// arrow strides are in bytes (element stride * element size)
long[] strides = getArrowStrides(arr);
int shapeOffset = createDims(bufferBuilder,arr);
int stridesOffset = Tensor.createStridesVector(bufferBuilder,strides);
Tensor.startTensor(bufferBuilder);
Tensor.addShape(bufferBuilder,shapeOffset);
Tensor.addStrides(bufferBuilder,stridesOffset);
int dataOffset = addDataForArr(bufferBuilder,arr);
Tensor.addData(bufferBuilder,dataOffset);
// NOTE(review): passing the builder's current offset as the type union value
// looks suspicious — normally the offset of the type table is added here; confirm
// against the arrow Tensor flatbuffers schema.
Tensor.addType(bufferBuilder,bufferBuilder.offset());
addTypeTypeRelativeToNDArray(bufferBuilder,arr);
int endTensor = Tensor.endTensor(bufferBuilder);
Tensor.finishTensorBuffer(bufferBuilder,endTensor);
return Tensor.getRootAsTensor(bufferBuilder.dataBuffer());
}
Convert an {@link INDArray}
to an arrow {@link Tensor}
@param arr the array to convert
@return the equivalent {@link Tensor}
| ArrowSerde::toTensor | java | deeplearning4j/deeplearning4j | nd4j/nd4j-serde/nd4j-arrow/src/main/java/org/nd4j/arrow/ArrowSerde.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-serde/nd4j-arrow/src/main/java/org/nd4j/arrow/ArrowSerde.java | Apache-2.0 |
public static int addDataForArr(FlatBufferBuilder bufferBuilder, INDArray arr) {
DataBuffer toAdd = arr.isView() ? arr.dup().data() : arr.data();
int offset = DataBufferStruct.createDataBufferStruct(bufferBuilder,toAdd);
return Buffer.createBuffer(bufferBuilder,offset,toAdd.length() * toAdd.getElementSize());
} |
Create a {@link Buffer}
representing the location metadata of the actual data
contents for the ndarrays' {@link DataBuffer}
@param bufferBuilder the buffer builder in use
@param arr the array to add the underlying data for
@return the offset added
| ArrowSerde::addDataForArr | java | deeplearning4j/deeplearning4j | nd4j/nd4j-serde/nd4j-arrow/src/main/java/org/nd4j/arrow/ArrowSerde.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-serde/nd4j-arrow/src/main/java/org/nd4j/arrow/ArrowSerde.java | Apache-2.0 |
public static void addTypeTypeRelativeToNDArray(FlatBufferBuilder bufferBuilder,INDArray arr) {
switch(arr.data().dataType()) {
case LONG:
case INT:
Tensor.addTypeType(bufferBuilder,Type.Int);
break;
case FLOAT:
Tensor.addTypeType(bufferBuilder,Type.FloatingPoint);
break;
case DOUBLE:
Tensor.addTypeType(bufferBuilder,Type.Decimal);
break;
}
} |
Convert the given {@link INDArray}
data type to the proper data type for the tensor.
@param bufferBuilder the buffer builder in use
@param arr the array to convert the data type for
| ArrowSerde::addTypeTypeRelativeToNDArray | java | deeplearning4j/deeplearning4j | nd4j/nd4j-serde/nd4j-arrow/src/main/java/org/nd4j/arrow/ArrowSerde.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-serde/nd4j-arrow/src/main/java/org/nd4j/arrow/ArrowSerde.java | Apache-2.0 |
public static int createDims(FlatBufferBuilder bufferBuilder,INDArray arr) {
int[] tensorDimOffsets = new int[arr.rank()];
int[] nameOffset = new int[arr.rank()];
for(int i = 0; i < tensorDimOffsets.length; i++) {
nameOffset[i] = bufferBuilder.createString("");
tensorDimOffsets[i] = TensorDim.createTensorDim(bufferBuilder,arr.size(i),nameOffset[i]);
}
return Tensor.createShapeVector(bufferBuilder,tensorDimOffsets);
} |
Create the dimensions for the flatbuffer builder
@param bufferBuilder the buffer builder to use
@param arr the input array
@return
| ArrowSerde::createDims | java | deeplearning4j/deeplearning4j | nd4j/nd4j-serde/nd4j-arrow/src/main/java/org/nd4j/arrow/ArrowSerde.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-serde/nd4j-arrow/src/main/java/org/nd4j/arrow/ArrowSerde.java | Apache-2.0 |
public static long[] getArrowStrides(INDArray arr) {
long[] ret = new long[arr.rank()];
for(int i = 0; i < arr.rank(); i++) {
ret[i] = arr.stride(i) * arr.data().getElementSize();
}
return ret;
} |
Get the strides of this {@link INDArray}
multiplied by the element size.
This is the {@link Tensor} and numpy format
@param arr the array to convert
@return
| ArrowSerde::getArrowStrides | java | deeplearning4j/deeplearning4j | nd4j/nd4j-serde/nd4j-arrow/src/main/java/org/nd4j/arrow/ArrowSerde.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-serde/nd4j-arrow/src/main/java/org/nd4j/arrow/ArrowSerde.java | Apache-2.0 |
public static DataType typeFromTensorType(byte type, int elementSize) {
if(type == Type.FloatingPoint) {
return DataType.FLOAT;
}
else if(type == Type.Decimal) {
return DataType.DOUBLE;
}
else if(type == Type.Int) {
if(elementSize == 4) {
return DataType.INT;
}
else if(elementSize == 8) {
return DataType.LONG;
}
}
else {
throw new IllegalArgumentException("Only valid types are Type.Decimal and Type.Int");
}
throw new IllegalArgumentException("Unable to determine data type");
} |
Create the databuffer type from the given type,
relative to the bytes in arrow in class:
{@link Type}
@param type the type to create the nd4j {@link DataType} from
@param elementSize the element size
@return the data buffer type
| ArrowSerde::typeFromTensorType | java | deeplearning4j/deeplearning4j | nd4j/nd4j-serde/nd4j-arrow/src/main/java/org/nd4j/arrow/ArrowSerde.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-serde/nd4j-arrow/src/main/java/org/nd4j/arrow/ArrowSerde.java | Apache-2.0 |
/**
 * Create a {@link DataBuffer} from a byte buffer. This is meant to be used
 * with flatbuffers, which store their payload toward the end of the buffer —
 * hence reads are addressed relative to {@code bb.capacity()}.
 *
 * @param bb the flatbuffers backing buffer
 * @param bb_pos the position to start from
 * @param type the type of buffer to create (DOUBLE, FLOAT, INT or LONG)
 * @param length the number of elements in the buffer to create
 * @return the created databuffer
 */
public static DataBuffer createFromByteBuffer(ByteBuffer bb, int bb_pos, DataType type, int length) {
// flatbuffers are always little-endian regardless of platform
bb.order(ByteOrder.LITTLE_ENDIAN);
int elementSize = DataTypeUtil.lengthForDtype(type);
DataBuffer ret = Nd4j.createBuffer(ByteBuffer.allocateDirect(length * elementSize),type,length);
// NOTE(review): reads are addressed as capacity() - bb_pos + i*elementSize, which
// assumes bb_pos measures distance from the END of the buffer (flatbuffers layout).
// Confirm against DataBufferStruct.createDataBufferStruct, which writes back-to-front.
switch(type) {
case DOUBLE:
for(int i = 0; i < ret.length(); i++) {
double doubleGet = bb.getDouble(bb.capacity() - bb_pos + (i * elementSize));
ret.put(i,doubleGet);
}
break;
case FLOAT:
for(int i = 0; i < ret.length(); i++) {
float floatGet = bb.getFloat(bb.capacity() - bb_pos + (i * elementSize));
ret.put(i,floatGet);
}
break;
case INT:
for(int i = 0; i < ret.length(); i++) {
int intGet = bb.getInt(bb.capacity() - bb_pos + (i * elementSize));
ret.put(i,intGet);
}
break;
case LONG:
for(int i = 0; i < ret.length(); i++) {
long longGet = bb.getLong(bb.capacity() - bb_pos + (i * elementSize));
ret.put(i,longGet);
}
break;
}
return ret;
}
Create a {@link DataBuffer} from a
byte buffer. This is meant to be used with flatbuffers
@param bb the flat buffers buffer
@param bb_pos the position to start from
@param type the type of buffer to create
@param length the length of the buffer to create
@return the created databuffer
| DataBufferStruct::createFromByteBuffer | java | deeplearning4j/deeplearning4j | nd4j/nd4j-serde/nd4j-arrow/src/main/java/org/nd4j/arrow/DataBufferStruct.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-serde/nd4j-arrow/src/main/java/org/nd4j/arrow/DataBufferStruct.java | Apache-2.0 |
/**
 * Create a data buffer struct within the passed in {@link FlatBufferBuilder}.
 * FlatBuffers are built back-to-front, so elements are written in REVERSE
 * index order; do not reorder this loop.
 *
 * @param bufferBuilder the existing flatbuffer builder to serialize the
 *                      {@link DataBuffer} into
 * @param create the databuffer to serialize
 * @return an int representing the offset of the serialized buffer
 */
public static int createDataBufferStruct(FlatBufferBuilder bufferBuilder,DataBuffer create) {
// reserve aligned space for the whole payload before writing
bufferBuilder.prep(create.getElementSize(), (int) create.length() * create.getElementSize());
for(int i = (int) (create.length() - 1); i >= 0; i--) {
switch(create.dataType()) {
case DOUBLE:
double putDouble = create.getDouble(i);
bufferBuilder.putDouble(putDouble);
break;
case FLOAT:
float putFloat = create.getFloat(i);
bufferBuilder.putFloat(putFloat);
break;
case INT:
int putInt = create.getInt(i);
bufferBuilder.putInt(putInt);
break;
case LONG:
long putLong = create.getLong(i);
bufferBuilder.putLong(putLong);
// no break needed: LONG is the last case in this switch
}
}
return bufferBuilder.offset();
}
Create a data buffer struct within
the passed in {@link FlatBufferBuilder}
@param bufferBuilder the existing flatbuffer
to use to serialize the {@link DataBuffer}
@param create the databuffer to serialize
@return an int representing the offset of the buffer
| DataBufferStruct::createDataBufferStruct | java | deeplearning4j/deeplearning4j | nd4j/nd4j-serde/nd4j-arrow/src/main/java/org/nd4j/arrow/DataBufferStruct.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-serde/nd4j-arrow/src/main/java/org/nd4j/arrow/DataBufferStruct.java | Apache-2.0 |
/**
 * Record the current amount of memory used when a System.gc() runs.
 * The stored {@link #currMemory} value is meant to be sampled by readers.
 *
 * @param currMemory the current amount of memory used, in bytes
 */
public static void recordGC(long currMemory) {
MemoryCounter.currMemory.set(currMemory);
}
Record the current amount of memory used when a system.gc runs.
This {@link #currMemory} is meant to be sampled.
@param currMemory the current amount of memory used
| MemoryCounter::recordGC | java | deeplearning4j/deeplearning4j | nd4j/nd4j-profiler/src/main/java/org/nd4j/profiler/MemoryCounter.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-profiler/src/main/java/org/nd4j/profiler/MemoryCounter.java | Apache-2.0 |
public static DataType dataTypeForTvmType(DLDataType dataType) {
if(dataType.code() == kDLInt && dataType.bits() == 8) {
return INT8;
} else if(dataType.code() == kDLInt && dataType.bits() == 16) {
return INT16;
} else if(dataType.code() == kDLInt && dataType.bits() == 32) {
return INT32;
} else if(dataType.code() == kDLInt && dataType.bits() == 64) {
return INT64;
} else if(dataType.code() == kDLUInt && dataType.bits() == 8) {
return UINT8;
} else if(dataType.code() == kDLUInt && dataType.bits() == 16) {
return UINT16;
} else if(dataType.code() == kDLUInt && dataType.bits() == 32) {
return UINT32;
} else if(dataType.code() == kDLUInt && dataType.bits() == 64) {
return UINT64;
} else if(dataType.code() == kDLFloat && dataType.bits() == 16) {
return FLOAT16;
} else if(dataType.code() == kDLFloat && dataType.bits() == 32) {
return FLOAT;
} else if(dataType.code() == kDLFloat && dataType.bits() == 64) {
return DOUBLE;
} else if(dataType.code() == kDLBfloat && dataType.bits() == 16) {
return BFLOAT16;
} else
throw new IllegalArgumentException("Illegal data type code " + dataType.code() + " with bits " + dataType.bits());
} |
Return a {@link DataType}
for the tvm data type
@param dataType the tvm data type to convert
@return
| TVMUtils::dataTypeForTvmType | java | deeplearning4j/deeplearning4j | nd4j/nd4j-tvm/src/main/java/org/nd4j/tvm/util/TVMUtils.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-tvm/src/main/java/org/nd4j/tvm/util/TVMUtils.java | Apache-2.0 |
public static DLDataType tvmTypeForDataType(DataType dataType) {
if(dataType == INT8) {
return new DLDataType().code((byte)kDLInt).bits((byte)8).lanes((short)1);
} else if(dataType == INT16) {
return new DLDataType().code((byte)kDLInt).bits((byte)16).lanes((short)1);
} else if(dataType == INT32) {
return new DLDataType().code((byte)kDLInt).bits((byte)32).lanes((short)1);
} else if(dataType == INT64) {
return new DLDataType().code((byte)kDLInt).bits((byte)64).lanes((short)1);
} else if(dataType == UINT8) {
return new DLDataType().code((byte)kDLUInt).bits((byte)8).lanes((short)1);
} else if(dataType == UINT16) {
return new DLDataType().code((byte)kDLUInt).bits((byte)16).lanes((short)1);
} else if(dataType == UINT32) {
return new DLDataType().code((byte)kDLUInt).bits((byte)32).lanes((short)1);
} else if(dataType == UINT64) {
return new DLDataType().code((byte)kDLUInt).bits((byte)64).lanes((short)1);
} else if(dataType == FLOAT16) {
return new DLDataType().code((byte)kDLFloat).bits((byte)16).lanes((short)1);
} else if(dataType == FLOAT) {
return new DLDataType().code((byte)kDLFloat).bits((byte)32).lanes((short)1);
} else if(dataType == DOUBLE) {
return new DLDataType().code((byte)kDLFloat).bits((byte)64).lanes((short)1);
} else if(dataType == BFLOAT16) {
return new DLDataType().code((byte)kDLBfloat).bits((byte)16).lanes((short)1);
} else
throw new IllegalArgumentException("Illegal data type " + dataType);
} |
Convert the tvm type for the given data type
@param dataType
@return
| TVMUtils::tvmTypeForDataType | java | deeplearning4j/deeplearning4j | nd4j/nd4j-tvm/src/main/java/org/nd4j/tvm/util/TVMUtils.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-tvm/src/main/java/org/nd4j/tvm/util/TVMUtils.java | Apache-2.0 |
/**
 * Convert a tvm {@link DLTensor} into an {@link INDArray}.
 * A null shape is treated as a scalar-like length-1 array; null strides
 * fall back to the default nd4j strides for the shape.
 *
 * @param value the tensor to convert
 * @return the equivalent {@link INDArray}
 */
public static INDArray getArray(DLTensor value) {
DataType dataType = dataTypeForTvmType(value.dtype());
LongPointer shape = value.shape();
LongPointer stride = value.strides();
long[] shapeConvert;
if(shape != null) {
shapeConvert = new long[value.ndim()];
shape.get(shapeConvert);
} else {
// no shape pointer: treat as a single-element array
shapeConvert = new long[]{1};
}
long[] strideConvert;
if(stride != null) {
strideConvert = new long[value.ndim()];
stride.get(strideConvert);
} else {
strideConvert = Nd4j.getStrides(shapeConvert);
}
// total size in BYTES: element count times bytes per element
long size = 1;
for (int i = 0; i < shapeConvert.length; i++) {
size *= shapeConvert[i];
}
size *= value.dtype().bits() / 8;
// NOTE(review): getDataBuffer uses this byte size as the typed pointer capacity;
// confirm the intended unit (bytes vs elements) against getDataBuffer.
DataBuffer getBuffer = getDataBuffer(value,size);
Preconditions.checkState(dataType.equals(getBuffer.dataType()),"Data type must be equivalent as specified by the tvm metadata.");
return Nd4j.create(getBuffer,shapeConvert,strideConvert,0);
}
Convert an tvm {@link DLTensor}
in to an {@link INDArray}
@param value the tensor to convert
@return
| TVMUtils::getArray | java | deeplearning4j/deeplearning4j | nd4j/nd4j-tvm/src/main/java/org/nd4j/tvm/util/TVMUtils.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-tvm/src/main/java/org/nd4j/tvm/util/TVMUtils.java | Apache-2.0 |
public static DLTensor getTensor(INDArray ndArray, DLDevice ctx) {
DLTensor ret = new DLTensor();
ret.data(ndArray.data().pointer());
ret.device(ctx);
ret.ndim(ndArray.rank());
ret.dtype(tvmTypeForDataType(ndArray.dataType()));
ret.shape(new LongPointer(ndArray.shape()));
ret.strides(new LongPointer(ndArray.stride()));
ret.byte_offset(ndArray.offset());
return ret;
} |
Get an tvm tensor from an ndarray.
@param ndArray the ndarray to get the value from
@param ctx the {@link DLDevice} to use.
@return
| TVMUtils::getTensor | java | deeplearning4j/deeplearning4j | nd4j/nd4j-tvm/src/main/java/org/nd4j/tvm/util/TVMUtils.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-tvm/src/main/java/org/nd4j/tvm/util/TVMUtils.java | Apache-2.0 |
/**
 * Create an nd4j {@link DataBuffer} wrapping the native data of the given
 * {@link DLTensor}, choosing pointer and indexer types from the tensor's dtype.
 *
 * NOTE(review): {@code size} is passed in bytes by {@link #getArray(DLTensor)}
 * but is used here as the capacity of TYPED pointers and as the buffer length;
 * confirm the intended unit (elements vs bytes) for multi-byte dtypes.
 *
 * @param tens the tensor whose data is wrapped
 * @param size the capacity/length used for the created buffer
 * @return the equivalent data buffer
 */
public static DataBuffer getDataBuffer(DLTensor tens, long size) {
DataBuffer buffer = null;
DataType type = dataTypeForTvmType(tens.dtype());
switch (type) {
case BYTE:
BytePointer pInt8 = new BytePointer(tens.data()).capacity(size);
Indexer int8Indexer = ByteIndexer.create(pInt8);
buffer = Nd4j.createBuffer(pInt8, type, size, int8Indexer);
break;
case SHORT:
ShortPointer pInt16 = new ShortPointer(tens.data()).capacity(size);
Indexer int16Indexer = ShortIndexer.create(pInt16);
buffer = Nd4j.createBuffer(pInt16, type, size, int16Indexer);
break;
case INT:
IntPointer pInt32 = new IntPointer(tens.data()).capacity(size);
Indexer int32Indexer = IntIndexer.create(pInt32);
buffer = Nd4j.createBuffer(pInt32, type, size, int32Indexer);
break;
case LONG:
LongPointer pInt64 = new LongPointer(tens.data()).capacity(size);
Indexer int64Indexer = LongIndexer.create(pInt64);
buffer = Nd4j.createBuffer(pInt64, type, size, int64Indexer);
break;
case UBYTE:
BytePointer pUint8 = new BytePointer(tens.data()).capacity(size);
Indexer uint8Indexer = UByteIndexer.create(pUint8);
buffer = Nd4j.createBuffer(pUint8, type, size, uint8Indexer);
break;
case UINT16:
ShortPointer pUint16 = new ShortPointer(tens.data()).capacity(size);
Indexer uint16Indexer = UShortIndexer.create(pUint16);
buffer = Nd4j.createBuffer(pUint16, type, size, uint16Indexer);
break;
case UINT32:
IntPointer pUint32 = new IntPointer(tens.data()).capacity(size);
Indexer uint32Indexer = UIntIndexer.create(pUint32);
buffer = Nd4j.createBuffer(pUint32, type, size, uint32Indexer);
break;
case UINT64:
// unsigned 64-bit reuses the signed LongIndexer (no unsigned variant available)
LongPointer pUint64 = new LongPointer(tens.data()).capacity(size);
Indexer uint64Indexer = LongIndexer.create(pUint64);
buffer = Nd4j.createBuffer(pUint64, type, size, uint64Indexer);
break;
case HALF:
ShortPointer pFloat16 = new ShortPointer(tens.data()).capacity(size);
Indexer float16Indexer = HalfIndexer.create(pFloat16);
buffer = Nd4j.createBuffer(pFloat16, type, size, float16Indexer);
break;
case FLOAT:
FloatPointer pFloat = new FloatPointer(tens.data()).capacity(size);
FloatIndexer floatIndexer = FloatIndexer.create(pFloat);
buffer = Nd4j.createBuffer(pFloat, type, size, floatIndexer);
break;
case DOUBLE:
DoublePointer pDouble = new DoublePointer(tens.data()).capacity(size);
Indexer doubleIndexer = DoubleIndexer.create(pDouble);
buffer = Nd4j.createBuffer(pDouble, type, size, doubleIndexer);
break;
case BFLOAT16:
ShortPointer pBfloat16 = new ShortPointer(tens.data()).capacity(size);
Indexer bfloat16Indexer = Bfloat16Indexer.create(pBfloat16);
buffer = Nd4j.createBuffer(pBfloat16, type, size, bfloat16Indexer);
break;
default:
throw new RuntimeException("Unsupported data type encountered");
}
return buffer;
}
Get the data buffer from the given value
@param tens the values to get
@return the equivalent data buffer
| TVMUtils::getDataBuffer | java | deeplearning4j/deeplearning4j | nd4j/nd4j-tvm/src/main/java/org/nd4j/tvm/util/TVMUtils.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-tvm/src/main/java/org/nd4j/tvm/util/TVMUtils.java | Apache-2.0 |
/**
 * Execute the loaded tvm graph using the given named inputs and return the
 * outputs as a map keyed by output index ("0", "1", ...).
 * All native allocations made during the call are released when the
 * {@link PointerScope} closes.
 *
 * @param input map of input name to ndarray
 * @return insertion-ordered map of output index (as string) to ndarray
 */
public Map<String,INDArray> exec(Map<String,INDArray> input) {
try (PointerScope scope = new PointerScope()) {
getNumInputs.CallPacked(new TVMArgs(values, codes, 0), rv);
// NOTE(review): numInputNodes is queried but never used below — the loop
// iterates over the caller-supplied map instead; confirm this is intentional.
long numInputNodes = rv.asLong();
getNumOutputs.CallPacked(new TVMArgs(values, codes, 0), rv);
long numOutputNodes = rv.asLong();
// set the right input
for (Map.Entry<String,INDArray> e : input.entrySet()) {
String name = e.getKey();
INDArray arr = e.getValue();
DLTensor inputTensor = getTensor(arr, ctx);
Preconditions.checkState(inputTensor != null,"Input must be a tensor.");
// packed-call convention: arg 0 = input name, arg 1 = tensor
setter.apply(0, new BytePointer(name));
setter.apply(1, inputTensor);
setInput.CallPacked(new TVMArgs(values, codes, 2), rv);
}
// run the code
run.CallPacked(new TVMArgs(values, codes, 0), rv);
Map<String, INDArray> ret = new LinkedHashMap<>();
// get the output
for (int i = 0; i < numOutputNodes; i++) {
setter.apply(0, i);
getOutput.CallPacked(new TVMArgs(values, codes, 1), rv);
DLTensor outputTensor = rv.asDLTensor();
ret.put(Integer.toString(i), getArray(outputTensor));
}
return ret;
}
}
Execute the {@link #run} function
using the given input {@link Map}
@param input the input map
@return a map of the names of the ndarrays
| TvmRunner::exec | java | deeplearning4j/deeplearning4j | nd4j/nd4j-tvm/src/main/java/org/nd4j/tvm/runner/TvmRunner.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-tvm/src/main/java/org/nd4j/tvm/runner/TvmRunner.java | Apache-2.0 |
public static TensorDataType fromProtoValue(String value) {
String valueReplace = value.replace("DT_","");
return TensorDataType.valueOf(valueReplace);
} |
Map a tensor data type to a proto value found in tensorflow.
Generally, this is just replacing DT_ with empty
and returning enum.valueOf(string)
@param value the input string
@return the associated {@link TensorDataType}
| TensorDataType::fromProtoValue | java | deeplearning4j/deeplearning4j | nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/TensorDataType.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/TensorDataType.java | Apache-2.0 |
public static String toPythonName(TensorDataType tensorDataType) {
switch(tensorDataType) {
case DOUBLE: return "float64";
case FLOAT: return "float32";
case HALF: return "float16";
default: return tensorDataType.name().toLowerCase();
}
} |
Get the python name for the given data type
@param tensorDataType the python name for the given data type
@return float64 for double, float32 for float, float16 for half, otherwise
the type's name converted to lower case
| TensorDataType::toPythonName | java | deeplearning4j/deeplearning4j | nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/TensorDataType.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/TensorDataType.java | Apache-2.0 |
public static TensorflowConversion getInstance() {
if(INSTANCE == null)
INSTANCE = new TensorflowConversion();
return INSTANCE;
} |
Get a singleton instance
@return
| TensorflowConversion::getInstance | java | deeplearning4j/deeplearning4j | nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/TensorflowConversion.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/TensorflowConversion.java | Apache-2.0 |
/**
 * Convert an {@link INDArray} to a {@link TF_Tensor} with zero copy for
 * numeric types (the tensor points directly at the ndarray's data).
 * String (UTF8) arrays are encoded into a freshly allocated tensor using the
 * TF_STRING offset-table layout. Views and non-'c'-ordered arrays are
 * duplicated first to guarantee contiguous row-major data.
 *
 * @param ndArray the ndarray to convert; must be non-null with a non-null buffer
 * @return the equivalent {@link TF_Tensor}
 * @throws IllegalArgumentException for null input or an unsupported data type
 * @throws IllegalStateException if string encoding fails
 */
public TF_Tensor tensorFromNDArray(INDArray ndArray) {
if(ndArray == null) {
throw new IllegalArgumentException("NDArray must not be null!");
}
//we infer data type from the ndarray.databuffer()
//for now we throw an exception
if(ndArray.data() == null) {
throw new IllegalArgumentException("Unable to infer data type from null databuffer");
}
// TF_NewTensor requires contiguous row-major data; dup views / non-'c' arrays
if(ndArray.isView() || ndArray.ordering() != 'c') {
ndArray = ndArray.dup('c');
}
long[] ndShape = ndArray.shape();
long[] tfShape = new long[ndShape.length];
System.arraycopy(ndShape, 0, tfShape, 0, ndShape.length);
int type;
DataBuffer data = ndArray.data();
DataType dataType = data.dataType();
// map the nd4j dtype to the tensorflow DT_* constant
switch (dataType) {
case DOUBLE: type = DT_DOUBLE; break;
case FLOAT: type = DT_FLOAT; break;
case INT: type = DT_INT32; break;
case HALF: type = DT_HALF; break;
case COMPRESSED:
// compressed buffers carry their logical dtype in the compression descriptor
CompressedDataBuffer compressedData = (CompressedDataBuffer)data;
CompressionDescriptor desc = compressedData.getCompressionDescriptor();
String algo = desc.getCompressionAlgorithm();
switch (algo) {
case "FLOAT16": type = DT_HALF; break;
case "INT8": type = DT_INT8; break;
case "UINT8": type = DT_UINT8; break;
case "INT16": type = DT_INT16; break;
case "UINT16": type = DT_UINT16; break;
default: throw new IllegalArgumentException("Unsupported compression algorithm: " + algo);
}
break;
case SHORT: type = DT_INT16; break;
case LONG: type = DT_INT64; break;
case UTF8: type = DT_STRING; break;
case BYTE: type = DT_INT8; break;
case UBYTE: type = DT_UINT8; break;
case UINT16: type = DT_UINT16; break;
case UINT32: type = DT_UINT32; break;
case UINT64: type = DT_UINT64; break;
case BFLOAT16: type = DT_BFLOAT16; break;
case BOOL: type = DT_BOOL; break;
default: throw new IllegalArgumentException("Unsupported data type: " + dataType);
}
try {
Nd4j.getAffinityManager().ensureLocation(ndArray, AffinityManager.Location.HOST);
} catch (Exception e) {
// ND4J won't let us access compressed data in GPU memory, so we'll leave TensorFlow do the conversion instead
ndArray.getDouble(0); // forces decompression and data copy to host
data = ndArray.data();
dataType = data.dataType();
// re-map the (now decompressed) dtype; only the uncompressed subset is expected here
switch (dataType) {
case DOUBLE: type = DT_DOUBLE; break;
case FLOAT: type = DT_FLOAT; break;
case INT: type = DT_INT32; break;
case LONG: type = DT_INT64; break;
case UTF8: type = DT_STRING; break;
default: throw new IllegalArgumentException("Unsupported data type: " + dataType);
}
}
LongPointer longPointer = new LongPointer(tfShape);
TF_Tensor tf_tensor = null;
if (type == DT_STRING) {
// TF_STRING layout: an 8-byte offset per string, then the encoded strings
long size = 0;
long length = ndArray.length();
BytePointer[] strings = new BytePointer[(int)length];
for (int i = 0; i < length; i++) {
strings[i] = new BytePointer(ndArray.getString(i));
size += TF_StringEncodedSize(strings[i].capacity());
}
tf_tensor = TF_AllocateTensor(
type,
longPointer,
tfShape.length,
8 * length + size);
long offset = 0;
BytePointer tf_data = new BytePointer(TF_TensorData(tf_tensor)).capacity(TF_TensorByteSize(tf_tensor));
TF_Status status = TF_NewStatus();
for (int i = 0; i < length; i++) {
// write the i-th string's offset into the offset table, then encode its bytes
tf_data.position(8 * i).putLong(offset);
offset += TF_StringEncode(strings[i], strings[i].capacity() - 1, tf_data.position(8 * length + offset), tf_data.capacity() - tf_data.position(), status);
if (TF_GetCode(status) != TF_OK) {
throw new IllegalStateException("ERROR: Unable to convert tensor " + TF_Message(status).getString());
}
}
TF_DeleteStatus(status);
} else {
// zero-copy path: the tensor wraps the ndarray's host buffer directly
// NOTE(review): 'calling' is passed as the deallocator argument — presumably a
// no-op deallocator defined elsewhere in this class; confirm its semantics.
tf_tensor = TF_NewTensor(
type,
longPointer,
tfShape.length,
data.pointer(),
data.length() * data.getElementSize(),
calling,null);
}
return tf_tensor;
}
Convert an {@link INDArray}
to a {@link TF_Tensor}
with zero copy.
Uses a direct pointer to the underlying ndarray's
data
@param ndArray the ndarray to use
@return the equivalent {@link TF_Tensor}
| TensorflowConversion::tensorFromNDArray | java | deeplearning4j/deeplearning4j | nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/TensorflowConversion.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/TensorflowConversion.java | Apache-2.0 |
/**
 * Convert a {@link TF_Tensor} to an {@link INDArray} using zero copy for
 * numeric types (the databuffer wraps the tensor's native pointer).
 * String tensors are decoded element by element into a new string array.
 *
 * @param tensor the tensor to convert
 * @return the equivalent {@link INDArray}
 * @throws IllegalStateException if string decoding fails
 */
public INDArray ndArrayFromTensor(TF_Tensor tensor) {
int rank = TF_NumDims(tensor);
int[] ndShape;
if (rank == 0) {
// scalar
ndShape = new int[] {};
} else {
ndShape = new int[rank];
for (int i = 0; i < ndShape.length; i++) {
ndShape[i] = (int) TF_Dim(tensor,i);
}
}
int tfType = TF_TensorType(tensor);
DataType nd4jType = typeFor(tfType);
//scalars are technically length 1 but of rank 0
int length = Math.max(1,ArrayUtil.prod(ndShape));
INDArray array;
if (nd4jType == DataType.UTF8) {
// TF_STRING layout: 8-byte offset table followed by the encoded strings
String[] strings = new String[length];
BytePointer data = new BytePointer(TF_TensorData(tensor)).capacity(TF_TensorByteSize(tensor));
BytePointer str = new BytePointer((Pointer)null);
SizeTPointer size = new SizeTPointer(1);
TF_Status status = TF_NewStatus();
for (int i = 0; i < length; i++) {
long offset = data.position(8 * i).getLong();
TF_StringDecode(data.position(8 * length + offset), data.capacity() - data.position(), str, size, status);
if (TF_GetCode(status) != TF_OK) {
throw new IllegalStateException("ERROR: Unable to convert tensor " + TF_Message(status).getString());
}
strings[i] = str.position(0).capacity(size.get()).getString();
}
TF_DeleteStatus(status);
array = Nd4j.create(strings);
} else {
// NOTE(review): capacity is set to the ELEMENT count here, while the byte size
// would be length * elementSize — confirm the expected unit for this pointer.
Pointer pointer = TF_TensorData(tensor).capacity(length);
Indexer indexer = indexerForType(nd4jType,pointer);
DataBuffer d = Nd4j.createBuffer(indexer.pointer(),nd4jType,length,indexer);
array = Nd4j.create(d,ndShape);
}
// we don't need this in this case. Device memory will be updated right in the constructor
//Nd4j.getAffinityManager().tagLocation(array, AffinityManager.Location.HOST);
return array;
}
Convert a {@link TF_Tensor}
to an {@link INDArray}
using zero copy.
It will use the underlying
pointer with in nd4j.
@param tensor the tensor to use
@return
| TensorflowConversion::ndArrayFromTensor | java | deeplearning4j/deeplearning4j | nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/TensorflowConversion.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/TensorflowConversion.java | Apache-2.0 |
public TF_Graph loadGraph(String filePath, TF_Status status) throws IOException {
byte[] bytes = Files.readAllBytes(Paths.get(filePath));
return loadGraph(bytes, status);
} |
Get an initialized {@link TF_Graph}
based on the passed in file
(the file must be a binary protobuf/pb file)
The graph will be modified to be associated
with the device associated with this current thread.
Depending on the active {@link Nd4j#getBackend()}
the device will either be the gpu pinned to the current thread
or the cpu
@param filePath the path to the file to read
@return the initialized graph
@throws IOException
| TensorflowConversion::loadGraph | java | deeplearning4j/deeplearning4j | nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/TensorflowConversion.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/TensorflowConversion.java | Apache-2.0 |
/**
 * Infer the TensorFlow device string for the current thread based on
 * {@link Nd4j#getAffinityManager()}. You can think of the "current thread"
 * as a worker; this is mainly useful with multiple gpus.
 *
 * @return {@code /device:gpu:N} when the backend is cuda (JCublasBackend),
 *         otherwise {@code /device:cpu:N}, where N is the thread's device id
 */
public static String defaultDeviceForThread() {
    Integer device = Nd4j.getAffinityManager().getDeviceForCurrentThread();
    // The cuda backend is detected by its implementation class name.
    boolean cuda = Nd4j.getBackend().getClass().getName().contains("JCublasBackend");
    return (cuda ? "/device:gpu:" : "/device:cpu:") + device;
} |
Infers the device for the given thread
based on the {@link Nd4j#getAffinityManager()}
Usually, this will either be a gpu or cpu
reserved for the current device.
You can think of the "current thread"
as a worker. This is mainly useful with multiple gpus
@return
| TensorflowConversion::defaultDeviceForThread | java | deeplearning4j/deeplearning4j | nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/TensorflowConversion.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/TensorflowConversion.java | Apache-2.0 |
/**
 * Load a TensorFlow SavedModel bundle and create a session for it.
 * Besides returning the session, this parses the bundle's MetaGraphDef and
 * populates the caller-supplied {@code inputsMap}/{@code outputsMap} (both
 * mutated in place) with the signature's logical name -> graph tensor name
 * bindings, taken from the SignatureDef selected by
 * {@link SavedModelConfig#getSignatureKey()}.
 *
 * @param savedModelConfig the configuration for the saved model
 * @param options the session options to use
 * @param runOptions the run configuration to use
 * @param graph the tf graph to load the model into
 * @param inputsMap mutated in place: logical input name -> tensor name
 * @param outputsMap mutated in place: logical output name -> tensor name
 * @param status the status object to use for verifying the results
 * @return the session for the loaded model
 * @throws IllegalStateException if the native load fails or the MetaGraphDef cannot be parsed
 */
public TF_Session loadSavedModel(SavedModelConfig savedModelConfig, TF_SessionOptions options, TF_Buffer runOptions, TF_Graph graph, Map<String, String> inputsMap, Map<String, String> outputsMap, TF_Status status) {
TF_Buffer metaGraph = TF_Buffer.newBuffer();
// The literal 1 is the number of tags passed for the saved model bundle.
TF_Session session = TF_LoadSessionFromSavedModel(options, runOptions, new BytePointer(savedModelConfig.getSavedModelPath()),
new BytePointer(savedModelConfig.getModelTag()), 1, graph, metaGraph, status);
if (TF_GetCode(status) != TF_OK) {
throw new IllegalStateException("ERROR: Unable to import model " + TF_Message(status).getString());
}
// Parse the serialized MetaGraphDef returned by the native call so the
// signature's tensor-name bindings can be extracted below.
MetaGraphDef metaGraphDef;
try {
metaGraphDef = MetaGraphDef.parseFrom(metaGraph.data().capacity(metaGraph.length()).asByteBuffer());
} catch (InvalidProtocolBufferException ex) {
throw new IllegalStateException("ERROR: Unable to import model " + ex);
}
// NOTE(review): if the signature key is absent this will NPE on the next
// line rather than fail with a descriptive message — confirm intended.
Map<String, SignatureDef> signatureDefMap = metaGraphDef.getSignatureDefMap();
SignatureDef signatureDef = signatureDefMap.get(savedModelConfig.getSignatureKey());
// Copy logical name -> tensor name bindings into the caller's maps.
Map<String, TensorInfo> inputs = signatureDef.getInputsMap();
for (Map.Entry<String, TensorInfo> e : inputs.entrySet()) {
inputsMap.put(e.getKey(), e.getValue().getName());
}
Map<String, TensorInfo> outputs = signatureDef.getOutputsMap();
for (Map.Entry<String, TensorInfo> e : outputs.entrySet()) {
outputsMap.put(e.getKey(), e.getValue().getName());
}
return session;
} |
Load a session based on the saved model
@param savedModelConfig the configuration for the saved model
@param options the session options to use
@param runOptions the run configuration to use
@param graph the tf graph to use
@param inputsMap the input map
@param outputsMap the output names
@param status the status object to use for verifying the results
@return
| TensorflowConversion::loadSavedModel | java | deeplearning4j/deeplearning4j | nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/TensorflowConversion.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/TensorflowConversion.java | Apache-2.0 |
/**
 * Recast the given input tensors to this runner's configured input data
 * types. This covers the case where the supplied tensors' types don't match
 * the graph's expected input types; the cast runs automatically before
 * execution.
 *
 * @param inputs the inputs to cast
 * @return the recast inputs
 */
public Map<String, TF_Tensor> recastInputs(Map<String, TF_Tensor> inputs) {
    Map<String, TF_Tensor> recast = recastInputs(inputs, inputOrder, inputDataTypes);
    return recast;
} |
Cast inputs from the original data type
to the target resulting input data type.
This is for when there's a disconnect from the inputs
to the target input data type. This runs a pre cast automatically.
@param inputs the inputs to cast
@return the re casted input
| GraphRunner::recastInputs | java | deeplearning4j/deeplearning4j | nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/graphrunner/GraphRunner.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/graphrunner/GraphRunner.java | Apache-2.0 |
/**
 * Recast the given output tensors to this runner's configured output data
 * types, using the same mechanism as the input recast.
 *
 * @param inputs the output tensors to cast
 * @return the recast outputs
 */
public Map<String, TF_Tensor> recastOutputs(Map<String, TF_Tensor> inputs) {
    Map<String, TF_Tensor> recast = recastInputs(inputs, outputOrder, outputDataTypes);
    return recast;
} |
Cast outputs from their original data type
to the configured target output data type.
This is for when there's a disconnect between the produced outputs
and the target output data type. This runs a post-run cast automatically.
@param inputs the output tensors to cast
@return the recast outputs
| GraphRunner::recastOutputs | java | deeplearning4j/deeplearning4j | nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/graphrunner/GraphRunner.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/graphrunner/GraphRunner.java | Apache-2.0 |
/**
 * Automatically recast the given tensors to the specified target types.
 * For every name in {@code inputOrder} whose tensor's current type differs
 * from the target type in {@code inputDataTypes}, the tensor is cast via
 * {@link #castTensor(TF_Tensor, TensorDataType, TensorDataType)} and the
 * original native tensor is deleted.
 *
 * @param inputs the input tensors to recast
 * @param inputOrder the order of the input tensors; when null the result map is empty
 * @param inputDataTypes the target types; null/empty means "infer from the
 *                       tensors themselves", making every cast a no-op
 * @return a new map of name -> (possibly cast) tensor
 */
public Map<String, TF_Tensor> recastInputs(Map<String, TF_Tensor> inputs, List<String> inputOrder, Map<String,TensorDataType> inputDataTypes) {
if(inputDataTypes == null || inputDataTypes.isEmpty()) {
// No target types configured: derive them from the tensors so the
// comparison below never triggers a cast.
inputDataTypes = new LinkedHashMap<>();
if(inputOrder != null)
for(int i = 0; i < inputOrder.size(); i++) {
TensorDataType tensorDataType = TensorDataType.values()[TF_TensorType(inputs.get(inputOrder.get(i)))];
// NOTE(review): values()[...] cannot return null (an out-of-range index
// throws ArrayIndexOutOfBoundsException), so this check looks ineffective.
Preconditions.checkNotNull(tensorDataType,"Data type of " + TF_TensorType(inputs.get(inputOrder.get(i))) + " was null!");
inputDataTypes.put(inputOrder.get(i),tensorDataType);
}
}
Map<String, TF_Tensor> ret = new HashMap<>();
if(inputOrder != null)
for(int i = 0; i < inputOrder.size(); i++) {
TF_Tensor currInput = inputs.get(inputOrder.get(i));
TensorDataType fromDType = TensorDataType.values()[TF_TensorType(currInput)];
if(fromDType != inputDataTypes.get(inputOrder.get(i))) {
// Cast to the target type and release the original native tensor.
TF_Tensor oldTensor = currInput;
currInput = castTensor(currInput, fromDType, inputDataTypes.get(inputOrder.get(i)));
TF_DeleteTensor(oldTensor);
}
ret.put(inputOrder.get(i),currInput);
}
return ret;
} |
Automatically recast the input arrays
as the specified types
@param inputs the input tensors to recast
@param inputOrder the order of the input tensors
@param inputDataTypes the data types to cast to (null means stay the same)
@return the new values
| GraphRunner::recastInputs | java | deeplearning4j/deeplearning4j | nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/graphrunner/GraphRunner.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/graphrunner/GraphRunner.java | Apache-2.0 |
/**
 * Run the graph definition with the given inputs in native tensorflow.
 *
 * When a {@link SavedModelConfig} is present, input/output tensor names come
 * from the saved model's signature; otherwise they come from this runner's
 * inputOrder/outputOrder lists. Names may be of the form {@code op:index};
 * the output index defaults to 0.
 *
 * Fixes relative to the previous version:
 * - the saved-model output loop iterated {@code outputOrder.size()} even
 *   though outputOrder may be null/empty when running from a SavedModel
 *   (NullPointerException); it now falls back to the saved model's output order
 * - the non-saved-model branch used the null-safe {@code outputOrderSize}
 *   inconsistently, calling {@code outputOrder.size()} directly in two places
 * - removed the {@code opsByName} maps, which were populated but never read
 *
 * @param inputs map of input name -> tensor to feed
 * @return map of output name -> resulting tensor from the native session run
 * @throws IllegalStateException if the graph is not initialized or the session run fails
 * @throws IllegalArgumentException if the number of inputs does not match the
 *         configured input order, or an output op cannot be found
 */
public Map<String, TF_Tensor> runTfTensor(Map<String, TF_Tensor> inputs) {
    if(graph == null) {
        throw new IllegalStateException("Graph not initialized.");
    }

    if(!inputs.isEmpty() && inputOrder != null && inputs.size() != inputOrder.size()) {
        throw new IllegalArgumentException("Number of inputs specified do not match number of arrays specified.");
    }

    if(inputDataTypes == null) {
        // No explicit input types configured: infer them from the supplied tensors.
        inputDataTypes = new LinkedHashMap<>();
        if(inputOrder != null)
            for(int i = 0; i < inputOrder.size(); i++) {
                inputDataTypes.put(inputOrder.get(i),TensorDataType.values()[TF_TensorType(inputs.get(inputOrder.get(i)))]);
            }
    }

    for(Map.Entry<String, org.bytedeco.tensorflow.TF_Tensor> entry : inputs.entrySet()) {
        Preconditions.checkNotNull(entry.getValue(),"Entry " + entry.getKey() + " was null!");
    }

    //recast for adapting input
    inputs = recastInputs(inputs);

    if(savedModelConfig != null) {
        Map<String, TF_Tensor> outputArrays = new LinkedHashMap<>();

        int numSavedInputs = savedModelConfig.getSavedModelInputOrder().size();
        org.bytedeco.tensorflow.TF_Output inputOut = new org.bytedeco.tensorflow.TF_Output(numSavedInputs);
        TF_Tensor[] inputTensors = new TF_Tensor[numSavedInputs];
        for(int i = 0; i < numSavedInputs; i++) {
            String[] name = savedModelConfig.getSavedModelInputOrder().get(i).split(":");
            org.bytedeco.tensorflow.TF_Operation inputOp = TF_GraphOperationByName(graph, name[0]);
            inputOut.position(i).oper(inputOp).index(name.length > 1 ? Integer.parseInt(name[1]) : 0);
            TF_Tensor tfTensor = inputs.get(inputOrder != null && !inputOrder.isEmpty()
                    ? inputOrder.get(i) : savedModelConfig.getSavedModelInputOrder().get(i));
            inputTensors[i] = tfTensor;
        }

        //reset the position of the pointer for execution
        inputOut.position(0);

        int numSavedOutputs = savedModelConfig.getSaveModelOutputOrder().size();
        org.bytedeco.tensorflow.TF_Output outputOut = new org.bytedeco.tensorflow.TF_Output(numSavedOutputs);
        //only setup the output ops
        for(int i = 0; i < numSavedOutputs; i++) {
            String[] name = savedModelConfig.getSaveModelOutputOrder().get(i).split(":");
            org.bytedeco.tensorflow.TF_Operation outputOp = TF_GraphOperationByName(graph, name[0]);
            outputOut.position(i).oper(outputOp).index(name.length > 1 ? Integer.parseInt(name[1]) : 0);
        }

        //reset the position of the pointer for execution
        outputOut.position(0);

        //these are references to the nd4j ndarrays wrapped for tensorflow
        PointerPointer<TF_Tensor> inputTensorsPointer = new PointerPointer<>(inputTensors);
        //the result pointers are null, and will be populated automatically by the session run
        PointerPointer<TF_Tensor> outputTensorsPointer = new PointerPointer<>(numSavedOutputs);

        long start = System.nanoTime();
        TF_SessionRun(
                session,
                null,
                //inputs
                inputOut, inputTensorsPointer, inputTensors.length,
                //outputs
                outputOut, outputTensorsPointer, numSavedOutputs,
                //targets
                null, 0,
                null,
                status);
        long end = System.nanoTime();
        long diff = TimeUnit.NANOSECONDS.toMillis((end - start));
        log.debug("Session runtime: {} ms", diff);

        if (TF_GetCode(status) != TF_OK) {
            throw new IllegalStateException("ERROR: Unable to run session " + TF_Message(status).getString());
        } else {
            // Prefer outputOrder names when present, falling back to the saved
            // model's output order; the loop bound follows the same choice so a
            // null/empty outputOrder no longer throws.
            int numResults = outputOrder != null && !outputOrder.isEmpty()
                    ? outputOrder.size() : numSavedOutputs;
            for(int i = 0; i < numResults; i++) {
                outputArrays.put(outputOrder != null && !outputOrder.isEmpty() ? outputOrder.get(i) :
                        savedModelConfig.getSaveModelOutputOrder().get(i), new TF_Tensor(outputTensorsPointer.get(i)));
            }
        }

        return outputArrays;
    }
    else {
        Map<String, TF_Tensor> outputArrays = new LinkedHashMap<>();

        int inputOrderSize = inputOrder == null ? 0 : inputOrder.size();
        org.bytedeco.tensorflow.TF_Output inputOut = new org.bytedeco.tensorflow.TF_Output(inputOrderSize);
        TF_Tensor[] inputTensors = new TF_Tensor[inputOrderSize];
        for(int i = 0; i < inputOrderSize; i++) {
            String[] name = inputOrder.get(i).split(":");
            org.bytedeco.tensorflow.TF_Operation inputOp = TF_GraphOperationByName(graph, name[0]);
            inputOut.position(i).oper(inputOp).index(name.length > 1 ? Integer.parseInt(name[1]) : 0);
            inputTensors[i] = inputs.get(inputOrder.get(i));
        }

        //reset the position of the pointer for execution
        inputOut.position(0);

        int outputOrderSize = outputOrder == null ? 0 : outputOrder.size();
        // Use the null-safe size here (previously outputOrder.size() was called
        // directly, which would NPE for a null outputOrder).
        org.bytedeco.tensorflow.TF_Output outputOut = new org.bytedeco.tensorflow.TF_Output(outputOrderSize);
        //only setup the output ops
        for(int i = 0; i < outputOrderSize; i++) {
            String[] name = outputOrder.get(i).split(":");
            org.bytedeco.tensorflow.TF_Operation outputOp = TF_GraphOperationByName(graph, name[0]);
            if(outputOp == null) {
                throw new IllegalArgumentException("Illegal output found " + outputOrder.get(i) + " - no op found! Mis specified name perhaps?");
            }
            outputOut.position(i).oper(outputOp).index(name.length > 1 ? Integer.parseInt(name[1]) : 0);
        }

        //reset the position of the pointer for execution
        outputOut.position(0);

        //these are references to the nd4j ndarrays wrapped for tensorflow
        PointerPointer<TF_Tensor> inputTensorsPointer = new PointerPointer<>(inputTensors);
        //the result pointers are null, and will be populated automatically by the session run
        PointerPointer<TF_Tensor> outputTensorsPointer = new PointerPointer<>(outputOrderSize);

        long start = System.nanoTime();
        TF_SessionRun(
                session,
                null,
                //inputs
                inputOut, inputTensorsPointer, inputOrderSize,
                //outputs
                outputOut, outputTensorsPointer, outputOrderSize,
                //targets
                null, 0,
                null,
                status);
        long end = System.nanoTime();
        long diff = TimeUnit.NANOSECONDS.toMillis((end - start));
        log.debug("Session runtime: {} ms", diff);

        if (TF_GetCode(status) != TF_OK) {
            throw new IllegalStateException("ERROR: Unable to run session " + TF_Message(status).getString());
        } else {
            for(int i = 0; i < outputOrderSize; i++) {
                outputArrays.put(outputOrder.get(i), new TF_Tensor(outputTensorsPointer.get(i)));
            }
        }
        return outputArrays;
    }
} |
Run the graph definition with the given inputs
in native tensorflow
@param inputs the inputs to run
@return the outputSchema from the native tensorflow wrapper
| GraphRunner::runTfTensor | java | deeplearning4j/deeplearning4j | nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/graphrunner/GraphRunner.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/graphrunner/GraphRunner.java | Apache-2.0 |
/**
 * Convert a json string written out by
 * {@link org.nd4j.shade.protobuf.util.JsonFormat}
 * into a {@link org.tensorflow.framework.ConfigProto}.
 *
 * @param json the json to read
 * @return the parsed config proto, or null if parsing fails (the error is logged)
 */
public static org.tensorflow.framework.ConfigProto fromJson(String json) {
    org.tensorflow.framework.ConfigProto.Builder builder = org.tensorflow.framework.ConfigProto.newBuilder();
    try {
        org.nd4j.shade.protobuf.util.JsonFormat.parser().merge(json, builder);
        // The previous implementation serialized the built message to bytes and
        // parsed it straight back; that round-trip produces an identical message,
        // so return the built proto directly.
        return builder.build();
    } catch (Exception e) {
        log.error("", e);
    }
    return null;
} |
Convert a json string written out
by {@link org.nd4j.shade.protobuf.util.JsonFormat}
to a {@link org.bytedeco.tensorflow.ConfigProto}
@param json the json to read
@return the config proto to use
| GraphRunner::fromJson | java | deeplearning4j/deeplearning4j | nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/graphrunner/GraphRunner.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/graphrunner/GraphRunner.java | Apache-2.0 |
/**
 * Cast a tensor to another type using the tensorflow c api, via a small
 * cast graph loaded from the classpath
 * (cast_graph/cast_(name of datatype lower case).pb) with a variant-typed
 * placeholder input and an output named cast_output.
 *
 * @param input the input tensor
 * @param from  the data type to cast from
 * @param to    the data type to cast to
 * @return the cast tensor (the input itself when from == to)
 */
public static TF_Tensor castTensor(TF_Tensor input, TensorDataType from, TensorDataType to) {
    if (from.equals(to)) {
        // Nothing to do — hand the caller's tensor straight back.
        return input;
    }

    Map<String, TF_Tensor> feed = new HashMap<>();
    feed.put("input", input);

    GraphRunner caster = getRunner(from, to);
    try {
        Map<String, TF_Tensor> result = caster.runTfTensor(feed);
        return result.get("cast_output");
    } catch (Exception e) {
        throw new IllegalStateException("Unable to run graph", e);
    }
} |
Cast a tensor to another type using
the tensorflow c api.
This method loads a graph from the classpath from
cast_graph/cast_(name of datatype lower case).pb
which contains a simple protobuf file with a
variant data type tensorflow input place holder
named place holder and an output named cast_output.
@param input the input data
@param from the input data type to cast from
@param to the output data type to
@return the casted tensor
| GraphRunner::castTensor | java | deeplearning4j/deeplearning4j | nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/graphrunner/GraphRunner.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/graphrunner/GraphRunner.java | Apache-2.0 |
/**
 * Write out the session options used by this
 * {@link org.nd4j.tensorflow.conversion.graphrunner.GraphRunner}
 * as a json string via {@link org.nd4j.shade.protobuf.util.JsonFormat}.
 *
 * @return the session options as json (mainly for debugging), or null when
 *         no options are configured or printing fails (failure is logged)
 */
public String sessionOptionsToJson() {
    if (sessionOptionsConfigProto == null) {
        return null;
    }
    String json = null;
    try {
        json = org.nd4j.shade.protobuf.util.JsonFormat.printer().print(sessionOptionsConfigProto);
    } catch (Exception e) {
        log.error("", e);
    }
    return json;
} |
Write out the session options used
by this {@link org.nd4j.tensorflow.conversion.graphrunner.GraphRunner}
a s a json string using the
{@link org.nd4j.shade.protobuf.util.JsonFormat}
@return the session options as json (mainly for debugging)
| GraphRunner::sessionOptionsToJson | java | deeplearning4j/deeplearning4j | nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/graphrunner/GraphRunner.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/graphrunner/GraphRunner.java | Apache-2.0 |
/**
 * Return the {@link INDArray}s contained in an onnx sequence value.
 *
 * @param outValue     the sequence to extract ndarrays from
 * @param ortAllocator the allocator used to retrieve the per-element values
 * @return one array per element of the sequence
 */
public static INDArray[] ndarraysFromSequence(Value outValue, OrtAllocator ortAllocator) {
    Preconditions.checkState(outValue.HasValue(), "No value found in specified value!");
    int count = (int) outValue.GetCount();
    INDArray[] arrays = new INDArray[count];
    for (int idx = 0; idx < count; idx++) {
        arrays[idx] = getArray(outValue.GetValue(idx, ortAllocator));
    }
    return arrays;
} |
Return the {@link INDArray} from a sequence
@param outValue the input sequence to get the ndarrays from
@param ortAllocator the allocator to use to retrieve relevant memory
@return the equivalent arrays
| ONNXUtils::ndarraysFromSequence | java | deeplearning4j/deeplearning4j | nd4j/nd4j-onnxruntime/src/main/java/org/nd4j/onnxruntime/util/ONNXUtils.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-onnxruntime/src/main/java/org/nd4j/onnxruntime/util/ONNXUtils.java | Apache-2.0 |
/**
 * Create an onnx sequence ({@link ValueVector}) from a list of arrays,
 * converting each element with {@link #getTensor(INDArray, MemoryInfo)}.
 *
 * @param sequence   the arrays to convert
 * @param memoryInfo the memory info to use for allocation
 * @return the populated value vector
 */
public static ValueVector getSequence(List<INDArray> sequence, MemoryInfo memoryInfo) {
    ValueVector values = new ValueVector(sequence.size());
    for (INDArray array : sequence) {
        values.put(getTensor(array, memoryInfo));
    }
    return values;
} |
Create a sequence from a list of tensors
returning a {@link ValueVector} equivalent
using {@link #getTensor(INDArray, MemoryInfo)}
@param sequence the sequence to get
@param memoryInfo the memory info to use for allocation
@return
| ONNXUtils::getSequence | java | deeplearning4j/deeplearning4j | nd4j/nd4j-onnxruntime/src/main/java/org/nd4j/onnxruntime/util/ONNXUtils.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-onnxruntime/src/main/java/org/nd4j/onnxruntime/util/ONNXUtils.java | Apache-2.0 |
/**
 * Get the onnx type of an output of the given session.
 *
 * @param session the session to inspect
 * @param i       the index of the output
 * @return the corresponding {@link ONNXType}
 */
public static ONNXType getTypeForOutput(Session session, int i) {
    int onnxType = session.GetOutputTypeInfo(i).GetONNXType();
    return ONNXType.values()[onnxType];
} |
Get the onnx type of the output
@param session the session to get the input for
@param i the index of the output
@return
| ONNXUtils::getTypeForOutput | java | deeplearning4j/deeplearning4j | nd4j/nd4j-onnxruntime/src/main/java/org/nd4j/onnxruntime/util/ONNXUtils.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-onnxruntime/src/main/java/org/nd4j/onnxruntime/util/ONNXUtils.java | Apache-2.0 |
/**
 * Get the onnx type of an input of the given session.
 *
 * @param session the session to inspect
 * @param i       the index of the input
 * @return the corresponding {@link ONNXType}
 */
public static ONNXType getTypeForInput(Session session, long i) {
    int onnxType = session.GetInputTypeInfo(i).GetONNXType();
    return ONNXType.values()[onnxType];
} |
Get the onnx type of the input
@param session the session to get the output type info from
@param i the index of the input
@return the relevant type information
| ONNXUtils::getTypeForInput | java | deeplearning4j/deeplearning4j | nd4j/nd4j-onnxruntime/src/main/java/org/nd4j/onnxruntime/util/ONNXUtils.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-onnxruntime/src/main/java/org/nd4j/onnxruntime/util/ONNXUtils.java | Apache-2.0 |
/**
 * Returns a zeroed array whose shape and data type are taken from the
 * tensor field of {@link Onnx.ValueInfoProto#getType()}. Mainly used for
 * quick debugging/testing.
 *
 * @param valueInfoProto the value info proto to read shape/type information from
 * @return the sample tensor
 */
public static INDArray getSampleForValueInfo(Onnx.ValueInfoProto valueInfoProto) {
    Preconditions.checkState(valueInfoProto.hasType(), "Value info must have a type!");
    Onnx.TypeProto.Tensor tensorType = valueInfoProto.getType().getTensorType();
    long[] shape = Longs.toArray(tensorType.getShape().getDimList()
            .stream()
            .map(dim -> dim.getDimValue())
            .collect(Collectors.toList()));
    DataType dataType = dataTypeForOnnxType(tensorType.getElemType());
    return Nd4j.create(dataType, shape);
} |
Returns a zeroed array of the input data.
This array's shape and data type are determined
from {@link Onnx.ValueInfoProto#getType()}
tensor field.
given the value type. Mainly used for quick debugging/
testing.
@param valueInfoProto the value info proto
to get the shape information from
@return the sample tensor
| ONNXUtils::getSampleForValueInfo | java | deeplearning4j/deeplearning4j | nd4j/nd4j-onnxruntime/src/main/java/org/nd4j/onnxruntime/util/ONNXUtils.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-onnxruntime/src/main/java/org/nd4j/onnxruntime/util/ONNXUtils.java | Apache-2.0 |
/**
 * Validate that an array's data type matches the expected onnx data type.
 *
 * @param expected the required data type
 * @param array    the array to check
 * @throws IllegalArgumentException if the types differ
 *         (previously a raw RuntimeException; IllegalArgumentException is a
 *         RuntimeException subclass, so existing callers are unaffected)
 */
public static void validateType(DataType expected, INDArray array) {
    if (!array.dataType().equals(expected))
        throw new IllegalArgumentException("INDArray data type (" + array.dataType() + ") does not match required ONNX data type (" + expected + ")");
} |
@param expected
@param array
| ONNXUtils::validateType | java | deeplearning4j/deeplearning4j | nd4j/nd4j-onnxruntime/src/main/java/org/nd4j/onnxruntime/util/ONNXUtils.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-onnxruntime/src/main/java/org/nd4j/onnxruntime/util/ONNXUtils.java | Apache-2.0 |
/**
 * Return the nd4j {@link DataType} for the given onnx tensor element type.
 *
 * BUG FIX: the first branch previously tested {@code dataType == dataType}
 * (always true), so EVERY onnx type was mapped to FLOAT and the remaining
 * branches were unreachable. It now correctly compares against
 * ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT.
 *
 * @param dataType the onnx tensor element data type constant
 * @return the equivalent nd4j data type
 * @throws IllegalArgumentException if the onnx type is not supported
 */
public static DataType dataTypeForOnnxType(int dataType) {
    if(dataType == ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT) {
        return FLOAT;
    } else if(dataType == ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8) {
        return INT8;
    } else if(dataType == ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE) {
        return DOUBLE;
    } else if(dataType == ONNX_TENSOR_ELEMENT_DATA_TYPE_BOOL) {
        return BOOL;
    } else if(dataType == ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8) {
        return UINT8;
    } else if(dataType == ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16) {
        return UINT16;
    } else if(dataType == ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16) {
        return INT16;
    } else if(dataType == ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32) {
        return INT32;
    } else if(dataType == ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64) {
        return INT64;
    } else if(dataType == ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16) {
        return FLOAT16;
    } else if(dataType == ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32) {
        return UINT32;
    } else if(dataType == ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64) {
        return UINT64;
    } else if(dataType == ONNX_TENSOR_ELEMENT_DATA_TYPE_BFLOAT16) {
        return BFLOAT16;
    }
    else
        throw new IllegalArgumentException("Illegal data type " + dataType);
} |
Return a {@link DataType}
for the onnx data type
@param dataType the equivalent nd4j data type
@return
| ONNXUtils::dataTypeForOnnxType | java | deeplearning4j/deeplearning4j | nd4j/nd4j-onnxruntime/src/main/java/org/nd4j/onnxruntime/util/ONNXUtils.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-onnxruntime/src/main/java/org/nd4j/onnxruntime/util/ONNXUtils.java | Apache-2.0 |
/**
 * Return the onnx tensor element type constant for the given nd4j
 * {@link DataType}.
 *
 * @param dataType the nd4j data type to convert
 * @return the matching ONNX_TENSOR_ELEMENT_DATA_TYPE_* constant
 * @throws IllegalArgumentException if the data type is not supported
 */
public static int onnxTypeForDataType(DataType dataType) {
    if (dataType == FLOAT)    return ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT;
    if (dataType == INT8)     return ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8;
    if (dataType == DOUBLE)   return ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE;
    if (dataType == BOOL)     return ONNX_TENSOR_ELEMENT_DATA_TYPE_BOOL;
    if (dataType == UINT8)    return ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8;
    if (dataType == UINT16)   return ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16;
    if (dataType == INT16)    return ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16;
    if (dataType == INT32)    return ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32;
    if (dataType == INT64)    return ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64;
    if (dataType == FLOAT16)  return ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16;
    if (dataType == UINT32)   return ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32;
    if (dataType == UINT64)   return ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64;
    if (dataType == BFLOAT16) return ONNX_TENSOR_ELEMENT_DATA_TYPE_BFLOAT16;
    throw new IllegalArgumentException("Illegal data type " + dataType);
} |
Convert the onnx type for the given data type
@param dataType
@return
| ONNXUtils::onnxTypeForDataType | java | deeplearning4j/deeplearning4j | nd4j/nd4j-onnxruntime/src/main/java/org/nd4j/onnxruntime/util/ONNXUtils.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-onnxruntime/src/main/java/org/nd4j/onnxruntime/util/ONNXUtils.java | Apache-2.0 |
/**
 * Convert an onnx {@link Value} into an {@link INDArray}, wrapping the
 * tensor's native buffer rather than copying it.
 *
 * @param value the value to convert
 * @return the equivalent array
 */
public static INDArray getArray(Value value) {
    DataType dataType = dataTypeForOnnxType(value.GetTypeInfo().GetONNXType());
    LongVector shape = value.GetTensorTypeAndShapeInfo().GetShape();

    long[] dims;
    if (shape == null) {
        // No shape metadata: treat the value as a single scalar-like element.
        dims = new long[] {1};
    } else {
        int rank = (int) value.GetTensorTypeAndShapeInfo().GetDimensionsCount();
        dims = new long[rank];
        for (int d = 0; d < rank; d++) {
            dims[d] = shape.get(d);
        }
    }

    DataBuffer buffer = getDataBuffer(value);
    Preconditions.checkState(dataType.equals(buffer.dataType()), "Data type must be equivalent as specified by the onnx metadata.");
    return Nd4j.create(buffer, dims, Nd4j.getStrides(dims), 0);
} |
Convert an onnx {@link Value}
in to an {@link INDArray}
@param value the value to convert
@return
| ONNXUtils::getArray | java | deeplearning4j/deeplearning4j | nd4j/nd4j-onnxruntime/src/main/java/org/nd4j/onnxruntime/util/ONNXUtils.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-onnxruntime/src/main/java/org/nd4j/onnxruntime/util/ONNXUtils.java | Apache-2.0 |
/**
 * Map an slf4j logger's effective level to the onnxruntime log level:
 * trace/debug -> ORT_LOGGING_LEVEL_VERBOSE, info -> ORT_LOGGING_LEVEL_INFO,
 * warn -> ORT_LOGGING_LEVEL_WARNING, error -> ORT_LOGGING_LEVEL_ERROR.
 * The default is info.
 *
 * @param logger the slf4j logger to translate
 * @return the onnxruntime logging level constant
 */
public static int getOnnxLogLevelFromLogger(Logger logger) {
    // Check from most to least verbose so the finest enabled level wins.
    if (logger.isTraceEnabled() || logger.isDebugEnabled()) {
        return ORT_LOGGING_LEVEL_VERBOSE;
    }
    if (logger.isInfoEnabled()) {
        return ORT_LOGGING_LEVEL_INFO;
    }
    if (logger.isWarnEnabled()) {
        return ORT_LOGGING_LEVEL_WARNING;
    }
    if (logger.isErrorEnabled()) {
        return ORT_LOGGING_LEVEL_ERROR;
    }
    return ORT_LOGGING_LEVEL_INFO;
} |
Get the onnx log level relative to the given slf4j logger.
Trace or debug will return ORT_LOGGING_LEVEL_VERBOSE
Info will return: ORT_LOGGING_LEVEL_INFO
Warn returns ORT_LOGGING_LEVEL_WARNING
Error returns error ORT_LOGGING_LEVEL_ERROR
The default is info
@param logger the slf4j logger to get the onnx log level for
@return
| ONNXUtils::getOnnxLogLevelFromLogger | java | deeplearning4j/deeplearning4j | nd4j/nd4j-onnxruntime/src/main/java/org/nd4j/onnxruntime/util/ONNXUtils.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-onnxruntime/src/main/java/org/nd4j/onnxruntime/util/ONNXUtils.java | Apache-2.0 |
/**
 * Get an onnx tensor from an ndarray, wrapping the array's existing native
 * buffer (zero copy).
 *
 * Cleanup relative to the previous version: removed the redundant
 * {@code inputTensorValuesPtr} alias of the data pointer.
 *
 * @param ndArray    the array to wrap; null or empty yields a zero-length tensor
 * @param memoryInfo the {@link MemoryInfo} to use. Can be created with:
 *                   MemoryInfo.CreateCpu(OrtArenaAllocator, OrtMemTypeDefault)
 * @return the created onnx {@link Value}
 */
public static Value getTensor(INDArray ndArray, MemoryInfo memoryInfo) {
    if (ndArray == null || ndArray.isEmpty()) {
        // Null/empty arrays are represented as a zero-length tensor.
        // NOTE(review): the element type is hard-coded to FLOAT here even
        // though an empty non-null array carries its own dtype — confirm intended.
        LongPointer dims = new LongPointer(0);
        return Value.CreateTensor(
                memoryInfo.asOrtMemoryInfo(),
                new FloatPointer(),
                0,
                dims,
                0,
                onnxTypeForDataType(FLOAT));
    }

    // Zero copy: hand onnxruntime the array's own data pointer.
    Pointer inputTensorValues = ndArray.data().pointer();
    long sizeInBytes = ndArray.length() * ndArray.data().getElementSize();
    LongPointer dims = new LongPointer(ndArray.shape());
    return Value.CreateTensor(
            memoryInfo.asOrtMemoryInfo(),
            inputTensorValues,
            sizeInBytes,
            dims,
            ndArray.rank(),
            onnxTypeForDataType(ndArray.dataType()));
} |
Get an onnx tensor from an ndarray.
@param ndArray the ndarray to get the value from
@param memoryInfo the {@link MemoryInfo} to use.
Can be created with:
MemoryInfo memoryInfo = MemoryInfo.CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
@return
| ONNXUtils::getTensor | java | deeplearning4j/deeplearning4j | nd4j/nd4j-onnxruntime/src/main/java/org/nd4j/onnxruntime/util/ONNXUtils.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-onnxruntime/src/main/java/org/nd4j/onnxruntime/util/ONNXUtils.java | Apache-2.0 |
/**
 * Get an nd4j {@link DataBuffer} view over the native memory of the given
 * onnx tensor value (no copy).
 *
 * BUG FIX: the INT8 case previously tagged its buffer as DataType.UINT8,
 * which misreads negative values and fails the dtype equality check in
 * {@code getArray} (dataTypeForOnnxType maps INT8 -> INT8). It is now INT8.
 *
 * @param tens the tensor value to read
 * @return the equivalent data buffer
 * @throws IllegalArgumentException if the underlying native value is null
 * @throws RuntimeException if the element type is unsupported
 */
public static DataBuffer getDataBuffer(Value tens) {
    if (tens.isNull())
        throw new IllegalArgumentException("Native underlying tensor value was null!");
    try (PointerScope scope = new PointerScope()) {
        DataBuffer buffer = null;
        int type = tens.GetTensorTypeAndShapeInfo().GetElementType();
        long size = tens.GetTensorTypeAndShapeInfo().GetElementCount();
        switch (type) {
            case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT:
                FloatPointer pFloat = tens.GetTensorMutableDataFloat().capacity(size);
                FloatIndexer floatIndexer = FloatIndexer.create(pFloat);
                buffer = Nd4j.createBuffer(pFloat, DataType.FLOAT, size, floatIndexer);
                break;
            case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8:
                BytePointer pUint8 = tens.GetTensorMutableDataUByte().capacity(size);
                Indexer uint8Indexer = ByteIndexer.create(pUint8);
                buffer = Nd4j.createBuffer(pUint8, DataType.UINT8, size, uint8Indexer);
                break;
            case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8:
                BytePointer pInt8 = tens.GetTensorMutableDataByte().capacity(size);
                Indexer int8Indexer = ByteIndexer.create(pInt8);
                // Fixed: was DataType.UINT8, mislabeling signed 8-bit data.
                buffer = Nd4j.createBuffer(pInt8, DataType.INT8, size, int8Indexer);
                break;
            case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16:
                ShortPointer pUint16 = tens.GetTensorMutableDataUShort().capacity(size);
                Indexer uint16Indexer = ShortIndexer.create(pUint16);
                buffer = Nd4j.createBuffer(pUint16, DataType.UINT16, size, uint16Indexer);
                break;
            case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16:
                ShortPointer pInt16 = tens.GetTensorMutableDataShort().capacity(size);
                Indexer int16Indexer = ShortIndexer.create(pInt16);
                buffer = Nd4j.createBuffer(pInt16, INT16, size, int16Indexer);
                break;
            case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32:
                IntPointer pInt32 = tens.GetTensorMutableDataInt().capacity(size);
                Indexer int32Indexer = IntIndexer.create(pInt32);
                buffer = Nd4j.createBuffer(pInt32, DataType.INT32, size, int32Indexer);
                break;
            case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64:
                LongPointer pInt64 = tens.GetTensorMutableDataLong().capacity(size);
                Indexer int64Indexer = LongIndexer.create(pInt64);
                buffer = Nd4j.createBuffer(pInt64, DataType.INT64, size, int64Indexer);
                break;
            case ONNX_TENSOR_ELEMENT_DATA_TYPE_STRING:
                // NOTE(review): string tensors are exposed as raw INT8 bytes here;
                // confirm callers decode them appropriately.
                BytePointer pString = tens.GetTensorMutableDataByte().capacity(size);
                Indexer stringIndexer = ByteIndexer.create(pString);
                buffer = Nd4j.createBuffer(pString, DataType.INT8, size, stringIndexer);
                break;
            case ONNX_TENSOR_ELEMENT_DATA_TYPE_BOOL:
                BoolPointer pBool = tens.GetTensorMutableDataBool().capacity(size);
                Indexer boolIndexer = BooleanIndexer.create(new BooleanPointer(pBool)); //Converting from JavaCPP Bool to Boolean here - C++ bool type size is not defined, could cause problems on some platforms
                buffer = Nd4j.createBuffer(pBool, DataType.BOOL, size, boolIndexer);
                break;
            case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16:
                ShortPointer pFloat16 = tens.GetTensorMutableDataShort().capacity(size);
                Indexer float16Indexer = ShortIndexer.create(pFloat16);
                buffer = Nd4j.createBuffer(pFloat16, DataType.FLOAT16, size, float16Indexer);
                break;
            case ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE:
                DoublePointer pDouble = tens.GetTensorMutableDataDouble().capacity(size);
                Indexer doubleIndexer = DoubleIndexer.create(pDouble);
                buffer = Nd4j.createBuffer(pDouble, DataType.DOUBLE, size, doubleIndexer);
                break;
            case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32:
                IntPointer pUint32 = tens.GetTensorMutableDataUInt().capacity(size);
                Indexer uint32Indexer = IntIndexer.create(pUint32);
                buffer = Nd4j.createBuffer(pUint32, DataType.UINT32, size, uint32Indexer);
                break;
            case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64:
                LongPointer pUint64 = tens.GetTensorMutableDataULong().capacity(size);
                Indexer uint64Indexer = LongIndexer.create(pUint64);
                buffer = Nd4j.createBuffer(pUint64, DataType.UINT64, size, uint64Indexer);
                break;
            case ONNX_TENSOR_ELEMENT_DATA_TYPE_BFLOAT16:
                ShortPointer pBfloat16 = tens.GetTensorMutableDataShort().capacity(size);
                Indexer bfloat16Indexer = ShortIndexer.create(pBfloat16);
                buffer = Nd4j.createBuffer(pBfloat16, DataType.BFLOAT16, size, bfloat16Indexer);
                break;
            default:
                throw new RuntimeException("Unsupported data type encountered");
        }
        return buffer;
    }
} |
Get the data buffer from the given value
@param tens the values to get
@return the equivalent data buffer
| ONNXUtils::getDataBuffer | java | deeplearning4j/deeplearning4j | nd4j/nd4j-onnxruntime/src/main/java/org/nd4j/onnxruntime/util/ONNXUtils.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-onnxruntime/src/main/java/org/nd4j/onnxruntime/util/ONNXUtils.java | Apache-2.0 |
/**
 * Verifies that the DL4J-ecosystem artifacts found on the classpath (via their
 * -properties.git version metadata) have consistent versions, and that no known
 * obsolete artifacts (nd4j-jblas, Canova) are present. Logs warnings on version,
 * Scala-version or Spark-version mismatches; calls System.exit(1) if an obsolete
 * artifact is detected. Can be disabled via the version-check system property.
 */
public static void checkVersions(){
        // Check is enabled by default; opt out with the system property
        boolean doCheck = Boolean.parseBoolean(System.getProperty(ND4JSystemProperties.VERSION_CHECK_PROPERTY, "true"));
        if(!doCheck){
            return;
        }
        if(ND4JClassLoading.classPresentOnClasspath(ND4J_JBLAS_CLASS)) {
            //nd4j-jblas is ancient and incompatible
            log.error("Found incompatible/obsolete backend and version (nd4j-jblas) on classpath. ND4J is unlikely to"
                    + " function correctly with nd4j-jblas on the classpath. JVM will now exit.");
            System.exit(1);
        }
        if(ND4JClassLoading.classPresentOnClasspath(CANOVA_CLASS)) {
            //Canova is ancient and likely to pull in incompatible dependencies
            log.error("Found incompatible/obsolete library Canova on classpath. ND4J is unlikely to"
                    + " function correctly with this library on the classpath. JVM will now exit.");
            System.exit(1);
        }
        List<VersionInfo> dependencies = getVersionInfos();
        if(dependencies.size() <= 2){
            //No -properties.git files were found on the classpath. This may be due to a misconfigured uber-jar
            // or maybe running in IntelliJ with "dynamic.classpath" set to true (in workspace.xml). Either way,
            // we can't check versions and don't want to log an error, which will more often than not be wrong
            if(dependencies.size() == 0){
                return;
            }
            //Another edge case: no -properties.git files were found, but DL4J and/or DataVec were inferred
            // by class name. If these inferred-by-class-name versions were the only things found, we should also
            // not log a warning, as we can't check versions in this case
            boolean dl4jViaClass = false;
            boolean datavecViaClass = false;
            for(VersionInfo vi : dependencies ){
                if(DL4J_GROUPID.equals(vi.getGroupId()) && DL4J_ARTIFACT.equals(vi.getArtifactId())
                        && (UNKNOWN_VERSION.equals(vi.getBuildVersion()))){
                    dl4jViaClass = true;
                } else if(DATAVEC_GROUPID.equals(vi.getGroupId()) && DATAVEC_ARTIFACT.equals(vi.getArtifactId())
                        && (UNKNOWN_VERSION.equals(vi.getBuildVersion()))){
                    datavecViaClass = true;
                }
            }
            if(dependencies.size() == 1 && (dl4jViaClass || datavecViaClass)){
                return;
            } else if(dependencies.size() == 2 && dl4jViaClass && datavecViaClass){
                return;
            }
        }
        // Collect the distinct version strings of all artifacts in the checked group IDs
        Set<String> foundVersions = new HashSet<>();
        for(VersionInfo vi : dependencies){
            String g = vi.getGroupId();
            if(g != null && GROUPIDS_TO_CHECK.contains(g)){
                String version = vi.getBuildVersion();
                if(version.contains("_spark_")){
                    //Normalize spark versions:
                    // "0.9.1_spark_1" to "0.9.1" and "0.9.1_spark_1-SNAPSHOT" to "0.9.1-SNAPSHOT"
                    version = version.replaceAll("_spark_1","");
                    version = version.replaceAll("_spark_2","");
                }
                foundVersions.add(version);
            }
        }
        boolean logVersions = false;
        if(foundVersions.size() > 1){
            log.warn("*** ND4J VERSION CHECK FAILED - INCOMPATIBLE VERSIONS FOUND ***");
            log.warn("Incompatible versions (different version number) of DL4J, ND4J, RL4J, DataVec, Arbiter are unlikely to function correctly");
            logVersions = true;
        }
        //Also: check for mixed scala versions - but only for our dependencies... These are in the artifact ID,
        // named like dl4j-spark_2.10 and deeplearning4j-ui_2.11
        //And check for mixed spark versions (again, just DL4J/DataVec etc dependencies for now)
        boolean scala210 = false;
        boolean scala211 = false;
        boolean spark1 = false;
        boolean spark2 = false;
        for(VersionInfo vi : dependencies){
            String artifact = vi.getArtifactId();
            if(!scala210 && artifact.contains(SCALA_210_SUFFIX)){
                scala210 = true;
            }
            if(!scala211 && artifact.contains(SCALA_211_SUFFIX)){
                scala211 = true;
            }
            String version = vi.getBuildVersion();
            if(!spark1 && version.contains(SPARK_1_VER_STRING)){
                spark1 = true;
            }
            if(!spark2 && version.contains(SPARK_2_VER_STRING)){
                spark2 = true;
            }
        }
        if(scala210 && scala211){
            log.warn("*** ND4J VERSION CHECK FAILED - FOUND BOTH SCALA VERSION 2.10 AND 2.11 ARTIFACTS ***");
            log.warn("Projects with mixed Scala versions (2.10/2.11) are unlikely to function correctly");
            logVersions = true;
        }
        if(spark1 && spark2){
            log.warn("*** ND4J VERSION CHECK FAILED - FOUND BOTH SPARK VERSION 1 AND 2 ARTIFACTS ***");
            log.warn("Projects with mixed Spark versions (1 and 2) are unlikely to function correctly");
            logVersions = true;
        }
        // Only dump the full per-artifact version listing if some check failed
        if(logVersions){
            log.info("Versions of artifacts found on classpath:");
            logVersionInfo();
        }
} |
Perform a check of the versions of ND4J, DL4J, DataVec, RL4J and Arbiter dependencies, logging a warning
if necessary.
| Detail::checkVersions | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/versioncheck/VersionCheck.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/versioncheck/VersionCheck.java | Apache-2.0 |
/**
 * Creates a DOUBLE-typed list with the given initial size (sets list.size() to {@code size}).
 *
 * @param size initial logical size of the list
 */
public NDArrayList(int size) {
        this(DataType.DOUBLE, size);
} |
Initialize with the desired size.
This will set the list.size()
to be equal to the passed in size
@param size the initial size of the array
| NDArrayList::NDArrayList | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/list/NDArrayList.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/list/NDArrayList.java | Apache-2.0 |
/**
 * Wraps an existing rank-1 array as the list's backing storage.
 *
 * @param container backing array; must be rank 1
 * @param size      initial logical size of the list (list.size() == size)
 * @throws IllegalStateException if the container is not rank 1
 */
// NOTE(review): @NonNull makes the container == null branches below dead code in
// practice (Lombok throws before the body runs) — confirm intent.
public NDArrayList(@NonNull INDArray container,int size) {
        Preconditions.checkState(container == null || container.rank() == 1, "Container must be rank 1: is rank %s",
                container == null ? 0 : container.rank());
        this.container = container;
        this.size = size;
} |
Specify the underlying ndarray for this list.
@param container the underlying array.
@param size the initial size of the array. This will set list.size()
to be equal to the passed in size.
| NDArrayList::NDArrayList | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/list/NDArrayList.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/list/NDArrayList.java | Apache-2.0 |
/**
 * Wraps an existing rank-1 array as backing storage, with an initial logical size of 0.
 *
 * @param container backing array; must be rank 1
 */
public NDArrayList(@NonNull INDArray container) {
        this(container,0);
} |
Specify the underlying ndarray for this list.
@param container the underlying array.
| NDArrayList::NDArrayList | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/list/NDArrayList.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/list/NDArrayList.java | Apache-2.0 |
/**
 * Returns a 1 x size view of the backing array, restricted to the list's
 * current logical size (the container itself may be over-allocated).
 *
 * @return view of the first {@code size} elements, reshaped to (1, size)
 */
public INDArray array() {
        return container.get(NDArrayIndex.interval(0,size)).reshape(1,size);
} |
Get a view of the underlying array
relative to the size of the actual array.
(Sometimes there are overflows in the internals
but you want to use the internal INDArray for computing something
directly, this gives you the relevant subset that reflects the content of the list)
@return the view of the underlying ndarray relative to the collection's real size
| BaseNDArrayList::array | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/list/BaseNDArrayList.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/list/BaseNDArrayList.java | Apache-2.0 |
/**
 * Builds a human-readable, per-device summary of memory state: allocated,
 * workspace, cached, total and free bytes for every device known to the
 * affinity manager.
 *
 * @return multi-line report, one section per device
 */
public String memoryPerDevice() {
        StringBuilder stringBuilder = new StringBuilder();
        for(int i = 0; i < Nd4j.getAffinityManager().getNumberOfDevices(); i++) {
            stringBuilder.append("------Device: " + i + "---------------\n");
            stringBuilder.append("Allocated on device: " + allocatedPerDevice.get(i).get() + "\n");
            stringBuilder.append("Total workspace memory allocated for device: " + workspacesPerDevice.get(i).get() + "\n");
            stringBuilder.append("Cached memory for device: " + cachedPerDevice.get(i).get() + "\n");
            stringBuilder.append("Total device memory available: " + totalPerDevice.get(i).get() + "\n");
            stringBuilder.append("Free total memory for device: " + freePerDevice.get(i).get() + "\n");
            stringBuilder.append("-----------------------------------------\n");
        }
        return stringBuilder.toString();
} |
toString() overview of every device's current status including available memory,
number of workspaces per device, free memory per device, total memory available for a device
@return
| MemoryTracker::memoryPerDevice | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | Apache-2.0 |
/**
 * @return number of bytes currently cached in host memory
 */
public long getCachedHostAmount() {
        return cachedHost.get();
} |
This method returns number of bytes currently cached from host memory
@return
| MemoryTracker::getCachedHostAmount | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | Apache-2.0 |
/**
 * @return number of bytes currently allocated from host memory
 */
public long getAllocatedHostAmount() {
        return allocatedHost.get();
} |
This method returns number of bytes currently allocated from host memory
@return
| MemoryTracker::getAllocatedHostAmount | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | Apache-2.0 |
/**
 * @return total host bytes in use: allocated plus cached
 */
public long getActiveHostAmount() {
        return getAllocatedHostAmount() + getCachedHostAmount();
} |
This method returns number of bytes allocated and cached in host ram
@return
| MemoryTracker::getActiveHostAmount | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | Apache-2.0 |
/**
 * Estimates free memory on a device as total minus both externally-allocated
 * memory (total - reported free) and this tracker's active allocations.
 *
 * @param deviceId device to query
 * @return approximate free bytes on the device
 */
public long getApproximateFreeMemory(int deviceId) {
        // Memory allocated outside this tracker = total - last-known free
        val externalAllocations = getTotalMemory(deviceId) - getFreeMemory(deviceId);
        val active = getActiveMemory(deviceId);
        val free = getTotalMemory(deviceId) - (active + externalAllocations);
        return free;
} |
This method returns approximate free memory on specified device
@param deviceId
@return
| MemoryTracker::getApproximateFreeMemory | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | Apache-2.0 |
/**
 * Queries the native backend for the exact amount of free memory on a device.
 *
 * @param deviceId device to query
 * @return precise free bytes as reported by the native layer
 */
public long getPreciseFreeMemory(int deviceId) {
        // we refresh free memory on device
        val extFree =Nd4j.getNativeOps().getDeviceFreeMemory(deviceId);
        return extFree;
} |
This method returns precise amount of free memory on specified device
@param deviceId
@return
| MemoryTracker::getPreciseFreeMemory | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | Apache-2.0 |
/**
 * @param deviceId device to query
 * @return delta between total memory and free memory for the device
 */
public long getUsableMemory(int deviceId) {
        return getTotalMemory(deviceId) - getFreeMemory(deviceId);
} |
This method returns delta between total memory and free memory
@param deviceId
@return
| MemoryTracker::getUsableMemory | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | Apache-2.0 |
/**
 * @param deviceId device to query
 * @return total device bytes tracked: workspace + regular allocations + cache
 */
public long getActiveMemory(int deviceId) {
        return getWorkspaceAllocatedAmount(deviceId) + getAllocatedAmount(deviceId) + getCachedAmount(deviceId);
} |
This method returns total amount of device memory allocated on specified device
Includes: workspace memory, cached memory, regular memory
@param deviceId
@return
| MemoryTracker::getActiveMemory | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | Apache-2.0 |
/**
 * @param deviceId device to query
 * @return bytes whose lifetime is tied to JVM GC: regular allocations + cache
 *         (workspace memory is excluded)
 */
public long getManagedMemory(int deviceId) {
        return getAllocatedAmount(deviceId) + getCachedAmount(deviceId);
} |
This method returns amount of memory that relies on JVM GC
Includes: cached memory, regular allocated memory
@param deviceId
@return
| MemoryTracker::getManagedMemory | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | Apache-2.0 |
/**
 * Adds to the regular-allocation counter for a device.
 * matchBlock() presumably rounds the size to the allocator's block size — TODO confirm.
 *
 * @param deviceId    device whose counter to update
 * @param memoryAdded bytes allocated
 */
public void incrementAllocatedAmount(int deviceId, long memoryAdded) {
        allocatedPerDevice.get(deviceId).getAndAdd(matchBlock(memoryAdded))
} |
This method increments amount of regular allocated memory
@param deviceId
@param memoryAdded
| MemoryTracker::incrementAllocatedAmount | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | Apache-2.0 |
/**
 * Adds to the cached-memory counter for a device.
 *
 * @param deviceId    device whose counter to update
 * @param memoryAdded bytes newly cached
 */
public void incrementCachedAmount(int deviceId, long memoryAdded) {
        cachedPerDevice.get(deviceId).getAndAdd(matchBlock(memoryAdded));
} |
This method increments amount of cached memory
@param deviceId
@param memoryAdded
| MemoryTracker::incrementCachedAmount | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | Apache-2.0 |
/**
 * Subtracts from the regular-allocation counter for a device.
 *
 * @param deviceId         device whose counter to update
 * @param memorySubtracted bytes released
 */
public void decrementAllocatedAmount(int deviceId, long memorySubtracted) {
        allocatedPerDevice.get(deviceId).getAndAdd(-matchBlock(memorySubtracted));
} |
This method decrements amount of regular allocated memory
@param deviceId
@param memorySubtracted
| MemoryTracker::decrementAllocatedAmount | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | Apache-2.0 |
/**
 * Subtracts from the cached-memory counter for a device.
 *
 * @param deviceId         device whose counter to update
 * @param memorySubtracted bytes evicted from the cache
 */
public void decrementCachedAmount(int deviceId, long memorySubtracted) {
        cachedPerDevice.get(deviceId).getAndAdd(-matchBlock(memorySubtracted));
} |
This method decrements amount of cached memory
@param deviceId
@param memorySubtracted
| MemoryTracker::decrementCachedAmount | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | Apache-2.0 |
/**
 * Adds to the workspace-memory counter for a device.
 *
 * @param deviceId    device whose counter to update
 * @param memoryAdded bytes allocated within workspaces
 */
public void incrementWorkspaceAllocatedAmount(int deviceId, long memoryAdded) {
        workspacesPerDevice.get(deviceId).getAndAdd(matchBlock(memoryAdded));
} |
This method increments amount of memory allocated within workspaces
@param deviceId
@param memoryAdded
| MemoryTracker::incrementWorkspaceAllocatedAmount | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | Apache-2.0 |
/**
 * Subtracts from the workspace-memory counter for a device.
 *
 * @param deviceId         device whose counter to update
 * @param memorySubtracted bytes released from workspaces
 */
public void decrementWorkspaceAmount(int deviceId, long memorySubtracted) {
        workspacesPerDevice.get(deviceId).getAndAdd(-matchBlock(memorySubtracted));
} |
This method decrements amount of memory allocated within workspaces
@param deviceId
@param memorySubtracted
| MemoryTracker::decrementWorkspaceAmount | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/allocator/impl/MemoryTracker.java | Apache-2.0 |
/**
 * Deserializes an evaluation instance from its YAML representation.
 *
 * @param yaml  YAML representation
 * @param clazz concrete evaluation class to deserialize as
 * @param <T>   evaluation type
 * @return deserialized evaluation instance
 * @throws RuntimeException wrapping any IOException from the YAML mapper
 */
public static <T extends IEvaluation> T fromYaml(String yaml, Class<T> clazz) {
        try {
            return JsonMappers.getYamlMapper().readValue(yaml, clazz);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
} |
@param yaml YAML representation
@param clazz Class
@param <T> Type to return
@return Evaluation instance
| BaseEvaluation::fromYaml | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/BaseEvaluation.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/BaseEvaluation.java | Apache-2.0 |
/**
 * Deserializes an evaluation instance from JSON. If the JSON uses a legacy
 * (pre-migration org.deeplearning4j.eval) type id, falls back to
 * attempFromLegacyFromJson to translate and retry.
 *
 * @param json  JSON representation of the evaluation instance
 * @param clazz concrete evaluation class to deserialize as
 * @param <T>   evaluation type
 * @return deserialized evaluation instance
 * @throws RuntimeException wrapping any deserialization failure
 */
public static <T extends IEvaluation> T fromJson(String json, Class<T> clazz) {
        try {
            return JsonMappers.getMapper().readValue(json, clazz);
        } catch (InvalidTypeIdException e) {
            // Unresolvable type id usually means legacy (DL4J-era) class names in the JSON
            if (e.getMessage().contains("Could not resolve type id")) {
                try {
                    return (T) attempFromLegacyFromJson(json, e);
                } catch (Throwable t) {
                    throw new RuntimeException("Cannot deserialize from JSON - JSON is invalid?", t);
                }
            }
            throw new RuntimeException(e);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
} |
 @param json JSON representation of the evaluation instance
@param clazz Class
@param <T> Type to return
@return Evaluation instance
| BaseEvaluation::fromJson | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/BaseEvaluation.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/BaseEvaluation.java | Apache-2.0 |
/**
 * Attempts to load IEvaluation JSON produced by DL4J 1.0.0-beta2 or earlier by
 * rewriting legacy org.deeplearning4j.eval class names to their org.nd4j
 * equivalents and re-parsing. Rethrows the original exception if no legacy
 * class name is recognized.
 *
 * NOTE(review): replaceAll treats these class names as regexes; the unescaped
 * '.' matches any character, which happens to be harmless for literal dots but
 * is worth confirming. Also, "org.deeplearning4j.eval.Evaluation" is a prefix
 * of EvaluationBinary/EvaluationCalibration — verify the first branch cannot
 * match JSON intended for those types.
 *
 * @param json              JSON to attempt to deserialize
 * @param originalException original exception, rethrown if the JSON is not legacy
 */
protected static <T extends IEvaluation> T attempFromLegacyFromJson(String json, InvalidTypeIdException originalException) throws InvalidTypeIdException {
        if (json.contains("org.deeplearning4j.eval.Evaluation")) {
            String newJson = json.replaceAll("org.deeplearning4j.eval.Evaluation", "org.nd4j.evaluation.classification.Evaluation");
            return (T) fromJson(newJson, Evaluation.class);
        }
        if (json.contains("org.deeplearning4j.eval.EvaluationBinary")) {
            String newJson = json.replaceAll("org.deeplearning4j.eval.EvaluationBinary", "org.nd4j.evaluation.classification.EvaluationBinary")
                    .replaceAll("org.deeplearning4j.eval.ROC", "org.nd4j.evaluation.classification.ROC")
                    .replaceAll("org.deeplearning4j.eval.curves.", "org.nd4j.evaluation.curves.");
            return (T) fromJson(newJson, EvaluationBinary.class);
        }
        if (json.contains("org.deeplearning4j.eval.EvaluationCalibration")) {
            String newJson = json.replaceAll("org.deeplearning4j.eval.EvaluationCalibration", "org.nd4j.evaluation.classification.EvaluationCalibration")
                    .replaceAll("org.deeplearning4j.eval.curves.", "org.nd4j.evaluation.curves.");
            return (T) fromJson(newJson, EvaluationCalibration.class);
        }
        if (json.contains("org.deeplearning4j.eval.ROCBinary")) {
            String newJson = json.replaceAll("org.deeplearning4j.eval.ROCBinary", "org.nd4j.evaluation.classification.ROCBinary")
                    .replaceAll("org.deeplearning4j.eval.ROC", "org.nd4j.evaluation.classification.ROC") //Nested ROC instances internally
                    .replaceAll("org.deeplearning4j.eval.curves.", "org.nd4j.evaluation.curves.");
            return (T) fromJson(newJson, ROCBinary.class);
        }
        if (json.contains("org.deeplearning4j.eval.ROCMultiClass")) {
            String newJson = json.replaceAll("org.deeplearning4j.eval.ROCMultiClass", "org.nd4j.evaluation.classification.ROCMultiClass")
                    .replaceAll("org.deeplearning4j.eval.ROC", "org.nd4j.evaluation.classification.ROC") //Nested ROC instances internally
                    .replaceAll("org.deeplearning4j.eval.curves.", "org.nd4j.evaluation.curves.");
            return (T) fromJson(newJson, ROCMultiClass.class);
        }
        if (json.contains("org.deeplearning4j.eval.ROC")) { //Has to be checked after ROCBinary/ROCMultiClass due to it being a prefix
            String newJson = json.replaceAll("org.deeplearning4j.eval.ROC", "org.nd4j.evaluation.classification.ROC")
                    .replaceAll("org.deeplearning4j.eval.curves.", "org.nd4j.evaluation.curves.");
            return (T) fromJson(newJson, ROC.class);
        }
        if (json.contains("org.deeplearning4j.eval.RegressionEvaluation")) {
            String newJson = json.replaceAll("org.deeplearning4j.eval.RegressionEvaluation", "org.nd4j.evaluation.regression.RegressionEvaluation");
            return (T) fromJson(newJson, RegressionEvaluation.class);
        }
        throw originalException;
} |
Attempt to load DL4J IEvaluation JSON from 1.0.0-beta2 or earlier.
Given IEvaluation classes were moved to ND4J with no major changes, a simple "find and replace" for the class
names is used.
@param json JSON to attempt to deserialize
@param originalException Original exception to be re-thrown if it isn't legacy JSON
| BaseEvaluation::attempFromLegacyFromJson | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/BaseEvaluation.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/BaseEvaluation.java | Apache-2.0 |
/**
 * Precision = TP / (TP + FP).
 *
 * @param tpCount  true positive count
 * @param fpCount  false positive count
 * @param edgeCase value returned when both counts are zero (avoids 0/0)
 * @return precision
 */
public static double precision(long tpCount, long fpCount, double edgeCase) {
        //Edge case
        if (tpCount == 0 && fpCount == 0) {
            return edgeCase;
        }
        return tpCount / (double) (tpCount + fpCount);
} |
Calculate the precision from true positive and false positive counts
@param tpCount True positive count
@param fpCount False positive count
 @param edgeCase Edge case value used to avoid 0/0
@return Precision
| EvaluationUtils::precision | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/EvaluationUtils.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/EvaluationUtils.java | Apache-2.0 |
/**
 * Recall = TP / (TP + FN).
 *
 * @param tpCount  true positive count
 * @param fnCount  false negative count
 * @param edgeCase value returned when both counts are zero (avoids 0/0)
 * @return recall
 */
public static double recall(long tpCount, long fnCount, double edgeCase) {
        //Edge case
        if (tpCount == 0 && fnCount == 0) {
            return edgeCase;
        }
        return tpCount / (double) (tpCount + fnCount);
} |
Calculate the recall from true positive and false negative counts
@param tpCount True positive count
@param fnCount False negative count
@param edgeCase Edge case values used to avoid 0/0
@return Recall
| EvaluationUtils::recall | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/EvaluationUtils.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/EvaluationUtils.java | Apache-2.0 |
/**
 * False positive rate = FP / (FP + TN).
 *
 * @param fpCount  false positive count
 * @param tnCount  true negative count
 * @param edgeCase value returned when both counts are zero (avoids 0/0)
 * @return false positive rate
 */
public static double falsePositiveRate(long fpCount, long tnCount, double edgeCase) {
        //Edge case
        if (fpCount == 0 && tnCount == 0) {
            return edgeCase;
        }
        return fpCount / (double) (fpCount + tnCount);
} |
Calculate the false positive rate from the false positive count and true negative count
@param fpCount False positive count
@param tnCount True negative count
@param edgeCase Edge case values are used to avoid 0/0
@return False positive rate
| EvaluationUtils::falsePositiveRate | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/EvaluationUtils.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/EvaluationUtils.java | Apache-2.0 |
/**
 * False negative rate = FN / (FN + TP).
 *
 * @param fnCount  false negative count
 * @param tpCount  true positive count
 * @param edgeCase value returned when both counts are zero (avoids 0/0)
 * @return false negative rate
 */
public static double falseNegativeRate(long fnCount, long tpCount, double edgeCase) {
        //Edge case
        if (fnCount == 0 && tpCount == 0) {
            return edgeCase;
        }
        return fnCount / (double) (fnCount + tpCount);
} |
Calculate the false negative rate from the false negative counts and true positive count
@param fnCount False negative count
@param tpCount True positive count
@param edgeCase Edge case value to use to avoid 0/0
@return False negative rate
| EvaluationUtils::falseNegativeRate | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/EvaluationUtils.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/EvaluationUtils.java | Apache-2.0 |
/**
 * F-beta score computed from raw counts, via precision and recall.
 *
 * NOTE(review): unlike precision()/recall() above, there is no 0/0 guard here —
 * tp + fp == 0 or tp + fn == 0 yields NaN, which propagates through the
 * F-beta calculation. Confirm whether this is intentional.
 *
 * @param beta beta value to use
 * @param tp   true positive count
 * @param fp   false positive count
 * @param fn   false negative count
 * @return F-beta score
 */
public static double fBeta(double beta, long tp, long fp, long fn) {
        double prec = tp / ((double) tp + fp);
        double recall = tp / ((double) tp + fn);
        return fBeta(beta, prec, recall);
} |
Calculate the F beta value from counts
@param beta Beta of value to use
@param tp True positive count
@param fp False positive count
@param fn False negative count
@return F beta
| EvaluationUtils::fBeta | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/EvaluationUtils.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/EvaluationUtils.java | Apache-2.0 |
/**
 * F-beta score = (1 + beta^2) * P * R / (beta^2 * P + R).
 * Returns 0 if either precision or recall is exactly 0 (avoids 0/0).
 *
 * @param beta      beta value to use
 * @param precision precision
 * @param recall    recall
 * @return F-beta score
 */
public static double fBeta(double beta, double precision, double recall) {
        if (precision == 0.0 || recall == 0.0)
            return 0;
        double numerator = (1 + beta * beta) * precision * recall;
        double denominator = beta * beta * precision + recall;
        return numerator / denominator;
} |
Calculate the F-beta value from precision and recall
@param beta Beta value to use
@param precision Precision
@param recall Recall
@return F-beta value
| EvaluationUtils::fBeta | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/EvaluationUtils.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/EvaluationUtils.java | Apache-2.0 |
/**
 * G-measure = sqrt(precision * recall) (geometric mean).
 *
 * @param precision precision value
 * @param recall    recall value
 * @return G-measure
 */
public static double gMeasure(double precision, double recall) {
        return Math.sqrt(precision * recall);
} |
Calculate the G-measure from precision and recall
@param precision Precision value
@param recall Recall value
@return G-measure
| EvaluationUtils::gMeasure | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/EvaluationUtils.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/EvaluationUtils.java | Apache-2.0 |
/**
 * Binary Matthews correlation coefficient:
 * (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)).
 * Note: a zero denominator (any marginal sum of 0) yields NaN.
 *
 * @param tp true positive count
 * @param fp false positive count
 * @param fn false negative count
 * @param tn true negative count
 * @return Matthews correlation coefficient
 */
public static double matthewsCorrelation(long tp, long fp, long fn, long tn) {
        double numerator = ((double) tp) * tn - ((double) fp) * fn;
        double denominator = Math.sqrt(((double) tp + fp) * (tp + fn) * (tn + fp) * (tn + fn));
        return numerator / denominator;
} |
Calculate the binary Matthews correlation coefficient from counts
@param tp True positive count
@param fp False positive counts
@param fn False negative counts
@param tn True negative count
@return Matthews correlation coefficient
| EvaluationUtils::matthewsCorrelation | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/EvaluationUtils.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/EvaluationUtils.java | Apache-2.0 |
/**
 * Reshapes a rank-2 time series mask array to a column vector, in 'f' order
 * (to match the layout assumptions of RnnOutputLayer).
 *
 * @param timeSeriesMask mask array to reshape; must be rank 2
 * @return mask as an (length, 1) column vector in 'f' order
 * @throws IllegalArgumentException if the mask is not rank 2
 */
public static INDArray reshapeTimeSeriesMaskToVector(INDArray timeSeriesMask) {
        if (timeSeriesMask.rank() != 2)
            throw new IllegalArgumentException("Cannot reshape mask: rank is not 2");
        // Duplicate into 'f' order first so the reshape is a cheap view, not a reorder
        if (timeSeriesMask.ordering() != 'f')
            timeSeriesMask = timeSeriesMask.dup('f');
        return timeSeriesMask.reshape('f', timeSeriesMask.length(), 1);
} |
Reshape time series mask arrays. This should match the assumptions (f order, etc) in RnnOutputLayer
@param timeSeriesMask Mask array to reshape to a column vector
@return Mask array as a column vector
| EvaluationUtils::reshapeTimeSeriesMaskToVector | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/EvaluationUtils.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/EvaluationUtils.java | Apache-2.0 |
/**
 * @return area under the curve, computed from this curve's x/y points
 */
protected double calculateArea() {
        return calculateArea(getX(), getY());
} |
@return Area under the curve
| BaseCurve::calculateArea | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/BaseCurve.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/BaseCurve.java | Apache-2.0 |
/**
 * @return JSON representation of the curve
 * @throws RuntimeException wrapping any JsonProcessingException
 */
public String toJson() {
        try {
            return JsonMappers.getMapper().writeValueAsString(this);
        } catch (JsonProcessingException e) {
            throw new RuntimeException(e);
        }
} |
@return JSON representation of the curve
| BaseCurve::toJson | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/BaseCurve.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/BaseCurve.java | Apache-2.0 |
/**
 * @return YAML representation of the curve
 * @throws RuntimeException wrapping any JsonProcessingException
 */
public String toYaml() {
        try {
            return JsonMappers.getYamlMapper().writeValueAsString(this);
        } catch (JsonProcessingException e) {
            throw new RuntimeException(e);
        }
} |
@return YAML representation of the curve
| BaseCurve::toYaml | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/BaseCurve.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/BaseCurve.java | Apache-2.0 |
/**
 * Deserializes a curve from its JSON representation.
 *
 * @param json       JSON representation
 * @param curveClass concrete curve class to deserialize as
 * @param <T>        curve type
 * @return deserialized curve instance
 * @throws RuntimeException wrapping any IOException
 */
public static <T extends BaseCurve> T fromJson(String json, Class<T> curveClass) {
        try {
            return JsonMappers.getMapper().readValue(json, curveClass);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
} |
@param json JSON representation
@param curveClass Class for the curve
@param <T> Type
@return Instance of the curve
| BaseCurve::fromJson | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/BaseCurve.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/BaseCurve.java | Apache-2.0 |
/**
 * Deserializes a curve from its YAML representation.
 *
 * @param yaml       YAML representation
 * @param curveClass concrete curve class to deserialize as
 * @param <T>        curve type
 * @return deserialized curve instance
 * @throws RuntimeException wrapping any IOException
 */
public static <T extends BaseCurve> T fromYaml(String yaml, Class<T> curveClass) {
        try {
            return JsonMappers.getYamlMapper().readValue(yaml, curveClass);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
} |
@param yaml YAML representation
@param curveClass Class for the curve
@param <T> Type
@return Instance of the curve
| BaseCurve::fromYaml | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/BaseCurve.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/BaseCurve.java | Apache-2.0 |
public double getThreshold(int i) {
Preconditions.checkArgument(i >= 0 && i < threshold.length, "Invalid index: " + i);
return threshold[i];
} |
@param i Point number, 0 to numPoints()-1 inclusive
@return Threshold of a given point
| RocCurve::getThreshold | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/RocCurve.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/RocCurve.java | Apache-2.0 |
public double getTruePositiveRate(int i) {
Preconditions.checkArgument(i >= 0 && i < tpr.length, "Invalid index: " + i);
return tpr[i];
} |
@param i Point number, 0 to numPoints()-1 inclusive
@return True positive rate of a given point
| RocCurve::getTruePositiveRate | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/RocCurve.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/RocCurve.java | Apache-2.0 |
public double getFalsePositiveRate(int i) {
Preconditions.checkArgument(i >= 0 && i < fpr.length, "Invalid index: " + i);
return fpr[i];
} |
@param i Point number, 0 to numPoints()-1 inclusive
@return False positive rate of a given point
| RocCurve::getFalsePositiveRate | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/RocCurve.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/RocCurve.java | Apache-2.0 |
public double calculateAUC() {
if (auc != null) {
return auc;
}
auc = calculateArea();
return auc;
} |
Calculate and return the area under ROC curve
| RocCurve::calculateAUC | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/RocCurve.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/RocCurve.java | Apache-2.0 |
public double getThreshold(int i) {
Preconditions.checkArgument(i >= 0 && i < threshold.length, "Invalid index: " + i);
return threshold[i];
} |
@param i Point number, 0 to numPoints()-1 inclusive
@return Threshold of a given point
| PrecisionRecallCurve::getThreshold | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/PrecisionRecallCurve.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/PrecisionRecallCurve.java | Apache-2.0 |
public double getPrecision(int i) {
Preconditions.checkArgument(i >= 0 && i < precision.length, "Invalid index: " + i);
return precision[i];
} |
@param i Point number, 0 to numPoints()-1 inclusive
@return Precision of a given point
| PrecisionRecallCurve::getPrecision | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/PrecisionRecallCurve.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/PrecisionRecallCurve.java | Apache-2.0 |
public double getRecall(int i) {
Preconditions.checkArgument(i >= 0 && i < recall.length, "Invalid index: " + i);
return recall[i];
} |
@param i Point number, 0 to numPoints()-1 inclusive
@return Recall of a given point
| PrecisionRecallCurve::getRecall | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/PrecisionRecallCurve.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/PrecisionRecallCurve.java | Apache-2.0 |
public double calculateAUPRC() {
if (area != null) {
return area;
}
area = calculateArea();
return area;
} |
@return The area under the precision recall curve
| PrecisionRecallCurve::calculateAUPRC | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/PrecisionRecallCurve.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/PrecisionRecallCurve.java | Apache-2.0 |
public Point getPointAtThreshold(double threshold) {
//Return (closest) point number, precision, recall, whether it's interpolated or not
//Binary search to find closest threshold
int idx = Arrays.binarySearch(this.threshold, threshold);
if (idx < 0) {
//Not found (usual case). binarySearch javadoc:
/*
index of the search key, if it is contained in the array;
otherwise, (-(insertion point) - 1). The
insertion point is defined as the point at which the
key would be inserted into the array: the index of the first
element greater than the key, or a.length if all
elements in the array are less than the specified key.
*/
idx = -idx - 1;
}
//At this point: idx = exact, on the next highest
double thr = this.threshold[idx];
double pr = precision[idx];
double rec = recall[idx];
return new Point(idx, thr, pr, rec);
} |
Get the point (index, threshold, precision, recall) at the given threshold.<br>
Note that if the threshold is not found exactly, the next highest threshold exceeding the requested threshold
is returned
@param threshold Threshold to get the point for
@return point (index, threshold, precision, recall) at the given threshold
| PrecisionRecallCurve::getPointAtThreshold | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/PrecisionRecallCurve.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/PrecisionRecallCurve.java | Apache-2.0 |
public Point getPointAtPrecision(double precision) {
//Find the LOWEST threshold that gives the specified precision
for (int i = 0; i < this.precision.length; i++) {
if (this.precision[i] >= precision) {
return new Point(i, threshold[i], this.precision[i], recall[i]);
}
}
//Not found, return last point. Should never happen though...
int i = threshold.length - 1;
return new Point(i, threshold[i], this.precision[i], this.recall[i]);
} |
Get the point (index, threshold, precision, recall) at the given precision.<br>
Specifically, return the points at the lowest threshold that has precision equal to or greater than the
requested precision.
@param precision Precision to get the point for
@return point (index, threshold, precision, recall) at (or closest exceeding) the given precision
| PrecisionRecallCurve::getPointAtPrecision | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/PrecisionRecallCurve.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/PrecisionRecallCurve.java | Apache-2.0 |
public Point getPointAtRecall(double recall) {
Point foundPoint = null;
//Find the HIGHEST threshold that gives the specified recall
for (int i = this.recall.length - 1; i >= 0; i--) {
if (this.recall[i] >= recall) {
if (foundPoint == null ||(this.recall[i] == foundPoint.getRecall() && this.precision[i] >= foundPoint.getPrecision())) {
foundPoint = new Point(i, threshold[i], precision[i], this.recall[i]);
}
}
}
if (foundPoint == null){
//Not found - return first point. Should never happen...
foundPoint = new Point(0, threshold[0], precision[0], this.recall[0]);
}
return foundPoint;
} |
Get the point (index, threshold, precision, recall) at the given recall.<br>
Specifically, return the points at the highest threshold that has recall equal to or greater than the
requested recall.
@param recall Recall to get the point for
@return point (index, threshold, precision, recall) at (or closest exceeding) the given recall
| PrecisionRecallCurve::getPointAtRecall | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/PrecisionRecallCurve.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/PrecisionRecallCurve.java | Apache-2.0 |
public Confusion getConfusionMatrixAtThreshold(double threshold) {
Point p = getPointAtThreshold(threshold);
int idx = p.idx;
int tn = totalCount - (tpCount[idx] + fpCount[idx] + fnCount[idx]);
return new Confusion(p, tpCount[idx], fpCount[idx], fnCount[idx], tn);
} |
Get the binary confusion matrix for the given threshold. As per {@link #getPointAtThreshold(double)},
if the threshold is not found exactly, the next highest threshold exceeding the requested threshold
is returned
@param threshold Threshold at which to get the confusion matrix
@return Binary confusion matrix
| PrecisionRecallCurve::getConfusionMatrixAtThreshold | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/PrecisionRecallCurve.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/PrecisionRecallCurve.java | Apache-2.0 |
public Confusion getConfusionMatrixAtPoint(int point) {
return getConfusionMatrixAtThreshold(threshold[point]);
} |
Get the binary confusion matrix for the given position. As per {@link #getPointAtThreshold(double)}.
@param point Position at which to get the binary confusion matrix
@return Binary confusion matrix
| PrecisionRecallCurve::getConfusionMatrixAtPoint | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/PrecisionRecallCurve.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/PrecisionRecallCurve.java | Apache-2.0 |
public static Histogram fromJson(String json) {
return BaseHistogram.fromJson(json, Histogram.class);
} |
@param json JSON representation
@return Instance of the histogram
| Histogram::fromJson | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/Histogram.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/Histogram.java | Apache-2.0 |
public String toJson() {
try {
return JsonMappers.getMapper().writeValueAsString(this);
} catch (JsonProcessingException e) {
throw new RuntimeException(e);
}
} |
@return JSON representation of the curve
| BaseHistogram::toJson | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/BaseHistogram.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/BaseHistogram.java | Apache-2.0 |
public String toYaml() {
try {
return JsonMappers.getYamlMapper().writeValueAsString(this);
} catch (JsonProcessingException e) {
throw new RuntimeException(e);
}
} |
@return YAML representation of the curve
| BaseHistogram::toYaml | java | deeplearning4j/deeplearning4j | nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/BaseHistogram.java | https://github.com/deeplearning4j/deeplearning4j/blob/master/nd4j/nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/evaluation/curves/BaseHistogram.java | Apache-2.0 |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.