fix issue in inference logic used to calculate tensor size to support 4D (batch) tensors

This commit is contained in:
swdee
2025-06-30 13:27:08 +12:00
parent 51d5c80ce0
commit 207a0a5a41
+2 -2
View File
@@ -60,7 +60,7 @@ func (r *Runtime) Inference(mats []gocv.Mat) (*Outputs, error) {
Index: uint32(idx),
Type: TensorFloat32,
// multiply by 4 for size of float32
Size: uint32(mat.Cols() * mat.Rows() * mat.Channels() * 4),
Size: uint32(len(data) * 4), // bytes = elements * 4
Fmt: TensorNHWC,
Buf: unsafe.Pointer(&data[0]),
PassThrough: false,
@@ -77,7 +77,7 @@ func (r *Runtime) Inference(mats []gocv.Mat) (*Outputs, error) {
inputs[idx] = Input{
Index: uint32(idx),
Type: TensorUint8,
Size: uint32(mat.Cols() * mat.Rows() * mat.Channels()),
Size: uint32(len(data)), // bytes = elements
Fmt: TensorNHWC,
Buf: unsafe.Pointer(&data[0]),
PassThrough: false,