add validation tests for softmax, resize, split, gather
This commit is contained in:
@@ -85,6 +85,36 @@ python3 validation/operations/gen_tests.py
|
||||
| 4D | `sigmoid/4d` | [2,3,4,4] | [2,3,4,4] | Standalone NCHW Sigmoid |
|
||||
| After Gemm | `sigmoid/after_gemm` | [4,64] | [4,32] | Gemm + bias, then Sigmoid |
|
||||
|
||||
## Softmax

| Test         | Directory              | Input     | Output    | Axis | Notes                          |
|--------------|------------------------|-----------|-----------|------|--------------------------------|
| Basic        | `softmax/basic`        | [3,5]     | [3,5]     | 1    | Row-wise softmax over features |
| 3D last axis | `softmax/3d_last_axis` | [2,3,4]   | [2,3,4]   | 2    | Last-dimension normalization   |
| Channel axis | `softmax/channel_axis` | [1,3,2,2] | [1,3,2,2] | 1    | NCHW channel-wise softmax      |
|
||||
## Resize

| Test               | Directory            | Input     | Output    | Mode    | Notes                                  |
|--------------------|----------------------|-----------|-----------|---------|----------------------------------------|
| Nearest 2x         | `resize/nearest_2x`  | [1,1,2,3] | [1,1,4,6] | nearest | NCHW upsampling with scales [1,1,2,2]  |
| Non-uniform scales | `resize/non_uniform` | [1,1,2,3] | [1,1,6,6] | nearest | Different height/width scaling factors |
| Explicit sizes     | `resize/with_sizes`  | [1,1,2,3] | [1,1,3,5] | nearest | Sizes input used instead of scales     |
|
||||
## Split

| Test            | Directory               | Input | Outputs             | Axis | Notes                             |
|-----------------|-------------------------|-------|---------------------|------|-----------------------------------|
| Basic           | `split/basic`           | [2,6] | [2,2], [2,4]        | 1    | Two-way split with explicit sizes |
| Equal three-way | `split/equal_three_way` | [2,6] | [2,2], [2,2], [2,2] | 1    | Optional split input omitted      |
|
||||
## Gather

| Test                  | Directory                     | Input | Indices | Output  | Axis | Notes                       |
|-----------------------|-------------------------------|-------|---------|---------|------|-----------------------------|
| Axis 1                | `gather/axis1`                | [3,4] | [2]     | [3,2]   | 1    | Select two columns          |
| Axis 0 matrix indices | `gather/axis0_matrix_indices` | [4,3] | [2,2]   | [2,2,3] | 0    | Gather rows with 2D indices |
|
||||
## Add
|
||||
|
||||
| Test | Directory | Input(s) | Output | Notes |
|
||||
|
||||
Binary file not shown.
BIN
validation/operations/gather/axis1/gather_axis1.onnx
Normal file
BIN
validation/operations/gather/axis1/gather_axis1.onnx
Normal file
Binary file not shown.
@@ -1,5 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Generate ONNX test models for validating GEMM, Conv, Pooling, Relu, and ReduceMean implementations."""
|
||||
"""Generate ONNX test models for validating supported ONNX operations."""
|
||||
|
||||
import numpy as np
|
||||
import onnx
|
||||
@@ -473,6 +473,140 @@ def sigmoid_after_gemm():
|
||||
save_model(model, "sigmoid/after_gemm", "sigmoid_after_gemm.onnx")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Softmax tests
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def softmax_basic():
    """Row-wise Softmax on a rank-2 tensor (normalizes along axis 1)."""
    shape = [3, 5]
    x_info = helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)
    y_info = helper.make_tensor_value_info("Y", TensorProto.FLOAT, shape)
    softmax = helper.make_node("Softmax", inputs=["X"], outputs=["Y"], axis=1)
    graph = helper.make_graph([softmax], "softmax_basic", [x_info], [y_info])
    model = helper.make_model(
        graph, opset_imports=[helper.make_opsetid("", 13)])
    save_model(model, "softmax/basic", "softmax_basic.onnx")
|
||||
|
||||
|
||||
def softmax_3d_last_axis():
    """Softmax normalizing the final axis of a rank-3 tensor."""
    shape = [2, 3, 4]
    x_info = helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)
    y_info = helper.make_tensor_value_info("Y", TensorProto.FLOAT, shape)
    # axis=2 is the last dimension of the [2, 3, 4] input.
    softmax = helper.make_node("Softmax", inputs=["X"], outputs=["Y"], axis=2)
    graph = helper.make_graph(
        [softmax], "softmax_3d_last_axis", [x_info], [y_info])
    model = helper.make_model(
        graph, opset_imports=[helper.make_opsetid("", 13)])
    save_model(model, "softmax/3d_last_axis", "softmax_3d_last_axis.onnx")
|
||||
|
||||
|
||||
def softmax_channel_axis():
    """Softmax across the channel (C) axis of an NCHW tensor."""
    shape = [1, 3, 2, 2]
    x_info = helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)
    y_info = helper.make_tensor_value_info("Y", TensorProto.FLOAT, shape)
    # axis=1 targets C in NCHW layout.
    softmax = helper.make_node("Softmax", inputs=["X"], outputs=["Y"], axis=1)
    graph = helper.make_graph(
        [softmax], "softmax_channel_axis", [x_info], [y_info])
    model = helper.make_model(
        graph, opset_imports=[helper.make_opsetid("", 13)])
    save_model(model, "softmax/channel_axis", "softmax_channel_axis.onnx")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Resize tests
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def resize_nearest_2x():
    """Nearest-neighbor 2x spatial upsampling of an NCHW tensor."""
    x_info = helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 1, 2, 3])
    y_info = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 1, 4, 6])
    # The roi input is unused in "asymmetric" mode but must still be supplied.
    roi_init = numpy_helper.from_array(
        np.asarray([], dtype=np.float32), name="roi")
    scales_init = numpy_helper.from_array(
        np.asarray([1.0, 1.0, 2.0, 2.0], dtype=np.float32), name="scales")
    resize = helper.make_node(
        "Resize",
        inputs=["X", "roi", "scales"],
        outputs=["Y"],
        mode="nearest",
        coordinate_transformation_mode="asymmetric",
        nearest_mode="floor",
    )
    graph = helper.make_graph(
        [resize], "resize_nearest_2x", [x_info], [y_info],
        initializer=[roi_init, scales_init])
    model = helper.make_model(
        graph, opset_imports=[helper.make_opsetid("", 13)])
    save_model(model, "resize/nearest_2x", "resize_nearest_2x.onnx")
|
||||
|
||||
|
||||
def resize_nearest_non_uniform():
    """Nearest-neighbor Resize with different height (3x) and width (2x) scales."""
    x_info = helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 1, 2, 3])
    y_info = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 1, 6, 6])
    # roi is unused in "asymmetric" mode but required positionally.
    roi_init = numpy_helper.from_array(
        np.asarray([], dtype=np.float32), name="roi")
    # H scales by 3, W by 2: [1,1,2,3] -> [1,1,6,6].
    scales_init = numpy_helper.from_array(
        np.asarray([1.0, 1.0, 3.0, 2.0], dtype=np.float32), name="scales")
    resize = helper.make_node(
        "Resize",
        inputs=["X", "roi", "scales"],
        outputs=["Y"],
        mode="nearest",
        coordinate_transformation_mode="asymmetric",
        nearest_mode="floor",
    )
    graph = helper.make_graph(
        [resize], "resize_nearest_non_uniform", [x_info], [y_info],
        initializer=[roi_init, scales_init])
    model = helper.make_model(
        graph, opset_imports=[helper.make_opsetid("", 13)])
    save_model(model, "resize/non_uniform", "resize_non_uniform.onnx")
|
||||
|
||||
|
||||
def resize_with_sizes():
    """Resize driven by an explicit output-shape (sizes) input instead of scales."""
    x_info = helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 1, 2, 3])
    y_info = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 1, 3, 5])
    roi_init = numpy_helper.from_array(
        np.asarray([], dtype=np.float32), name="roi")
    sizes_init = make_int64_initializer("sizes", [1, 1, 3, 5])
    # The empty string skips the optional scales input so sizes takes effect.
    resize = helper.make_node(
        "Resize",
        inputs=["X", "roi", "", "sizes"],
        outputs=["Y"],
        mode="nearest",
        coordinate_transformation_mode="asymmetric",
        nearest_mode="floor",
    )
    graph = helper.make_graph(
        [resize], "resize_with_sizes", [x_info], [y_info],
        initializer=[roi_init, sizes_init])
    model = helper.make_model(
        graph, opset_imports=[helper.make_opsetid("", 13)])
    save_model(model, "resize/with_sizes", "resize_with_sizes.onnx")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Split tests
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def split_basic():
    """Two-way Split of a [2,6] tensor along axis 1 with explicit part sizes."""
    x_info = helper.make_tensor_value_info("X", TensorProto.FLOAT, [2, 6])
    out_infos = [
        helper.make_tensor_value_info("Y0", TensorProto.FLOAT, [2, 2]),
        helper.make_tensor_value_info("Y1", TensorProto.FLOAT, [2, 4]),
    ]
    # In opset 13 the per-output sizes arrive as a second (initializer) input.
    split_sizes = make_int64_initializer("split", [2, 4])
    split_node = helper.make_node(
        "Split", inputs=["X", "split"], outputs=["Y0", "Y1"], axis=1)
    graph = helper.make_graph(
        [split_node], "split_basic", [x_info], out_infos,
        initializer=[split_sizes])
    model = helper.make_model(
        graph, opset_imports=[helper.make_opsetid("", 13)])
    save_model(model, "split/basic", "split_basic.onnx")
|
||||
|
||||
|
||||
def split_equal_three_way():
    """Even three-way Split of a [2,6] tensor; the optional split input is omitted."""
    x_info = helper.make_tensor_value_info("X", TensorProto.FLOAT, [2, 6])
    out_names = ["Y0", "Y1", "Y2"]
    out_infos = [
        helper.make_tensor_value_info(name, TensorProto.FLOAT, [2, 2])
        for name in out_names
    ]
    # With no split input, the axis is divided equally among the outputs.
    split_node = helper.make_node("Split", inputs=["X"], outputs=out_names, axis=1)
    graph = helper.make_graph(
        [split_node], "split_equal_three_way", [x_info], out_infos)
    model = helper.make_model(
        graph, opset_imports=[helper.make_opsetid("", 13)])
    save_model(model, "split/equal_three_way", "split_equal_three_way.onnx")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Gather tests
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def gather_axis1():
    """Gather columns 0 and 2 from a [3,4] tensor along axis 1."""
    x_info = helper.make_tensor_value_info("X", TensorProto.FLOAT, [3, 4])
    y_info = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [3, 2])
    idx_init = make_int64_initializer("indices", [0, 2])
    gather_node = helper.make_node(
        "Gather", inputs=["X", "indices"], outputs=["Y"], axis=1)
    graph = helper.make_graph(
        [gather_node], "gather_axis1", [x_info], [y_info],
        initializer=[idx_init])
    model = helper.make_model(
        graph, opset_imports=[helper.make_opsetid("", 13)])
    save_model(model, "gather/axis1", "gather_axis1.onnx")
|
||||
|
||||
|
||||
def gather_axis0_matrix_indices():
    """Gather rows of a [4,3] tensor with 2D indices, producing a rank-3 output."""
    x_info = helper.make_tensor_value_info("X", TensorProto.FLOAT, [4, 3])
    # Output rank = indices rank (2) + data rank (2) - 1 = 3.
    y_info = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [2, 2, 3])
    idx_init = make_int64_initializer("indices", [[0, 2], [3, 1]])
    gather_node = helper.make_node(
        "Gather", inputs=["X", "indices"], outputs=["Y"], axis=0)
    graph = helper.make_graph(
        [gather_node], "gather_axis0_matrix_indices", [x_info], [y_info],
        initializer=[idx_init])
    model = helper.make_model(
        graph, opset_imports=[helper.make_opsetid("", 13)])
    save_model(model, "gather/axis0_matrix_indices", "gather_axis0_matrix_indices.onnx")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Add tests
|
||||
# ---------------------------------------------------------------------------
|
||||
@@ -599,55 +733,6 @@ def div_after_gemm():
|
||||
save_model(model, "div/after_gemm", "div_after_gemm.onnx")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# ReduceMean tests
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def reducemean_basic():
    """Mean over the feature axis of a 2D tensor, keeping the reduced dim."""
    x_info = helper.make_tensor_value_info("X", TensorProto.FLOAT, [4, 8])
    y_info = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [4, 1])
    # Opset 13 still passes axes as a node attribute (input-form is opset 18+).
    mean_node = helper.make_node(
        "ReduceMean", inputs=["X"], outputs=["Y"], axes=[1], keepdims=1)
    graph = helper.make_graph(
        [mean_node], "reducemean_basic", [x_info], [y_info])
    model = helper.make_model(
        graph, opset_imports=[helper.make_opsetid("", 13)])
    save_model(model, "reduce_mean/basic", "reduce_mean_basic.onnx")
|
||||
|
||||
|
||||
def reducemean_keepdims_0():
    """Mean over the feature axis of a 2D tensor, squeezing the reduced dim."""
    x_info = helper.make_tensor_value_info("X", TensorProto.FLOAT, [4, 8])
    # keepdims=0 drops axis 1, so the result is rank 1.
    y_info = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [4])
    mean_node = helper.make_node(
        "ReduceMean", inputs=["X"], outputs=["Y"], axes=[1], keepdims=0)
    graph = helper.make_graph(
        [mean_node], "reducemean_keepdims_0", [x_info], [y_info])
    model = helper.make_model(
        graph, opset_imports=[helper.make_opsetid("", 13)])
    save_model(model, "reduce_mean/keepdims_0", "reduce_mean_keepdims_0.onnx")
|
||||
|
||||
|
||||
def reducemean_4d_spatial():
    """Spatial (H, W) mean of an NCHW tensor — a global-average-pool shape."""
    x_info = helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 3, 4, 4])
    y_info = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 3, 1, 1])
    mean_node = helper.make_node(
        "ReduceMean", inputs=["X"], outputs=["Y"], axes=[2, 3], keepdims=1)
    graph = helper.make_graph(
        [mean_node], "reducemean_4d_spatial", [x_info], [y_info])
    model = helper.make_model(
        graph, opset_imports=[helper.make_opsetid("", 13)])
    save_model(model, "reduce_mean/4d_spatial", "reduce_mean_4d_spatial.onnx")
|
||||
|
||||
|
||||
def reducemean_after_conv():
    """Conv feeding a ReduceMean over H and W of the conv output."""
    rng = np.random.default_rng(62)  # fixed seed keeps the weights reproducible
    x_info = helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 3, 5, 5])
    y_info = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 2, 1, 1])
    # Draw W before B so the RNG stream matches previously generated models.
    w_init = numpy_helper.from_array(
        rng.uniform(-1, 1, (2, 3, 3, 3)).astype(np.float32), name="W")
    b_init = numpy_helper.from_array(
        rng.uniform(-1, 1, (2,)).astype(np.float32), name="B")
    conv_node = helper.make_node(
        "Conv", inputs=["X", "W", "B"], outputs=["C"],
        kernel_shape=[3, 3], strides=[1, 1], pads=[0, 0, 0, 0])
    mean_node = helper.make_node(
        "ReduceMean", inputs=["C"], outputs=["Y"], axes=[2, 3], keepdims=1)
    graph = helper.make_graph(
        [conv_node, mean_node], "reducemean_after_conv", [x_info], [y_info],
        initializer=[w_init, b_init])
    model = helper.make_model(
        graph, opset_imports=[helper.make_opsetid("", 13)])
    save_model(model, "reduce_mean/after_conv", "reduce_mean_after_conv.onnx")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Main
|
||||
# ---------------------------------------------------------------------------
|
||||
@@ -699,6 +784,24 @@ if __name__ == "__main__":
|
||||
sigmoid_4d()
|
||||
sigmoid_after_gemm()
|
||||
|
||||
print("\nGenerating Split tests:")
|
||||
split_basic()
|
||||
split_equal_three_way()
|
||||
|
||||
print("\nGenerating Softmax tests:")
|
||||
softmax_basic()
|
||||
softmax_3d_last_axis()
|
||||
softmax_channel_axis()
|
||||
|
||||
print("\nGenerating Resize tests:")
|
||||
resize_nearest_2x()
|
||||
resize_nearest_non_uniform()
|
||||
resize_with_sizes()
|
||||
|
||||
print("\nGenerating Gather tests:")
|
||||
gather_axis1()
|
||||
gather_axis0_matrix_indices()
|
||||
|
||||
print("\nGenerating Add tests:")
|
||||
add_basic()
|
||||
add_broadcast_row()
|
||||
|
||||
BIN
validation/operations/resize/nearest_2x/resize_nearest_2x.onnx
Normal file
BIN
validation/operations/resize/nearest_2x/resize_nearest_2x.onnx
Normal file
Binary file not shown.
BIN
validation/operations/resize/non_uniform/resize_non_uniform.onnx
Normal file
BIN
validation/operations/resize/non_uniform/resize_non_uniform.onnx
Normal file
Binary file not shown.
BIN
validation/operations/resize/with_sizes/resize_with_sizes.onnx
Normal file
BIN
validation/operations/resize/with_sizes/resize_with_sizes.onnx
Normal file
Binary file not shown.
Binary file not shown.
BIN
validation/operations/softmax/basic/softmax_basic.onnx
Normal file
BIN
validation/operations/softmax/basic/softmax_basic.onnx
Normal file
Binary file not shown.
Binary file not shown.
BIN
validation/operations/split/basic/split_basic.onnx
Normal file
BIN
validation/operations/split/basic/split_basic.onnx
Normal file
Binary file not shown.
Binary file not shown.
Reference in New Issue
Block a user