constant fold linalg.map (generated from tensor.pad for padding)

refactor pim helpers in PimCommon
This commit is contained in:
NiccoloN
2026-03-20 20:51:20 +01:00
parent dbe646ac0d
commit 6933804003
14 changed files with 751 additions and 263 deletions

View File

@@ -54,7 +54,7 @@ size_t getSliceActualOffset(tensor::ExtractSliceOp& sliceOp, ShapedType& inputSh
return returnValue;
}
Operation* getEarliestUserWithinBlock(Value value) {
Operation* getEarliestUserWithinBlock(mlir::Value value) {
auto users = value.getUsers();
assert(!users.empty());
@@ -67,23 +67,24 @@ Operation* getEarliestUserWithinBlock(Value value) {
return earliestUser;
}
SmallVector<Value> getOpOperandsSortedByUses(Operation* operation) {
auto operandsAndUses = map_to_vector(operation->getOperands(), [](Value operand) -> std::pair<Value, size_t> {
SmallVector<mlir::Value> getOpOperandsSortedByUses(Operation* operation) {
auto operandsAndUses =
map_to_vector(operation->getOperands(), [](mlir::Value operand) -> std::pair<mlir::Value, size_t> {
return {operand, std::distance(operand.use_begin(), operand.use_end())};
});
sort(operandsAndUses, [](auto a, auto b) { return a.second < b.second; });
return map_to_vector(operandsAndUses, [](auto operandAndUse) { return operandAndUse.first; });
}
Value getBestOutputTensorFromOperandsOrAllocate(PatternRewriter& rewriter, Operation* operation) {
mlir::Value getBestOutputTensorFromOperandsOrAllocate(PatternRewriter& rewriter, Operation* operation) {
assert("Only support operations with a single result" && operation->getNumResults() == 1);
Value result = operation->getResult(0);
mlir::Value result = operation->getResult(0);
auto resultType = result.getType();
assert("Only support result ShapedType as result type" && isa<ShapedType>(resultType));
SmallVector<Value> operands = getOpOperandsSortedByUses(operation);
SmallVector<mlir::Value> operands = getOpOperandsSortedByUses(operation);
auto validOperands =
make_filter_range(operands, [resultType](Value operand) { return operand.getType() == resultType; });
make_filter_range(operands, [resultType](mlir::Value operand) { return operand.getType() == resultType; });
auto bestOperand = validOperands.begin();
if (bestOperand != validOperands.end())

View File

@@ -2,6 +2,7 @@
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "src/Accelerators/PIM/Common/PimCommon.hpp"
#include "src/Accelerators/PIM/Dialect/Spatial/SpatialOps.hpp"
namespace onnx_mlir {
@@ -39,71 +40,13 @@ mlir::SmallVector<mlir::Value> getOpOperandsSortedByUses(mlir::Operation* operat
mlir::Value getBestOutputTensorFromOperandsOrAllocate(mlir::PatternRewriter& rewriter, mlir::Operation* operation);
// Returns true when the hyper-rectangular slice described by `offsets`/`sizes`
// (with the given `strides`) over a tensor of shape `srcShape` selects a single
// contiguous run of elements.
// NOTE(review): the right-to-left scans assume the rightmost dimension varies
// fastest (row-major layout) -- confirm this matches the buffers produced by
// the surrounding lowering.
static bool isMemoryContiguous(const mlir::ArrayRef<int64_t> srcShape,
const mlir::ArrayRef<int64_t> offsets,
const mlir::ArrayRef<int64_t> sizes,
const mlir::ArrayRef<int64_t> strides) {
// Check that all strides are 1: a stride > 1 skips elements within a
// dimension and can never be contiguous.
if (std::any_of(strides.begin(), strides.end(), [](int64_t stride) -> bool { return stride != 1; }))
return false;
// Check offsets from right to left:
// The first offset_n at position n different from 0:
// - limits all sizes to the left to 1
// - limits size_n to dimension_n - offset_n
// Zip the reversed ranges so iteration walks dimensions innermost-first.
auto offsetsAndSizesAndShape = llvm::zip_equal(llvm::make_range(offsets.rbegin(), offsets.rend()),
llvm::make_range(sizes.rbegin(), sizes.rend()),
llvm::make_range(srcShape.rbegin(), srcShape.rend()));
auto firstNonZeroOffset = std::find_if(
offsetsAndSizesAndShape.begin(), offsetsAndSizesAndShape.end(), [&](auto offsetAndSizeAndShape) -> bool {
auto [offset, _size, _dimension] = offsetAndSizeAndShape;
return offset != 0;
});
if (firstNonZeroOffset != offsetsAndSizesAndShape.end()) {
auto [offset, size, dimension] = *firstNonZeroOffset;
// The slice along this dimension must stay inside the source extent.
if (size > dimension - offset)
return false;
// Every dimension to the left (later in this reversed iteration) must be
// degenerate (size 1); otherwise the offset splits the selection into
// multiple non-adjacent runs.
++firstNonZeroOffset;
if (std::any_of(firstNonZeroOffset, offsetsAndSizesAndShape.end(), [](auto offsetAndSizeAndShape) -> bool {
auto [_offset, size, _dimension] = offsetAndSizeAndShape;
return size != 1;
}))
return false;
}
// Check sizes from right to left:
// The first size_n at position n different from shape_n limits all sizes to the left to 1
auto sizesAndShape = llvm::zip_equal(llvm::make_range(sizes.rbegin(), sizes.rend()),
llvm::make_range(srcShape.rbegin(), srcShape.rend()));
auto firstDifferentSize = std::find_if(sizesAndShape.begin(), sizesAndShape.end(), [&](auto sizeAndShape) -> bool {
auto [size, dimension] = sizeAndShape;
return size != dimension;
});
if (firstDifferentSize != sizesAndShape.end()) {
// Once a dimension is only partially covered, all dimensions to its left
// must be degenerate (size 1) for the selected elements to stay adjacent.
++firstDifferentSize;
if (std::any_of(firstDifferentSize, sizesAndShape.end(), [](auto sizeAndShape) -> bool {
auto [size, _] = sizeAndShape;
return size != 1;
}))
return false;
}
return true;
}
// Builds a `tensor.empty` op whose shape and element type mirror `shapedType`,
// inserted at the rewriter's current insertion point at `loc`.
inline mlir::tensor::EmptyOp
createEmptyTensorFromShaped(mlir::IRRewriter& rewriter, mlir::Location loc, mlir::ShapedType shapedType) {
  const auto shape = shapedType.getShape();
  const auto elementType = shapedType.getElementType();
  return mlir::tensor::EmptyOp::create(rewriter, loc, shape, elementType);
}
inline bool isAConcatOp(mlir::Operation* op) {
return isa<mlir::tensor::ConcatOp>(op) || isa<spatial::SpatImgConcatOp>(op);
return llvm::isa<mlir::tensor::ConcatOp>(op) || llvm::isa<spatial::SpatImgConcatOp>(op);
}
} // namespace onnx_mlir

View File

@@ -129,7 +129,7 @@ void SpatialToPimPass::runOnOperation() {
}
// Dump to file for debug
dumpModule(moduleOp, "pim");
dumpModule(moduleOp, "pim0");
}
void SpatialToPimPass::runOnComputeOp(spatial::SpatWeightedCompute computeOp, IRRewriter& rewriter) {