// Spatial-to-PIM lowering helper utilities.
#include "mlir/IR/ValueRange.h"

#include "llvm/ADT/STLExtras.h"

#include <cassert>
#include <cstddef>

#include "SpatialToPimCommon.hpp"

using namespace llvm;
using namespace mlir;

namespace onnx_mlir {
size_t getSliceActualOffset(tensor::ExtractSliceOp& sliceOp, ShapedType& inputShape) {
  // Computes the linearized (flat, row-major) element offset of the slice's
  // static offsets inside a tensor of type `inputShape`.
  /*
  EXAMPLE RUN:

  [1, 10, 3, 4] inputShape
  [0, 2, 1, 3] offsets

  acc = 1
  ---
  ret = 3
  acc = 4
  ---
  ret = 3 + 4 * 1 = 7
  acc = 12
  ---
  ret = 7 + 12 * 2 = 31
  acc = 120
  ---
  ret = 31 + 120 * 0 = 31
  acc = 120
  */

  size_t returnValue = 0;

  auto sliceOffsets = sliceOp.getStaticOffsets();
  auto inputDimSizes = inputShape.getShape();

  // One offset per input dimension, otherwise the linearization is meaningless.
  assert(sliceOffsets.size() == inputDimSizes.size());

  // Row-major stride of the current dimension: running product of the sizes
  // of all dimensions processed so far.
  size_t accumulatedDimensionSize = 1;

  // Reverse iterate the two vectors: walk from the innermost
  // (fastest-varying) dimension outwards.
  for (auto it : reverse(zip(sliceOffsets, inputDimSizes))) {
    auto curSliceOffset = std::get<0>(it);
    auto curInputDimSize = std::get<1>(it);

    // FIX: getStaticOffsets() encodes a *dynamic* offset as the
    // ShapedType::kDynamic sentinel (a large negative value); folding it into
    // the accumulator would silently corrupt the result. The same applies to
    // a dynamic dimension size in the input type. Guard both explicitly.
    assert(!ShapedType::isDynamic(curSliceOffset) &&
           "getSliceActualOffset requires fully static slice offsets");
    assert(!ShapedType::isDynamic(curInputDimSize) &&
           "getSliceActualOffset requires a statically shaped input");

    returnValue += accumulatedDimensionSize * curSliceOffset;
    accumulatedDimensionSize *= curInputDimSize;
  }

  return returnValue;
}
Operation* getEarliestUserWithinBlock(mlir::Value value) {
  // Returns the user of `value` that appears earliest in the block.
  // Precondition: `value` has at least one user, and all users share a block
  // (isBeforeInBlock requires operations from the same block).
  auto users = value.getUsers();
  assert(!users.empty());

  // isBeforeInBlock gives a strict ordering of operations within one block,
  // so the minimum element under it is the textually earliest user.
  return *llvm::min_element(users, [](Operation* lhs, Operation* rhs) {
    return lhs->isBeforeInBlock(rhs);
  });
}
SmallVector<mlir::Value> getOpOperandsSortedByUses(Operation* operation) {
  // Returns the operands of `operation` ordered by ascending total use count.
  // Pair each operand with its number of uses; Value has no use-count
  // accessor, so walk the use list once per operand.
  auto operandsAndUses =
      map_to_vector(operation->getOperands(), [](mlir::Value operand) -> std::pair<mlir::Value, size_t> {
        return {operand, std::distance(operand.use_begin(), operand.use_end())};
      });
  // FIX: use stable_sort so operands with equal use counts keep their original
  // operand order — plain sort leaves ties in an implementation-defined order,
  // making the result (and downstream compiler output) nondeterministic.
  // Compare by const reference to avoid copying the pairs in the comparator.
  stable_sort(operandsAndUses,
              [](const auto& a, const auto& b) { return a.second < b.second; });
  return map_to_vector(operandsAndUses, [](auto operandAndUse) { return operandAndUse.first; });
}
mlir::Value getBestOutputTensorFromOperandsOrAllocate(PatternRewriter& rewriter, Operation* operation) {
  // Picks an output tensor for `operation`: prefer the least-used operand
  // whose type exactly matches the (single) result type; if none matches,
  // materialize a fresh tensor.empty of the result's shape instead.
  assert("Only support operations with a single result" && operation->getNumResults() == 1);
  mlir::Value result = operation->getResult(0);
  auto resultType = result.getType();
  assert("Only support result ShapedType as result type" && isa<ShapedType>(resultType));

  // Candidates are visited least-used first, so the first type match wins.
  SmallVector<mlir::Value> operands = getOpOperandsSortedByUses(operation);
  auto match = find_if(
      operands, [resultType](mlir::Value operand) { return operand.getType() == resultType; });
  if (match != operands.end())
    return *match;

  // No reusable operand: allocate an empty tensor with the result's shape and
  // element type, inserted immediately before `operation`.
  auto resultShapedType = cast<ShapedType>(resultType);
  rewriter.setInsertionPoint(operation);
  return tensor::EmptyOp::create(
      rewriter, operation->getLoc(), resultShapedType.getShape(), resultShapedType.getElementType());
}

} // namespace onnx_mlir