replace deprecated "rewriter.create()" pattern
refactor PIM to Pim everywhere except for the accelerator name
This commit is contained in:
98
src/PIM/Conversion/SpatialToPim/SpatialToPimCommon.cpp
Normal file
98
src/PIM/Conversion/SpatialToPim/SpatialToPimCommon.cpp
Normal file
@@ -0,0 +1,98 @@
|
||||
#include "mlir/IR/ValueRange.h"
|
||||
|
||||
#include "llvm/ADT/STLExtras.h"
|
||||
|
||||
#include <cassert>
|
||||
#include <cstddef>
|
||||
|
||||
#include "SpatialToPimCommon.hpp"
|
||||
|
||||
using namespace llvm;
|
||||
using namespace mlir;
|
||||
|
||||
namespace onnx_mlir {
|
||||
|
||||
/// Computes the linear (flattened) element offset selected by a slice's
/// static offsets within a tensor of the given shape, assuming row-major
/// (last-dimension-fastest) layout.
///
/// EXAMPLE RUN:
///   [1, 10, 3, 4] inputShape
///   [0,  2, 1, 3] offsets
///
///   acc = 1
///   ---
///   ret = 3
///   acc = 4
///   ---
///   ret = 3 + 4 * 1 = 7
///   acc = 12
///   ---
///   ret = 7 + 12 * 2 = 31
///   acc = 120
///   ---
///   ret = 31 + 120 * 0 = 31
///   acc = 120
///
/// \param sliceOp    extract_slice op whose static offsets are read.
/// \param inputShape statically shaped type the offsets index into.
/// \return row-major linear offset of the slice's origin element.
size_t getSliceActualOffset(tensor::ExtractSliceOp& sliceOp, ShapedType& inputShape) {
  auto sliceOffsets = sliceOp.getStaticOffsets();
  auto inputDimSizes = inputShape.getShape();

  assert(sliceOffsets.size() == inputDimSizes.size() &&
         "slice offset rank must match input rank");

  size_t returnValue = 0;
  size_t accumulatedDimensionSize = 1;

  // Walk dimensions from innermost (fastest-varying) to outermost,
  // accumulating the row-major stride as we go.
  for (auto [curSliceOffset, curInputDimSize] :
       reverse(zip(sliceOffsets, inputDimSizes))) {
    // A dynamic offset or dimension would silently fold the kDynamic
    // sentinel into the arithmetic; only fully static slices are supported.
    assert(!ShapedType::isDynamic(curSliceOffset) &&
           !ShapedType::isDynamic(curInputDimSize) &&
           "only static offsets and shapes are supported");

    returnValue += accumulatedDimensionSize * curSliceOffset;
    accumulatedDimensionSize *= curInputDimSize;
  }

  return returnValue;
}
|
||||
|
||||
/// Returns the user of \p value that appears earliest in its block.
/// Precondition: \p value has at least one user, and all users live in the
/// same block (required by Operation::isBeforeInBlock).
Operation* getEarliestUserWithinBlock(Value value) {
  auto users = value.getUsers();
  assert(!users.empty());

  // Single linear scan: the first user seeds the minimum, every later user
  // that precedes the current candidate replaces it.
  Operation* earliest = nullptr;
  for (Operation* user : users) {
    if (earliest == nullptr || user->isBeforeInBlock(earliest))
      earliest = user;
  }

  return earliest;
}
|
||||
|
||||
/// Returns \p operation's operands sorted by ascending total use count.
///
/// \param operation the op whose operands are ranked.
/// \return the operands, least-used first; ties keep operand order.
SmallVector<Value> getOpOperandsSortedByUses(Operation* operation) {
  // Pair each operand with how many uses it has across the IR.
  auto operandsAndUses = map_to_vector(
      operation->getOperands(), [](Value operand) -> std::pair<Value, size_t> {
        return {operand, std::distance(operand.use_begin(), operand.use_end())};
      });
  // stable_sort (not sort) so operands with equal use counts retain their
  // original operand order — keeps pass output deterministic across runs.
  stable_sort(operandsAndUses,
              [](const auto& a, const auto& b) { return a.second < b.second; });
  return map_to_vector(operandsAndUses,
                       [](auto operandAndUse) { return operandAndUse.first; });
}
|
||||
|
||||
/// Chooses an output tensor for \p operation's single result: prefers the
/// least-used operand whose type exactly matches the result type; if none
/// qualifies, materializes a fresh tensor.empty of the result's shape just
/// before \p operation.
///
/// \param rewriter  rewriter used to create the fallback tensor.empty.
/// \param operation op needing an output tensor; must have exactly one
///                  result of ShapedType.
/// \return a reusable operand, or a newly created empty tensor Value.
Value getBestOutputTensorFromOperandsOrAllocate(PatternRewriter& rewriter, Operation* operation) {
  assert("Only support operations with a single result" && operation->getNumResults() == 1);
  Value result = operation->getResult(0);
  auto resultType = result.getType();
  assert("Only support result ShapedType as result type" && isa<ShapedType>(resultType));

  // Scan candidates in ascending use-count order; the first operand whose
  // type matches the result exactly is the least-contended reusable tensor.
  for (Value candidate : getOpOperandsSortedByUses(operation)) {
    if (candidate.getType() == resultType)
      return candidate;
  }

  // No operand can serve as the output: allocate an empty tensor of the
  // result's shape immediately before the operation.
  auto shapedResultType = cast<ShapedType>(resultType);
  rewriter.setInsertionPoint(operation);
  return tensor::EmptyOp::create(
      rewriter, operation->getLoc(), shapedResultType.getShape(), shapedResultType.getElementType());
}
|
||||
|
||||
} // namespace onnx_mlir
|
||||
Reference in New Issue
Block a user