#pragma once

#include "mlir/IR/Block.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/ValueRange.h"
#include "mlir/Transforms/DialectConversion.h"

#include <cassert>
#include <cstddef>
#include <type_traits>
#include <utility>

#include "src/Accelerators/PIM/Dialect/Spatial/SpatialOps.hpp"
namespace onnx_mlir {

namespace detail {
/// Returns a ValueRange view over all arguments of `block`.
inline mlir::ValueRange getBlockArgs(mlir::Block* block) {
  auto arguments = block->getArguments();
  return mlir::ValueRange(arguments);
}
/// Calls `fn` with the first sizeof...(Is) arguments of `block`, expanded as
/// individual mlir::Value parameters: fn(block->getArgument(0), ...).
/// `decltype(auto)` preserves the callee's exact return type (including
/// references), and `fn` is perfectly forwarded.
template <typename Fn, size_t... Is>
decltype(auto) invokeWithBlockArgs(Fn&& fn, mlir::Block* block, std::index_sequence<Is...>) {
  return std::forward<Fn>(fn)(block->getArgument(Is)...);
}
/// Calls `fn` with values[0] ... values[sizeof...(Is) - 1] expanded as
/// individual mlir::Value parameters. Counterpart of invokeWithBlockArgs for a
/// materialized value array; `values` must have at least sizeof...(Is) entries.
template <typename Fn, size_t... Is>
decltype(auto) invokeWithValues(Fn&& fn, mlir::ArrayRef<mlir::Value> values, std::index_sequence<Is...>) {
  return std::forward<Fn>(fn)(values[Is]...);
}
/// Maps any index to mlir::Value; used to expand an index sequence into a
/// parameter pack of N mlir::Value types (see InvokeWithBlockArgsResult).
template <size_t>
using ValueArg = mlir::Value;
/// Trait computing the result type of invoking `Fn` with one mlir::Value per
/// element of the index sequence `Seq`. The primary template is intentionally
/// left undefined; only the std::index_sequence specialization below is usable.
template <typename Fn, typename Seq>
struct InvokeWithBlockArgsResult;

template <typename Fn, size_t... Is>
struct InvokeWithBlockArgsResult<Fn, std::index_sequence<Is...>> {
  // ValueArg<Is> expands to exactly one mlir::Value parameter per index.
  using type = std::invoke_result_t<Fn, ValueArg<Is>...>;
};

/// Convenience alias for InvokeWithBlockArgsResult<Fn, Seq>::type.
template <typename Fn, typename Seq>
using InvokeWithBlockArgsResultT = typename InvokeWithBlockArgsResult<Fn, Seq>::type;
/// Result type of invoking `Fn` with a single mlir::ValueRange argument
/// (the variadic counterpart of InvokeWithBlockArgsResultT).
template <typename Fn>
using InvokeWithValueRangeResultT = std::invoke_result_t<Fn, mlir::ValueRange>;
} // namespace detail
template <typename RewriterT>
|
|
inline mlir::Value createSpatConcat(RewriterT& rewriter, mlir::Location loc, int64_t axis, mlir::ValueRange inputs) {
|
|
assert(!inputs.empty() && "spat.concat requires at least one input");
|
|
if (inputs.size() == 1)
|
|
return inputs.front();
|
|
|
|
auto firstType = mlir::cast<mlir::RankedTensorType>(inputs.front().getType());
|
|
auto outputShape = llvm::to_vector(firstType.getShape());
|
|
int64_t concatDimSize = 0;
|
|
bool concatDimDynamic = false;
|
|
|
|
for (mlir::Value input : inputs) {
|
|
auto inputType = mlir::cast<mlir::RankedTensorType>(input.getType());
|
|
assert(inputType.getRank() == firstType.getRank() && "spat.concat expects same-rank inputs");
|
|
if (mlir::ShapedType::isDynamic(inputType.getDimSize(axis)))
|
|
concatDimDynamic = true;
|
|
else
|
|
concatDimSize += inputType.getDimSize(axis);
|
|
}
|
|
|
|
outputShape[axis] = concatDimDynamic ? mlir::ShapedType::kDynamic : concatDimSize;
|
|
auto outputType = mlir::RankedTensorType::get(outputShape, firstType.getElementType(), firstType.getEncoding());
|
|
return spatial::SpatConcatOp::create(rewriter, loc, outputType, rewriter.getI64IntegerAttr(axis), inputs).getOutput();
|
|
}
|
|
|
|
/// Builds a `spat.compute` with a fixed number of SSA inputs and erases it if
|
|
/// the body callback reports failure.
|
|
template <size_t NumInputs, typename RewriterT, typename BodyFn>
|
|
auto createSpatCompute(RewriterT& rewriter,
|
|
mlir::Location loc,
|
|
mlir::TypeRange resultTypes,
|
|
mlir::ValueRange weights,
|
|
mlir::ValueRange inputs,
|
|
BodyFn&& body) {
|
|
assert(inputs.size() == NumInputs && "NumInputs must match the number of input values");
|
|
auto computeOp = spatial::SpatCompute::create(rewriter, loc, resultTypes, weights, inputs);
|
|
|
|
auto* block = new mlir::Block();
|
|
for (mlir::Value input : inputs)
|
|
block->addArgument(input.getType(), loc);
|
|
|
|
computeOp.getBody().push_back(block);
|
|
rewriter.setInsertionPointToStart(block);
|
|
|
|
using BodyResult = detail::InvokeWithBlockArgsResultT<std::decay_t<BodyFn>, std::make_index_sequence<NumInputs>>;
|
|
if constexpr (std::is_same_v<BodyResult, void>) {
|
|
detail::invokeWithBlockArgs(std::forward<BodyFn>(body), block, std::make_index_sequence<NumInputs> {});
|
|
|
|
rewriter.setInsertionPointAfter(computeOp);
|
|
return computeOp;
|
|
}
|
|
else {
|
|
auto bodyResult =
|
|
detail::invokeWithBlockArgs(std::forward<BodyFn>(body), block, std::make_index_sequence<NumInputs> {});
|
|
if (mlir::failed(bodyResult)) {
|
|
rewriter.setInsertionPointAfter(computeOp);
|
|
rewriter.eraseOp(computeOp);
|
|
return mlir::FailureOr<spatial::SpatCompute>(mlir::failure());
|
|
}
|
|
rewriter.setInsertionPointAfter(computeOp);
|
|
return mlir::FailureOr<spatial::SpatCompute>(computeOp);
|
|
}
|
|
}
|
|
|
|
/// Builds a `spat.compute` whose body consumes the block arguments as a single
|
|
/// `ValueRange`, which is convenient for variadic reductions/concats.
|
|
template <typename RewriterT, typename BodyFn>
|
|
auto createSpatCompute(RewriterT& rewriter,
|
|
mlir::Location loc,
|
|
mlir::TypeRange resultTypes,
|
|
mlir::ValueRange weights,
|
|
mlir::ValueRange inputs,
|
|
BodyFn&& body) {
|
|
auto computeOp = spatial::SpatCompute::create(rewriter, loc, resultTypes, weights, inputs);
|
|
|
|
auto* block = new mlir::Block();
|
|
for (mlir::Value input : inputs)
|
|
block->addArgument(input.getType(), loc);
|
|
|
|
computeOp.getBody().push_back(block);
|
|
rewriter.setInsertionPointToStart(block);
|
|
|
|
using BodyResult = detail::InvokeWithValueRangeResultT<std::decay_t<BodyFn>>;
|
|
if constexpr (std::is_same_v<BodyResult, void>) {
|
|
std::forward<BodyFn>(body)(detail::getBlockArgs(block));
|
|
|
|
rewriter.setInsertionPointAfter(computeOp);
|
|
return computeOp;
|
|
}
|
|
else {
|
|
auto bodyResult = std::forward<BodyFn>(body)(detail::getBlockArgs(block));
|
|
if (mlir::failed(bodyResult)) {
|
|
rewriter.setInsertionPointAfter(computeOp);
|
|
rewriter.eraseOp(computeOp);
|
|
return mlir::FailureOr<spatial::SpatCompute>(mlir::failure());
|
|
}
|
|
rewriter.setInsertionPointAfter(computeOp);
|
|
return mlir::FailureOr<spatial::SpatCompute>(computeOp);
|
|
}
|
|
}
|
|
|
|
/// Sums `tensors` into a single value (defined in the corresponding .cpp).
/// NOTE(review): signature suggests a non-empty list of shape-compatible
/// tensor values is required -- confirm against the definition.
mlir::Value sumTensors(mlir::ArrayRef<mlir::Value> tensors, mlir::ConversionPatternRewriter& rewriter);
} // namespace onnx_mlir