compact pim IR
Validate Operations / validate-operations (push) Successful in 22m15s

This commit is contained in:
NiccoloN
2026-05-06 17:16:51 +02:00
parent 7bb58e80de
commit f2fe147961
13 changed files with 2264 additions and 307 deletions
+2
View File
@@ -6,6 +6,8 @@ add_subdirectory(Transforms/Bufferization)
add_pim_library(PimOps
PimOps.hpp
PimOps.cpp
PimOpsAsm.cpp
PimOpsVerify.cpp
EXCLUDE_FROM_OM_LIBS
+161 -8
View File
@@ -50,9 +50,7 @@ def PimCoreBatchOp : PimOp<"core_batch", [SingleBlock, IsolatedFromAbove, AttrSi
Variadic<PimTensor>:$inputs
);
let assemblyFormat = [{
`lanes` $laneCount `(` $weights `)` `[` $inputs `]` attr-dict regions `:` type($weights) `[` type($inputs) `]` `->` `(` `)`
}];
let hasCustomAssemblyFormat = 1;
}
def PimHaltOp : PimOp<"halt", [Terminator]> {
@@ -63,6 +61,48 @@ def PimHaltOp : PimOp<"halt", [Terminator]> {
}];
}
def PimYieldOp : PimOp<"yield", [Terminator]> {
let summary = "Yield results from a Pim region";
let arguments = (ins
Variadic<PimTensor>:$outputs
);
let hasCustomAssemblyFormat = 1;
}
def PimMapOp : PimOp<"map", [SingleBlock]> {
let summary = "Apply the same lane-local region to many independent tensors";
let arguments = (ins
Variadic<PimTensor>:$inputs
);
let results = (outs
Variadic<PimTensor>:$outputs
);
let regions = (region SizedRegion<1>:$body);
let hasVerifier = 1;
let hasCustomAssemblyFormat = 1;
}
//===----------------------------------------------------------------------===//
// Tensor Utilities
//===----------------------------------------------------------------------===//
def PimEmptyManyOp : PimOp<"empty_many", []> {
let summary = "Create many identical empty tensors";
let results = (outs
Variadic<AnyRankedTensor>:$outputs
);
let hasVerifier = 1;
let hasCustomAssemblyFormat = 1;
}
//===----------------------------------------------------------------------===//
// Communication
//===----------------------------------------------------------------------===//
@@ -81,6 +121,18 @@ def PimSendOp : PimOp<"send", []> {
}];
}
def PimSendManyOp : PimOp<"send_many", []> {
let summary = "Send multiple tensors to target cores";
let arguments = (ins
DenseI32ArrayAttr:$targetCoreIds,
Variadic<PimTensor>:$inputs
);
let hasVerifier = 1;
let hasCustomAssemblyFormat = 1;
}
def PimSendBatchOp : PimOp<"send_batch", []> {
let summary = "Send a per-lane tensor to target cores from a batched core";
@@ -90,9 +142,19 @@ def PimSendBatchOp : PimOp<"send_batch", []> {
DenseI32ArrayAttr:$targetCoreIds
);
let assemblyFormat = [{
`(` $input `)` attr-dict `:` type($input) `->` `(` `)`
}];
let hasCustomAssemblyFormat = 1;
}
def PimSendManyBatchOp : PimOp<"send_many_batch", []> {
let summary = "Send multiple per-lane tensors to target cores from a batched core";
let arguments = (ins
DenseI32ArrayAttr:$targetCoreIds,
Variadic<PimTensor>:$inputs
);
let hasVerifier = 1;
let hasCustomAssemblyFormat = 1;
}
def PimReceiveOp : PimOp<"receive", [DestinationStyleOpInterface]> {
@@ -119,6 +181,28 @@ def PimReceiveOp : PimOp<"receive", [DestinationStyleOpInterface]> {
}];
}
def PimReceiveManyOp : PimOp<"receive_many", [DestinationStyleOpInterface]> {
let summary = "Receive multiple tensors from source cores";
let arguments = (ins
Variadic<PimTensor>:$outputBuffers,
DenseI32ArrayAttr:$sourceCoreIds
);
let results = (outs
Variadic<PimTensor>:$outputs
);
let extraClassDeclaration = [{
mlir::MutableOperandRange getDpsInitsMutable() {
return getOutputBuffersMutable();
}
}];
let hasVerifier = 1;
let hasCustomAssemblyFormat = 1;
}
def PimReceiveBatchOp : PimOp<"receive_batch", [DestinationStyleOpInterface]> {
let summary = "Receive per-lane tensors from source cores into a batched core";
@@ -138,9 +222,29 @@ def PimReceiveBatchOp : PimOp<"receive_batch", [DestinationStyleOpInterface]> {
}
}];
let assemblyFormat = [{
`(` $outputBuffer `)` attr-dict `:` type($outputBuffer) `->` type($output)
let hasCustomAssemblyFormat = 1;
}
def PimReceiveManyBatchOp : PimOp<"receive_many_batch", [DestinationStyleOpInterface]> {
let summary = "Receive multiple per-lane tensors from source cores into a batched core";
let arguments = (ins
Variadic<PimTensor>:$outputBuffers,
DenseI32ArrayAttr:$sourceCoreIds
);
let results = (outs
Variadic<PimTensor>:$outputs
);
let extraClassDeclaration = [{
mlir::MutableOperandRange getDpsInitsMutable() {
return getOutputBuffersMutable();
}
}];
let hasVerifier = 1;
let hasCustomAssemblyFormat = 1;
}
def PimMemCopyHostToDevOp : PimOp<"memcp_hd", [DestinationStyleOpInterface]> {
@@ -247,6 +351,55 @@ def PimMemCopyOp : PimOp<"memcp", [DestinationStyleOpInterface]> {
}];
}
//===----------------------------------------------------------------------===//
// Tensor utilities
//===----------------------------------------------------------------------===//
def PimExtractRowsOp : PimOp<"extract_rows", [DestinationStyleOpInterface]> {
let summary = "Extract every row of a rank-2 tensor as separate rank-2 row tensors";
let arguments = (ins
PimTensor:$input,
Variadic<PimTensor>:$outputBuffers
);
let results = (outs
Variadic<PimTensor>:$outputs
);
let extraClassDeclaration = [{
mlir::MutableOperandRange getDpsInitsMutable() {
return getOutputBuffersMutable();
}
}];
let hasVerifier = 1;
let hasCustomAssemblyFormat = 1;
}
def PimConcatOp : PimOp<"concat", [DestinationStyleOpInterface]> {
let summary = "Concatenate tensors";
let arguments = (ins
I64Attr:$axis,
Variadic<PimTensor>:$inputs,
PimTensor:$outputBuffer
);
let results = (outs
PimTensor:$output
);
let extraClassDeclaration = [{
mlir::MutableOperandRange getDpsInitsMutable() {
return getOutputBufferMutable();
}
}];
let hasVerifier = 1;
let hasCustomAssemblyFormat = 1;
}
//===----------------------------------------------------------------------===//
// Math
//===----------------------------------------------------------------------===//
-14
View File
@@ -1,19 +1,5 @@
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/IntegerSet.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/OpImplementation.h"
#include "mlir/IR/PatternMatch.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "src/Accelerators/PIM/Dialect/Pim/PimOps.hpp"
using namespace mlir;
namespace onnx_mlir {
namespace pim {
+486
View File
@@ -0,0 +1,486 @@
#include "mlir/IR/DialectImplementation.h"
#include "mlir/IR/OpImplementation.h"
#include "mlir/IR/Value.h"
#include "llvm/Support/LogicalResult.h"
#include "src/Accelerators/PIM/Common/PimCommon.hpp"
#include "src/Accelerators/PIM/Common/IR/CompactAsmUtils.hpp"
#include "src/Accelerators/PIM/Dialect/Pim/PimOps.hpp"
using namespace mlir;
namespace onnx_mlir {
namespace pim {
namespace {
using namespace onnx_mlir::compact_asm;
// Wraps the parser's builder to turn parsed integers into a DenseI32ArrayAttr.
static DenseI32ArrayAttr getDenseI32ArrayAttr(OpAsmParser& parser, ArrayRef<int32_t> values) {
return parser.getBuilder().getDenseI32ArrayAttr(values);
}
// Prints " <keyword> " followed by the core ids in compressed-list form.
static void printCoreIdList(OpAsmPrinter& printer, StringRef keyword, ArrayRef<int32_t> coreIds) {
printer << " " << keyword << " ";
printCompressedIntegerList(printer, coreIds);
}
// Parses "<keyword> <compressed integer list>" when the keyword is present.
// When the keyword is absent this succeeds without consuming any tokens and
// leaves coreIds empty, so callers use coreIds.empty() as "not specified".
static ParseResult parseOptionalCoreIdList(OpAsmParser& parser, StringRef keyword, SmallVectorImpl<int32_t>& coreIds) {
if (failed(parser.parseOptionalKeyword(keyword)))
return success();
return parseCompressedIntegerList(parser, coreIds);
}
} // namespace
// Custom printer. Emits:
//   lanes <N> (<weights>) [<inputs>] [coreIds <ids>] attr-dict region
//     : (<weight types>) [<input types>] -> ()
// When laneCount > 1 and every lane carries an identical weight tuple, weights
// (and their types) are printed as a compact repeated-tuple run instead of a
// full compressed list.
void PimCoreBatchOp::print(OpAsmPrinter& printer) {
printer << " lanes " << getLaneCount() << " ";
// Weights are assumed laid out lane-major: each lane owns size()/laneCount entries.
size_t weightsPerLane = getLaneCount() > 0 ? getWeights().size() / static_cast<size_t>(getLaneCount()) : 0;
if (getLaneCount() > 1 && hasRepeatedTuple(getWeights(), weightsPerLane))
printValueTupleRun(printer, getWeights(), weightsPerLane, ListDelimiter::Paren);
else
printCompressedValueList(printer, getWeights(), ListDelimiter::Paren);
printer << " ";
printCompressedValueList(printer, getInputs(), ListDelimiter::Square);
// The coreIds attribute gets a positional spelling; it is elided from the
// attr-dict below so it is never printed twice.
if (auto coreIdsAttr = (*this)->getAttrOfType<DenseI32ArrayAttr>(onnx_mlir::kCoreIdsAttrName))
printCoreIdList(printer, "coreIds", coreIdsAttr.asArrayRef());
printer.printOptionalAttrDict(
(*this)->getAttrs(),
{getLaneCountAttrName().getValue(), getOperandSegmentSizesAttrName().getValue(), onnx_mlir::kCoreIdsAttrName});
printer << " ";
printer.printRegion(getBody(), /*printEntryBlockArgs=*/false);
printer << " : ";
// Type list mirrors the value list: tuple-run form iff the values used it.
if (getLaneCount() > 1 && hasRepeatedTuple(TypeRange(getWeights()), weightsPerLane))
printTypeTupleRun(printer, TypeRange(getWeights()), weightsPerLane, ListDelimiter::Paren);
else
printCompressedTypeList(printer, TypeRange(getWeights()), ListDelimiter::Paren);
printer << " ";
printCompressedTypeList(printer, TypeRange(getInputs()), ListDelimiter::Square);
printer << " -> ()";
}
// Custom parser, inverse of PimCoreBatchOp::print. Accepts coreIds either
// positionally (after the input list) or in the attr-dict, but not both.
// Rebuilds the laneCount and operandSegmentSizes attributes that the printer
// elides from the attr-dict.
ParseResult PimCoreBatchOp::parse(OpAsmParser& parser, OperationState& result) {
int32_t laneCount = 0;
SmallVector<OpAsmParser::UnresolvedOperand> weights;
SmallVector<OpAsmParser::UnresolvedOperand> inputs;
SmallVector<Type> weightTypes;
SmallVector<Type> inputTypes;
SmallVector<int32_t> coreIds;
// "lanes <N> (<weights>) [<inputs>]" — weights may use the tuple-run form.
if (parser.parseKeyword("lanes") || parser.parseInteger(laneCount)
|| parseCompressedOrTupleOperandList(parser, ListDelimiter::Paren, weights)
|| parseCompressedOperandList(parser, ListDelimiter::Square, inputs))
return failure();
bool hasCoreIds = succeeded(parser.parseOptionalKeyword("coreIds"));
if (hasCoreIds && parseCompressedIntegerList(parser, coreIds))
return failure();
if (parser.parseOptionalAttrDict(result.attributes))
return failure();
Region* body = result.addRegion();
if (parser.parseRegion(*body))
return failure();
// Trailing type signature: ": (<weight types>) [<input types>] -> ()".
if (parser.parseColon() || parseCompressedOrTupleTypeList(parser, ListDelimiter::Paren, weightTypes)
|| parseCompressedTypeList(parser, ListDelimiter::Square, inputTypes) || parser.parseArrow()
|| parser.parseLParen() || parser.parseRParen())
return failure();
if (weights.size() != weightTypes.size())
return parser.emitError(parser.getCurrentLocation(), "number of weights and weight types must match");
if (inputs.size() != inputTypes.size())
return parser.emitError(parser.getCurrentLocation(), "number of inputs and input types must match");
// Reject double specification: positional "coreIds ..." plus attr-dict entry.
if (hasCoreIds && result.attributes.get(onnx_mlir::kCoreIdsAttrName))
return parser.emitError(parser.getCurrentLocation(),
"coreIds cannot be specified both positionally and in attr-dict");
auto& builder = parser.getBuilder();
result.addAttribute("laneCount", builder.getI32IntegerAttr(laneCount));
// Required by AttrSizedOperandSegments: how the flat operand list splits
// into the weights and inputs variadic groups.
result.addAttribute("operandSegmentSizes",
builder.getDenseI32ArrayAttr(
{static_cast<int32_t>(weights.size()), static_cast<int32_t>(inputs.size())}));
if (hasCoreIds)
result.addAttribute(onnx_mlir::kCoreIdsAttrName, getDenseI32ArrayAttr(parser, coreIds));
if (parser.resolveOperands(weights, weightTypes, parser.getCurrentLocation(), result.operands)
|| parser.resolveOperands(inputs, inputTypes, parser.getCurrentLocation(), result.operands)) {
return failure();
}
return success();
}
// Custom printer: " <outputs> attr-dict : <output types>". The colon and type
// sequence are printed even when there are no outputs; the parser accepts the
// empty type sequence to match.
void PimYieldOp::print(OpAsmPrinter& printer) {
printer << " ";
printCompressedValueSequence(printer, getOutputs());
printer.printOptionalAttrDict((*this)->getAttrs());
printer << " : ";
printCompressedTypeSequence(printer, getOutputs().getTypes());
}
// Custom parser for pim.yield. The operand list is optional (a terminator may
// yield nothing); it is detected by speculatively parsing a first operand and
// then continuing with comma-separated compressed entries.
ParseResult PimYieldOp::parse(OpAsmParser& parser, OperationState& result) {
SmallVector<OpAsmParser::UnresolvedOperand> outputs;
SmallVector<Type> outputTypes;
OpAsmParser::UnresolvedOperand firstOutput;
OptionalParseResult firstOutputResult = parser.parseOptionalOperand(firstOutput);
if (firstOutputResult.has_value()) {
// An operand token was found: fold it into the first compressed entry,
// then keep consuming ", <entry>" while commas follow.
if (failed(*firstOutputResult))
return failure();
if (parseCompressedOperandEntryWithFirst(parser, firstOutput, outputs))
return failure();
while (succeeded(parser.parseOptionalComma()))
if (parseOneCompressedOperandEntry(parser, outputs))
return failure();
}
// The colon and (possibly empty) type sequence are always present.
if (parser.parseOptionalAttrDict(result.attributes) || parser.parseColon()
|| parseCompressedTypeSequence(parser, outputTypes, /*allowEmpty=*/true))
return failure();
if (outputs.size() != outputTypes.size())
return parser.emitError(parser.getCurrentLocation(), "number of outputs and output types must match");
return parser.resolveOperands(outputs, outputTypes, parser.getCurrentLocation(), result.operands);
}
// Custom printer: argument bindings, attr-dict, then a single input type and
// a single output type ("T -> U") followed by the body region. Only the first
// input/output type is printed because the verifier enforces that all inputs
// share one type and all outputs share another.
// NOTE(review): front() is unguarded here — relies on the verifier's
// "at least one input" invariant holding for any op being printed.
void PimMapOp::print(OpAsmPrinter& printer) {
printer << " ";
printArgumentBindings(printer, getBody().front(), getInputs());
printer.printOptionalAttrDict((*this)->getAttrs());
printer << " : ";
printer.printType(getInputs().front().getType());
printer << " -> ";
printer.printType(getOutputs().front().getType());
printer << " ";
printer.printRegion(getBody(), /*printEntryBlockArgs=*/false);
}
// Custom parser for pim.map. Parses "<bindings> attr-dict : T -> U <region>",
// replicating T and U across all inputs/outputs (one result per input).
// NOTE(review): this creates one region entry-block argument per input
// binding, but PimMapOp::verify() requires the body block to have exactly one
// argument — confirm whether multi-input maps are meant to parse (as written
// they will parse and then fail verification).
ParseResult PimMapOp::parse(OpAsmParser& parser, OperationState& result) {
SmallVector<OpAsmParser::Argument> regionArgs;
SmallVector<OpAsmParser::UnresolvedOperand> inputs;
Type inputType;
Type outputType;
if (parseArgumentBindings(parser, regionArgs, inputs))
return failure();
if (inputs.empty())
return parser.emitError(parser.getCurrentLocation(), "map requires at least one input");
if (parser.parseOptionalAttrDict(result.attributes) || parser.parseColon() || parser.parseType(inputType)
|| parser.parseArrow() || parser.parseType(outputType))
return failure();
// The single printed types apply uniformly to every input and output.
SmallVector<Type> inputTypes(inputs.size(), inputType);
SmallVector<Type> outputTypes(inputs.size(), outputType);
if (regionArgs.size() != inputs.size())
return parser.emitError(parser.getCurrentLocation(), "number of argument bindings and input operands must match");
if (parser.resolveOperands(inputs, inputTypes, parser.getCurrentLocation(), result.operands))
return failure();
result.addTypes(outputTypes);
// Attach the replicated input type to each region argument before the
// region body is parsed (the printed form elides entry-block arg types).
applyArgumentTypes(inputTypes, regionArgs);
Region* body = result.addRegion();
return parser.parseRegion(*body, regionArgs);
}
// Custom printer: "attr-dict : <result type> x <count>". Only the first
// result's type is printed; the verifier guarantees all results share it.
void PimEmptyManyOp::print(OpAsmPrinter& printer) {
  printer.printOptionalAttrDict((*this)->getAttrs());
  printer << " : ";
  printer.printType(getOutputs().front().getType());
  // A space must separate 'x' from the count: "x4" lexes as one identifier
  // token, which the parser's parseKeyword("x") + parseInteger pair cannot
  // consume, breaking print/parse round-tripping.
  printer << " x " << getOutputs().size();
}
// Custom parser: "attr-dict : <type> x <count>", producing <count> results of
// the single printed type.
// NOTE(review): the companion printer emits " x" with no space before the
// count, which lexes as a single identifier token (e.g. "x4") that
// parseKeyword("x") cannot match — confirm the round-trip and add a space on
// the print side if needed.
ParseResult PimEmptyManyOp::parse(OpAsmParser& parser, OperationState& result) {
Type outputType;
int64_t resultCount = 0;
if (parser.parseOptionalAttrDict(result.attributes) || parser.parseColon() || parser.parseType(outputType)
|| parser.parseKeyword("x") || parser.parseInteger(resultCount))
return failure();
if (resultCount <= 0)
return parser.emitError(parser.getCurrentLocation(), "result count after 'x' must be positive");
// Replicate the one printed type across all results.
SmallVector<Type> resultTypes(resultCount, outputType);
result.addTypes(resultTypes);
return success();
}
// Custom printer: " <input> to <core ids> attr-dict : <input type>".
// targetCoreIds gets the positional "to" spelling and is elided from the
// attr-dict so it is never printed twice.
void PimSendBatchOp::print(OpAsmPrinter& printer) {
printer << " ";
printer.printOperand(getInput());
printCoreIdList(printer, "to", getTargetCoreIds());
printer.printOptionalAttrDict((*this)->getAttrs(), {getTargetCoreIdsAttrName().getValue()});
printer << " : ";
printer.printType(getInput().getType());
}
// Custom parser, inverse of PimSendBatchOp::print. targetCoreIds may come
// either from the positional "to" list or from the attr-dict, but not both.
ParseResult PimSendBatchOp::parse(OpAsmParser& parser, OperationState& result) {
OpAsmParser::UnresolvedOperand input;
Type inputType;
SmallVector<int32_t> targetCoreIds;
if (parser.parseOperand(input) || parseOptionalCoreIdList(parser, "to", targetCoreIds)
|| parser.parseOptionalAttrDict(result.attributes) || parser.parseColon() || parser.parseType(inputType))
return failure();
// Empty targetCoreIds means the "to" list was absent (see parseOptionalCoreIdList).
if (!targetCoreIds.empty() && result.attributes.get("targetCoreIds"))
return parser.emitError(parser.getCurrentLocation(),
"targetCoreIds cannot be specified both positionally and in attr-dict");
if (!targetCoreIds.empty())
result.addAttribute("targetCoreIds", getDenseI32ArrayAttr(parser, targetCoreIds));
return parser.resolveOperand(input, inputType, result.operands);
}
// Custom printer: " <inputs> to <core ids> attr-dict : <input types>".
// targetCoreIds gets the positional "to" spelling and is elided from the
// attr-dict to avoid double printing.
void PimSendManyOp::print(OpAsmPrinter& printer) {
printer << " ";
printCompressedValueSequence(printer, getInputs());
printCoreIdList(printer, "to", getTargetCoreIds());
printer.printOptionalAttrDict((*this)->getAttrs(), {getTargetCoreIdsAttrName().getValue()});
printer << " : ";
printCompressedTypeSequence(printer, TypeRange(getInputs()));
}
// Custom parser, inverse of PimSendManyOp::print. targetCoreIds may come from
// the positional "to" list or the attr-dict, but not both.
ParseResult PimSendManyOp::parse(OpAsmParser& parser, OperationState& result) {
SmallVector<OpAsmParser::UnresolvedOperand> inputs;
SmallVector<Type> inputTypes;
SmallVector<int32_t> targetCoreIds;
if (parseCompressedOperandSequence(parser, inputs) || parseOptionalCoreIdList(parser, "to", targetCoreIds)
|| parser.parseOptionalAttrDict(result.attributes) || parser.parseColon()
|| parseCompressedTypeSequence(parser, inputTypes, /*allowEmpty=*/false))
return failure();
if (inputs.size() != inputTypes.size())
return parser.emitError(parser.getCurrentLocation(), "number of inputs and input types must match");
// Empty targetCoreIds means the "to" list was absent.
if (!targetCoreIds.empty() && result.attributes.get("targetCoreIds"))
return parser.emitError(parser.getCurrentLocation(),
"targetCoreIds cannot be specified both positionally and in attr-dict");
if (!targetCoreIds.empty())
result.addAttribute("targetCoreIds", getDenseI32ArrayAttr(parser, targetCoreIds));
return parser.resolveOperands(inputs, inputTypes, parser.getCurrentLocation(), result.operands);
}
// Custom printer for pim.send_many_batch; same shape as PimSendManyOp::print:
// " <inputs> to <core ids> attr-dict : <input types>", with targetCoreIds
// elided from the attr-dict.
void PimSendManyBatchOp::print(OpAsmPrinter& printer) {
printer << " ";
printCompressedValueSequence(printer, getInputs());
printCoreIdList(printer, "to", getTargetCoreIds());
printer.printOptionalAttrDict((*this)->getAttrs(), {getTargetCoreIdsAttrName().getValue()});
printer << " : ";
printCompressedTypeSequence(printer, TypeRange(getInputs()));
}
// Custom parser for pim.send_many_batch; mirrors PimSendManyOp::parse.
// targetCoreIds may come from the positional "to" list or the attr-dict, but
// not both.
ParseResult PimSendManyBatchOp::parse(OpAsmParser& parser, OperationState& result) {
SmallVector<OpAsmParser::UnresolvedOperand> inputs;
SmallVector<Type> inputTypes;
SmallVector<int32_t> targetCoreIds;
if (parseCompressedOperandSequence(parser, inputs) || parseOptionalCoreIdList(parser, "to", targetCoreIds)
|| parser.parseOptionalAttrDict(result.attributes) || parser.parseColon()
|| parseCompressedTypeSequence(parser, inputTypes, /*allowEmpty=*/false))
return failure();
if (inputs.size() != inputTypes.size())
return parser.emitError(parser.getCurrentLocation(), "number of inputs and input types must match");
if (!targetCoreIds.empty() && result.attributes.get("targetCoreIds"))
return parser.emitError(parser.getCurrentLocation(),
"targetCoreIds cannot be specified both positionally and in attr-dict");
if (!targetCoreIds.empty())
result.addAttribute("targetCoreIds", getDenseI32ArrayAttr(parser, targetCoreIds));
return parser.resolveOperands(inputs, inputTypes, parser.getCurrentLocation(), result.operands);
}
// Custom printer: " from <core ids> into (<buffers>) attr-dict : <types>".
// Only the result types are printed; the parser reuses them for the buffers,
// which the verifier requires to match result types exactly. sourceCoreIds is
// elided from the attr-dict to avoid double printing.
void PimReceiveManyOp::print(OpAsmPrinter& printer) {
printCoreIdList(printer, "from", getSourceCoreIds());
printer << " into ";
printOpenDelimiter(printer, ListDelimiter::Paren);
printCompressedValueSequence(printer, getOutputBuffers());
printCloseDelimiter(printer, ListDelimiter::Paren);
printer.printOptionalAttrDict((*this)->getAttrs(), {getSourceCoreIdsAttrName().getValue()});
printer << " : ";
printCompressedTypeSequence(printer, getOutputs().getTypes());
}
// Custom parser, inverse of PimReceiveManyOp::print. The single printed type
// list is used both to resolve the buffer operands and as the result types
// (destination-style: each result aliases its buffer's type). sourceCoreIds
// may come from the positional "from" list or the attr-dict, but not both.
ParseResult PimReceiveManyOp::parse(OpAsmParser& parser, OperationState& result) {
SmallVector<OpAsmParser::UnresolvedOperand> outputBuffers;
SmallVector<Type> outputTypes;
SmallVector<int32_t> sourceCoreIds;
if (parseOptionalCoreIdList(parser, "from", sourceCoreIds) || parser.parseKeyword("into") || parser.parseLParen()
|| parseCompressedOperandSequence(parser, outputBuffers) || parser.parseRParen()
|| parser.parseOptionalAttrDict(result.attributes) || parser.parseColon()
|| parseCompressedTypeSequence(parser, outputTypes, /*allowEmpty=*/false))
return failure();
if (outputBuffers.size() != outputTypes.size())
return parser.emitError(parser.getCurrentLocation(), "number of output buffers and output types must match");
if (!sourceCoreIds.empty() && result.attributes.get("sourceCoreIds"))
return parser.emitError(parser.getCurrentLocation(),
"sourceCoreIds cannot be specified both positionally and in attr-dict");
if (!sourceCoreIds.empty())
result.addAttribute("sourceCoreIds", getDenseI32ArrayAttr(parser, sourceCoreIds))
;
if (parser.resolveOperands(outputBuffers, outputTypes, parser.getCurrentLocation(), result.operands))
return failure();
// Results take the same types as their destination buffers.
result.addTypes(outputTypes);
return success();
}
// Custom printer: " from <core ids> into (<buffer>) attr-dict
//   : <buffer type> -> <result type>". sourceCoreIds is elided from the
// attr-dict to avoid double printing.
void PimReceiveBatchOp::print(OpAsmPrinter& printer) {
printCoreIdList(printer, "from", getSourceCoreIds());
printer << " into ";
printOpenDelimiter(printer, ListDelimiter::Paren);
printer.printOperand(getOutputBuffer());
printCloseDelimiter(printer, ListDelimiter::Paren);
printer.printOptionalAttrDict((*this)->getAttrs(), {getSourceCoreIdsAttrName().getValue()});
printer << " : ";
printer.printType(getOutputBuffer().getType());
printer << " -> ";
printer.printType(getOutput().getType());
}
// Custom parser, inverse of PimReceiveBatchOp::print. Unlike the "many"
// variants, buffer and result types are spelled separately ("T -> U").
// sourceCoreIds may come from the positional "from" list or the attr-dict,
// but not both.
ParseResult PimReceiveBatchOp::parse(OpAsmParser& parser, OperationState& result) {
OpAsmParser::UnresolvedOperand outputBuffer;
Type outputBufferType;
Type outputType;
SmallVector<int32_t> sourceCoreIds;
if (parseOptionalCoreIdList(parser, "from", sourceCoreIds) || parser.parseKeyword("into") || parser.parseLParen()
|| parser.parseOperand(outputBuffer) || parser.parseRParen() || parser.parseOptionalAttrDict(result.attributes)
|| parser.parseColon() || parser.parseType(outputBufferType) || parser.parseArrow()
|| parser.parseType(outputType))
return failure();
if (!sourceCoreIds.empty() && result.attributes.get("sourceCoreIds"))
return parser.emitError(parser.getCurrentLocation(),
"sourceCoreIds cannot be specified both positionally and in attr-dict");
if (!sourceCoreIds.empty())
result.addAttribute("sourceCoreIds", getDenseI32ArrayAttr(parser, sourceCoreIds));
if (parser.resolveOperand(outputBuffer, outputBufferType, result.operands))
return failure();
result.addTypes(outputType);
return success();
}
// Custom printer for pim.receive_many_batch; same shape as
// PimReceiveManyOp::print: " from <core ids> into (<buffers>) attr-dict
//   : <result types>", with sourceCoreIds elided from the attr-dict.
void PimReceiveManyBatchOp::print(OpAsmPrinter& printer) {
printCoreIdList(printer, "from", getSourceCoreIds());
printer << " into ";
printOpenDelimiter(printer, ListDelimiter::Paren);
printCompressedValueSequence(printer, getOutputBuffers());
printCloseDelimiter(printer, ListDelimiter::Paren);
printer.printOptionalAttrDict((*this)->getAttrs(), {getSourceCoreIdsAttrName().getValue()});
printer << " : ";
printCompressedTypeSequence(printer, getOutputs().getTypes());
}
// Custom parser for pim.receive_many_batch; mirrors PimReceiveManyOp::parse:
// the printed type list serves both as buffer operand types and result types.
// sourceCoreIds may come from the positional "from" list or the attr-dict,
// but not both.
ParseResult PimReceiveManyBatchOp::parse(OpAsmParser& parser, OperationState& result) {
SmallVector<OpAsmParser::UnresolvedOperand> outputBuffers;
SmallVector<Type> outputTypes;
SmallVector<int32_t> sourceCoreIds;
if (parseOptionalCoreIdList(parser, "from", sourceCoreIds) || parser.parseKeyword("into") || parser.parseLParen()
|| parseCompressedOperandSequence(parser, outputBuffers) || parser.parseRParen()
|| parser.parseOptionalAttrDict(result.attributes) || parser.parseColon()
|| parseCompressedTypeSequence(parser, outputTypes, /*allowEmpty=*/false))
return failure();
if (outputBuffers.size() != outputTypes.size())
return parser.emitError(parser.getCurrentLocation(), "number of output buffers and output types must match");
if (!sourceCoreIds.empty() && result.attributes.get("sourceCoreIds"))
return parser.emitError(parser.getCurrentLocation(),
"sourceCoreIds cannot be specified both positionally and in attr-dict");
if (!sourceCoreIds.empty())
result.addAttribute("sourceCoreIds", getDenseI32ArrayAttr(parser, sourceCoreIds));
if (parser.resolveOperands(outputBuffers, outputTypes, parser.getCurrentLocation(), result.operands))
return failure();
// Results take the same types as their destination buffers.
result.addTypes(outputTypes);
return success();
}
// Custom printer: " <input> into (<buffers>) attr-dict
//   : <input type> -> <result types>".
void PimExtractRowsOp::print(OpAsmPrinter& printer) {
printer << " ";
printer.printOperand(getInput());
printer << " into ";
printOpenDelimiter(printer, ListDelimiter::Paren);
printCompressedValueSequence(printer, getOutputBuffers());
printCloseDelimiter(printer, ListDelimiter::Paren);
printer.printOptionalAttrDict((*this)->getAttrs());
printer << " : ";
printer.printType(getInput().getType());
printer << " -> ";
printCompressedTypeSequence(printer, getOutputs().getTypes());
}
// Custom parser, inverse of PimExtractRowsOp::print. The printed result type
// list doubles as the buffer operand types (destination-style: one result per
// buffer, same type).
ParseResult PimExtractRowsOp::parse(OpAsmParser& parser, OperationState& result) {
OpAsmParser::UnresolvedOperand input;
SmallVector<OpAsmParser::UnresolvedOperand> outputBuffers;
Type inputType;
SmallVector<Type> outputTypes;
if (parser.parseOperand(input) || parser.parseKeyword("into") || parser.parseLParen()
|| parseCompressedOperandSequence(parser, outputBuffers) || parser.parseRParen()
|| parser.parseOptionalAttrDict(result.attributes) || parser.parseColon() || parser.parseType(inputType)
|| parser.parseArrow() || parseCompressedTypeSequence(parser, outputTypes, /*allowEmpty=*/false))
return failure();
if (outputBuffers.size() != outputTypes.size())
return parser.emitError(parser.getCurrentLocation(), "number of output buffers and output types must match");
// Operand order must match the ODS declaration: input first, then buffers.
if (parser.resolveOperand(input, inputType, result.operands)
|| parser.resolveOperands(outputBuffers, outputTypes, parser.getCurrentLocation(), result.operands))
return failure();
result.addTypes(outputTypes);
return success();
}
// Custom printer: " axis <N> <inputs> into <buffer> attr-dict
//   : (<input types>) -> <output type>". The axis attribute gets the
// positional spelling and is elided from the attr-dict.
void PimConcatOp::print(OpAsmPrinter& printer) {
printer << " axis " << getAxis() << " ";
printCompressedValueSequence(printer, getInputs());
printer << " into ";
printer.printOperand(getOutputBuffer());
printer.printOptionalAttrDict((*this)->getAttrs(), {getAxisAttrName().getValue()});
printer << " : ";
printOpenDelimiter(printer, ListDelimiter::Paren);
printCompressedTypeSequence(printer, TypeRange(getInputs()));
printCloseDelimiter(printer, ListDelimiter::Paren);
printer << " -> ";
printer.printType(getOutput().getType());
}
// Custom parser, inverse of PimConcatOp::print. The axis is mandatory in the
// positional form, so an additional "axis" entry in the attr-dict is always a
// duplicate and rejected. The output buffer is resolved against the result
// type (destination-style).
ParseResult PimConcatOp::parse(OpAsmParser& parser, OperationState& result) {
int64_t axis = 0;
SmallVector<OpAsmParser::UnresolvedOperand> inputs;
OpAsmParser::UnresolvedOperand outputBuffer;
SmallVector<Type> inputTypes;
Type outputType;
if (parser.parseKeyword("axis") || parser.parseInteger(axis) || parseCompressedOperandSequence(parser, inputs)
|| parser.parseKeyword("into") || parser.parseOperand(outputBuffer)
|| parser.parseOptionalAttrDict(result.attributes) || parser.parseColon() || parser.parseLParen()
|| parseCompressedTypeSequence(parser, inputTypes, /*allowEmpty=*/false) || parser.parseRParen()
|| parser.parseArrow() || parser.parseType(outputType))
return failure();
if (inputs.size() != inputTypes.size())
return parser.emitError(parser.getCurrentLocation(), "number of inputs and input types must match");
if (result.attributes.get("axis"))
return parser.emitError(parser.getCurrentLocation(), "axis cannot be specified both positionally and in attr-dict");
result.addAttribute("axis", parser.getBuilder().getI64IntegerAttr(axis));
// Operand order must match the ODS declaration: inputs first, then buffer.
if (parser.resolveOperands(inputs, inputTypes, parser.getCurrentLocation(), result.operands)
|| parser.resolveOperand(outputBuffer, outputType, result.operands))
return failure();
result.addTypes(outputType);
return success();
}
} // namespace pim
} // namespace onnx_mlir
+268
View File
@@ -0,0 +1,268 @@
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/TypeUtilities.h"
#include "llvm/Support/LogicalResult.h"
#include "src/Accelerators/PIM/Dialect/Pim/PimOps.hpp"
using namespace mlir;
namespace onnx_mlir {
namespace pim {
namespace {
// Checks that a "many" communication op carries exactly one core id per value.
static LogicalResult verifyManyCommunicationSizes(Operation* op, ArrayRef<int32_t> coreIds, size_t valueCount) {
if (coreIds.size() != valueCount)
return op->emitError("core id metadata length must match the number of values");
return success();
}
// True iff both types are ranked tensors or both are memrefs (no mixing of
// shaped container kinds).
static bool haveSameShapedContainerKind(Type lhs, Type rhs) {
return (isa<RankedTensorType>(lhs) && isa<RankedTensorType>(rhs)) || (isa<MemRefType>(lhs) && isa<MemRefType>(rhs));
}
// Requires lhs and rhs to be shaped types of the same container kind with
// identical element type and shape; emits `message` on any mismatch.
static LogicalResult verifyCompatibleShapedTypes(Operation* op, Type lhs, Type rhs, StringRef message) {
auto lhsShaped = dyn_cast<ShapedType>(lhs);
auto rhsShaped = dyn_cast<ShapedType>(rhs);
if (!lhsShaped || !rhsShaped || !haveSameShapedContainerKind(lhs, rhs))
return op->emitError(message);
if (lhsShaped.getElementType() != rhsShaped.getElementType() || lhsShaped.getShape() != rhsShaped.getShape())
return op->emitError(message);
return success();
}
// Checks a non-empty type list where all entries must agree. Exactly-equal
// types pass immediately; otherwise both must be shaped types of the same
// container kind with matching shape and element type. `kind` prefixes the
// diagnostics (e.g. "send_many").
static LogicalResult verifyManyCommunicationTypes(Operation* op, TypeRange types, StringRef kind) {
if (types.empty())
return op->emitError() << kind << " must carry at least one value";
Type firstType = types.front();
auto firstShapedType = dyn_cast<ShapedType>(firstType);
bool firstIsTensor = isa<RankedTensorType>(firstType);
bool firstIsMemRef = isa<MemRefType>(firstType);
for (Type type : types.drop_front())
if (type != firstType) {
auto shapedType = dyn_cast<ShapedType>(type);
if (!firstShapedType || !shapedType)
return op->emitError() << kind << " values must all have the same type";
if (firstIsTensor != isa<RankedTensorType>(type) || firstIsMemRef != isa<MemRefType>(type))
return op->emitError() << kind << " values must all use the same shaped container kind";
if (firstShapedType.getElementType() != shapedType.getElementType() || firstShapedType.getShape() != shapedType.getShape())
return op->emitError() << kind << " values must all have the same shape and element type";
}
return success();
}
// Returns the laneCount of the nearest enclosing pim.core_batch, or failure
// when the op is not nested inside one.
static FailureOr<int32_t> getParentBatchLaneCount(Operation* op) {
auto coreBatchOp = op->getParentOfType<PimCoreBatchOp>();
if (!coreBatchOp)
return failure();
return coreBatchOp.getLaneCount();
}
// Batched variant of verifyManyCommunicationSizes: the op must sit inside a
// pim.core_batch, and the core id list must carry one entry per value per lane
// (valueCount * laneCount total).
static LogicalResult verifyManyBatchCommunicationSizes(Operation* op,
ArrayRef<int32_t> coreIds,
size_t valueCount) {
auto laneCount = getParentBatchLaneCount(op);
if (failed(laneCount))
return op->emitError("must be nested inside pim.core_batch");
if (coreIds.size() != valueCount * static_cast<size_t>(*laneCount))
return op->emitError("core id metadata length must match the number of values times parent laneCount");
return success();
}
} // namespace
// Verifier for pim.empty_many: there must be at least one result, the first
// result must be a ranked tensor type, and every other result must have
// exactly that same type.
LogicalResult PimEmptyManyOp::verify() {
  auto outputs = getOutputs();
  if (outputs.empty())
    return emitError("must produce at least one output");
  Type expectedType = outputs.front().getType();
  if (!isa<RankedTensorType>(expectedType))
    return emitError("outputs must all be ranked tensor types");
  for (Value candidate : outputs.drop_front()) {
    if (candidate.getType() != expectedType)
      return emitError("outputs must all have the same type");
  }
  return success();
}
// Verifier for pim.map: at least one input; outputs pair 1:1 with inputs; all
// inputs share one type and all outputs share another; the body has exactly
// one block argument of the input type and terminates with a single-operand
// pim.yield of the output type.
// NOTE(review): the single-block-argument requirement conflicts with
// PimMapOp::parse, which creates one region argument per input binding —
// confirm which contract is intended for multi-input maps.
LogicalResult PimMapOp::verify() {
if (getInputs().empty())
return emitError("requires at least one input");
if (getOutputs().size() != getInputs().size())
return emitError("number of outputs must match number of inputs");
Type inputType = getInputs().front().getType();
for (Value input : getInputs().drop_front())
if (input.getType() != inputType)
return emitError("all inputs must have the same type");
Type outputType = getOutputs().front().getType();
for (Value output : getOutputs().drop_front())
if (output.getType() != outputType)
return emitError("all outputs must have the same type");
Block& block = getBody().front();
if (block.getNumArguments() != 1)
return emitError("body must have exactly one block argument");
if (block.getArgument(0).getType() != inputType)
return emitError("body block argument type must match input type");
// dyn_cast_or_null: getTerminator() may be null for a malformed block.
auto yieldOp = dyn_cast_or_null<PimYieldOp>(block.getTerminator());
if (!yieldOp)
return emitError("body must terminate with pim.yield");
if (yieldOp.getNumOperands() != 1)
return emitError("body yield must produce exactly one value");
if (yieldOp.getOperand(0).getType() != outputType)
return emitError("body yield type must match output type");
return success();
}
// Verifier for pim.send_many: one target core id per input, and all inputs
// must agree in type (or at least in container kind, shape, and element type).
LogicalResult PimSendManyOp::verify() {
  Operation* op = getOperation();
  if (failed(verifyManyCommunicationSizes(op, getTargetCoreIds(), getInputs().size())))
    return failure();
  return verifyManyCommunicationTypes(op, getInputs().getTypes(), "send_many");
}
// Verifier for pim.send_many_batch: the op must be nested inside a
// pim.core_batch, the target core id list must carry inputs * laneCount
// entries, and all inputs must agree in type.
LogicalResult PimSendManyBatchOp::verify() {
  Operation* op = getOperation();
  if (failed(verifyManyBatchCommunicationSizes(op, getTargetCoreIds(), getInputs().size())))
    return failure();
  return verifyManyCommunicationTypes(op, getInputs().getTypes(), "send_many_batch");
}
// Verifier for pim.receive_many: buffers pair 1:1 with results, one source
// core id per result, buffers and results each form a homogeneous type list,
// and each result's type exactly matches its buffer's type.
LogicalResult PimReceiveManyOp::verify() {
if (getOutputBuffers().size() != getOutputs().size())
return emitError("number of output buffers must match the number of outputs");
if (failed(verifyManyCommunicationSizes(getOperation(), getSourceCoreIds(), getOutputs().size())))
return failure();
if (failed(verifyManyCommunicationTypes(getOperation(), getOutputBuffers().getTypes(), "receive_many")))
return failure();
if (failed(verifyManyCommunicationTypes(getOperation(), getOperation()->getResultTypes(), "receive_many")))
return failure();
// Destination-style: each result must alias its buffer's exact type.
for (auto [outputBuffer, output] : llvm::zip(getOutputBuffers(), getOutputs()))
if (outputBuffer.getType() != output.getType())
return emitError("output buffers and outputs must have matching types");
return success();
}
// Verifier for pim.receive_many_batch: like PimReceiveManyOp::verify, but the
// op must additionally be nested inside pim.core_batch, and the source core id
// list must carry results * laneCount entries.
LogicalResult PimReceiveManyBatchOp::verify() {
if (getOutputBuffers().size() != getOutputs().size())
return emitError("number of output buffers must match the number of outputs");
if (failed(verifyManyBatchCommunicationSizes(getOperation(), getSourceCoreIds(), getOutputs().size())))
return failure();
if (failed(verifyManyCommunicationTypes(getOperation(), getOutputBuffers().getTypes(), "receive_many_batch")))
return failure();
if (failed(verifyManyCommunicationTypes(getOperation(), getOperation()->getResultTypes(), "receive_many_batch")))
return failure();
// Destination-style: each result must alias its buffer's exact type.
for (auto [outputBuffer, output] : llvm::zip(getOutputBuffers(), getOutputs()))
if (outputBuffer.getType() != output.getType())
return emitError("output buffers and outputs must have matching types");
return success();
}
// Verifier for pim.extract_rows: the input must be a rank-2 shaped type; when
// its row count is static there must be exactly one output per row; every
// output is a 1 x <cols> shaped type of the same container kind and element
// type as the input, matching its destination buffer.
LogicalResult PimExtractRowsOp::verify() {
if (getOutputBuffers().size() != getOutputs().size())
return emitError("number of output buffers must match the number of outputs");
auto inputType = dyn_cast<ShapedType>(getInput().getType());
if (!inputType || !inputType.hasRank() || inputType.getRank() != 2)
return emitError("input must be a rank-2 shaped type");
// Dynamic dimensions are negative; size checks below are skipped for them.
int64_t numRows = inputType.getShape()[0];
int64_t numCols = inputType.getShape()[1];
Type elementType = inputType.getElementType();
if (numRows >= 0 && static_cast<int64_t>(getOutputs().size()) != numRows)
return emitError("number of outputs must match the number of input rows");
for (auto [outputBuffer, output] : llvm::zip(getOutputBuffers(), getOutputs())) {
// Destination-style: buffer and result must agree in kind/shape/element type.
if (failed(verifyCompatibleShapedTypes(
getOperation(), outputBuffer.getType(), output.getType(), "output buffers and outputs must match")))
return failure();
auto outputType = dyn_cast<ShapedType>(output.getType());
if (!outputType || !outputType.hasRank() || outputType.getRank() != 2)
return emitError("outputs must all be rank-2 shaped types");
if (!haveSameShapedContainerKind(getInput().getType(), output.getType()))
return emitError("outputs must use the same shaped container kind as the input");
if (outputType.getElementType() != elementType)
return emitError("output element types must match input element type");
auto outputShape = outputType.getShape();
// Rank-2 was checked above, so indexing [0] and [1] is safe here.
if (outputShape[0] != 1)
return emitError("each output must have exactly one row");
if (numCols >= 0 && outputShape[1] != numCols)
return emitError("output column count must match input column count");
}
return success();
}
// Verifier for pim.concat: all inputs must share the output's rank and
// element type, non-concatenated dimensions must match the output wherever
// both extents are static, and — when everything along `axis` is static —
// the output's extent along `axis` must equal the sum of the input extents.
LogicalResult PimConcatOp::verify() {
  if (getInputs().empty())
    return emitError("requires at least one input");
  // The destination-style output buffer must be shape-compatible with the
  // produced result.
  if (failed(verifyCompatibleShapedTypes(
          getOperation(), getOutputBuffer().getType(), getOutput().getType(), "output buffer and output must match")))
    return failure();
  auto outputType = dyn_cast<ShapedType>(getOutput().getType());
  if (!outputType || !outputType.hasRank())
    return emitError("output must be a ranked shaped type");
  int64_t axis = getAxis();
  int64_t rank = outputType.getRank();
  if (axis < 0 || axis >= rank)
    return emitError("axis must be within the output rank");
  // Running sum of static input extents along `axis`; once any input is
  // dynamic along `axis` the final sum check is skipped entirely.
  int64_t concatenatedDimSize = 0;
  bool concatenatedDimDynamic = false;
  Type outputElementType = outputType.getElementType();
  for (Value input : getInputs()) {
    auto inputType = dyn_cast<ShapedType>(input.getType());
    if (!inputType || !inputType.hasRank())
      return emitError("inputs must be ranked shaped types");
    if (!haveSameShapedContainerKind(input.getType(), getOutput().getType()))
      return emitError("inputs and output must use the same shaped container kind");
    if (inputType.getRank() != rank)
      return emitError("all inputs must have the same rank as the output");
    if (inputType.getElementType() != outputElementType)
      return emitError("all inputs must have the same element type as the output");
    // Every dimension other than `axis` must agree with the output when both
    // extents are static; dynamic extents are never rejected here.
    for (int64_t dim = 0; dim < rank; ++dim) {
      if (dim == axis)
        continue;
      int64_t inputDim = inputType.getDimSize(dim);
      int64_t outputDim = outputType.getDimSize(dim);
      if (!ShapedType::isDynamic(inputDim) && !ShapedType::isDynamic(outputDim) && inputDim != outputDim)
        return emitError("non-concatenated dimensions must match the output shape");
    }
    int64_t inputConcatDim = inputType.getDimSize(axis);
    if (ShapedType::isDynamic(inputConcatDim)) {
      concatenatedDimDynamic = true;
      continue;
    }
    concatenatedDimSize += inputConcatDim;
  }
  int64_t outputConcatDim = outputType.getDimSize(axis);
  if (!concatenatedDimDynamic && !ShapedType::isDynamic(outputConcatDim) && concatenatedDimSize != outputConcatDim)
    return emitError("output concatenated dimension must equal the sum of input sizes");
  return success();
}
} // namespace pim
} // namespace onnx_mlir
@@ -173,6 +173,235 @@ struct ReceiveBatchOpInterface : DstBufferizableOpInterfaceExternalModel<Receive
}
};
// One-shot-bufferization model for pim.receive_many. Destination-style: the
// init (output-buffer) operands are not reads; everything else is.
struct ReceiveManyOpInterface : DstBufferizableOpInterfaceExternalModel<ReceiveManyOpInterface, PimReceiveManyOp> {
  bool bufferizesToMemoryRead(Operation* op, OpOperand& opOperand, const AnalysisState& state) const {
    return !cast<DestinationStyleOpInterface>(op).isDpsInit(&opOperand);
  }
  LogicalResult bufferize(Operation* op,
      RewriterBase& rewriter,
      const BufferizationOptions& options,
      BufferizationState& state) const {
    auto receiveOp = cast<PimReceiveManyOp>(op);
    const size_t numDests = receiveOp.getOutputBuffers().size();
    // Resolve each tensor destination to its backing buffer; the buffer types
    // become the result types of the memref-level op.
    SmallVector<Value> bufferOperands;
    SmallVector<Type> bufferResultTypes;
    bufferOperands.reserve(numDests);
    bufferResultTypes.reserve(numDests);
    for (Value tensorDest : receiveOp.getOutputBuffers()) {
      auto buffer = getBuffer(rewriter, tensorDest, options, state);
      if (failed(buffer))
        return failure();
      bufferOperands.push_back(*buffer);
      bufferResultTypes.push_back(buffer->getType());
    }
    auto bufferOp = PimReceiveManyOp::create(
        rewriter, receiveOp.getLoc(), TypeRange(bufferResultTypes), ValueRange(bufferOperands), receiveOp.getSourceCoreIdsAttr());
    // Re-expose each memref result as a tensor so existing users keep seeing
    // tensor SSA values.
    SmallVector<Value> tensorReplacements;
    tensorReplacements.reserve(numDests);
    for (auto [memrefResult, tensorResult] : llvm::zip(bufferOp.getOutputs(), receiveOp.getOutputs())) {
      auto tensorType = cast<RankedTensorType>(tensorResult.getType());
      auto toTensor = bufferization::ToTensorOp::create(
          rewriter, receiveOp.getLoc(), tensorType, memrefResult, UnitAttr(), UnitAttr());
      tensorReplacements.push_back(toTensor.getResult());
    }
    rewriter.replaceOp(receiveOp, tensorReplacements);
    return success();
  }
};
// One-shot-bufferization model for pim.receive_many_batch. Mirrors the
// receive_many model: init operands are pure destinations, not reads.
struct ReceiveManyBatchOpInterface
    : DstBufferizableOpInterfaceExternalModel<ReceiveManyBatchOpInterface, PimReceiveManyBatchOp> {
  bool bufferizesToMemoryRead(Operation* op, OpOperand& opOperand, const AnalysisState& state) const {
    return !cast<DestinationStyleOpInterface>(op).isDpsInit(&opOperand);
  }
  LogicalResult bufferize(Operation* op,
      RewriterBase& rewriter,
      const BufferizationOptions& options,
      BufferizationState& state) const {
    auto receiveOp = cast<PimReceiveManyBatchOp>(op);
    const size_t destCount = receiveOp.getOutputBuffers().size();
    SmallVector<Value> destBuffers;
    SmallVector<Type> memrefResultTypes;
    destBuffers.reserve(destCount);
    memrefResultTypes.reserve(destCount);
    // Each tensor destination is replaced by its buffer; buffer types double
    // as the new op's result types.
    for (Value dest : receiveOp.getOutputBuffers()) {
      auto destBuffer = getBuffer(rewriter, dest, options, state);
      if (failed(destBuffer))
        return failure();
      destBuffers.push_back(*destBuffer);
      memrefResultTypes.push_back(destBuffer->getType());
    }
    auto bufferOp = PimReceiveManyBatchOp::create(rewriter,
        receiveOp.getLoc(),
        TypeRange(memrefResultTypes),
        ValueRange(destBuffers),
        receiveOp.getSourceCoreIdsAttr());
    // Wrap the memref results back into tensors for the op's current users.
    SmallVector<Value> tensorReplacements;
    tensorReplacements.reserve(destCount);
    for (auto [memrefResult, tensorResult] : llvm::zip(bufferOp.getOutputs(), receiveOp.getOutputs())) {
      auto tensorType = cast<RankedTensorType>(tensorResult.getType());
      tensorReplacements.push_back(bufferization::ToTensorOp::create(
          rewriter, receiveOp.getLoc(), tensorType, memrefResult, UnitAttr(), UnitAttr())
          .getResult());
    }
    rewriter.replaceOp(receiveOp, tensorReplacements);
    return success();
  }
};
// One-shot-bufferization model for pim.extract_rows. Destination-style: the
// row buffers are inits (not reads); the rank-2 source is read.
struct ExtractRowsOpInterface : DstBufferizableOpInterfaceExternalModel<ExtractRowsOpInterface, PimExtractRowsOp> {
  bool bufferizesToMemoryRead(Operation* op, OpOperand& opOperand, const AnalysisState& state) const {
    return !cast<DestinationStyleOpInterface>(op).isDpsInit(&opOperand);
  }
  LogicalResult bufferize(Operation* op,
      RewriterBase& rewriter,
      const BufferizationOptions& options,
      BufferizationState& state) const {
    auto extractRowsOp = cast<PimExtractRowsOp>(op);
    // Buffer backing the source tensor.
    auto sourceBuffer = getBuffer(rewriter, extractRowsOp.getInput(), options, state);
    if (failed(sourceBuffer))
      return failure();
    // One destination buffer per extracted row.
    const size_t rowCount = extractRowsOp.getOutputBuffers().size();
    SmallVector<Value> rowBuffers;
    SmallVector<Type> rowBufferTypes;
    rowBuffers.reserve(rowCount);
    rowBufferTypes.reserve(rowCount);
    for (Value rowDest : extractRowsOp.getOutputBuffers()) {
      auto rowBuffer = getBuffer(rewriter, rowDest, options, state);
      if (failed(rowBuffer))
        return failure();
      rowBuffers.push_back(*rowBuffer);
      rowBufferTypes.push_back(rowBuffer->getType());
    }
    // The source must be contiguous at the memref level.
    auto bufferOp = PimExtractRowsOp::create(rewriter,
        extractRowsOp.getLoc(),
        TypeRange(rowBufferTypes),
        materializeContiguousMemRef(*sourceBuffer, op->getLoc(), rewriter),
        ValueRange(rowBuffers));
    rewriter.replaceOp(extractRowsOp, bufferOp.getOutputs());
    return success();
  }
};
// One-shot-bufferization model for pim.concat. Destination-style: every
// input is read; the output buffer is an init, not a read.
struct ConcatOpInterface : DstBufferizableOpInterfaceExternalModel<ConcatOpInterface, PimConcatOp> {
  bool bufferizesToMemoryRead(Operation* op, OpOperand& opOperand, const AnalysisState& state) const {
    return !cast<DestinationStyleOpInterface>(op).isDpsInit(&opOperand);
  }
  LogicalResult bufferize(Operation* op,
      RewriterBase& rewriter,
      const BufferizationOptions& options,
      BufferizationState& state) const {
    auto concatOp = cast<PimConcatOp>(op);
    // Every tensor input becomes a contiguous memref operand.
    SmallVector<Value> inputBuffers;
    inputBuffers.reserve(concatOp.getInputs().size());
    for (Value tensorInput : concatOp.getInputs()) {
      auto inputBuffer = getBuffer(rewriter, tensorInput, options, state);
      if (failed(inputBuffer))
        return failure();
      inputBuffers.push_back(materializeContiguousMemRef(*inputBuffer, op->getLoc(), rewriter));
    }
    auto destBuffer = getBuffer(rewriter, concatOp.getOutputBuffer(), options, state);
    if (failed(destBuffer))
      return failure();
    // The memref-level concat carries the same axis attribute and returns the
    // destination buffer's type.
    replaceOpWithNewBufferizedOp<PimConcatOp>(
        rewriter, op, destBuffer->getType(), concatOp.getAxisAttr(), ValueRange(inputBuffers), *destBuffer);
    return success();
  }
};
// One-shot-bufferization model for pim.map: every operand is read, nothing is
// written in place, and results alias no operands (bufferize() produces fresh
// memref result types derived from the tensor output shapes).
struct MapOpInterface : BufferizableOpInterface::ExternalModel<MapOpInterface, PimMapOp> {
  // All operands are reads.
  bool bufferizesToMemoryRead(Operation* op, OpOperand& opOperand, const AnalysisState& state) const { return true; }
  // No operand is written through by this op.
  bool bufferizesToMemoryWrite(Operation* op, OpOperand& opOperand, const AnalysisState& state) const { return false; }
  // No op result aliases any operand.
  AliasingValueList getAliasingValues(Operation* op, OpOperand& opOperand, const AnalysisState& state) const {
    return {};
  }
  // Only the body's leading block argument aliases anything: it is reported
  // as equivalent to the first input operand (when inputs exist).
  AliasingOpOperandList getAliasingOpOperands(Operation* op, Value value, const AnalysisState& state) const {
    auto mapOp = cast<PimMapOp>(op);
    auto bbArg = dyn_cast<BlockArgument>(value);
    if (!bbArg || bbArg.getOwner() != &mapOp.getBody().front() || bbArg.getArgNumber() != 0 || mapOp.getInputs().empty())
      return {};
    return {{&mapOp->getOpOperand(0), BufferRelation::Equivalent}};
  }
  // Values owned by this op are never writable in place.
  bool isWritable(Operation* op, Value value, const AnalysisState& state) const { return false; }
  // Buffer type of the body's leading block argument: reuse the first input's
  // type if it is already buffer-like, otherwise build a default memref from
  // its shaped type. Any other value has no buffer type here.
  FailureOr<BufferLikeType>
  getBufferType(Operation* op,
      Value value,
      const BufferizationOptions& options,
      const BufferizationState& state,
      SmallVector<Value>& invocationStack) const {
    auto mapOp = cast<PimMapOp>(op);
    auto bbArg = dyn_cast<BlockArgument>(value);
    if (!bbArg || bbArg.getOwner() != &mapOp.getBody().front() || bbArg.getArgNumber() != 0 || mapOp.getInputs().empty())
      return failure();
    auto inputType = dyn_cast<BufferLikeType>(mapOp.getInputs().front().getType());
    if (inputType)
      return inputType;
    auto shapedType = cast<ShapedType>(mapOp.getInputs().front().getType());
    return BufferLikeType(MemRefType::get(shapedType.getShape(), shapedType.getElementType()));
  }
  // Lowers the tensor-level map to a memref-level map: tensor inputs are
  // swapped for their buffers (non-tensor operands pass through unchanged),
  // result types become default memrefs of the output shapes, the body region
  // is moved onto the new op, and each block signature is bufferized in turn.
  LogicalResult bufferize(Operation* op,
      RewriterBase& rewriter,
      const BufferizationOptions& options,
      BufferizationState& state) const {
    auto mapOp = cast<PimMapOp>(op);
    SmallVector<Value> inputs;
    SmallVector<Type> resultTypes;
    inputs.reserve(mapOp.getInputs().size());
    resultTypes.reserve(mapOp.getOutputs().size());
    for (Value input : mapOp.getInputs()) {
      if (isa<TensorType>(input.getType())) {
        auto inputOpt = getBuffer(rewriter, input, options, state);
        if (failed(inputOpt))
          return failure();
        inputs.push_back(*inputOpt);
      }
      else {
        inputs.push_back(input);
      }
    }
    for (Value output : mapOp.getOutputs()) {
      auto shapedType = cast<ShapedType>(output.getType());
      resultTypes.push_back(MemRefType::get(shapedType.getShape(), shapedType.getElementType()));
    }
    rewriter.setInsertionPoint(mapOp);
    auto newOp = PimMapOp::create(rewriter, mapOp.getLoc(), TypeRange(resultTypes), ValueRange(inputs));
    // Move the region first, then rewrite its block arguments to buffer
    // types so the inlined body matches the new memref operands.
    rewriter.inlineRegionBefore(mapOp.getBody(), newOp.getBody(), newOp.getBody().begin());
    for (Block& block : newOp.getBody())
      if (failed(bufferization::bufferizeBlockSignature(&block, rewriter, options, state)))
        return failure();
    rewriter.replaceOp(mapOp, newOp.getOutputs());
    return success();
  }
};
struct CoreBatchOpInterface : BufferizableOpInterface::ExternalModel<CoreBatchOpInterface, PimCoreBatchOp> {
bool bufferizesToMemoryRead(Operation* op, OpOperand& opOperand, const AnalysisState& state) const {
return true;
@@ -435,9 +664,14 @@ struct UnaryDstOpInterface : DstBufferizableOpInterfaceExternalModel<UnaryDstOpI
void registerOpBufferizationInterfaces(DialectRegistry& registry) {
registry.addExtension(+[](MLIRContext* ctx, PimDialect* dialect) {
PimMapOp::attachInterface<MapOpInterface>(*ctx);
PimCoreBatchOp::attachInterface<CoreBatchOpInterface>(*ctx);
PimReceiveOp::attachInterface<ReceiveOpInterface>(*ctx);
PimReceiveManyOp::attachInterface<ReceiveManyOpInterface>(*ctx);
PimReceiveBatchOp::attachInterface<ReceiveBatchOpInterface>(*ctx);
PimReceiveManyBatchOp::attachInterface<ReceiveManyBatchOpInterface>(*ctx);
PimExtractRowsOp::attachInterface<ExtractRowsOpInterface>(*ctx);
PimConcatOp::attachInterface<ConcatOpInterface>(*ctx);
PimMemCopyHostToDevOp::attachInterface<MemCopyHostToDevOpInterface>(*ctx);
PimMemCopyHostToDevBatchOp::attachInterface<MemCopyHostToDevBatchOpInterface>(*ctx);
PimMemCopyDevToHostOp::attachInterface<MemCopyDevToHostOpInterface>(*ctx);
@@ -3,7 +3,9 @@
#include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Threading.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "llvm/Support/Casting.h"
@@ -45,6 +47,23 @@ private:
void PimBufferizationPass::runOnOperation() {
auto moduleOp = getOperation();
{
SmallVector<pim::PimEmptyManyOp> emptyManyOps;
moduleOp.walk([&](pim::PimEmptyManyOp emptyManyOp) { emptyManyOps.push_back(emptyManyOp); });
IRRewriter rewriter(moduleOp.getContext());
for (auto emptyManyOp : emptyManyOps) {
SmallVector<Value> replacementValues;
replacementValues.reserve(emptyManyOp.getOutputs().size());
rewriter.setInsertionPoint(emptyManyOp);
for (Value output : emptyManyOp.getOutputs()) {
auto outputType = cast<RankedTensorType>(output.getType());
replacementValues.push_back(
tensor::EmptyOp::create(rewriter, emptyManyOp.getLoc(), outputType.getShape(), outputType.getElementType()));
}
rewriter.replaceOp(emptyManyOp, replacementValues);
}
}
// Refactor this into a function
{
auto funcOp = getPimEntryFunc(moduleOp);
+21 -167
View File
@@ -8,6 +8,7 @@
#include <string>
#include "src/Accelerators/PIM/Common/PimCommon.hpp"
#include "src/Accelerators/PIM/Common/IR/CompactAsmUtils.hpp"
#include "src/Accelerators/PIM/Dialect/Spatial/SpatialOps.hpp"
using namespace mlir;
@@ -23,23 +24,23 @@ enum class ListDelimiter {
};
static ParseResult parseOpenDelimiter(OpAsmParser& parser, ListDelimiter delimiter) {
if (delimiter == ListDelimiter::Square)
return parser.parseLSquare();
return parser.parseLParen();
return onnx_mlir::compact_asm::parseOpenDelimiter(
parser, static_cast<onnx_mlir::compact_asm::ListDelimiter>(delimiter));
}
static ParseResult parseOptionalCloseDelimiter(OpAsmParser& parser, ListDelimiter delimiter) {
if (delimiter == ListDelimiter::Square)
return parser.parseOptionalRSquare();
return parser.parseOptionalRParen();
return onnx_mlir::compact_asm::parseOptionalCloseDelimiter(
parser, static_cast<onnx_mlir::compact_asm::ListDelimiter>(delimiter));
}
static void printOpenDelimiter(OpAsmPrinter& printer, ListDelimiter delimiter) {
printer << (delimiter == ListDelimiter::Square ? "[" : "(");
onnx_mlir::compact_asm::printOpenDelimiter(
printer, static_cast<onnx_mlir::compact_asm::ListDelimiter>(delimiter));
}
static void printCloseDelimiter(OpAsmPrinter& printer, ListDelimiter delimiter) {
printer << (delimiter == ListDelimiter::Square ? "]" : ")");
onnx_mlir::compact_asm::printCloseDelimiter(
printer, static_cast<onnx_mlir::compact_asm::ListDelimiter>(delimiter));
}
static bool parseOptionalKeywordAlias(OpAsmParser& parser, StringRef preferred, StringRef legacy) {
@@ -51,31 +52,8 @@ static ParseResult parseCompressedRepeatedList(OpAsmParser& parser,
ListDelimiter delimiter,
SmallVectorImpl<EntryT>& entries,
ParseEntryFn parseEntry) {
if (parseOpenDelimiter(parser, delimiter))
return failure();
if (succeeded(parseOptionalCloseDelimiter(parser, delimiter)))
return success();
while (true) {
EntryT entry;
if (parseEntry(entry))
return failure();
int64_t repeatCount = 1;
if (succeeded(parser.parseOptionalKeyword("x"))) {
if (parser.parseInteger(repeatCount) || repeatCount <= 0)
return parser.emitError(parser.getCurrentLocation(), "repeat count after 'x' must be positive");
}
for (int64_t index = 0; index < repeatCount; ++index)
entries.push_back(entry);
if (succeeded(parseOptionalCloseDelimiter(parser, delimiter)))
break;
if (parser.parseComma())
return failure();
}
return success();
return onnx_mlir::compact_asm::parseCompressedRepeatedList(
parser, static_cast<onnx_mlir::compact_asm::ListDelimiter>(delimiter), entries, parseEntry);
}
template <typename IntT>
@@ -388,156 +366,32 @@ static ParseResult parseCompressedOperandSequence(OpAsmParser& parser,
static ParseResult parseCompressedTypeSequence(OpAsmParser& parser, SmallVectorImpl<Type>& types, bool allowEmpty);
static bool hasRepeatedTuple(ValueRange values, size_t tupleSize) {
if (tupleSize == 0 || values.empty() || values.size() % tupleSize != 0)
return false;
SmallVector<Value> valueVec(values.begin(), values.end());
ArrayRef<Value> tuple(valueVec.data(), tupleSize);
for (size_t index = tupleSize; index < values.size(); index += tupleSize)
if (!llvm::equal(tuple, ArrayRef<Value>(valueVec).slice(index, tupleSize)))
return false;
return true;
return onnx_mlir::compact_asm::hasRepeatedTuple(values, tupleSize);
}
static bool hasRepeatedTuple(TypeRange types, size_t tupleSize) {
if (tupleSize == 0 || types.empty() || types.size() % tupleSize != 0)
return false;
SmallVector<Type> typeVec(types.begin(), types.end());
ArrayRef<Type> tuple(typeVec.data(), tupleSize);
for (size_t index = tupleSize; index < types.size(); index += tupleSize)
if (!llvm::equal(tuple, ArrayRef<Type>(typeVec).slice(index, tupleSize)))
return false;
return true;
return onnx_mlir::compact_asm::hasRepeatedTuple(types, tupleSize);
}
static void printValueTupleRun(OpAsmPrinter& printer, ValueRange values, size_t tupleSize) {
printer << "[";
printOpenDelimiter(printer, ListDelimiter::Paren);
for (size_t index = 0; index < tupleSize; ++index) {
if (index != 0)
printer << ", ";
printer.printOperand(values[index]);
}
printCloseDelimiter(printer, ListDelimiter::Paren);
printer << " x" << (values.size() / tupleSize) << "]";
onnx_mlir::compact_asm::printValueTupleRun(
printer, values, tupleSize, onnx_mlir::compact_asm::ListDelimiter::Square);
}
static void printTypeTupleRun(OpAsmPrinter& printer, TypeRange types, size_t tupleSize) {
printer << "[";
printOpenDelimiter(printer, ListDelimiter::Paren);
for (size_t index = 0; index < tupleSize; ++index) {
if (index != 0)
printer << ", ";
printer.printType(types[index]);
}
printCloseDelimiter(printer, ListDelimiter::Paren);
printer << " x" << (types.size() / tupleSize) << "]";
onnx_mlir::compact_asm::printTypeTupleRun(
printer, types, tupleSize, onnx_mlir::compact_asm::ListDelimiter::Square);
}
static ParseResult parseCompressedOrTupleOperandList(OpAsmParser& parser,
SmallVectorImpl<OpAsmParser::UnresolvedOperand>& operands) {
if (parser.parseLSquare())
return failure();
if (succeeded(parser.parseOptionalRSquare()))
return success();
if (succeeded(parser.parseOptionalLParen())) {
SmallVector<OpAsmParser::UnresolvedOperand> tupleOperands;
if (parseCompressedOperandSequence(parser, tupleOperands) || parser.parseRParen())
return failure();
int64_t repeatCount = 1;
if (succeeded(parser.parseOptionalKeyword("x"))) {
if (parser.parseInteger(repeatCount) || repeatCount <= 0)
return parser.emitError(parser.getCurrentLocation(), "repeat count after 'x' must be positive");
}
for (int64_t repeat = 0; repeat < repeatCount; ++repeat)
llvm::append_range(operands, tupleOperands);
while (succeeded(parser.parseOptionalComma())) {
if (parser.parseLParen())
return failure();
tupleOperands.clear();
if (parseCompressedOperandSequence(parser, tupleOperands) || parser.parseRParen())
return failure();
repeatCount = 1;
if (succeeded(parser.parseOptionalKeyword("x"))) {
if (parser.parseInteger(repeatCount) || repeatCount <= 0)
return parser.emitError(parser.getCurrentLocation(), "repeat count after 'x' must be positive");
}
for (int64_t repeat = 0; repeat < repeatCount; ++repeat)
llvm::append_range(operands, tupleOperands);
}
return parser.parseRSquare();
}
while (true) {
if (parseOneCompressedOperandEntry(parser, operands))
return failure();
if (succeeded(parser.parseOptionalRSquare()))
return success();
if (parser.parseComma())
return failure();
}
return onnx_mlir::compact_asm::parseCompressedOrTupleOperandList(
parser, onnx_mlir::compact_asm::ListDelimiter::Square, operands);
}
static ParseResult parseCompressedOrTupleTypeList(OpAsmParser& parser, SmallVectorImpl<Type>& types) {
if (parser.parseLSquare())
return failure();
if (succeeded(parser.parseOptionalRSquare()))
return success();
if (succeeded(parser.parseOptionalLParen())) {
SmallVector<Type> tupleTypes;
if (parseCompressedTypeSequence(parser, tupleTypes, /*allowEmpty=*/false) || parser.parseRParen())
return failure();
int64_t repeatCount = 1;
if (succeeded(parser.parseOptionalKeyword("x"))) {
if (parser.parseInteger(repeatCount) || repeatCount <= 0)
return parser.emitError(parser.getCurrentLocation(), "repeat count after 'x' must be positive");
}
for (int64_t repeat = 0; repeat < repeatCount; ++repeat)
llvm::append_range(types, tupleTypes);
while (succeeded(parser.parseOptionalComma())) {
if (parser.parseLParen())
return failure();
tupleTypes.clear();
if (parseCompressedTypeSequence(parser, tupleTypes, /*allowEmpty=*/false) || parser.parseRParen())
return failure();
repeatCount = 1;
if (succeeded(parser.parseOptionalKeyword("x"))) {
if (parser.parseInteger(repeatCount) || repeatCount <= 0)
return parser.emitError(parser.getCurrentLocation(), "repeat count after 'x' must be positive");
}
for (int64_t repeat = 0; repeat < repeatCount; ++repeat)
llvm::append_range(types, tupleTypes);
}
return parser.parseRSquare();
}
while (true) {
Type type;
if (parser.parseType(type))
return failure();
int64_t repeatCount = 1;
if (succeeded(parser.parseOptionalKeyword("x"))) {
if (parser.parseInteger(repeatCount) || repeatCount <= 0)
return parser.emitError(parser.getCurrentLocation(), "repeat count after 'x' must be positive");
}
for (int64_t repeat = 0; repeat < repeatCount; ++repeat)
types.push_back(type);
if (succeeded(parser.parseOptionalRSquare()))
return success();
if (parser.parseComma())
return failure();
}
return onnx_mlir::compact_asm::parseCompressedOrTupleTypeList(
parser, onnx_mlir::compact_asm::ListDelimiter::Square, types);
}
static ParseResult parseCompressedOperandEntryWithFirst(OpAsmParser& parser,