Compact the spatial IR with new dedicated operations and syntax

fast spatial node merging with batch operations
This commit is contained in:
NiccoloN
2026-05-03 14:14:14 +02:00
parent 15e8edb9c4
commit b605585b1f
34 changed files with 4419 additions and 1445 deletions

View File

@@ -39,6 +39,22 @@ def PimCoreOp : PimOp<"core", [SingleBlock]> {
}];
}
// pim.core_batch: a compressed batch of `laneCount` equivalent core bodies.
// A single single-block region describes the code; `weights` and `inputs`
// carry the operands (AttrSizedOperandSegments keeps the two variadic
// groups separable). The op produces no results.
def PimCoreBatchOp : PimOp<"core_batch", [SingleBlock, AttrSizedOperandSegments]> {
let summary = "Execute equivalent batched core bodies";
// The shared body the lanes execute.
let regions = (region SizedRegion<1>:$body);
let arguments = (ins
// Number of lanes this batch stands for.
I32Attr:$laneCount,
// Weight operands.
Variadic<PimTensor>:$weights,
// Input operands; bufferization ties body block arguments 1:1 to these.
Variadic<PimTensor>:$inputs
);
// Syntax: pim.core_batch lanes <n> (%weights) [%inputs] : [...] [...] -> ()
let assemblyFormat = [{
`lanes` $laneCount `(` $weights `)` `[` $inputs `]` attr-dict regions `:` type($weights) `[` type($inputs) `]` `->` `(` `)`
}];
}
def PimHaltOp : PimOp<"halt", [Terminator]> {
let summary = "Halt execution of the core";
@@ -65,6 +81,20 @@ def PimSendOp : PimOp<"send", []> {
}];
}
// pim.send_batch: batched counterpart of pim.send. Sends per-lane data from
// a batched core to the cores listed in `targetCoreIds`.
def PimSendBatchOp : PimOp<"send_batch", []> {
let summary = "Send a per-lane tensor to target cores from a batched core";
let arguments = (ins
// Value to transmit.
PimTensor:$input,
// Transfer size (units not stated here — presumably elements; confirm in lowering).
I32Attr:$size,
// One destination core id per lane (assumed index-aligned with lanes — TODO confirm).
DenseI32ArrayAttr:$targetCoreIds
);
// No results: pim.send_batch (%input) {...} : type -> ()
let assemblyFormat = [{
`(` $input `)` attr-dict `:` type($input) `->` `(` `)`
}];
}
def PimReceiveOp : PimOp<"receive", [DestinationStyleOpInterface]> {
let summary = "Receive a tensor from another core";
@@ -89,6 +119,30 @@ def PimReceiveOp : PimOp<"receive", [DestinationStyleOpInterface]> {
}];
}
// pim.receive_batch: destination-style batched receive. Incoming data is
// written into `outputBuffer` (the DPS init), which is returned as `output`.
def PimReceiveBatchOp : PimOp<"receive_batch", [DestinationStyleOpInterface]> {
let summary = "Receive per-lane tensors from source cores into a batched core";
let arguments = (ins
// Destination the received data is written into (DPS init).
PimTensor:$outputBuffer,
// Transfer size (units not stated here — confirm in lowering).
I32Attr:$size,
// One source core id per lane (assumed index-aligned with lanes — TODO confirm).
DenseI32ArrayAttr:$sourceCoreIds
);
let results = (outs
// The filled destination, tied to $outputBuffer.
PimTensor:$output
);
// DestinationStyleOpInterface hook: $outputBuffer is the single init operand.
let extraClassDeclaration = [{
mlir::MutableOperandRange getDpsInitsMutable() {
return getOutputBufferMutable();
}
}];
let assemblyFormat = [{
`(` $outputBuffer `)` attr-dict `:` type($outputBuffer) `->` type($output)
}];
}
def PimMemCopyHostToDevOp : PimOp<"memcp_hd", [DestinationStyleOpInterface]> {
let summary = "Copy a memory region from host memory into device memory";
@@ -115,6 +169,32 @@ def PimMemCopyHostToDevOp : PimOp<"memcp_hd", [DestinationStyleOpInterface]> {
}];
}
// pim.memcp_hd_batch: batched host-to-device copy. Destination-style: the
// device-side `deviceTarget` is the init operand and is returned as `output`.
def PimMemCopyHostToDevBatchOp : PimOp<"memcp_hd_batch", [DestinationStyleOpInterface]> {
let summary = "Copy a per-lane tensor from host memory into device memory inside a batched core";
let arguments = (ins
// Device-side destination (DPS init).
PimTensor:$deviceTarget,
// Host-side source.
PimTensor:$hostSource,
// Offsets into target/source and the copy size (units not stated here — confirm in lowering).
I32Attr:$deviceTargetOffset,
I32Attr:$hostSourceOffset,
I32Attr:$size
);
let results = (outs
// The written device target, tied to $deviceTarget.
PimTensor:$output
);
// DestinationStyleOpInterface hook: $deviceTarget is the single init operand.
let extraClassDeclaration = [{
mlir::MutableOperandRange getDpsInitsMutable() {
return getDeviceTargetMutable();
}
}];
let assemblyFormat = [{
`(` $deviceTarget `,` $hostSource `)` attr-dict `:` `(` type($deviceTarget) `,` type($hostSource) `)` `->` type($output)
}];
}
def PimMemCopyDevToHostOp : PimOp<"memcp_dh", [DestinationStyleOpInterface]> {
let summary = "Copy a memory region from device memory into host memory";

View File

@@ -1,6 +1,7 @@
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Bufferization/IR/DstBufferizableOpInterfaceImpl.h"
#include "mlir/Dialect/Bufferization/Transforms/Bufferize.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "OpBufferizationInterfaces.hpp"
@@ -65,6 +66,32 @@ struct MemCopyHostToDevOpInterface
}
};
/// Bufferization model for pim.memcp_hd_batch: resolves both tensor operands
/// to buffers and recreates the op over those buffers, carrying the offset
/// and size attributes over unchanged.
struct MemCopyHostToDevBatchOpInterface
    : DstBufferizableOpInterfaceExternalModel<MemCopyHostToDevBatchOpInterface, PimMemCopyHostToDevBatchOp> {
  LogicalResult bufferize(Operation* op,
                          RewriterBase& rewriter,
                          const BufferizationOptions& options,
                          BufferizationState& state) const {
    auto batchOp = cast<PimMemCopyHostToDevBatchOp>(op);
    // Device-side destination buffer first; bail out as soon as either lookup fails.
    auto targetBuffer = getBuffer(rewriter, batchOp.getDeviceTarget(), options, state);
    if (failed(targetBuffer))
      return failure();
    auto sourceBuffer = getBuffer(rewriter, batchOp.getHostSource(), options, state);
    if (failed(sourceBuffer))
      return failure();
    // The result type is the (buffer) type of the destination operand.
    replaceOpWithNewBufferizedOp<PimMemCopyHostToDevBatchOp>(rewriter,
                                                             batchOp,
                                                             targetBuffer->getType(),
                                                             *targetBuffer,
                                                             *sourceBuffer,
                                                             batchOp.getDeviceTargetOffsetAttr(),
                                                             batchOp.getHostSourceOffsetAttr(),
                                                             batchOp.getSizeAttr());
    return success();
  }
};
struct MemCopyDevToHostOpInterface
: DstBufferizableOpInterfaceExternalModel<MemCopyDevToHostOpInterface, PimMemCopyDevToHostOp> {
LogicalResult bufferize(Operation* op,
@@ -122,6 +149,127 @@ struct ReceiveOpInterface : DstBufferizableOpInterfaceExternalModel<ReceiveOpInt
}
};
/// Bufferization model for pim.receive_batch (destination-style).
struct ReceiveBatchOpInterface : DstBufferizableOpInterfaceExternalModel<ReceiveBatchOpInterface, PimReceiveBatchOp> {
  /// Only the non-init operands are read; the DPS init is the write target.
  bool bufferizesToMemoryRead(Operation* op, OpOperand& opOperand, const AnalysisState& state) const {
    auto dpsOp = cast<DestinationStyleOpInterface>(op);
    return !dpsOp.isDpsInit(&opOperand);
  }
  LogicalResult bufferize(Operation* op,
                          RewriterBase& rewriter,
                          const BufferizationOptions& options,
                          BufferizationState& state) const {
    auto receiveBatchOp = cast<PimReceiveBatchOp>(op);
    // Resolve the destination tensor to its buffer.
    auto destBuffer = getBuffer(rewriter, receiveBatchOp.getOutputBuffer(), options, state);
    if (failed(destBuffer))
      return failure();
    // Recreate the op on the buffer, keeping size/source attributes intact.
    replaceOpWithNewBufferizedOp<PimReceiveBatchOp>(rewriter,
                                                    op,
                                                    destBuffer->getType(),
                                                    *destBuffer,
                                                    receiveBatchOp.getSizeAttr(),
                                                    receiveBatchOp.getSourceCoreIdsAttr());
    return success();
  }
};
// Bufferization model for pim.core_batch. The op is not destination-style:
// it has no results, reads all tensor operands, and its body block arguments
// alias the `inputs` operands 1:1 (see getAliasingOpOperands/getBufferType).
struct CoreBatchOpInterface : BufferizableOpInterface::ExternalModel<CoreBatchOpInterface, PimCoreBatchOp> {
// Every tensor operand is (conservatively) treated as read.
bool bufferizesToMemoryRead(Operation* op, OpOperand& opOperand, const AnalysisState& state) const {
return true;
}
// The op itself never writes through its operands.
bool bufferizesToMemoryWrite(Operation* op, OpOperand& opOperand, const AnalysisState& state) const {
return false;
}
// No results, hence no operand-to-result aliasing.
AliasingValueList getAliasingValues(Operation* op, OpOperand& opOperand, const AnalysisState& state) const {
return {};
}
// A body block argument is equivalent to the input operand at the same
// argument position (inputs follow the weights in the operand list).
AliasingOpOperandList getAliasingOpOperands(Operation* op, Value value, const AnalysisState& state) const {
auto coreBatchOp = cast<PimCoreBatchOp>(op);
auto bbArg = dyn_cast<BlockArgument>(value);
if (!bbArg || bbArg.getOwner() != &coreBatchOp.getBody().front())
return {};
// Operand order is [weights..., inputs...]; block arg i maps to input i.
unsigned inputOperandIndex = coreBatchOp.getWeights().size() + bbArg.getArgNumber();
return {{&coreBatchOp->getOpOperand(inputOperandIndex), BufferRelation::Equivalent}};
}
// Values defined by this op (block args) are never written in place.
bool isWritable(Operation* op, Value value, const AnalysisState& state) const {
return false;
}
// Buffer type of a body block argument: the type of its tied input if that
// is already a buffer, otherwise the input's computed buffer type.
FailureOr<BufferLikeType>
getBufferType(Operation* op,
Value value,
const BufferizationOptions& options,
const BufferizationState& state,
SmallVector<Value>& invocationStack) const {
auto coreBatchOp = cast<PimCoreBatchOp>(op);
auto bbArg = dyn_cast<BlockArgument>(value);
if (!bbArg || bbArg.getOwner() != &coreBatchOp.getBody().front())
return failure();
Value tiedInput = coreBatchOp.getInputs()[bbArg.getArgNumber()];
if (auto memRefType = dyn_cast<BufferLikeType>(tiedInput.getType()))
return memRefType;
return bufferization::getBufferType(tiedInput, options, state, invocationStack);
}
// Rebuilds the op with bufferized weights/inputs, moves the region over,
// and bufferizes the region's block signatures.
LogicalResult bufferize(Operation* op,
RewriterBase& rewriter,
const BufferizationOptions& options,
BufferizationState& state) const {
auto coreBatchOp = cast<PimCoreBatchOp>(op);
SmallVector<Value> weights;
SmallVector<Value> inputs;
weights.reserve(coreBatchOp.getWeights().size());
inputs.reserve(coreBatchOp.getInputs().size());
// Swap each tensor operand for its buffer; non-tensor operands pass through.
for (Value weight : coreBatchOp.getWeights()) {
if (isa<TensorType>(weight.getType())) {
auto weightOpt = getBuffer(rewriter, weight, options, state);
if (failed(weightOpt))
return failure();
weights.push_back(*weightOpt);
}
else {
weights.push_back(weight);
}
}
for (Value input : coreBatchOp.getInputs()) {
if (isa<TensorType>(input.getType())) {
auto inputOpt = getBuffer(rewriter, input, options, state);
if (failed(inputOpt))
return failure();
inputs.push_back(*inputOpt);
}
else {
inputs.push_back(input);
}
}
rewriter.setInsertionPoint(coreBatchOp);
auto newOp = PimCoreBatchOp::create(
rewriter, coreBatchOp.getLoc(), coreBatchOp.getLaneCountAttr(), ValueRange(weights), ValueRange(inputs));
// AttrSizedOperandSegments bookkeeping must match the rebuilt operand lists.
newOp.getProperties().setOperandSegmentSizes({static_cast<int>(weights.size()), static_cast<int>(inputs.size())});
// Preserve the core-id annotation if present.
if (auto coreIdsAttr = coreBatchOp->getAttr(onnx_mlir::kCoreIdAttrName))
newOp->setAttr(onnx_mlir::kCoreIdAttrName, coreIdsAttr);
rewriter.inlineRegionBefore(coreBatchOp.getBody(), newOp.getBody(), newOp.getBody().begin());
// Convert the block arguments from tensor to buffer types.
for (Block& block : newOp.getBody())
if (failed(bufferization::bufferizeBlockSignature(&block, rewriter, options, state)))
return failure();
rewriter.eraseOp(coreBatchOp);
return success();
}
};
struct TransposeOpInterface : DstBufferizableOpInterfaceExternalModel<TransposeOpInterface, PimTransposeOp> {
bool bufferizesToMemoryRead(Operation* op, OpOperand& opOperand, const AnalysisState& state) const {
return !cast<DestinationStyleOpInterface>(op).isDpsInit(&opOperand);
@@ -287,8 +435,11 @@ struct UnaryDstOpInterface : DstBufferizableOpInterfaceExternalModel<UnaryDstOpI
void registerOpBufferizationInterfaces(DialectRegistry& registry) {
registry.addExtension(+[](MLIRContext* ctx, PimDialect* dialect) {
PimCoreBatchOp::attachInterface<CoreBatchOpInterface>(*ctx);
PimReceiveOp::attachInterface<ReceiveOpInterface>(*ctx);
PimReceiveBatchOp::attachInterface<ReceiveBatchOpInterface>(*ctx);
PimMemCopyHostToDevOp::attachInterface<MemCopyHostToDevOpInterface>(*ctx);
PimMemCopyHostToDevBatchOp::attachInterface<MemCopyHostToDevBatchOpInterface>(*ctx);
PimMemCopyDevToHostOp::attachInterface<MemCopyDevToHostOpInterface>(*ctx);
PimTransposeOp::attachInterface<TransposeOpInterface>(*ctx);
PimVMMOp::attachInterface<VMMOpInterface>(*ctx);

View File

@@ -93,8 +93,8 @@ void PimBufferizationPass::runOnOperation() {
}
void PimBufferizationPass::annotateWeightsMemrefs(ModuleOp moduleOp, func::FuncOp funcOp) const {
funcOp.walk([&](PimCoreOp coreOp) {
walkPimMvmVmmWeightUses(coreOp, [&](OpOperand& weightUse) {
auto markWeights = [&](Operation* op) {
walkPimMvmVmmWeightUses(op, [&](OpOperand& weightUse) {
Value weight = weightUse.get();
auto getGlobalOp = weight.getDefiningOp<memref::GetGlobalOp>();
if (!getGlobalOp)
@@ -104,7 +104,10 @@ void PimBufferizationPass::annotateWeightsMemrefs(ModuleOp moduleOp, func::FuncO
markWeightAlways(getGlobalOp);
markWeightAlways(globalMemrefOp);
});
});
};
funcOp.walk([&](PimCoreOp coreOp) { markWeights(coreOp); });
funcOp.walk([&](PimCoreBatchOp coreBatchOp) { markWeights(coreBatchOp); });
}
std::unique_ptr<Pass> createPimBufferizationPass() { return std::make_unique<PimBufferizationPass>(); }

View File

@@ -2,6 +2,7 @@ add_onnx_mlir_dialect(Spatial spat)
add_onnx_mlir_dialect_doc(spat Spatial.td)
add_pim_library(SpatialOps
Channels.cpp
SpatialOps.cpp
Transforms/MergeComputeNodes/MergeComputeNodesPass.cpp
Transforms/MergeComputeNodes/DCPGraph/Graph.cpp

View File

@@ -0,0 +1,120 @@
#include "src/Accelerators/PIM/Dialect/Spatial/Channels.hpp"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/Diagnostics.h"
using namespace mlir;
namespace onnx_mlir::spatial {
namespace {
// Uniform accessors for the channel id of either endpoint op kind.
static Channels::ChannelId getChannelId(SpatChannelSendOp sendOp) { return sendOp.getChannelId(); }
static Channels::ChannelId getChannelId(SpatChannelReceiveOp receiveOp) { return receiveOp.getChannelId(); }
// Checks that a complete send/receive pair agrees on source core, target
// core, and value type, emitting a diagnostic on the send op per mismatch.
// Returns failure silently when either endpoint is missing — callers
// (Channels::verify) report that case themselves.
static LogicalResult verifyEndpointPair(ChannelEndpoints endpoints) {
if (!endpoints.send || !endpoints.receive)
return failure();
if (endpoints.send.getSourceCoreId() != endpoints.receive.getSourceCoreId()) {
endpoints.send.emitOpError("sourceCoreId does not match paired spat.channel_receive");
return failure();
}
if (endpoints.send.getTargetCoreId() != endpoints.receive.getTargetCoreId()) {
endpoints.send.emitOpError("targetCoreId does not match paired spat.channel_receive");
return failure();
}
if (endpoints.send.getInput().getType() != endpoints.receive.getOutput().getType()) {
endpoints.send.emitOpError("input type does not match paired spat.channel_receive result type");
return failure();
}
return success();
}
} // namespace
// Builds the channel table by scanning `funcOp` for every channel endpoint.
// A null function yields an empty table.
Channels::Channels(func::FuncOp funcOp) {
  if (!funcOp)
    return;
  funcOp.walk([this](SpatChannelSendOp op) { insertSend(op); });
  funcOp.walk([this](SpatChannelReceiveOp op) { insertReceive(op); });
}
// Hands out a fresh channel id, one past the largest id seen so far.
Channels::ChannelId Channels::allocate() { return nextChannelId++; }
// Registers `sendOp` under its channel id, bumping the id allocator so that
// allocate() never reuses an id already present in the IR.
void Channels::insertSend(SpatChannelSendOp sendOp) {
ChannelId channelId = getChannelId(sendOp);
nextChannelId = std::max(nextChannelId, channelId + 1);
endpoints[channelId].send = sendOp;
}
// Registers `receiveOp` under its channel id; mirrors insertSend.
void Channels::insertReceive(SpatChannelReceiveOp receiveOp) {
ChannelId channelId = getChannelId(receiveOp);
nextChannelId = std::max(nextChannelId, channelId + 1);
endpoints[channelId].receive = receiveOp;
}
// Detaches `sendOp` from its channel entry; the entry is dropped entirely
// once neither endpoint remains registered.
void Channels::eraseSend(SpatChannelSendOp sendOp) {
  auto entry = endpoints.find(getChannelId(sendOp));
  if (entry == endpoints.end())
    return;
  entry->second.send = {};
  if (!entry->second.receive)
    endpoints.erase(entry);
}
// Detaches `receiveOp` from its channel entry; drops the entry entirely
// once no send remains either. Mirrors eraseSend.
void Channels::eraseReceive(SpatChannelReceiveOp receiveOp) {
ChannelId channelId = getChannelId(receiveOp);
auto it = endpoints.find(channelId);
if (it == endpoints.end())
return;
it->second.receive = {};
if (!it->second.send)
endpoints.erase(it);
}
// Returns the endpoint pair registered under `id`, or failure if unknown.
FailureOr<ChannelEndpoints> Channels::lookup(ChannelId id) const {
  if (auto it = endpoints.find(id); it != endpoints.end())
    return it->second;
  return failure();
}
// Returns the receive op paired with `sendOp`, or failure when the channel
// is unknown or has no registered receive.
FailureOr<SpatChannelReceiveOp> Channels::getReceiveFor(SpatChannelSendOp sendOp) const {
auto endpointsOr = lookup(getChannelId(sendOp));
if (failed(endpointsOr) || !endpointsOr->receive)
return failure();
return endpointsOr->receive;
}
// Returns the send op paired with `receiveOp`, or failure when the channel
// is unknown or has no registered send. Mirrors getReceiveFor.
FailureOr<SpatChannelSendOp> Channels::getSendFor(SpatChannelReceiveOp receiveOp) const {
auto endpointsOr = lookup(getChannelId(receiveOp));
if (failed(endpointsOr) || !endpointsOr->send)
return failure();
return endpointsOr->send;
}
// Verifies every registered channel: both endpoints must exist and agree on
// cores and types (verifyEndpointPair). Unlike a fail-fast scan this reports
// *all* malformed channels before returning failure — DenseMap iteration
// order is unspecified, so stopping at the first error made the emitted
// diagnostic nondeterministic across runs and hid further problems.
// Returns success iff every channel is well-formed.
LogicalResult Channels::verify() const {
  bool anyInvalid = false;
  for (const auto& [channelId, pair] : endpoints) {
    if (!pair.send || !pair.receive) {
      if (pair.send) {
        auto sendOp = pair.send;
        sendOp.emitOpError("channel_id ") << channelId << " is missing a paired spat.channel_receive";
      }
      else if (pair.receive) {
        auto receiveOp = pair.receive;
        receiveOp.emitOpError("channel_id ") << channelId << " is missing a paired spat.channel_send";
      }
      anyInvalid = true;
      continue;
    }
    // Complete pair: check core ids and value type agreement.
    if (failed(verifyEndpointPair(pair)))
      anyInvalid = true;
  }
  return failure(anyInvalid);
}
} // namespace onnx_mlir::spatial

View File

@@ -0,0 +1,43 @@
#pragma once
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Support/LogicalResult.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
#include "src/Accelerators/PIM/Common/PimCommon.hpp"
#include "src/Accelerators/PIM/Dialect/Spatial/SpatialOps.hpp"
namespace onnx_mlir::spatial {
// The send/receive op pair registered under one channel id. Either handle
// may be null while the IR is being built or rewritten.
struct ChannelEndpoints {
SpatChannelSendOp send;
SpatChannelReceiveOp receive;
};
// Index of all spat.channel_send / spat.channel_receive ops of a function,
// keyed by channel id; also hands out fresh ids via allocate().
class Channels {
public:
using ChannelId = int64_t;
// Scans `funcOp` (may be null) and registers every endpoint found.
explicit Channels(mlir::func::FuncOp funcOp);
// Returns an id strictly larger than any id seen so far.
ChannelId allocate();
// Registration / deregistration of individual endpoints.
void insertSend(SpatChannelSendOp sendOp);
void insertReceive(SpatChannelReceiveOp receiveOp);
void eraseSend(SpatChannelSendOp sendOp);
void eraseReceive(SpatChannelReceiveOp receiveOp);
// Queries; each returns failure when the id / paired endpoint is unknown.
llvm::FailureOr<ChannelEndpoints> lookup(ChannelId id) const;
llvm::FailureOr<SpatChannelReceiveOp> getReceiveFor(SpatChannelSendOp sendOp) const;
llvm::FailureOr<SpatChannelSendOp> getSendFor(SpatChannelReceiveOp receiveOp) const;
// Emits diagnostics for malformed channels; failure if any is malformed.
mlir::LogicalResult verify() const;
private:
// Next id allocate() hands out; kept above every id seen in the IR.
ChannelId nextChannelId = 0;
// channel id -> its (possibly partial) endpoint pair.
llvm::DenseMap<ChannelId, ChannelEndpoints> endpoints;
};
} // namespace onnx_mlir::spatial

View File

@@ -9,7 +9,6 @@ def SpatialDialect : Dialect {
let name = "spat";
let summary = "Dialect designed for deep learning computation in a spatial architecture";
let cppNamespace = "::onnx_mlir::spatial";
let useDefaultTypePrinterParser = 1;
}
class SpatOp<string mnemonic, list<Trait> traits = []> :
@@ -19,15 +18,6 @@ class SpatOp<string mnemonic, list<Trait> traits = []> :
def SpatTensor :
AnyTypeOf<[AnyMemRef, AnyRankedTensor], "", "::mlir::ShapedType">;
class SpatType<string name, string typeMnemonic, list<Trait> traits = []>
: TypeDef<SpatialDialect, name, traits> {
let mnemonic = typeMnemonic;
}
def SpatChannelType : SpatType<"SpatChannel", "ch"> {
let summary = "Virtual channel type";
}
//===----------------------------------------------------------------------===//
// Execution
//===----------------------------------------------------------------------===//
@@ -48,10 +38,27 @@ def SpatCompute : SpatOp<"compute", [SingleBlock, AttrSizedOperandSegments]> {
let hasVerifier = 1;
let hasFolder = 1;
let hasCustomAssemblyFormat = 1;
}
let assemblyFormat = [{
`[` $weights `]` `(` $inputs `)` attr-dict `:` `[` type($weights) `]` `(` type($inputs) `)` `->` type($outputs) $body
}];
// spat.compute_batch: `laneCount` independent, structurally identical compute
// lanes compressed into one op sharing a single-block body.
def SpatComputeBatch : SpatOp<"compute_batch",
[SingleBlock, AttrSizedOperandSegments]> {
let summary = "Compressed batch of independent equivalent compute lanes";
let arguments = (ins
// Number of lanes the batch represents.
I32Attr:$laneCount,
// Variadic weight/input operands; AttrSizedOperandSegments separates the groups.
Variadic<SpatTensor>:$weights,
Variadic<SpatTensor>:$inputs
);
let results = (outs
Variadic<SpatTensor>:$outputs
);
// The shared body executed per lane.
let regions = (region SizedRegion<1>:$body);
let hasVerifier = 1;
let hasCustomAssemblyFormat = 1;
}
def SpatYieldOp : SpatOp<"yield", [Terminator]> {
@@ -61,51 +68,66 @@ def SpatYieldOp : SpatOp<"yield", [Terminator]> {
Variadic<SpatTensor>:$outputs
);
let assemblyFormat = [{
$outputs attr-dict `:` type($outputs)
}];
let hasCustomAssemblyFormat = 1;
}
// spat.extract_rows: splits a rank-2 tensor into one rank-2 tensor per row.
def SpatExtractRowsOp : SpatOp<"extract_rows", []> {
let summary = "Extract every row of a rank-2 tensor as separate rank-2 row tensors";
let arguments = (ins
SpatTensor:$input
);
let results = (outs
// One result per row of $input (checked by the verifier).
Variadic<SpatTensor>:$outputs
);
let hasVerifier = 1;
let hasCustomAssemblyFormat = 1;
}
// spat.concat: concatenates the inputs along `axis` into a single tensor.
def SpatConcatOp : SpatOp<"concat", []> {
let summary = "Concatenate tensors with compact Spatial operand syntax";
let arguments = (ins
// Concatenation dimension.
I64Attr:$axis,
Variadic<SpatTensor>:$inputs
);
let results = (outs
SpatTensor:$output
);
let hasVerifier = 1;
let hasCustomAssemblyFormat = 1;
}
//===----------------------------------------------------------------------===//
// Communication
//===----------------------------------------------------------------------===//
def SpatChannelNewOp : SpatOp<"channel_new", []> {
let summary = "Create a new virtual channel";
let results = (outs
SpatChannelType:$channel
);
let builders = [
OpBuilder<(ins ), [{
$_state.addTypes(SpatChannelType());
}]>
];
let assemblyFormat = [{
attr-dict
}];
}
def SpatChannelSendOp : SpatOp<"channel_send", []> {
let summary = "Send a tensor through a channel";
let summary = "Send a tensor through a logical channel";
let arguments = (ins
SpatChannelType:$channel,
I64Attr:$channelId,
I32Attr:$sourceCoreId,
I32Attr:$targetCoreId,
SpatTensor:$input
);
let assemblyFormat = [{
$input `to` $channel attr-dict `:` `(` type($input) `->` type($channel) `)`
$input attr-dict `:` type($input)
}];
}
def SpatChannelReceiveOp : SpatOp<"channel_receive", []> {
let summary = "Receive a tensor from a channel";
let summary = "Receive a tensor from a logical channel";
let arguments = (ins
SpatChannelType:$channel
I64Attr:$channelId,
I32Attr:$sourceCoreId,
I32Attr:$targetCoreId
);
let results = (outs
@@ -113,37 +135,70 @@ def SpatChannelReceiveOp : SpatOp<"channel_receive", []> {
);
let assemblyFormat = [{
$channel attr-dict `:` `(` type($channel) `->` type($output) `)`
attr-dict `:` type($output)
}];
}
def SpatChannelBroadcastSendOp : SpatOp<"channel_broadcast_send", []> {
let summary = "Broadcast a tensor through a shared channel buffer";
def SpatChannelSendManyOp : SpatOp<"channel_send_many", []> {
let summary = "Send multiple tensors through logical channels";
let arguments = (ins
SpatChannelType:$channel,
DenseI64ArrayAttr:$channelIds,
DenseI32ArrayAttr:$sourceCoreIds,
DenseI32ArrayAttr:$targetCoreIds,
Variadic<SpatTensor>:$inputs
);
let hasVerifier = 1;
let hasCustomAssemblyFormat = 1;
}
// spat.channel_receive_many: fused form of several spat.channel_receive ops.
// The attribute arrays are presumably index-aligned with the results (result
// i arrives over channelIds[i]) — the verifier enforces consistency; confirm
// the exact contract there.
def SpatChannelReceiveManyOp : SpatOp<"channel_receive_many", []> {
let summary = "Receive multiple tensors from logical channels";
let arguments = (ins
DenseI64ArrayAttr:$channelIds,
DenseI32ArrayAttr:$sourceCoreIds,
DenseI32ArrayAttr:$targetCoreIds
);
let results = (outs
Variadic<SpatTensor>:$outputs
);
let hasVerifier = 1;
let hasCustomAssemblyFormat = 1;
}
def SpatChannelSendBatchOp : SpatOp<"channel_send_batch", []> {
let summary = "Send per-lane tensors through logical channels in a batch body";
let arguments = (ins
DenseI64ArrayAttr:$channelIds,
DenseI32ArrayAttr:$sourceCoreIds,
DenseI32ArrayAttr:$targetCoreIds,
SpatTensor:$input
);
let assemblyFormat = [{
$input `to` $channel attr-dict `:` `(` type($input) `->` type($channel) `)`
}];
let hasVerifier = 1;
let hasCustomAssemblyFormat = 1;
}
def SpatChannelBroadcastReceiveOp : SpatOp<"channel_broadcast_receive", []> {
let summary = "Receive a tensor from a shared channel buffer";
def SpatChannelReceiveBatchOp : SpatOp<"channel_receive_batch", []> {
let summary = "Receive a per-lane tensor through logical channels in a batch body";
let arguments = (ins
SpatChannelType:$channel
DenseI64ArrayAttr:$channelIds,
DenseI32ArrayAttr:$sourceCoreIds,
DenseI32ArrayAttr:$targetCoreIds
);
let results = (outs
SpatTensor:$output
);
let assemblyFormat = [{
$channel attr-dict `:` `(` type($channel) `->` type($output) `)`
}];
let hasVerifier = 1;
let hasCustomAssemblyFormat = 1;
}
//===----------------------------------------------------------------------===//

File diff suppressed because it is too large Load Diff

View File

@@ -28,6 +28,8 @@ namespace spatial {
using namespace mlir;
namespace {
using SpatCompute = onnx_mlir::spatial::SpatCompute;
using SpatComputeBatch = onnx_mlir::spatial::SpatComputeBatch;
struct VirtualNode {
SmallVector<size_t, 4> originalComputeIndices;
@@ -54,6 +56,45 @@ struct WindowScheduleResult {
size_t maxMergeGroupSize = 0;
};
constexpr CPU kDefaultMaxCpuCount = 1000;
// Number of CPUs the scheduler may target: the user-supplied core count when
// positive, otherwise the default cap.
size_t getSchedulingCpuBudget() {
  const auto requested = coresCount.getValue();
  return requested > 0 ? static_cast<size_t>(requested) : static_cast<size_t>(kDefaultMaxCpuCount);
}
// Number of chunks a batch with `laneCount` lanes is split into: bounded by
// the lane count and by the (at least 1) scheduling CPU budget.
size_t getBatchChunkTargetCount(int32_t laneCount) {
  assert(laneCount > 0 && "laneCount must be positive");
  const size_t budget = std::max<size_t>(1, getSchedulingCpuBudget());
  return std::min(static_cast<size_t>(laneCount), budget);
}
// Maps chunk `chunkIndex` of `batch` to a ComputeInstance over a contiguous
// lane range. Lanes split into getBatchChunkTargetCount() chunks; the first
// (totalLanes % chunkCount) chunks are one lane larger than the rest.
ComputeInstance getBatchChunkForIndex(SpatComputeBatch batch, size_t chunkIndex) {
size_t totalLanes = static_cast<size_t>(batch.getLaneCount());
size_t chunkCount = getBatchChunkTargetCount(batch.getLaneCount());
size_t baseChunkSize = totalLanes / chunkCount;
size_t largeChunkCount = totalLanes % chunkCount;
// Large chunks come first, so each preceding large chunk shifts the start by one.
size_t laneStart = chunkIndex * baseChunkSize + std::min(chunkIndex, largeChunkCount);
size_t laneCount = baseChunkSize + (chunkIndex < largeChunkCount ? 1 : 0);
return {batch.getOperation(), static_cast<uint32_t>(laneStart), static_cast<uint32_t>(laneCount)};
}
// Inverse mapping of getBatchChunkForIndex: finds the chunk that contains
// `lane`, using the same large-chunks-first partition.
ComputeInstance getBatchChunkForLane(SpatComputeBatch batch, uint32_t lane) {
size_t totalLanes = static_cast<size_t>(batch.getLaneCount());
size_t chunkCount = getBatchChunkTargetCount(batch.getLaneCount());
size_t baseChunkSize = totalLanes / chunkCount;
size_t largeChunkCount = totalLanes % chunkCount;
// Lanes [0, largeChunkSpan) live in the (baseChunkSize + 1)-sized chunks.
size_t largeChunkSpan = largeChunkCount * (baseChunkSize + 1);
size_t chunkIndex = 0;
if (static_cast<size_t>(lane) < largeChunkSpan)
chunkIndex = static_cast<size_t>(lane) / (baseChunkSize + 1);
else
chunkIndex = largeChunkCount + (static_cast<size_t>(lane) - largeChunkSpan) / baseChunkSize;
return getBatchChunkForIndex(batch, chunkIndex);
}
std::vector<IndexedEdge> aggregateEdges(ArrayRef<IndexedEdge> edges) {
llvm::DenseMap<std::pair<size_t, size_t>, Weight> edgeWeights;
for (auto [start, end, weight] : edges) {
@@ -81,14 +122,96 @@ std::vector<IndexedEdge> aggregateEdges(ArrayRef<IndexedEdge> edges) {
return aggregatedEdges;
}
VirtualGraph buildInitialVirtualGraph(ArrayRef<SpatCompute> spatComputes, ArrayRef<IndexedEdge> edges) {
// Coarse cost model for a compute body: every operation costs a flat
// kOperationWeight units. checkedAdd/checkedMultiply guard Weight overflow.
Weight getComputeBodyWeight(Region& body) {
constexpr Weight kOperationWeight = 100;
Weight numOperations = 0;
for (auto& block : body)
for ([[maybe_unused]] auto& op : block)
numOperations = checkedAdd(numOperations, static_cast<Weight>(1));
return checkedMultiply(numOperations, kOperationWeight);
}
// Crossbar demand of a compute body: one unit per SpatWeightedVMMOp it
// contains (overflow-checked).
CrossbarUsage getComputeBodyCrossbarUsage(Region& body) {
CrossbarUsage crossbarUsage = 0;
for (auto& block : body)
for (auto& op : block)
if (isa<SpatWeightedVMMOp>(op))
crossbarUsage = checkedAdd(crossbarUsage, static_cast<CrossbarUsage>(1));
return crossbarUsage;
}
// Scheduling weight of an instance: a plain spat.compute uses its own
// weight; a batch chunk scales the shared body weight by its lane count.
Weight getComputeInstanceWeight(const ComputeInstance& instance) {
if (auto spatCompute = dyn_cast<SpatCompute>(instance.op))
return getSpatComputeWeight(spatCompute);
auto batch = cast<SpatComputeBatch>(instance.op);
return checkedMultiply(getComputeBodyWeight(batch.getBody()), static_cast<Weight>(instance.laneCount));
}
// Crossbar demand of an instance; mirrors getComputeInstanceWeight: batch
// chunks scale the per-body usage by their lane count.
CrossbarUsage getComputeInstanceCrossbarUsage(const ComputeInstance& instance) {
if (auto spatCompute = dyn_cast<SpatCompute>(instance.op))
return getSpatComputeCrossbarUsage(spatCompute);
auto batch = cast<SpatComputeBatch>(instance.op);
return checkedMultiply(getComputeBodyCrossbarUsage(batch.getBody()), static_cast<CrossbarUsage>(instance.laneCount));
}
// SSA inputs consumed by `instance`: all inputs of a plain spat.compute, or
// the per-lane slice [laneStart, laneStart + laneCount) of a
// spat.compute_batch (inputs are assumed one-per-lane — TODO confirm against
// the SpatComputeBatch verifier). The added assert turns a silent
// out-of-bounds index into a diagnosable failure when a chunk's lane range
// does not fit the batch's input list.
SmallVector<Value, 4> getComputeInstanceInputs(const ComputeInstance& instance) {
if (auto spatCompute = dyn_cast<SpatCompute>(instance.op))
return SmallVector<Value, 4>(spatCompute.getInputs().begin(), spatCompute.getInputs().end());
auto batch = cast<SpatComputeBatch>(instance.op);
assert(static_cast<size_t>(instance.laneStart) + instance.laneCount <= batch.getInputs().size() &&
       "batch chunk lane range exceeds the batch's input count");
SmallVector<Value, 4> inputs;
inputs.reserve(instance.laneCount);
for (uint32_t lane = instance.laneStart; lane < instance.laneStart + instance.laneCount; ++lane)
inputs.push_back(batch.getInputs()[lane]);
return inputs;
}
// Traces `value` backwards through tensor.extract_slice ops to the compute
// that produced it. A plain spat.compute yields a singleton instance; for a
// spat.compute_batch the result number is treated as the lane index
// (presumably one result per lane — confirm against the op's verifier) and
// the chunk owning that lane is returned. Returns nullopt for block
// arguments or any other producer.
std::optional<ComputeInstance> getOriginalComputeInstance(Value value) {
Operation* op = value.getDefiningOp();
if (!op)
return std::nullopt;
// Skip over slice extractions to reach the original producer.
while (auto extract = dyn_cast<tensor::ExtractSliceOp>(op)) {
value = extract.getSource();
op = value.getDefiningOp();
if (!op)
return std::nullopt;
}
if (auto spatCompute = dyn_cast<SpatCompute>(op))
return ComputeInstance {spatCompute.getOperation(), 0, 1};
if (auto batch = dyn_cast<SpatComputeBatch>(op))
return getBatchChunkForLane(batch, static_cast<uint32_t>(cast<OpResult>(value).getResultNumber()));
return std::nullopt;
}
// Gathers the schedulable units found directly in `entryOp`'s regions (top
// level only — no nested walk): one instance per spat.compute, and one
// instance per chunk of each spat.compute_batch.
SmallVector<ComputeInstance> collectComputeInstances(Operation* entryOp) {
SmallVector<ComputeInstance> instances;
for (Region& region : entryOp->getRegions()) {
for (Block& block : region) {
for (Operation& op : block) {
if (auto spatCompute = dyn_cast<SpatCompute>(&op)) {
instances.push_back({spatCompute.getOperation(), 0, 1});
continue;
}
if (auto batch = dyn_cast<SpatComputeBatch>(&op)) {
// Batches are pre-split into chunks so the scheduler can spread lanes.
size_t chunkCount = getBatchChunkTargetCount(batch.getLaneCount());
for (size_t chunkIndex = 0; chunkIndex < chunkCount; ++chunkIndex)
instances.push_back(getBatchChunkForIndex(batch, chunkIndex));
}
}
}
}
return instances;
}
VirtualGraph buildInitialVirtualGraph(ArrayRef<ComputeInstance> computeInstances, ArrayRef<IndexedEdge> edges) {
VirtualGraph graph;
graph.nodes.reserve(spatComputes.size());
for (auto [index, spatCompute] : llvm::enumerate(spatComputes)) {
graph.nodes.reserve(computeInstances.size());
for (auto [index, computeInstance] : llvm::enumerate(computeInstances)) {
VirtualNode node;
node.originalComputeIndices.push_back(index);
node.weight = getSpatComputeWeight(spatCompute);
node.crossbarUsage = getSpatComputeCrossbarUsage(spatCompute);
node.weight = getComputeInstanceWeight(computeInstance);
node.crossbarUsage = getComputeInstanceCrossbarUsage(computeInstance);
graph.nodes.push_back(std::move(node));
}
graph.edges = aggregateEdges(edges);
@@ -116,22 +239,34 @@ TimingInfo computeTiming(const VirtualGraph& graph) {
incomingEdgeCount[endIndex]++;
}
std::vector<size_t> readyNodes;
readyNodes.reserve(nodeCount);
auto getVirtualNodeOrderKey = [&](size_t nodeIndex) {
const VirtualNode& node = graph.nodes[nodeIndex];
if (!node.originalComputeIndices.empty())
return node.originalComputeIndices.front();
return nodeIndex;
};
auto readyNodeGreater = [&](size_t lhs, size_t rhs) {
size_t lhsKey = getVirtualNodeOrderKey(lhs);
size_t rhsKey = getVirtualNodeOrderKey(rhs);
if (lhsKey != rhsKey)
return lhsKey > rhsKey;
return lhs > rhs;
};
std::priority_queue<size_t, std::vector<size_t>, decltype(readyNodeGreater)> readyNodes(readyNodeGreater);
for (size_t i = 0; i < nodeCount; ++i)
if (incomingEdgeCount[i] == 0)
readyNodes.push_back(i);
readyNodes.push(i);
size_t readyIndex = 0;
while (readyIndex != readyNodes.size()) {
size_t current = readyNodes[readyIndex++];
while (!readyNodes.empty()) {
size_t current = readyNodes.top();
readyNodes.pop();
timing.topologicalOrder.push_back(current);
for (auto [child, weight] : children[current]) {
(void) weight;
assert(incomingEdgeCount[child] > 0 && "incoming edge count underflow");
incomingEdgeCount[child]--;
if (incomingEdgeCount[child] == 0)
readyNodes.push_back(child);
readyNodes.push(child);
}
}
@@ -287,17 +422,21 @@ std::vector<IndexedEdge> buildWindowEdges(const VirtualGraph& graph, const std::
WindowScheduleResult scheduleWindow(const VirtualGraph& graph, ArrayRef<size_t> selectedNodes, MLIRContext* context) {
std::vector<Weight> windowWeights;
std::vector<CrossbarUsage> windowCrossbarUsage;
std::vector<int64_t> windowNodeOrderKeys;
std::vector<int64_t> nodeToWindowIndex(graph.nodes.size(), -1);
windowWeights.reserve(selectedNodes.size());
windowCrossbarUsage.reserve(selectedNodes.size());
windowNodeOrderKeys.reserve(selectedNodes.size());
for (auto [windowIndex, nodeIndex] : llvm::enumerate(selectedNodes)) {
nodeToWindowIndex[nodeIndex] = static_cast<int64_t>(windowIndex);
windowWeights.push_back(graph.nodes[nodeIndex].weight);
windowCrossbarUsage.push_back(graph.nodes[nodeIndex].crossbarUsage);
windowNodeOrderKeys.push_back(static_cast<int64_t>(nodeIndex));
}
GraphDCP windowGraph(windowWeights, buildWindowEdges(graph, nodeToWindowIndex), windowCrossbarUsage);
GraphDCP windowGraph(
windowWeights, buildWindowEdges(graph, nodeToWindowIndex), windowNodeOrderKeys, windowCrossbarUsage);
if (coresCount.getValue() > 0)
windowGraph.setMaxCpuCount(static_cast<int>(coresCount.getValue()));
windowGraph.setContext(context);
@@ -414,13 +553,7 @@ bool coarsenGraph(const VirtualGraph& graph,
return true;
}
constexpr CPU kDefaultMaxCpuCount = 1000;
CPU getVirtualGraphMaxCpuCount() {
if (coresCount.getValue() > 0)
return static_cast<CPU>(coresCount.getValue());
return kDefaultMaxCpuCount;
}
CPU getVirtualGraphMaxCpuCount() { return static_cast<CPU>(getSchedulingCpuBudget()); }
size_t getDcpCoarseningWindowSize(size_t nodeCount) {
size_t windowSize = std::min(dcpCriticalWindowSize.getValue(), nodeCount);
@@ -430,7 +563,7 @@ size_t getDcpCoarseningWindowSize(size_t nodeCount) {
return windowSize;
}
DCPAnalysisResult buildResultFromVirtualGraph(const VirtualGraph& graph, ArrayRef<SpatCompute> spatComputes) {
DCPAnalysisResult buildResultFromVirtualGraph(const VirtualGraph& graph, ArrayRef<ComputeInstance> computeInstances) {
DCPAnalysisResult result;
TimingInfo timing = computeTiming(graph);
@@ -443,19 +576,19 @@ DCPAnalysisResult buildResultFromVirtualGraph(const VirtualGraph& graph, ArrayRe
std::iota(virtualNodeOrder.begin(), virtualNodeOrder.end(), 0);
}
std::vector<size_t> originalComputeToCpu(spatComputes.size(), 0);
std::vector<size_t> originalComputeToCpu(computeInstances.size(), 0);
for (auto [cpu, virtualNodeIndex] : llvm::enumerate(virtualNodeOrder)) {
const VirtualNode& virtualNode = graph.nodes[virtualNodeIndex];
for (size_t originalIndex : virtualNode.originalComputeIndices)
originalComputeToCpu[originalIndex] = cpu;
}
result.dominanceOrderCompute.reserve(spatComputes.size());
for (auto [originalIndex, spatCompute] : llvm::enumerate(spatComputes)) {
result.dominanceOrderCompute.reserve(computeInstances.size());
for (auto [originalIndex, computeInstance] : llvm::enumerate(computeInstances)) {
size_t cpu = originalComputeToCpu[originalIndex];
result.dominanceOrderCompute.push_back(spatCompute);
result.computeToCpuMap[spatCompute] = cpu;
result.cpuToLastComputeMap[cpu] = spatCompute;
result.dominanceOrderCompute.push_back(computeInstance);
result.computeToCpuMap[computeInstance] = cpu;
result.cpuToLastComputeMap[cpu] = computeInstance;
}
for (const auto& [cpu, lastCompute] : result.cpuToLastComputeMap)
result.isLastComputeOfCpu.insert(lastCompute);
@@ -463,13 +596,44 @@ DCPAnalysisResult buildResultFromVirtualGraph(const VirtualGraph& graph, ArrayRe
return result;
}
DCPAnalysisResult runLegacyDcp(ArrayRef<SpatCompute> spatComputes, ArrayRef<IndexedEdge> edges, MLIRContext* context) {
GraphDCP graphDCP(spatComputes, edges);
// Translate the per-CPU schedules produced by GraphDCP back into a
// DCPAnalysisResult keyed by the original ComputeInstance handles.
// computeInstances[i] must correspond to graph node index i.
DCPAnalysisResult buildResultFromScheduledGraph(GraphDCP& graphDCP, ArrayRef<ComputeInstance> computeInstances) {
  DCPAnalysisResult analysis;
  // Dominance order is simply the original instance order here.
  analysis.dominanceOrderCompute.assign(computeInstances.begin(), computeInstances.end());
  for (CPU cpuIndex = 0; cpuIndex < graphDCP.cpuCount(); ++cpuIndex) {
    auto tasksOnCpu = graphDCP.getScheduledTasks(cpuIndex);
    if (tasksOnCpu.empty())
      continue;
    for (const auto& scheduled : tasksOnCpu)
      analysis.computeToCpuMap[computeInstances[scheduled.nodeIndex]] = cpuIndex;
    // Record the final compute scheduled on this CPU.
    const ComputeInstance& lastOnCpu = computeInstances[tasksOnCpu.back().nodeIndex];
    analysis.cpuToLastComputeMap[cpuIndex] = lastOnCpu;
    analysis.isLastComputeOfCpu.insert(lastOnCpu);
  }
  return analysis;
}
// Run the legacy (non-coarsening) DCP scheduler over the compute instances.
// Builds synthetic node weights, crossbar usage, and stable order keys, runs
// the DCP pass, and maps the schedule back onto the ComputeInstance handles.
DCPAnalysisResult
runLegacyDcp(ArrayRef<ComputeInstance> computeInstances, ArrayRef<IndexedEdge> edges, MLIRContext* context) {
  SmallVector<Weight> nodeWeights;
  SmallVector<CrossbarUsage> nodeCrossbarUsage;
  SmallVector<int64_t> nodeOrderKeys;
  nodeWeights.reserve(computeInstances.size());
  nodeCrossbarUsage.reserve(computeInstances.size());
  nodeOrderKeys.reserve(computeInstances.size());
  for (auto [index, instance] : llvm::enumerate(computeInstances)) {
    nodeWeights.push_back(getComputeInstanceWeight(instance));
    nodeCrossbarUsage.push_back(getComputeInstanceCrossbarUsage(instance));
    // Order key = original index, giving deterministic tie-breaking.
    nodeOrderKeys.push_back(static_cast<int64_t>(index));
  }
  GraphDCP graphDCP(nodeWeights, edges, nodeOrderKeys, nodeCrossbarUsage);
  if (coresCount.getValue() > 0)
    graphDCP.setMaxCpuCount(static_cast<int>(coresCount.getValue()));
  graphDCP.setContext(context);
  graphDCP.runDcp();
  // Fixed merge residue: a stale `return graphDCP.getResult();` here made the
  // instance-aware result construction below unreachable.
  return buildResultFromScheduledGraph(graphDCP, computeInstances);
}
} // namespace
@@ -488,27 +652,31 @@ SpatCompute getOriginalSpatCompute(Operation* op) {
}
DCPAnalysisResult DCPAnalysis::run() {
SmallVector<SpatCompute, 10> spatComputes;
SmallVector<ComputeInstance> computeInstances = collectComputeInstances(entryOp);
SmallVector<IndexedEdge, 10> edges;
for (auto& region : entryOp->getRegions())
for (SpatCompute spatCompute : region.getOps<SpatCompute>())
spatComputes.push_back(spatCompute);
for (auto [indexEndEdge, spatCompute] : llvm::enumerate(spatComputes)) {
for (Value input : spatCompute.getInputs()) {
if (auto producerCompute = getOriginalSpatCompute(input.getDefiningOp())) {
auto producerIt = llvm::find(spatComputes, producerCompute);
assert(producerIt != spatComputes.end());
auto indexStartEdge = std::distance(spatComputes.begin(), producerIt);
edges.push_back({indexStartEdge, indexEndEdge, getSizeInBytes(cast<ShapedType>(input.getType()))});
llvm::DenseMap<ComputeInstance, size_t> instanceToIndex;
instanceToIndex.reserve(computeInstances.size());
for (auto [index, instance] : llvm::enumerate(computeInstances))
instanceToIndex[instance] = index;
for (auto [indexEndEdge, computeInstance] : llvm::enumerate(computeInstances)) {
for (Value input : getComputeInstanceInputs(computeInstance)) {
if (auto producerInstance = getOriginalComputeInstance(input)) {
auto producerIt = instanceToIndex.find(*producerInstance);
assert(producerIt != instanceToIndex.end());
auto indexStartEdge = producerIt->second;
edges.push_back({static_cast<int64_t>(indexStartEdge),
static_cast<int64_t>(indexEndEdge),
static_cast<int64_t>(getSizeInBytes(cast<ShapedType>(input.getType())))});
}
}
}
if (dcpCriticalWindowSize.getValue() == 0)
return runLegacyDcp(spatComputes, edges, entryOp->getContext());
return runLegacyDcp(computeInstances, edges, entryOp->getContext());
VirtualGraph virtualGraph = buildInitialVirtualGraph(spatComputes, edges);
VirtualGraph virtualGraph = buildInitialVirtualGraph(computeInstances, edges);
size_t iteration = 0;
auto tryCoarsenSelectedNodes = [&](ArrayRef<size_t> selectedNodes) {
size_t oldNodeCount = virtualGraph.nodes.size();
@@ -545,6 +713,13 @@ DCPAnalysisResult DCPAnalysis::run() {
};
while (virtualGraph.nodes.size() > 1) {
if (virtualGraph.nodes.size() <= getSchedulingCpuBudget()) {
if (virtualGraph.nodes.size() >= 200)
llvm::errs() << llvm::formatv(
"[DCP-COARSEN] iter={0} old={1} stop=cpu-budget\n", iteration, virtualGraph.nodes.size());
break;
}
iteration++;
TimingInfo timing = computeTiming(virtualGraph);
if (!timing.valid) {
@@ -576,7 +751,7 @@ DCPAnalysisResult DCPAnalysis::run() {
break;
}
return buildResultFromVirtualGraph(virtualGraph, spatComputes);
return buildResultFromVirtualGraph(virtualGraph, computeInstances);
}
} // namespace spatial

View File

@@ -5,15 +5,28 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include <cstdint>
#include <vector>
#include "src/Accelerators/PIM/Dialect/Spatial/SpatialOps.hpp"
// A scheduling identity that covers both spat.compute and scheduled shards of
// spat.compute_batch.  The defaults ({op, 0, 1}) describe a single-lane shard,
// which is how a plain spat.compute is represented.
struct ComputeInstance {
  mlir::Operation* op = nullptr; // defining operation (compute or compute_batch)
  uint32_t laneStart = 0;        // first lane covered by this shard
  uint32_t laneCount = 1;        // number of consecutive lanes in the shard
  bool operator==(const ComputeInstance& other) const {
    return laneStart == other.laneStart && laneCount == other.laneCount && op == other.op;
  }
};
// Aggregated output of the DCP scheduling analysis, keyed by ComputeInstance.
// (Fixed merge residue: the old SpatCompute-typed members were left in place
// alongside the new ones, redeclaring the same names — a compile error.)
struct DCPAnalysisResult {
  // Compute instances in dominance (scheduling) order.
  std::vector<ComputeInstance> dominanceOrderCompute;
  // CPU assignment for each scheduled compute instance.
  llvm::DenseMap<ComputeInstance, size_t> computeToCpuMap;
  // Instances that are the final compute scheduled on their CPU.
  llvm::DenseSet<ComputeInstance> isLastComputeOfCpu;
  // For each CPU, the last compute instance scheduled on it.
  llvm::DenseMap<size_t, ComputeInstance> cpuToLastComputeMap;
};
namespace onnx_mlir {
@@ -34,3 +47,21 @@ public:
} // namespace spatial
} // namespace onnx_mlir
namespace llvm {
// Allow ComputeInstance to serve as a DenseMap/DenseSet key.  The empty and
// tombstone sentinels reuse the Operation* sentinels and pair them with lane
// values (UINT32_MAX) that no real shard carries.
template <>
struct DenseMapInfo<ComputeInstance> {
  static ComputeInstance getEmptyKey() {
    ComputeInstance key;
    key.op = DenseMapInfo<mlir::Operation*>::getEmptyKey();
    key.laneStart = UINT32_MAX;
    key.laneCount = UINT32_MAX;
    return key;
  }
  static ComputeInstance getTombstoneKey() {
    ComputeInstance key;
    key.op = DenseMapInfo<mlir::Operation*>::getTombstoneKey();
    key.laneStart = UINT32_MAX;
    key.laneCount = UINT32_MAX;
    return key;
  }
  static unsigned getHashValue(const ComputeInstance& v) {
    // Combine all identity fields so distinct shards of one op hash apart.
    return static_cast<unsigned>(llvm::hash_combine(v.op, v.laneStart, v.laneCount));
  }
  static bool isEqual(const ComputeInstance& a, const ComputeInstance& b) { return a == b; }
};
} // namespace llvm

View File

@@ -1491,18 +1491,21 @@ void GraphDCP::runDcp() {
// Min-heap entry for the ready queue (used with std::greater): orders by
// slack, then earliest start time (AEST), then a stable per-node key so that
// equal-priority tasks always pop in a deterministic order.
struct ReadyEntry {
  Time slack;
  Time aest;
  int64_t orderKey;
  TaskDCP* task;
  bool operator>(const ReadyEntry& other) const {
    if (slack != other.slack)
      return slack > other.slack;
    // Fixed merge residue: a stale unconditional `return aest > other.aest;`
    // here made the orderKey tie-break below unreachable.
    if (aest != other.aest)
      return aest > other.aest;
    return orderKey > other.orderKey;
  }
};
std::priority_queue<ReadyEntry, std::vector<ReadyEntry>, std::greater<ReadyEntry>> readyQueue;
size_t readyCount = 0;
auto pushReady = [&](TaskDCP* node) {
readyQueue.push({slackOrZero(node->getAest(), node->getAlst()), node->getAest(), node});
readyQueue.push({slackOrZero(node->getAest(), node->getAlst()), node->getAest(), node->Id(), node});
};
for (auto& node : nodes) {
@@ -1528,7 +1531,7 @@ void GraphDCP::runDcp() {
candidate = entry.task;
break;
}
readyQueue.push({curSlack, curAest, entry.task});
readyQueue.push({curSlack, curAest, entry.orderKey, entry.task});
}
assert(candidate != nullptr && "readyCount > 0 but heap exhausted");
--readyCount;
@@ -1579,8 +1582,11 @@ DCPAnalysisResult GraphDCP::getResult() {
auto dominanceOrder = dcp_graph::collectDominanceOrder(getRoots(), nodes.size());
ret.dominanceOrderCompute.reserve(dominanceOrder.size());
for (auto elem : dominanceOrder)
ret.dominanceOrderCompute.push_back(elem->getSpatCompute());
for (auto elem : dominanceOrder) {
auto spatCompute = elem->getSpatCompute();
if (spatCompute)
ret.dominanceOrderCompute.push_back({spatCompute.getOperation(), 0});
}
for (CPU cpu = 0; cpu < getLastCpu(); ++cpu) {
const CpuTaskList* tasks = findCpuTasks(cpu);
@@ -1588,10 +1594,14 @@ DCPAnalysisResult GraphDCP::getResult() {
continue;
size_t i = 0;
for (auto node : *tasks) {
ret.computeToCpuMap[node->getSpatCompute()] = cpu;
auto spatCompute = node->getSpatCompute();
if (!spatCompute)
continue;
ComputeInstance instance {spatCompute.getOperation(), 0};
ret.computeToCpuMap[instance] = cpu;
if (i++ == tasks->size() - 1) {
ret.isLastComputeOfCpu.insert(node->getSpatCompute());
ret.cpuToLastComputeMap[cpu] = node->getSpatCompute();
ret.isLastComputeOfCpu.insert(instance);
ret.cpuToLastComputeMap[cpu] = instance;
}
}
}

View File

@@ -138,13 +138,18 @@ public:
// Build a synthetic DCP graph from per-node weights and weighted edges.
// nodeOrderKeys (optional) give each node a stable identity used for
// deterministic tie-breaking; defaults to the node's index.
// nodeCrossbarUsage (optional) records per-node crossbar demand; defaults to 0.
// (Fixed merge residue: the old `nodes.emplace_back(index, ...)` line was left
// between the loop header and the new emplace, pushing the new call outside
// the loop where `index`/`weight` are out of scope.)
GraphDCP(llvm::ArrayRef<Weight> nodeWeights,
    llvm::ArrayRef<IndexedEdge> edges,
    llvm::ArrayRef<int64_t> nodeOrderKeys = {},
    llvm::ArrayRef<CrossbarUsage> nodeCrossbarUsage = {})
    : nodes(), cpuTasks(), cpuCrossbarUsage() {
  assert((nodeCrossbarUsage.empty() || nodeCrossbarUsage.size() == nodeWeights.size())
      && "synthetic crossbar usage must match synthetic node weights");
  assert((nodeOrderKeys.empty() || nodeOrderKeys.size() == nodeWeights.size())
      && "synthetic node order keys must match synthetic node weights");
  nodes.reserve(nodeWeights.size());
  for (auto [index, weight] : llvm::enumerate(nodeWeights))
    nodes.emplace_back(nodeOrderKeys.empty() ? static_cast<int64_t>(index) : nodeOrderKeys[index],
        weight,
        nodeCrossbarUsage.empty() ? 0 : nodeCrossbarUsage[index]);
  for (auto [start, end, weight] : edges)
    makeEdge(start, end, weight);
}