Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
21 commits
Select commit Hold shift + click to select a range
b9ae621
[MLIR][SparseTensor] Loop ordering strategy infrastructure (flag)
gmalasan Aug 21, 2025
97cec4e
[MLIR][SparseTensor] Fixed up the rest of the boilerplate code, strat…
gmalasan Aug 21, 2025
3f3661a
[MLIR][SparseTensor] Fixed PR feedback about style
gmalasan Aug 26, 2025
17943dc
Merge branch 'main' into sparse-tensor-loop-ordering-infrastructure
gmalasan Aug 26, 2025
6f3012f
Merge branch 'main' into sparse-tensor-loop-ordering-infrastructure
gmalasan Aug 26, 2025
e9a0671
Merge branch 'main' into sparse-tensor-loop-ordering-infrastructure
gmalasan Aug 30, 2025
3a5d7e2
Merge branch 'main' into sparse-tensor-loop-ordering-infrastructure
gmalasan Sep 1, 2025
d0b96d7
[MLIR][SparseTensor] Comment style fixes
gmalasan Sep 8, 2025
ccc784d
Merge branch 'sparse-tensor-loop-ordering-infrastructure' of https://…
gmalasan Sep 8, 2025
fde6039
[MLIR][SparseTensor] Missed comment style fix in Passes.h
gmalasan Sep 9, 2025
e80fee7
Merge branch 'main' into sparse-tensor-loop-ordering-infrastructure
gmalasan Sep 9, 2025
79ee468
Merge branch 'main' into sparse-tensor-loop-ordering-infrastructure
gmalasan Sep 12, 2025
cc7af1b
Merge branch 'main' into sparse-tensor-loop-ordering-infrastructure
gmalasan Sep 22, 2025
b6f5c6b
Merge branch 'main' into sparse-tensor-loop-ordering-infrastructure
gmalasan Sep 23, 2025
3832d9d
Merge branch 'main' into sparse-tensor-loop-ordering-infrastructure
gmalasan Sep 23, 2025
fbfa3b6
Apply clang-format to C++ files for loop ordering infrastructure
gmalasan Sep 24, 2025
8630cc7
Merge branch 'sparse-tensor-loop-ordering-infrastructure' of https://…
gmalasan Sep 24, 2025
db6ed6d
Merge branch 'main' into sparse-tensor-loop-ordering-infrastructure
gmalasan Sep 24, 2025
57c4088
Fix clang-format issues in SparseTensorPasses.cpp
gmalasan Sep 27, 2025
7e416fc
Merge branch 'sparse-tensor-loop-ordering-infrastructure' of https://…
gmalasan Sep 27, 2025
c108117
Merge branch 'main' into sparse-tensor-loop-ordering-infrastructure
gmalasan Sep 27, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 17 additions & 2 deletions mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,16 @@ enum class SparseEmitStrategy {
kDebugInterface, // generate only place-holder for sparse iteration
};

namespace sparse_tensor {

/// Defines a strategy for loop ordering during sparse code generation.
/// Additional strategies can be added here as new enumerators; keep the
/// Passes.td `loop-ordering-strategy` option in sync with this list.
enum class LoopOrderingStrategy : unsigned {
  kDefault, ///< Default strategy (eagerly selects last loop in topological
            ///< sort).
};

} // namespace sparse_tensor

#define GEN_PASS_DECL
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h.inc"

Expand All @@ -71,11 +81,16 @@ std::unique_ptr<Pass> createSparseAssembler(bool directOut);
// The SparseReinterpretMap pass.
//===----------------------------------------------------------------------===//

void populateSparseReinterpretMap(RewritePatternSet &patterns,
ReinterpretMapScope scope);
void populateSparseReinterpretMap(
RewritePatternSet &patterns, ReinterpretMapScope scope,
sparse_tensor::LoopOrderingStrategy strategy =
sparse_tensor::LoopOrderingStrategy::kDefault);

std::unique_ptr<Pass> createSparseReinterpretMapPass();
std::unique_ptr<Pass> createSparseReinterpretMapPass(ReinterpretMapScope scope);
std::unique_ptr<Pass>
createSparseReinterpretMapPass(ReinterpretMapScope scope,
sparse_tensor::LoopOrderingStrategy strategy);

//===----------------------------------------------------------------------===//
// The PreSparsificationRewriting pass.
Expand Down
5 changes: 5 additions & 0 deletions mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,11 @@ def SparseReinterpretMap : Pass<"sparse-reinterpret-map", "ModuleOp"> {
clEnumValN(mlir::ReinterpretMapScope::kExceptGeneric,
"except-generic",
"Run on operations except linalg.generic (e.g., foreach)"))}]>,
Option<"loopOrderingStrategy", "loop-ordering-strategy", "mlir::sparse_tensor::LoopOrderingStrategy",
"mlir::sparse_tensor::LoopOrderingStrategy::kDefault",
"Set the loop ordering strategy for sparse code generation", [{llvm::cl::values(
clEnumValN(mlir::sparse_tensor::LoopOrderingStrategy::kDefault, "default",
"Default strategy (eagerly selects last loop in topological sort)"))}]>,
];
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -59,15 +59,15 @@ struct DemapInsRewriter : public OpRewritePattern<SourceOp> {

// Flattens an affine expression into a list of AffineDimExprs.
struct AffineDimCollector : public AffineExprVisitor<AffineDimCollector> {
explicit AffineDimCollector(unsigned dimNum) : dims(dimNum){};
explicit AffineDimCollector(unsigned dimNum) : dims(dimNum) {};
void visitDimExpr(AffineDimExpr expr) { dims.set(expr.getPosition()); }
BitVector dims;
};

// Flattens an affine expression into a list of AffineDimExprs.
struct AffineExprAdmissibleVisitor
: public AffineExprVisitor<AffineExprAdmissibleVisitor> {
explicit AffineExprAdmissibleVisitor(bool isOutput) : isOutput(isOutput){};
explicit AffineExprAdmissibleVisitor(bool isOutput) : isOutput(isOutput) {};

// We only allow AffineDimExpr on output.
void visitAddExpr(AffineBinaryOpExpr expr) {
Expand Down Expand Up @@ -407,7 +407,10 @@ struct GenericOpReinterpretMap
};

struct GenericOpScheduler : public OpRewritePattern<linalg::GenericOp> {
using OpRewritePattern::OpRewritePattern;
GenericOpScheduler(MLIRContext *context,
sparse_tensor::LoopOrderingStrategy strategy)
: OpRewritePattern<linalg::GenericOp>(context), strategy(strategy) {}

LogicalResult matchAndRewrite(linalg::GenericOp linalgOp,
PatternRewriter &rewriter) const override {
if (linalgOp.getNumDpsInits() != 1 || !linalgOp.hasPureTensorSemantics() ||
Expand All @@ -420,7 +423,8 @@ struct GenericOpScheduler : public OpRewritePattern<linalg::GenericOp> {
if (linalgOp->hasAttr(sorted))
return failure();

auto scheduler = IterationGraphSorter::fromGenericOp(linalgOp);
// Pass strategy to IterationGraphSorter.
auto scheduler = IterationGraphSorter::fromGenericOp(linalgOp, strategy);
bool isAdmissible = false;
AffineMap order;
// A const list of all masks that we used for iteration graph
Expand Down Expand Up @@ -582,6 +586,9 @@ struct GenericOpScheduler : public OpRewritePattern<linalg::GenericOp> {
// TODO: convert more than one?
return failure();
}

private:
sparse_tensor::LoopOrderingStrategy strategy;
};

//===----------------------------------------------------------------------===//
Expand Down Expand Up @@ -786,12 +793,13 @@ struct ForeachOpDemapper

} // namespace

void mlir::populateSparseReinterpretMap(RewritePatternSet &patterns,
ReinterpretMapScope scope) {
void mlir::populateSparseReinterpretMap(
RewritePatternSet &patterns, ReinterpretMapScope scope,
sparse_tensor::LoopOrderingStrategy strategy) {
if (scope == ReinterpretMapScope::kAll ||
scope == ReinterpretMapScope::kGenericOnly) {
patterns.add<GenericOpReinterpretMap, GenericOpScheduler>(
patterns.getContext());
patterns.add<GenericOpReinterpretMap>(patterns.getContext());
patterns.add<GenericOpScheduler>(patterns.getContext(), strategy);
}
if (scope == ReinterpretMapScope::kAll ||
scope == ReinterpretMapScope::kExceptGeneric) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -67,12 +67,13 @@ struct SparseReinterpretMap
SparseReinterpretMap(const SparseReinterpretMap &pass) = default;
SparseReinterpretMap(const SparseReinterpretMapOptions &options) {
scope = options.scope;
loopOrderingStrategy = options.loopOrderingStrategy;
}

void runOnOperation() override {
auto *ctx = &getContext();
RewritePatternSet patterns(ctx);
populateSparseReinterpretMap(patterns, scope);
populateSparseReinterpretMap(patterns, scope, loopOrderingStrategy);
(void)applyPatternsGreedily(getOperation(), std::move(patterns));
}
};
Expand Down Expand Up @@ -438,6 +439,14 @@ mlir::createSparseReinterpretMapPass(ReinterpretMapScope scope) {
return std::make_unique<SparseReinterpretMap>(options);
}

/// Creates a SparseReinterpretMap pass restricted to the given scope and
/// configured with an explicit loop-ordering strategy.
std::unique_ptr<Pass> mlir::createSparseReinterpretMapPass(
    ReinterpretMapScope scope, sparse_tensor::LoopOrderingStrategy strategy) {
  SparseReinterpretMapOptions opts;
  opts.scope = scope;
  opts.loopOrderingStrategy = strategy;
  return std::make_unique<SparseReinterpretMap>(opts);
}

/// Creates a PreSparsificationRewrite pass with its default configuration.
std::unique_ptr<Pass> mlir::createPreSparsificationRewritePass() {
  auto pass = std::make_unique<PreSparsificationRewritePass>();
  return pass;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,15 @@ AffineMap IterationGraphSorter::topoSort() {
// We always prefer a parallel loop over a reduction loop because putting
// a reduction loop early might make the loop sequence inadmissible.
auto &it = !parIt.empty() ? parIt : redIt;
auto src = it.back();

// Select loop based on strategy.
unsigned src;
switch (strategy) {
case sparse_tensor::LoopOrderingStrategy::kDefault:
src = it.back();
break;
}

loopOrder.push_back(src);
it.pop_back();
// Update in-degree, and push 0-degree node into worklist.
Expand All @@ -122,8 +130,8 @@ AffineMap IterationGraphSorter::topoSort() {
return AffineMap();
}

IterationGraphSorter
IterationGraphSorter::fromGenericOp(linalg::GenericOp genericOp) {
IterationGraphSorter IterationGraphSorter::fromGenericOp(
linalg::GenericOp genericOp, sparse_tensor::LoopOrderingStrategy strategy) {
// Must be a demapped sparse kernel.
assert(!hasAnyNonIdentityOperandsOrResults(genericOp) &&
hasAnySparseOperandOrResult(genericOp) &&
Expand All @@ -140,14 +148,16 @@ IterationGraphSorter::fromGenericOp(linalg::GenericOp genericOp) {
genericOp.getIteratorTypesArray();

return IterationGraphSorter(std::move(ins), std::move(loopMap), out, outMap,
std::move(iterTypes));
std::move(iterTypes), strategy);
}

IterationGraphSorter::IterationGraphSorter(
SmallVector<Value> &&ins, SmallVector<AffineMap> &&loop2InsLvl, Value out,
AffineMap loop2OutLvl, SmallVector<utils::IteratorType> &&iterTypes)
AffineMap loop2OutLvl, SmallVector<utils::IteratorType> &&iterTypes,
sparse_tensor::LoopOrderingStrategy strategy)
: ins(std::move(ins)), loop2InsLvl(std::move(loop2InsLvl)), out(out),
loop2OutLvl(loop2OutLvl), iterTypes(std::move(iterTypes)) {
loop2OutLvl(loop2OutLvl), iterTypes(std::move(iterTypes)),
strategy(strategy) {
// One map per tensor.
assert(loop2InsLvl.size() == ins.size());
// All the affine maps have the same number of dimensions (loops).
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_ITERATIONGRAPHSORTER_H_
#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_ITERATIONGRAPHSORTER_H_

#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/IR/AffineMap.h"

namespace mlir {
Expand Down Expand Up @@ -41,9 +42,12 @@ enum class SortMask : unsigned {

class IterationGraphSorter {
public:
/// Factory method that construct an iteration graph sorter
/// for the given linalg.generic operation.
static IterationGraphSorter fromGenericOp(linalg::GenericOp genericOp);
/// Factory method that constructs an iteration graph sorter
/// for the given linalg.generic operation with a specific loop ordering
/// strategy.
static IterationGraphSorter
fromGenericOp(linalg::GenericOp genericOp,
sparse_tensor::LoopOrderingStrategy strategy);

/// Returns a permutation that represents the scheduled loop order.
/// Note that the returned AffineMap could be null if the kernel
Expand All @@ -58,7 +62,9 @@ class IterationGraphSorter {
IterationGraphSorter(SmallVector<Value> &&ins,
SmallVector<AffineMap> &&loop2InsLvl, Value out,
AffineMap loop2OutLvl,
SmallVector<utils::IteratorType> &&iterTypes);
SmallVector<utils::IteratorType> &&iterTypes,
sparse_tensor::LoopOrderingStrategy strategy =
sparse_tensor::LoopOrderingStrategy::kDefault);

// Adds all the constraints in the given loop to level map.
void addConstraints(Value t, AffineMap loop2LvlMap);
Expand All @@ -84,6 +90,9 @@ class IterationGraphSorter {

// InDegree used for topo sort.
std::vector<unsigned> inDegree;

// Loop ordering strategy.
sparse_tensor::LoopOrderingStrategy strategy;
};

} // namespace sparse_tensor
Expand Down