//===- SparseTensorRewriting.cpp - Sparse tensor rewriting rules ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements rewriting rules that are specific to sparse tensors.
//
//===----------------------------------------------------------------------===//

#include "Utils/CodegenUtils.h"
#include "Utils/LoopEmitter.h"

#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Complex/IR/Complex.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensorStorageLayout.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensorType.h"
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Matchers.h"
#include "mlir/Support/LLVM.h"

using namespace mlir;
using namespace mlir::bufferization;
using namespace mlir::linalg;
using namespace mlir::sparse_tensor;

//===---------------------------------------------------------------------===//
// Helper methods for the actual rewriting rules.
//===---------------------------------------------------------------------===//

// Helper method to match any typed zero.
static bool isZeroValue(Value val) {
  return matchPattern(val, m_Zero()) || matchPattern(val, m_AnyZeroFloat());
}

// Helper to detect a sparse tensor type operand.
static bool isSparseTensor(Value v) {
  auto enc = getSparseTensorEncoding(v.getType());
  return enc && !llvm::all_of(enc.getLvlTypes(),
                              [](auto lt) { return lt == LevelFormat::Dense; });
}
static bool isSparseTensor(OpOperand *op) { return isSparseTensor(op->get()); }

// Helper method to find zero/uninitialized tensor materialization.
static bool isMaterializing(OpOperand *op, bool isZero) {
  Value val = op->get();
  // Check allocation, with zero alloc when required.
  if (auto alloc = val.getDefiningOp<AllocTensorOp>()) {
    Value copy = alloc.getCopy();
    if (isZero)
      return copy && isZeroValue(copy);
    return !copy;
  }
  // Check for empty tensor materialization.
  if (auto empty = val.getDefiningOp<tensor::EmptyOp>())
    return !isZero;
  // Last resort for zero alloc: the whole value is zero.
  return isZero && isZeroValue(val);
}

// Helper to detect sampling operation.
static bool isSampling(GenericOp op) {
  auto yieldOp = cast<linalg::YieldOp>(op.getRegion().front().getTerminator());
  if (auto *def = yieldOp.getOperand(0).getDefiningOp()) {
    if (isa<arith::MulFOp>(def) || isa<arith::MulIOp>(def)) {
      // Both scalar input arguments used exactly once.
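      // A sampling kernel multiplies exactly the two scalar block arguments,
      // in either operand order, e.g. (illustrative):
      //   %0 = arith.mulf %s1, %s2 : f64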
      Value s1 = op.getBlock()->getArgument(0);
      Value s2 = op.getBlock()->getArgument(1);
      return (def->getOperand(0) == s1 && def->getOperand(1) == s2) ||
             (def->getOperand(1) == s1 && def->getOperand(0) == s2);
    }
  }
  return false;
}

// Helper to detect chain of multiplications that do not involve x.
static bool isMulChain(Value val, Value x) {
  if (auto arg = dyn_cast<BlockArgument>(val))
    return arg != x;
  if (auto *def = val.getDefiningOp()) {
    if (isa<arith::MulFOp>(def) || isa<arith::MulIOp>(def))
      return isMulChain(def->getOperand(0), x) &&
             isMulChain(def->getOperand(1), x);
  }
  return false;
}

// Helper to detect x = x + <multiplications>.
static bool isSumOfMul(GenericOp op) {
  auto yieldOp = cast<linalg::YieldOp>(op.getRegion().front().getTerminator());
  if (auto *def = yieldOp.getOperand(0).getDefiningOp()) {
    if (isa<arith::AddFOp>(def) || isa<arith::AddIOp>(def)) {
      Value x = op.getBlock()->getArguments().back();
      return (def->getOperand(0) == x && isMulChain(def->getOperand(1), x)) ||
             (def->getOperand(1) == x && isMulChain(def->getOperand(0), x));
    }
  }
  return false;
}

// Helper to detect direct yield of a zero value.
static bool isZeroYield(GenericOp op) {
  auto yieldOp = cast<linalg::YieldOp>(op.getRegion().front().getTerminator());
  if (auto arg = dyn_cast<BlockArgument>(yieldOp.getOperand(0))) {
    if (arg.getOwner()->getParentOp() == op) {
      return isZeroValue(op->getOperand(arg.getArgNumber()));
    }
  }
  return isZeroValue(yieldOp.getOperand(0));
}

/// Populates given sizes array from type (for static sizes) and from
/// the tensor (for dynamic sizes).
static void sizesForTensor(OpBuilder &builder, SmallVectorImpl<Value> &sizes,
                           Location loc, ShapedType stp, Value tensor) {
  for (const auto &d : enumerate(stp.getShape())) {
    Value dim;
    if (d.value() == ShapedType::kDynamic)
      dim = builder.create<tensor::DimOp>(loc, tensor, d.index());
    else
      dim = constantIndex(builder, loc, d.value());
    sizes.push_back(dim);
  }
}

static RankedTensorType getBufferType(const SparseTensorType &stt,
                                      bool needTmpCOO) {
  return needTmpCOO ? stt.getCOOType(/*ordered=*/false)
                    : stt.getRankedTensorType();
}

/// Collects the dynamic dimension sizes for `tp` with the assumption that
/// `sizes` are the dimension sizes for the type. Stores the dynamic dimension
/// sizes to dynSizes.
static void getDynamicSizes(RankedTensorType tp, ValueRange sizes,
                            SmallVectorImpl<Value> &dynSizes) {
  for (const auto &d : enumerate(tp.getShape())) {
    if (d.value() == ShapedType::kDynamic)
      dynSizes.push_back(sizes[d.index()]);
  }
}

static LogicalResult genForeachOnSparseConstant(ForeachOp op,
                                                RewriterBase &rewriter,
                                                SparseElementsAttr attr) {
  auto loc = op.getLoc();
  SmallVector<Value> reduc = op.getInitArgs();

  // Foreach on constant.
  foreachInSparseConstant(
      rewriter, loc, attr, op.getOrder().value_or(AffineMap()),
      [&reduc, &rewriter, op](ArrayRef<Value> cvs, Value v) mutable {
        SmallVector<Value> args;
        args.append(cvs.begin(), cvs.end());
        args.push_back(v);
        args.append(reduc);
        // Clones the foreach op to get a copy of the loop body.
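        // Each stored element of the constant gets a fresh clone, so the
        // original loop body can be inlined once per entry.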
        auto cloned = cast<ForeachOp>(rewriter.clone(*op.getOperation()));
        assert(args.size() == cloned.getBody()->getNumArguments());
        Operation *yield = cloned.getBody()->getTerminator();
        rewriter.inlineBlockBefore(cloned.getBody(), op, args);
        // clean up
        rewriter.eraseOp(cloned);
        reduc = yield->getOperands();
        rewriter.eraseOp(yield);
      });

  rewriter.replaceOp(op, reduc);
  return success();
}

/// Populates the given sizes array for concatenation from types (for static
/// sizes) and from the source tensors (for dynamic sizes).
static void concatSizesFromInputs(OpBuilder &builder,
                                  SmallVectorImpl<Value> &sizes, Location loc,
                                  ShapedType dstTp, ValueRange srcs,
                                  unsigned dim) {
  auto dstShape = dstTp.getShape();
  sizesFromSrc(builder, sizes, loc, srcs[0]);

  // Sum up on the `dim` if the dimension is dynamic.
  if (dstShape[dim] != ShapedType::kDynamic) {
    // Faithfully take the static size.
    sizes[dim] = constantIndex(builder, loc, dstShape[dim]);
  } else {
    // Else, compute the shape dynamically.
    for (const auto &src : srcs.drop_front()) {
      Value srcSz = linalg::createOrFoldDimOp(builder, loc, src, dim);
      // Sum up all the sizes.
      sizes[dim] = builder.create<arith::AddIOp>(loc, sizes[dim], srcSz);
    }
  }
}

//===---------------------------------------------------------------------===//
// The actual sparse tensor rewriting rules.
//===---------------------------------------------------------------------===//

namespace {

/// Rewriting rule that converts direct yield of zero with initial allocation.
struct FoldInvariantYield : public OpRewritePattern<GenericOp> {
public:
  using OpRewritePattern<GenericOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(GenericOp op,
                                PatternRewriter &rewriter) const override {
    if (!op.hasPureTensorSemantics() || op.getNumResults() != 1 ||
        !isMaterializing(op.getDpsInitOperand(0), /*isZero=*/false) ||
        !isZeroYield(op) || !op.getDpsInitOperand(0)->get().hasOneUse())
      return failure();
    auto outputType = getRankedTensorType(op.getResult(0));
    // Yielding zero on newly materialized sparse tensor can be
    // optimized directly (regardless of dynamic or static size).
    if (getSparseTensorEncoding(outputType)) {
      rewriter.replaceOp(op, op.getDpsInitOperand(0)->get());
      return success();
    }
    // Use static zero value directly instead of materialization.
    if (!outputType.hasStaticShape())
      return failure();
    Operation *def = op.getDpsInitOperand(0)->get().getDefiningOp();
    rewriter.replaceOp(op, constantZero(rewriter, op.getLoc(), outputType));
    rewriter.eraseOp(def);
    return success();
  }
};

/// Rewriting rule that converts two kernels:
///
///      T(i,j) = SUM(k, A(i,j,k) * B(i,j,k) * ... )
///      X(i,j) = S(i,j) * T(i,j)
///
/// into a single kernel, using distributive law:
///
///      X(i,j) = SUM(k, S(i,j) * A(i,j,k) * B(i,j,k) * ... )
///
/// This kind of fusion (merging two ops into one but using arithmetic
/// equalities that may not hold for floating-point computations) would
/// be undesirable in the dense case, since we distribute the multiplication
/// into the reduction loop. However, for sparse sampling tensor S, such
/// a fusion may actually reduce the asymptotic complexity of the kernel,
/// since intermediate results may be nullified.
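///
/// A well-known instance is sampled dense-dense matrix multiplication
/// (SDDMM), where the sparse sampling matrix S restricts evaluation of the
/// dense matrix product to the stored positions of S.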
struct FuseSparseMultiplyOverAdd : public OpRewritePattern<GenericOp> {
public:
  using OpRewritePattern<GenericOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(GenericOp op,
                                PatternRewriter &rewriter) const override {
    // Check consumer.
    if (!op.hasPureTensorSemantics() || op.getNumDpsInputs() != 2 ||
        op.getNumResults() != 1 ||
        op.getNumParallelLoops() != op.getNumLoops() ||
        !op.getMatchingIndexingMap(op.getDpsInitOperand(0)).isIdentity() ||
        !op.getMatchingIndexingMap(op.getDpsInputOperand(0)).isIdentity() ||
        !op.getMatchingIndexingMap(op.getDpsInputOperand(1)).isIdentity())
      return failure();
    // Find consuming OP2(sparse, other) or OP2(other, sparse). The other
    // operand can be sparse or dense, since the point of this rewriting rule
    // is detecting a situation in which *more* sparsity is introduced into
    // a computation, be it already sparse or still dense.
    unsigned other = 0;
    if (isSparseTensor(op.getDpsInputOperand(0)))
      other = 1;
    else if (!isSparseTensor(op.getDpsInputOperand(1)))
      return failure();
    // Check producer.
    auto prod = dyn_cast_or_null<GenericOp>(
        op.getDpsInputOperand(other)->get().getDefiningOp());
    if (!prod || !prod.hasPureTensorSemantics() || prod.getNumResults() != 1 ||
        !prod.getResult(0).hasOneUse())
      return failure();
    // Sampling consumer and sum of multiplication chain producer.
    if (!isMaterializing(op.getDpsInitOperand(0), /*isZero=*/false) ||
        !isMaterializing(prod.getDpsInitOperand(0), /*isZero=*/true) ||
        !isSampling(op) || !isSumOfMul(prod))
      return failure();
    // Modify operand structure of producer and consumer.
    Location loc = prod.getLoc();
    SmallVector<Value> inputOps = prod.getInputs();
    SmallVector<Value> outputOps = op.getOutputs();
    SmallVector<AffineMap> fusedIndexMaps = prod.getIndexingMapsArray();
    inputOps.push_back(op.getDpsInputOperand(1 - other)->get());
    fusedIndexMaps.push_back(fusedIndexMaps.back()); // mimic other
    // Fuse producer and consumer into a new generic op.
    auto fusedOp = rewriter.create<GenericOp>(
        loc, op.getResult(0).getType(), inputOps, outputOps,
        rewriter.getAffineMapArrayAttr(fusedIndexMaps), prod.getIteratorTypes(),
        /*doc=*/nullptr, /*library_call=*/nullptr);
    Block &prodBlock = prod.getRegion().front();
    Block &consBlock = op.getRegion().front();
    IRMapping mapper;
    Block *fusedBlock = rewriter.createBlock(&fusedOp.getRegion());
    unsigned num = prodBlock.getNumArguments();
    for (unsigned i = 0; i < num - 1; i++)
      addArg(mapper, fusedBlock, prodBlock.getArgument(i));
    addArg(mapper, fusedBlock, consBlock.getArgument(1 - other));
    addArg(mapper, fusedBlock, prodBlock.getArgument(num - 1));
    // Clone bodies of the producer and consumer in new evaluation order.
    auto *acc = prodBlock.getTerminator()->getOperand(0).getDefiningOp();
    auto *sampler = consBlock.getTerminator()->getOperand(0).getDefiningOp();
    Value last;
    for (auto &op : prodBlock.without_terminator())
      if (&op != acc) {
        last = op.getResult(0);
        rewriter.clone(op, mapper);
      }
    mapper.map(consBlock.getArgument(other), fusedBlock->back().getResult(0));
    mapper.map(last, rewriter.clone(*sampler, mapper)->getResult(0));
    last = rewriter.clone(*acc, mapper)->getResult(0);
    rewriter.create<linalg::YieldOp>(loc, last);
    // Force initial value on merged allocation for dense outputs.
    // TODO: deal with non alloc tensor here one day
    if (!getSparseTensorEncoding(op.getResult(0).getType())) {
      Value init = prod.getDpsInitOperand(0)
                       ->get()
                       .getDefiningOp<AllocTensorOp>()
                       .getCopy();
      AllocTensorOp a =
          op.getDpsInitOperand(0)->get().getDefiningOp<AllocTensorOp>();
      rewriter.modifyOpInPlace(a, [&]() { a.getCopyMutable().assign(init); });
    }
    // Replace consumer with fused operation. Old producer
    // and consumer ops will be removed by DCE.
    rewriter.replaceOp(op, fusedOp->getResults());
    return success();
  }

private:
  // Helper to add argument and record the mapping.
  static void addArg(IRMapping &mapper, Block *b, BlockArgument a) {
    mapper.map(a, b->addArgument(a.getType(), a.getLoc()));
  }
};

// Fuse a tensor cast into producing operation. Note that a tensor.cast
// should really not be used to convert between sparse encodings. Since
// the pattern currently appears as a result of some prior rewriting,
// we make an attempt to repair very obvious cases.
// TODO: audit the pure tensor dialect rewriting rules
struct FuseTensorCast : public OpRewritePattern<tensor::CastOp> {
public:
  using OpRewritePattern<tensor::CastOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(tensor::CastOp op,
                                PatternRewriter &rewriter) const override {
    Type srcType = op.getSource().getType();
    Type dstType = op.getDest().getType();
    // A nop cast simply folds away.
    if (srcType == dstType) {
      rewriter.replaceOp(op, op->getResults());
      return success();
    }
    // See if a sparsity changing cast can be fused into producer.
    if (tensor::isSameTypeWithoutEncoding(srcType, dstType)) {
      if (Operation *def = op.getSource().getDefiningOp()) {
        if (def->hasOneUse() && isa<tensor::ExtractSliceOp>(def)) {
          rewriter.modifyOpInPlace(def, [&]() {
            def->getResult(0).setType(op->getResultTypes()[0]);
          });
          rewriter.replaceOp(op, def->getResult(0));
          return success();
        }
      }
    }
    // Repair tensor casts with at least one sparse operand into the
    // properly supported sparse_tensor.convert.
    if (getSparseTensorEncoding(srcType) || getSparseTensorEncoding(dstType)) {
      rewriter.replaceOpWithNewOp<ConvertOp>(op, dstType, op.getSource());
      return success();
    }
    // Fail otherwise.
    return failure();
  }
};

/// Rewrites a sequence of operations for sparse tensor selections into
/// semi-ring operations such that they can be compiled correctly by the
/// sparsifier. E.g., transforming the following sequence
///
///   %sel = arith.select %cond, %sp1, %sp2
///
/// to
///
///   %sel = binary %sp1, %sp2:
///            both  (%l, %r) {yield select %cond, %l, %r}
///            left  (%l)     {yield select %cond, %l, 0}
///            right (%r)     {yield select %cond, 0, %r}
///
/// TODO: We require the tensor used for extracting conditions to be dense
/// to sparsify the code. To support a sparse condition tensor, we need a
/// tri-nary operation.
struct GenSemiRingSelect : public OpRewritePattern<GenericOp> {
public:
  using OpRewritePattern<GenericOp>::OpRewritePattern;
  LogicalResult matchAndRewrite(GenericOp op,
                                PatternRewriter &rewriter) const override {
    // Reject non-sparse kernels.
    if (!op.hasPureTensorSemantics() || !hasAnySparseOperand(op))
      return failure();

    Location loc = op.getLoc();
    SmallVector<std::pair<Operation *, sparse_tensor::BinaryOp>> semiRings;
    for (Operation &inst : *op.getBody()) {
      // Matches pattern.
      auto matched = isRewritablePattern(op, &inst);
      if (!matched.has_value())
        continue;

      rewriter.setInsertionPoint(&inst);
      auto [c, t, f] = matched.value();
      assert(t.getType() == f.getType());
      auto selTp = t.getType();
      auto c0 = constantZero(rewriter, loc, selTp);
      auto binOp = rewriter.create<sparse_tensor::BinaryOp>(loc, selTp, t, f);
      // Initializes all the blocks.
      rewriter.createBlock(&binOp.getOverlapRegion(), {}, {selTp, selTp},
                           {t.getLoc(), f.getLoc()});
      rewriter.createBlock(&binOp.getRightRegion(), {}, selTp, f.getLoc());
      rewriter.createBlock(&binOp.getLeftRegion(), {}, selTp, t.getLoc());

      for (auto *r : binOp.getRegions()) {
        Block *b = &r->front();
        rewriter.setInsertionPointToStart(b);

        IRMapping irMap;
        // Clones the cmp operations into the region to make the binary op
        // admissible.
        Value newC = c;
        if (auto *def = c.getDefiningOp())
          newC = rewriter.clone(*def, irMap)->getResult(0);

        irMap.map(c, newC);
        if (r == &binOp.getLeftRegion()) {
          irMap.map(t, b->getArgument(0));
          irMap.map(f, c0);
        } else if (r == &binOp.getRightRegion()) {
          irMap.map(t, c0);
          irMap.map(f, b->getArgument(0));
        } else {
          irMap.map(t, b->getArgument(0));
          irMap.map(f, b->getArgument(1));
        }
        auto y = rewriter.clone(inst, irMap)->getResult(0);
        rewriter.create<sparse_tensor::YieldOp>(loc, y);
      }

      // We successfully rewrote an operation. We cannot do the replacement
      // here because it would invalidate the iterator used by the current
      // loop to traverse the instructions.
      semiRings.emplace_back(&inst, binOp);
    }

    // Finalizes the replacement.
    for (auto [sel, semi] : semiRings)
      rewriter.replaceOp(sel, semi->getResults());

    return success(!semiRings.empty());
  }

private:
  static std::optional<std::tuple<Value, BlockArgument, BlockArgument>>
  isRewritablePattern(GenericOp op, Operation *v) {
    auto sel = dyn_cast<arith::SelectOp>(v);
    if (!sel)
      return std::nullopt;

    auto tVal = sel.getTrueValue().dyn_cast<BlockArgument>();
    auto fVal = sel.getFalseValue().dyn_cast<BlockArgument>();
    // TODO: For simplicity, we only handle cases where both true/false values
    // are directly loaded from the input tensor. We can probably admit more
    // cases in theory.
    if (!tVal || !fVal)
      return std::nullopt;

    // Helper lambda to determine whether the value is loaded from a dense
    // input or is a loop invariant.
    auto isValFromDenseInputOrInvariant = [&op](Value v) -> bool {
      if (auto bArg = v.dyn_cast<BlockArgument>();
          bArg && !isSparseTensor(op.getDpsInputOperand(bArg.getArgNumber())))
        return true;
      // If the value is defined outside the loop, it is a loop invariant.
      return v.getDefiningOp() && v.getDefiningOp()->getBlock() != op.getBody();
    };

    // If the condition value is loaded directly from a dense tensor or is a
    // loop invariant, we can sparsify the kernel.
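    // E.g. (illustrative), with %a loaded from a dense input:
    //   %cond = arith.cmpf ugt, %a, %c0 : f64
    //   %sel  = arith.select %cond, %t, %f : f64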
    auto cond = sel.getCondition();
    if (isValFromDenseInputOrInvariant(cond))
      return std::make_tuple(cond, tVal, fVal);

    Value cmpL, cmpR;
    if (matchPattern(cond, m_Op<arith::CmpIOp>(matchers::m_Any(&cmpL),
                                               matchers::m_Any(&cmpR))) ||
        matchPattern(cond, m_Op<arith::CmpFOp>(matchers::m_Any(&cmpL),
                                               matchers::m_Any(&cmpR)))) {
      // TODO: we can do it recursively to check whether all the leaf values are
      // loaded from dense tensors or are loop invariants.
      if (isValFromDenseInputOrInvariant(cmpL) ||
          isValFromDenseInputOrInvariant(cmpR))
        return std::make_tuple(cond, tVal, fVal);
    }

    return std::nullopt;
  };
};

/// Rewrites a sparse reduction that would not sparsify directly since
/// doing so would only iterate over the stored elements, ignoring the
/// implicit zeros, into a semi-ring. Applies to all prod/and/min/max
/// (note that reductions like add/sub/or/xor can directly be sparsified
/// since the implicit zeros do not contribute to the final result).
/// Note that prod/and are still included since, even though they often
/// are nullified in sparse data, they may still occur for special
/// situations in which e.g. some rows in a sparse matrix are fully
/// dense. For min/max, including the implicit zeros is a much more
/// common situation.
///
/// TODO: this essentially "densifies" the operation; we want to implement
///       this much more efficiently by performing the reduction over the
///       stored values, and feed in the zero once if there were *any*
///       implicit zeros as well; but for now, at least we provide
///       the functionality.
///
struct GenSemiRingReduction : public OpRewritePattern<GenericOp> {
public:
  using OpRewritePattern<GenericOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(GenericOp op,
                                PatternRewriter &rewriter) const override {
    // Reject non-reductions.
    if (!op.hasPureTensorSemantics() || op.getNumDpsInputs() != 1 ||
        op.getNumReductionLoops() == 0 || op.getNumResults() != 1)
      return failure();
    auto *inp = op.getDpsInputOperand(0);
    auto *init = op.getDpsInitOperand(0);
    if (!isSparseTensor(inp))
      return failure();
    // Look for direct x = x OP y for semi-ring ready reductions.
    auto *red = cast<linalg::YieldOp>(op.getRegion().front().getTerminator())
                    .getOperand(0)
                    .getDefiningOp();
    if (!isa<arith::AndIOp, arith::MulIOp, arith::MulFOp, arith::MinimumFOp,
             arith::MinSIOp, arith::MinUIOp, arith::MaximumFOp, arith::MaxSIOp,
             arith::MaxUIOp>(red))
      return failure();
    Value s0 = op.getBlock()->getArgument(0);
    Value s1 = op.getBlock()->getArgument(1);
    if ((red->getOperand(0) != s0 || red->getOperand(1) != s1) &&
        (red->getOperand(0) != s1 || red->getOperand(1) != s0))
      return failure();
    // Identity.
    Location loc = op.getLoc();
    Value identity =
        rewriter.create<tensor::ExtractOp>(loc, init->get(), ValueRange());
    // Unary {
    //    present -> value
    //    absent  -> zero.
    // }
    Type rtp = s0.getType();
    rewriter.setInsertionPointToStart(&op.getRegion().front());
    auto semiring = rewriter.create<sparse_tensor::UnaryOp>(loc, rtp, s0);
    Block *present =
        rewriter.createBlock(&semiring.getPresentRegion(), {}, rtp, loc);
    rewriter.setInsertionPointToStart(&semiring.getPresentRegion().front());
    rewriter.create<sparse_tensor::YieldOp>(loc, present->getArgument(0));
    rewriter.createBlock(&semiring.getAbsentRegion(), {}, {}, {});
    rewriter.setInsertionPointToStart(&semiring.getAbsentRegion().front());
    auto zero =
        rewriter.create<arith::ConstantOp>(loc, rewriter.getZeroAttr(rtp));
    rewriter.create<sparse_tensor::YieldOp>(loc, zero);
    rewriter.setInsertionPointAfter(semiring);
    // CustomReduce {
    //    x = x REDUC y, identity
    // }
    auto custom = rewriter.create<sparse_tensor::ReduceOp>(
        loc, rtp, semiring.getResult(), s1, identity);
    Block *region =
        rewriter.createBlock(&custom.getRegion(), {}, {rtp, rtp}, {loc, loc});
    rewriter.setInsertionPointToStart(&custom.getRegion().front());
    IRMapping irMap;
    irMap.map(red->getOperand(0), region->getArgument(0));
    irMap.map(red->getOperand(1), region->getArgument(1));
    auto *cloned = rewriter.clone(*red, irMap);
    rewriter.create<sparse_tensor::YieldOp>(loc, cloned->getResult(0));
    rewriter.setInsertionPointAfter(custom);
    rewriter.replaceOp(red, custom.getResult());
    return success();
  }
};

/// Sparse rewriting rule for the print operator. This operation is mainly
/// used for debugging and testing. As such, it lowers to the vector.print
/// operation, which requires only very lightweight runtime support.
struct PrintRewriter : public OpRewritePattern<PrintOp> {
public:
  using OpRewritePattern::OpRewritePattern;
  LogicalResult matchAndRewrite(PrintOp op,
                                PatternRewriter &rewriter) const override {
    Location loc = op.getLoc();
    auto tensor = op.getTensor();
    auto stt = getSparseTensorType(tensor);
    // Header with NSE.
    auto nse = rewriter.create<NumberOfEntriesOp>(loc, tensor);
    rewriter.create<vector::PrintOp>(
        loc, rewriter.getStringAttr("---- Sparse Tensor ----\nnse = "));
    rewriter.create<vector::PrintOp>(loc, nse);
    // Print run-time contents for dim/lvl sizes.
    rewriter.create<vector::PrintOp>(loc, rewriter.getStringAttr("dim = "));
    printSizes(rewriter, loc, tensor, stt.getDimRank(), /*isDim=*/true);
    rewriter.create<vector::PrintOp>(loc, rewriter.getStringAttr("lvl = "));
    printSizes(rewriter, loc, tensor, stt.getLvlRank(), /*isDim=*/false);
    // Use the "codegen" foreach loop construct to iterate over
    // all typical sparse tensor components for printing.
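    // For a CSR matrix this prints, e.g. (illustrative):
    //   ---- Sparse Tensor ----
    //   nse = 3
    //   dim = ( 4, 4 )
    //   lvl = ( 4, 4 )
    //   pos[1] : ( 0, 2, 3, 3, 3 )
    //   crd[1] : ( 0, 3, 1 )
    //   values : ( 1, 2, 3 )
    //   ----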
    foreachFieldAndTypeInSparseTensor(stt, [&rewriter, &loc, &tensor,
                                            &stt](Type, FieldIndex,
                                                  SparseTensorFieldKind kind,
                                                  Level l, LevelType) {
      switch (kind) {
      case SparseTensorFieldKind::StorageSpec: {
        break;
      }
      case SparseTensorFieldKind::PosMemRef: {
        auto lvl = constantIndex(rewriter, loc, l);
        rewriter.create<vector::PrintOp>(loc, rewriter.getStringAttr("pos["));
        rewriter.create<vector::PrintOp>(
            loc, lvl, vector::PrintPunctuation::NoPunctuation);
        rewriter.create<vector::PrintOp>(loc, rewriter.getStringAttr("] : "));
        auto pos = rewriter.create<ToPositionsOp>(loc, tensor, l);
        printContents(rewriter, loc, pos);
        break;
      }
      case SparseTensorFieldKind::CrdMemRef: {
        auto lvl = constantIndex(rewriter, loc, l);
        rewriter.create<vector::PrintOp>(loc, rewriter.getStringAttr("crd["));
        rewriter.create<vector::PrintOp>(
            loc, lvl, vector::PrintPunctuation::NoPunctuation);
        rewriter.create<vector::PrintOp>(loc, rewriter.getStringAttr("] : "));
        Value crd = nullptr;
        // TODO: eliminate ToCoordinatesBufferOp!
        if (stt.getAoSCOOStart() == l)
          crd = rewriter.create<ToCoordinatesBufferOp>(loc, tensor);
        else
          crd = rewriter.create<ToCoordinatesOp>(loc, tensor, l);
        printContents(rewriter, loc, crd);
        break;
      }
      case SparseTensorFieldKind::ValMemRef: {
        rewriter.create<vector::PrintOp>(loc,
                                         rewriter.getStringAttr("values : "));
        auto val = rewriter.create<ToValuesOp>(loc, tensor);
        printContents(rewriter, loc, val);
        break;
      }
      }
      return true;
    });
    rewriter.create<vector::PrintOp>(loc, rewriter.getStringAttr("----\n"));
    rewriter.eraseOp(op);
    return success();
  }

private:
  // Helper to print contents of a single memref. Note that for the "push_back"
  // vectors, this prints the full capacity, not just the size. This is done
  // on purpose, so that clients see how much storage has been allocated in
  // total. Contents of the extra capacity in the buffer may be uninitialized
  // (unless the flag enable-buffer-initialization is set to true).
  //
  // Generates code to print:
  //    ( a0, a1, ... )
  static void printContents(PatternRewriter &rewriter, Location loc,
                            Value vec) {
    // Open bracket.
    rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::Open);
    // For loop over elements.
    auto zero = constantIndex(rewriter, loc, 0);
    auto size = rewriter.create<memref::DimOp>(loc, vec, zero);
    auto step = constantIndex(rewriter, loc, 1);
    auto forOp = rewriter.create<scf::ForOp>(loc, zero, size, step);
    rewriter.setInsertionPointToStart(forOp.getBody());
    auto idx = forOp.getInductionVar();
    auto val = rewriter.create<memref::LoadOp>(loc, vec, idx);
    if (llvm::isa<ComplexType>(val.getType())) {
      // Since the vector dialect does not support complex types in any op,
      // we split those into (real, imag) pairs here.
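      // E.g., the complex value 1.5 + 2.5i is printed as ( 1.5, 2.5 ).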
      Value real = rewriter.create<complex::ReOp>(loc, val);
      Value imag = rewriter.create<complex::ImOp>(loc, val);
      rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::Open);
      rewriter.create<vector::PrintOp>(loc, real,
                                       vector::PrintPunctuation::Comma);
      rewriter.create<vector::PrintOp>(loc, imag,
                                       vector::PrintPunctuation::Close);
      rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::Comma);
    } else {
      rewriter.create<vector::PrintOp>(loc, val,
                                       vector::PrintPunctuation::Comma);
    }
    rewriter.setInsertionPointAfter(forOp);
    // Close bracket and end of line.
    rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::Close);
    rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::NewLine);
  }

  // Helper method to print run-time lvl/dim sizes.
  static void printSizes(PatternRewriter &rewriter, Location loc, Value tensor,
                         unsigned size, bool isDim) {
    // Open bracket.
    rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::Open);
    // Print unrolled contents (dimop requires constant value).
    for (unsigned i = 0; i < size; i++) {
      auto idx = constantIndex(rewriter, loc, i);
      Value val;
      if (isDim)
        val = rewriter.create<tensor::DimOp>(loc, tensor, idx);
      else
        val = rewriter.create<LvlOp>(loc, tensor, idx);
      rewriter.create<vector::PrintOp>(
          loc, val,
          i != size - 1 ? vector::PrintPunctuation::Comma
                        : vector::PrintPunctuation::NoPunctuation);
    }
    // Close bracket and end of line.
    rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::Close);
    rewriter.create<vector::PrintOp>(loc, vector::PrintPunctuation::NewLine);
  }
};

/// Sparse rewriting rule for the sparse-to-sparse tensor.reshape operator.
struct TensorReshapeRewriter : public OpRewritePattern<tensor::ReshapeOp> {
public:
  using OpRewritePattern<tensor::ReshapeOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(tensor::ReshapeOp op,
                                PatternRewriter &rewriter) const override {
    Location loc = op.getLoc();
    Value srcTensor = op.getSource();
    const auto srcTp = getSparseTensorType(srcTensor);
    const auto dstTp = getSparseTensorType(op.getResult());

    if (!srcTp.hasEncoding() || !dstTp.hasEncoding() ||
        !dstTp.hasStaticDimShape())
      return failure();

    SmallVector<Value> srcSizes;
    sizesForTensor(rewriter, srcSizes, loc, srcTp, srcTensor);
    SmallVector<Value> dstSizes;
    for (Dimension d : dstTp.getDimShape())
      dstSizes.push_back(constantIndex(rewriter, loc, d));

    Value nnz = rewriter.create<NumberOfEntriesOp>(loc, srcTensor);
    // Only need an unordered COO buffer if input and output are not sorted
    // in the same way.
    Type bufferTp = getBufferType(
        dstTp.withoutDimToLvl(),
        !srcTp.isAllOrdered() || !srcTp.isIdentity() || !dstTp.isIdentity());
    SmallVector<Value> dynSizes;
    Value buffer = rewriter
                       .create<AllocTensorOp>(loc, bufferTp, dynSizes, Value(),
                                              nnz, Attribute())
                       .getResult();

    // Convert src coordinates to dst coordinates by first collapsing them
    // to 1D and then expanding them to match the rank of the destination
    // tensor.
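    // E.g. (illustrative), reshaping 4x6 to 8x3: coordinate (d0, d1) first
    // collapses to the linear index i = d0 * 6 + d1, which then expands to
    // (i floordiv 3, i mod 3).
    //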
    // Implemented as follows:
    //   foreach srcCoords %srcTensor
    //     collapsedCoords = reshapeCvs(srcCoords, [1, ..., srcRank])
    //     expandedCoords = reshapeCvs(collapsedCoords, [1, ..., dstRank])
    //     insert expandedCoords, %buffer
    //
    // followed by an optional
    //   %t = sparse_tensor.cast %tmp
    // depending on whether the input/output are sorted in the same way.
    const auto encSrc = srcTp.getEncoding();
    ForeachOp foreachOp = rewriter.create<ForeachOp>(
        loc, srcTensor, buffer,
        [&](OpBuilder &builder, Location loc, ValueRange srcLcvs, Value v,
            ValueRange reduc) {
          const Dimension srcRank = srcTp.getDimRank();
          SmallVector<Value> srcDcvs;
          srcDcvs.reserve(srcRank);
          for (Dimension d = 0; d < srcRank; d++) {
            Level lvl = toLvl(encSrc, d);
            srcDcvs.push_back(srcLcvs[lvl]);
          }

          Value collapseSize = constantIndex(builder, loc, 1);
          for (Dimension d = 0; d < srcRank; d++)
            collapseSize =
                builder.create<arith::MulIOp>(loc, collapseSize, srcSizes[d]);
          SmallVector<Value, 1> collapsedSizes = {collapseSize};

          ReassociationIndices collapseIdx;
          for (Dimension i = 0; i < srcRank; i++)
            collapseIdx.push_back(i);
          SmallVector<ReassociationIndices, 1> collapseReass = {collapseIdx};
          SmallVector<Value, 1> collapsedDcvs;
          reshapeCvs(builder, loc, collapseReass, srcSizes, srcDcvs,
                     collapsedSizes, collapsedDcvs);

          ReassociationIndices expandIdx;
          for (Dimension i = 0; i < dstTp.getDimRank(); i++)
            expandIdx.push_back(i);
          SmallVector<ReassociationIndices, 1> expandReass = {expandIdx};
          SmallVector<Value> dstDcvs;
          reshapeCvs(builder, loc, expandReass, collapsedSizes, collapsedDcvs,
                     dstSizes, dstDcvs);

          auto t = builder.create<InsertOp>(loc, v, reduc.front(), dstDcvs);
          builder.create<sparse_tensor::YieldOp>(loc, t);
        });

    Value t = rewriter.create<LoadOp>(loc, foreachOp.getResult(0), true);
    if (bufferTp != dstTp) {
      auto dstRTT = dstTp.getRankedTensorType();
      Value converted = rewriter.create<ConvertOp>(loc, dstRTT, t).getResult();
      rewriter.create<DeallocTensorOp>(loc, t);
      t = converted;
    }
    rewriter.replaceOp(op, t);
    return success();
  }
};

/// Sparse rewriting rule for the sparse-to-sparse expand/collapse shape
/// operators.
template <typename ReshapeOp>
struct Sparse2SparseReshapeRewriter : public OpRewritePattern<ReshapeOp> {
public:
  using OpRewritePattern<ReshapeOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(ReshapeOp op,
                                PatternRewriter &rewriter) const override {
    Location loc = op.getLoc();
    Value srcTensor = op.getSrc();
    const auto srcTp = getSparseTensorType(srcTensor);
    const auto dstTp = getSparseTensorType(op.getResult());
    if (!srcTp.hasEncoding() || !dstTp.hasEncoding())
      return failure();

    // Generate code to represent the static dimension constants or compute
    // the dynamic dimension values.
    SmallVector<Value> srcSizes;
    sizesForTensor(rewriter, srcSizes, loc, srcTp, srcTensor);
    SmallVector<Value> dstSizes;
    SmallVector<Value> dstDynSizes;
    if (dstTp.hasStaticDimShape()) {
      for (Dimension d : dstTp.getDimShape())
        dstSizes.push_back(constantIndex(rewriter, loc, d));
    } else {
      ArrayRef<Size> dstShape = dstTp.getDimShape();
      genReshapeDstShape(rewriter, loc, dstSizes, srcSizes, dstShape,
                         op.getReassociationIndices());
      for (auto [idx, shape] : llvm::enumerate(dstShape)) {
        if (shape == ShapedType::kDynamic)
          dstDynSizes.push_back(dstSizes[idx]);
      }
    }
    Value nnz = rewriter.create<NumberOfEntriesOp>(loc, srcTensor);
    // Only need an unordered COO buffer if input and output are not sorted
    // in the same way.
    Type bufferTp = getBufferType(
        dstTp.withoutDimToLvl(),
        !srcTp.isAllOrdered() || !srcTp.isIdentity() || !dstTp.isIdentity());

    Value buffer =
        rewriter
            .create<AllocTensorOp>(loc, bufferTp, dstDynSizes, Value(),
                                   /*sizeHint=*/nnz, Attribute())
            .getResult();

    // Implement the sparse2sparse reshape as follows:
    //   foreach srcCoords %srcTensor
    //     insert reshapeCvs(srcCoords), %buffer
    //
    // followed by an optional
    //   %t = sparse_tensor.cast %tmp
    // depending on whether the input/output are sorted in the same way.
    const auto encSrc = srcTp.getEncoding();
    ForeachOp foreachOp = rewriter.create<ForeachOp>(
        loc, srcTensor, buffer,
        [&](OpBuilder &builder, Location loc, ValueRange srcLcvs, Value v,
            ValueRange reduc) {
          const Dimension dimRank = srcTp.getDimRank();
          SmallVector<Value> srcDcvs;
          srcDcvs.reserve(dimRank);
          for (Dimension d = 0; d < dimRank; d++) {
            Level lvl = toLvl(encSrc, d);
            srcDcvs.push_back(srcLcvs[lvl]);
          }
          SmallVector<Value> dstDcvs;
          reshapeCvs(builder, loc, op.getReassociationIndices(), srcSizes,
                     srcDcvs, dstSizes, dstDcvs);
          auto t = builder.create<InsertOp>(loc, v, reduc.front(), dstDcvs);
          builder.create<sparse_tensor::YieldOp>(loc, t);
        });

    Value t = rewriter.create<LoadOp>(loc, foreachOp.getResult(0), true);
    if (bufferTp != dstTp) {
      auto dstRTT = dstTp.getRankedTensorType();
      Value converted = rewriter.create<ConvertOp>(loc, dstRTT, t).getResult();
      rewriter.create<DeallocTensorOp>(loc, t);
      t = converted;
    }
    rewriter.replaceOp(op, t);
    return success();
  }
};

/// Sparse rewriting rule for sparse-to-dense and dense-to-sparse reshape
/// operator.
template <typename ReshapeOp>
struct ReshapeRewriter : public OpRewritePattern<ReshapeOp> {
public:
  using OpRewritePattern<ReshapeOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(ReshapeOp op,
                                PatternRewriter &rewriter) const override {
    Location loc = op->getLoc();
    auto encDst = getSparseTensorEncoding(op.getResult().getType());
    auto encSrc = getSparseTensorEncoding(op.getSrc().getType());
    // Since a pure dense expansion is very cheap (change of view), for
    // a sparse2dense or dense2sparse, we can simply unfuse a sparse
    // conversion from the reshape operation itself.
    // All other cases are handled elsewhere.
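    // E.g. (illustrative), a dense-to-sparse expand becomes:
    //   %d = tensor.expand_shape %src [[0, 1]]
    //          : tensor<12xf64> into tensor<3x4xf64>
    //   %s = sparse_tensor.convert %d
    //          : tensor<3x4xf64> to tensor<3x4xf64, #CSR>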
    if (encDst && encSrc) {
      return failure();
    }
    if (encSrc) {
      auto rtp = getRankedTensorType(op.getSrc());
      auto denseTp =
          RankedTensorType::get(rtp.getShape(), rtp.getElementType());
      auto convert = rewriter.create<ConvertOp>(loc, denseTp, op.getSrc());
      rewriter.modifyOpInPlace(op, [&]() { op->setOperand(0, convert); });
      return success();
    }
    if (encDst) {
      auto rtp = getRankedTensorType(op.getResult());
      auto denseTp =
          RankedTensorType::get(rtp.getShape(), rtp.getElementType());
      auto reshape = rewriter.create<ReshapeOp>(loc, denseTp, op.getSrc(),
                                                op.getReassociation());
      Value convert = rewriter.create<ConvertOp>(loc, rtp, reshape);
      rewriter.replaceOp(op, convert);
      return success();
    }
    return failure();
  }
};

// A trivial wrapper to help generate different operations for dense/sparse
// tensors.
struct TensorLike {
  TensorLike(OpBuilder &builder, Location loc, RankedTensorType rtt,
             ValueRange sizes) {
    SmallVector<Value> dynSzs;
    getDynamicSizes(rtt, sizes, dynSzs);

    val = builder.create<AllocTensorOp>(loc, rtt, dynSzs);
    if (!isSparse()) {
      Value c0 = constantZero(builder, loc, rtt.getElementType());
      val = builder.create<linalg::FillOp>(loc, c0, val).getResult(0);
    }
  }

  void insert(OpBuilder &builder, Location loc, Value v, ValueRange crds) {
    val = builder.create<tensor::InsertOp>(loc, v, val, crds);
  }

  Value finalize(OpBuilder &builder, Location loc, RankedTensorType rtp) const {
    if (isSparse())
      return builder.create<LoadOp>(loc, val, true);
    return val;
  }

  bool isSparse() const {
    return getSparseTensorEncoding(val.getType()) != nullptr;
  }

  Value val;
};

struct SparseTensorDimOpRewriter : public OpRewritePattern<tensor::DimOp> {
  using OpRewritePattern::OpRewritePattern;
  LogicalResult matchAndRewrite(tensor::DimOp op,
                                PatternRewriter &rewriter) const override {
    std::optional<int64_t> dim = op.getConstantIndex();
    auto stt = getSparseTensorType(op.getSource());
    if (!dim || !stt.hasEncoding())
      return failure();

    if (stt.isPermutation()) {
      rewriter.replaceOpWithNewOp<LvlOp>(op, op.getSource(),
                                         toLvl(stt.getEncoding(), *dim));
      return success();
    }

    // Non-permutation dim2lvl/lvl2dim maps.
    // Compute as follows:
    //   affine.apply #map (l0 - 1, l1 - 1, ...) + 1
    // Note that this is not the most efficient way (but a more general one)
    // for the lvl to dim translation, e.g., for BSR, the dimension size can
    // be computed simply by lvl_size * block_size.
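    // E.g. (illustrative), for BSR with 2x2 blocks and lvl2dim
    // d0 = l0 * 2 + l2: level sizes l0 = 4 and l2 = 2 give max level
    // coordinates (3, 1), so the dim size is 3 * 2 + 1 + 1 = 8.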
    Location loc = op.getLoc();
    SmallVector<Value> maxLvlCrds;
    for (Level l = 0; l < stt.getLvlRank(); l++) {
      Value lvlSz = rewriter.create<LvlOp>(loc, op.getSource(), l);
      Value maxLvlCrd = rewriter.create<arith::SubIOp>(
          loc, lvlSz, constantOne(rewriter, loc, rewriter.getIndexType()));
      maxLvlCrds.push_back(maxLvlCrd);
    }

    AffineExpr lvl2DimExp = stt.getLvlToDim().getResult(*dim);
    Value maxDimCrd = rewriter.create<affine::AffineApplyOp>(
        op.getLoc(), AffineMap::get(stt.getLvlRank(), 0, lvl2DimExp),
        maxLvlCrds);

    Value dimSz = rewriter.create<arith::AddIOp>(
        loc, maxDimCrd, constantOne(rewriter, loc, rewriter.getIndexType()));
    rewriter.replaceOp(op, dimSz);
    return success();
  }
};

struct ConcatenateRewriter : public OpRewritePattern<ConcatenateOp> {
  using OpRewritePattern::OpRewritePattern;
  LogicalResult matchAndRewrite(ConcatenateOp op,
                                PatternRewriter &rewriter) const override {
    if (op.needsExtraSort())
      op.emitError("ConcatenateOp not staged");

    const Location loc = op.getLoc();
    const auto dstTp = getSparseTensorType(op);
    const Dimension conDim = op.getDimension();
    SmallVector<Value> sizes;
    concatSizesFromInputs(rewriter, sizes, loc, dstTp, op.getInputs(), conDim);

    // %t = concatenate %s1, %s2, %s3 {dim = 1}
    // ==>
    // if (isSparseDst)
    //   if (allDense)
    //     %tmp = bufferization.alloc_tensor dstTp
    //   else
    //     %tmp = bufferization.alloc_tensor : unordered COO
    // else
    //   %tmp = memref.alloc : dense tensor
    // foreach in %s1 : insert d0, d1, %tmp
    // foreach in %s2 : insert d0, d1 + size(s1), %tmp
    // foreach in %s3 : insert d0, d1 + size(s1) + size(s2), %tmp

    TensorLike dstBuf(rewriter, loc, dstTp.getRankedTensorType(), sizes);
    Value offset = constantIndex(rewriter, loc, 0);
    Value iterArg = dstBuf.val;

    ForeachOp foreachOp;
    for (Value input : op.getInputs()) {
      // Builds a for op for each input tensor to append new values into the
      // output tensor.
      foreachOp = rewriter.create<ForeachOp>(
          loc, input, iterArg,
          [&](OpBuilder &builder, Location loc, ValueRange dcvs, Value v,
              ValueRange reduc) {
            SmallVector<Value> offDimCrd(dcvs);
            offDimCrd[conDim] =
                builder.create<arith::AddIOp>(loc, offDimCrd[conDim], offset);

            // Enters foreach, updates the SSA chain.
            dstBuf.val = reduc.front();
            if (!dstTp.isAllDense()) {
              Value cond = genIsNonzero(builder, loc, v);
              auto ifOp = builder.create<scf::IfOp>(loc, reduc.getTypes(), cond,
                                                    /*else*/ true);
              builder.setInsertionPointToStart(&ifOp.getElseRegion().front());
              builder.create<scf::YieldOp>(loc, dstBuf.val);

              builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
              dstBuf.insert(builder, loc, v, offDimCrd);
              builder.create<scf::YieldOp>(loc, dstBuf.val);

              // Exits the ifOp, updates the sparse tensor SSA value.
              builder.setInsertionPointAfter(ifOp);
              dstBuf.val = ifOp.getResult(0);
            } else {
              dstBuf.insert(builder, loc, v, offDimCrd);
            }
            builder.create<sparse_tensor::YieldOp>(loc, dstBuf.val);
          });
      // Accumulates the offset. Note that only static-shaped inputs are
      // allowed by the concatenate op verifier, which saves us from
      // computing the offset dynamically.
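      // E.g., concatenating 4x2, 4x3, and 4x5 tensors along dim 1 inserts
      // their entries at column offsets 0, 2, and 5, respectively.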
      const Size sz = getSparseTensorType(input).getDynamicDimSize(conDim);
      assert(!ShapedType::isDynamic(sz));
      offset = rewriter.create<arith::AddIOp>(loc, offset,
                                              constantIndex(rewriter, loc, sz));
      iterArg = foreachOp.getResult(0);
      dstBuf.val = iterArg;
    }

    dstBuf.val = iterArg;
    Value ret = dstBuf.finalize(rewriter, loc, dstTp.getRankedTensorType());
    rewriter.replaceOp(op, ret);
    return success();
  }
};

struct DirectConvertRewriter : public OpRewritePattern<ConvertOp> {
  using OpRewritePattern::OpRewritePattern;
  LogicalResult matchAndRewrite(ConvertOp op,
                                PatternRewriter &rewriter) const override {
    if (op.needsExtraSort())
      return op.emitError("ConvertOp not staged.");

    // TODO: Maybe we want a different operation for this too.
    auto encDst = getSparseTensorEncoding(op.getType());
    auto encSrc = getSparseTensorEncoding(op.getSource().getType());
    if (encDst && encSrc && !encSrc.isSlice() &&
        encSrc.withoutBitWidths() == encDst.withoutBitWidths()) {
      // Trivial tensor conversion and simple element type conversion are
      // handled in codegen.
      return failure();
    }

    Location loc = op.getLoc();
    Value src = op.getSource();

    SparseTensorType srcStt = getSparseTensorType(op.getSource());
    SparseTensorType dstStt = getSparseTensorType(op.getDest());

    bool fromSparseConst = false;
    if (auto constOp = op.getSource().getDefiningOp<arith::ConstantOp>())
      if (dyn_cast<SparseElementsAttr>(constOp.getValue()))
        fromSparseConst = true;

    const AffineMapAttr foreachOrder =
        (!dstStt.isIdentity() && fromSparseConst)
            ? AffineMapAttr::get(dstStt.getExpandedDimToLvl())
            : nullptr;

    bool skipZeroCheck = srcStt.hasEncoding() || fromSparseConst;

    SmallVector<Value> sizes;
    sizesFromSrc(rewriter, sizes, loc, src);
    ValueRange vs;
    TensorLike dstBuf(rewriter, loc, dstStt.getRankedTensorType(), sizes);

    auto foreachOp = rewriter.create<ForeachOp>(
        loc, src, dstBuf.val, foreachOrder,
        [&](OpBuilder &builder, Location loc, ValueRange dcvs, Value v,
            ValueRange reduc) {
          // Enters the loop, updates the SSA value for the insertion chain.
          dstBuf.val = reduc.front();
          if (!skipZeroCheck) {
            Value cond = genIsNonzero(builder, loc, v);
            auto ifOp = builder.create<scf::IfOp>(loc, reduc.getTypes(), cond,
                                                  /*else*/ true);
            builder.setInsertionPointToStart(&ifOp.getElseRegion().front());
            builder.create<scf::YieldOp>(loc, dstBuf.val);

            builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
            dstBuf.insert(builder, loc, v, dcvs);
            builder.create<scf::YieldOp>(loc, dstBuf.val);

            // Exits the ifOp, updates the sparse tensor SSA value.
            builder.setInsertionPointAfter(ifOp);
            dstBuf.val = ifOp.getResult(0);
          } else {
            dstBuf.insert(builder, loc, v, dcvs);
          }
          builder.create<sparse_tensor::YieldOp>(loc, dstBuf.val);
        });

    rewriter.setInsertionPointAfter(foreachOp);

    // Exits the for loop, links the SSA chain.
    dstBuf.val = foreachOp.getResult(0);

    Value ret = dstBuf.finalize(rewriter, loc, dstStt.getRankedTensorType());
    rewriter.replaceOp(op, ret);
    return success();
  }
};

struct CrdTranslateRewriter : public OpRewritePattern<CrdTranslateOp> {
  using OpRewritePattern::OpRewritePattern;
  LogicalResult matchAndRewrite(CrdTranslateOp op,
                                PatternRewriter &rewriter) const override {
    AffineMap map = op.getDirection() == CrdTransDirectionKind::dim2lvl
                        ? op.getEncoder().getDimToLvl()
                        : op.getEncoder().getLvlToDim();

    SmallVector<Value> outCrds;
    for (AffineExpr result : map.getResults()) {
      // TODO: we should probably expand the affine map to IR using our own
      // rules, since affine.apply assumes signed values, while the
      // coordinates we provide must always be signless.
      Value trans = rewriter.create<affine::AffineApplyOp>(
          op.getLoc(), AffineMap::get(map.getNumDims(), 0, result),
          op.getInCrds());
      outCrds.push_back(trans);
    }
    rewriter.replaceOp(op, outCrds);
    return success();
  }
};

/// Sparse rewriting rule for the foreach operator.
struct ForeachRewriter : public OpRewritePattern<ForeachOp> {
public:
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(ForeachOp op,
                                PatternRewriter &rewriter) const override {

    auto loc = op.getLoc();
    Value input = op.getTensor();
    SmallVector<Value> reduc = op.getInitArgs();
    const auto stt = getSparseTensorType(input);
    const Level lvlRank = stt.getLvlRank();

    // Special-case: a foreach over a sparse constant uses its own rewriting
    // rule.
    if (auto constOp = input.getDefiningOp<arith::ConstantOp>()) {
      if (auto attr = dyn_cast<SparseElementsAttr>(constOp.getValue())) {
        return genForeachOnSparseConstant(op, rewriter, attr);
      }
    }

    // Otherwise, use loop emitter to generate loops.
    const auto enc = stt.getEncoding();

    // 1. Generates loop for the sparse input.
    LoopEmitter loopEmitter(
        ValueRange{input},
        StringAttr::get(getContext(), ForeachOp::getOperationName()));
    loopEmitter.initializeLoopEmit(rewriter, loc);
    for (Level l = 0; l < lvlRank; l++) {
      // TODO: provide utility function for loop sequences that only contain
      // one for loop?
      const SmallVector<TensorLevel, 1> tidLvls{
          loopEmitter.makeTensorLevel(0, l)};
      loopEmitter.enterNewLoopSeq(rewriter, loc, tidLvls);
      // Note that reduc will be taken care of by the loop emitter and gets
      // updated in place.
      loopEmitter.enterCoIterationOverTensorsAtLvls(rewriter, loc, tidLvls,
                                                    reduc);
    }

    SmallVector<Value> lcvs = loopEmitter.getLoopIVs();
    if (op.getOrder()) {
      // TODO: Support it so that we can do direct conversion from CSR->BSR.
      llvm_unreachable(
          "Level order not yet implemented on non-constant input tensors.");
    }

    Value vals = loopEmitter.getValBuffer()[0];
    SmallVector<Value> pos = loopEmitter.getValPosits(0);
    // Loads the value from sparse tensor using position-index;
    // loads the value from dense tensor using coords.
    Value val = enc ? rewriter.create<memref::LoadOp>(loc, vals, pos)
                    : rewriter.create<memref::LoadOp>(loc, vals, lcvs);

    // 2. Inline the block in the foreach operator.
    Block *srcBlock = op.getBody();

    // Remap coordinates.
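    // The loop emitter yields level coordinates; translate them back to
    // dimension coordinates via the encoding's lvl2dim map before inlining.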
    SmallVector<Value> args =
        enc.translateCrds(rewriter, loc, lcvs, CrdTransDirectionKind::lvl2dim);

    // Remap value.
    args.push_back(val);
    // Remap reduction variables.
    args.append(reduc);

    // Remove sparse_tensor.yield.
    SmallVector<Value> reducValue = srcBlock->getTerminator()->getOperands();
    rewriter.eraseOp(srcBlock->getTerminator());

    Operation &last = rewriter.getBlock()->back();
    if (llvm::isa<scf::YieldOp>(last)) {
      // Because `scf.for` inserts an implicit yield op when there is no
      // reduction variable upon creation, we reset the insertion point such
      // that the block is inlined *before* the yield op.
      rewriter.setInsertionPoint(&last);
    }

    rewriter.inlineBlockBefore(srcBlock, rewriter.getBlock(),
                               rewriter.getInsertionPoint(), args);
    rewriter.setInsertionPointToEnd(rewriter.getBlock());
    for (Level l = 0; l < lvlRank; l++) {
      // Link the reduction chain. Note that the loop emitter updates
      // reducValue in place.
      loopEmitter.exitCurrentLoop(rewriter, loc, reducValue);
      loopEmitter.exitCurrentLoopSeq(rewriter, loc);
    }

    // Replace the foreach operator with the value returned by the outermost
    // for loop.
    rewriter.replaceOp(op, reducValue);
    return success();
  }
};

/// Sparse rewriting rule for the new operator.
struct NewRewriter : public OpRewritePattern<NewOp> {
  using OpRewritePattern::OpRewritePattern;
  LogicalResult matchAndRewrite(NewOp op,
                                PatternRewriter &rewriter) const override {
    Location loc = op.getLoc();
    auto stt = getSparseTensorType(op.getResult());
    if (!stt.hasEncoding() || stt.getAoSCOOStart() == 0)
      return failure();

    // Implement the NewOp as follows:
    //   %orderedCoo = sparse_tensor.new %filename
    //   %t = sparse_tensor.convert %orderedCoo
    // with enveloping reinterpret_map ops for non-permutations.
    RankedTensorType dstTp = stt.getRankedTensorType();
    RankedTensorType cooTp = stt.getCOOType(/*ordered=*/true);
    Value cooTensor = rewriter.create<NewOp>(loc, cooTp, op.getSource());
    Value convert = cooTensor;
    auto enc = stt.getEncoding();
    if (!stt.isPermutation()) { // demap coo, demap dstTp
      auto coo = getSparseTensorType(cooTensor).getEncoding().withoutDimToLvl();
      convert = rewriter.create<ReinterpretMapOp>(loc, coo, convert);
      dstTp = getSparseTensorType(convert).withEncoding(enc.withoutDimToLvl());
    }
    convert = rewriter.create<ConvertOp>(loc, dstTp, convert);
    if (!stt.isPermutation()) // remap to original enc
      convert = rewriter.create<ReinterpretMapOp>(loc, enc, convert);
    rewriter.replaceOp(op, convert);

    // Release the temporary ordered COO tensor.
    rewriter.setInsertionPointAfterValue(convert);
    rewriter.create<DeallocTensorOp>(loc, cooTensor);

    return success();
  }
};

/// Sparse rewriting rule for the out operator.
struct OutRewriter : public OpRewritePattern<OutOp> {
  using OpRewritePattern::OpRewritePattern;
  LogicalResult matchAndRewrite(OutOp op,
                                PatternRewriter &rewriter) const override {
    Location loc = op.getLoc();
    // Calculate NNZ.
    Value src = op.getTensor();
    Value nnz = rewriter.create<NumberOfEntriesOp>(loc, src);

    // Allocate a temporary buffer for storing dimension-sizes/coordinates.
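    // The same buffer is reused below as the per-element coordinate buffer
    // passed to the writer (see dimCoords).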
    const auto srcTp = getSparseTensorType(src);
    const Dimension dimRank = srcTp.getDimRank();
    Type indexTp = rewriter.getIndexType();
    Value dimSizes = genAlloca(rewriter, loc, dimRank, indexTp);

    // Generate code to calculate dimension size values and store the values to
    // the buffer.
    SmallVector<Value> dims;
    sizesForTensor(rewriter, dims, loc, srcTp, src);
    for (Dimension d = 0; d < dimRank; d++) {
      rewriter.create<memref::StoreOp>(loc, dims[d], dimSizes,
                                       constantIndex(rewriter, loc, d));
    }

    // Create a sparse tensor writer and output metadata.
    Type opaqueTp = getOpaquePointerType(rewriter);
    Value writer =
        createFuncCall(rewriter, loc, "createSparseTensorWriter", {opaqueTp},
                       {op.getDest()}, EmitCInterface::Off)
            .getResult(0);
    Value rankValue = constantIndex(rewriter, loc, dimRank);
    createFuncCall(rewriter, loc, "outSparseTensorWriterMetaData", {},
                   {writer, rankValue, nnz, dimSizes}, EmitCInterface::On);

    Value dimCoords = dimSizes; // Reuse the dimSizes buffer for dimCoords.
    Type eltTp = srcTp.getElementType();
    SmallString<29> outNextFuncName{"outSparseTensorWriterNext",
                                    primaryTypeFunctionSuffix(eltTp)};
    Value value = genAllocaScalar(rewriter, loc, eltTp);
    ModuleOp module = op->getParentOfType<ModuleOp>();

    // For each element in the source tensor, output the element.
    rewriter.create<ForeachOp>(
        loc, src, std::nullopt,
        [&](OpBuilder &builder, Location loc, ValueRange dcvs, Value v,
            ValueRange reduc) {
          for (Dimension d = 0; d < dimRank; d++) {
            rewriter.create<memref::StoreOp>(loc, dcvs[d], dimCoords,
                                             constantIndex(builder, loc, d));
          }
          rewriter.create<memref::StoreOp>(loc, v, value);
          SmallVector<Value> operands{writer, rankValue, dimCoords, value};
          FlatSymbolRefAttr fn = getFunc(module, outNextFuncName, {}, operands,
                                         EmitCInterface::On);
          builder.create<func::CallOp>(loc, TypeRange(), fn, operands);
          builder.create<sparse_tensor::YieldOp>(loc);
        });

    // Release the writer.
    createFuncCall(rewriter, loc, "delSparseTensorWriter", {}, {writer},
                   EmitCInterface::Off);

    rewriter.eraseOp(op);
    return success();
  }
};

} // namespace

//===---------------------------------------------------------------------===//
// Methods that add patterns described in this file to a pattern list.
//===---------------------------------------------------------------------===//

void mlir::populatePreSparsificationRewriting(RewritePatternSet &patterns) {
  patterns.add<FoldInvariantYield, FuseSparseMultiplyOverAdd, FuseTensorCast,
               GenSemiRingReduction, GenSemiRingSelect, PrintRewriter>(
      patterns.getContext());
}

void mlir::populateLowerSparseOpsToForeachPatterns(RewritePatternSet &patterns,
                                                   bool enableRT,
                                                   bool enableConvert) {
  patterns.add<ConcatenateRewriter, ReshapeRewriter<tensor::ExpandShapeOp>,
               ReshapeRewriter<tensor::CollapseShapeOp>,
               Sparse2SparseReshapeRewriter<tensor::ExpandShapeOp>,
               Sparse2SparseReshapeRewriter<tensor::CollapseShapeOp>,
               SparseTensorDimOpRewriter, TensorReshapeRewriter, OutRewriter>(
      patterns.getContext());

  if (enableConvert)
    patterns.add<DirectConvertRewriter>(patterns.getContext());
  if (!enableRT)
    patterns.add<NewRewriter>(patterns.getContext());
}

void mlir::populateLowerForeachToSCFPatterns(RewritePatternSet &patterns) {
  // Run CrdTranslateRewriter later in the pipeline so that the operation can
  // be folded before lowering to affine.apply.
  patterns.add<CrdTranslateRewriter, ForeachRewriter>(patterns.getContext());
}