//===- SCFToGPU.cpp - Convert an affine loop nest to a GPU kernel -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements a straightforward conversion of a loop nest into a GPU
// kernel. The caller is expected to guarantee that the conversion is correct
// or to further transform the kernel to ensure correctness.
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/SCFToGPU/SCFToGPU.h"

#include "mlir/Conversion/AffineToStandard/AffineToStandard.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/GPU/Transforms/ParallelLoopMapper.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/Passes.h"
#include "mlir/Transforms/RegionUtils.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/Support/Debug.h"
#include <optional>

#define DEBUG_TYPE "loops-to-gpu"

using namespace mlir;
using namespace mlir::affine;
using namespace mlir::scf;

// Name of the internal attribute used to mark visited operations during
// conversion.
//
// NOTE: The conversion originally used the following legality criterion:
//   `!parallelOp->hasAttr(gpu::getMappingAttrName())`
// But the provided pattern might reject some cases based on more detailed
// analysis of the `mapping` attribute.
// To avoid dialect conversion failure due to a non-converted illegal
// operation, we use this extra unit attribute as a marker that the operation
// was checked by the pattern and should be considered legal in the following
// legality checks. The `finalizeParallelLoopToGPUConversion` function performs
// the cleanup of these extra attributes and is supposed to be called after the
// dialect conversion.
//
// TODO: Implement a cleaner solution, factoring out the "matching" logic
// from the pattern and its callees into a separate function that can be called
// from both the pattern and the op legality check.
static constexpr StringLiteral kVisitedAttrName = "SCFToGPU_visited";

// Extract an indexed value from KernelDim3.
static Value getDim3Value(const gpu::KernelDim3 &dim3, unsigned pos) {
  switch (pos) {
  case 0:
    return dim3.x;
  case 1:
    return dim3.y;
  case 2:
    return dim3.z;
  default:
    llvm_unreachable("dim3 position out of bounds");
  }
  return nullptr;
}

// Get the lower bound-related operands of a loop operation.
static Operation::operand_range getLowerBoundOperands(AffineForOp forOp) {
  return forOp.getLowerBoundOperands();
}

// Get the upper bound-related operands of a loop operation.
static Operation::operand_range getUpperBoundOperands(AffineForOp forOp) {
  return forOp.getUpperBoundOperands();
}

// Get a Value that corresponds to the loop step. If the step is an attribute,
// materialize a corresponding constant using builder.
static Value getOrCreateStep(AffineForOp forOp, OpBuilder &builder) {
  return builder.create<arith::ConstantIndexOp>(forOp.getLoc(),
                                                forOp.getStep());
}

// Get a Value for the loop lower bound. If the value requires computation,
// materialize the instructions using builder.
static Value getOrEmitLowerBound(AffineForOp forOp, OpBuilder &builder) {
  return lowerAffineLowerBound(forOp, builder);
}

// Get a Value for the loop upper bound. If the value requires computation,
// materialize the instructions using builder.
static Value getOrEmitUpperBound(AffineForOp forOp, OpBuilder &builder) {
  return lowerAffineUpperBound(forOp, builder);
}

// Check the structure of the loop nest:
//   - there are enough loops to map to numDims;
//   - the loops are perfectly nested;
//   - the loop bounds can be computed above the outermost loop.
// This roughly corresponds to the "matcher" part of the pattern-based
// rewriting infrastructure.
static LogicalResult checkAffineLoopNestMappableImpl(AffineForOp forOp,
                                                     unsigned numDims) {
  Region &limit = forOp.getRegion();
  for (unsigned i = 0, e = numDims; i < e; ++i) {
    Operation *nested = &forOp.getBody()->front();
    if (!areValuesDefinedAbove(getLowerBoundOperands(forOp), limit) ||
        !areValuesDefinedAbove(getUpperBoundOperands(forOp), limit))
      return forOp.emitError(
          "loops with bounds depending on other mapped loops "
          "are not supported");

    // The innermost loop can have an arbitrary body, skip the perfect nesting
    // check for it.
    if (i == e - 1)
      break;

    auto begin = forOp.getBody()->begin(), end = forOp.getBody()->end();
    if (forOp.getBody()->empty() || std::next(begin, 2) != end)
      return forOp.emitError("expected perfectly nested loops in the body");

    if (!(forOp = dyn_cast<AffineForOp>(nested)))
      return nested->emitError("expected a nested loop");
  }
  return success();
}

static LogicalResult checkAffineLoopNestMappable(AffineForOp forOp,
                                                 unsigned numBlockDims,
                                                 unsigned numThreadDims) {
  if (numBlockDims < 1 || numThreadDims < 1) {
    LLVM_DEBUG(llvm::dbgs() << "nothing to map");
    return success();
  }

  if (numBlockDims > 3) {
    return forOp.emitError("cannot map to more than 3 block dimensions");
  }
  if (numThreadDims > 3) {
    return forOp.emitError("cannot map to more than 3 thread dimensions");
  }
  return checkAffineLoopNestMappableImpl(forOp, numBlockDims + numThreadDims);
}

namespace {
// Helper structure that holds common state of the loop to GPU kernel
// conversion.
struct AffineLoopToGpuConverter {
  std::optional<AffineForOp> collectBounds(AffineForOp forOp,
                                           unsigned numLoops);

  void createLaunch(AffineForOp rootForOp, AffineForOp innermostForOp,
                    unsigned numBlockDims, unsigned numThreadDims);

  // Ranges of the loops mapped to blocks or threads.
  SmallVector<Value, 6> dims;
  // Lower bounds of the loops mapped to blocks or threads.
  SmallVector<Value, 6> lbs;
  // Induction variables of the loops mapped to blocks or threads.
  SmallVector<Value, 6> ivs;
  // Steps of the loops mapped to blocks or threads.
  SmallVector<Value, 6> steps;
};
} // namespace
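
// For orientation, a hand-written sketch (not from an actual test; syntax
// schematic, SSA names invented) of the effect of the conversion below on a
// 2-D perfect nest with numBlockDims = 1 and numThreadDims = 1:
//
//   affine.for %i = %lb0 to %ub0 {
//     affine.for %j = %lb1 to %ub1 {
//       "use"(%i, %j) : (index, index) -> ()
//     }
//   }
//
// becomes, roughly,
//
//   gpu.launch blocks(%bx, %by, %bz) in (%gx = %r0, %gy = %c1, %gz = %c1)
//              threads(%tx, %ty, %tz) in (%sx = %r1, %sy = %c1, %sz = %c1) {
//     %i = arith.addi %lb0, %bx : index
//     %j = arith.addi %lb1, %tx : index
//     "use"(%i, %j) : (index, index) -> ()
//     gpu.terminator
//   }
//
// where %r0 = %ub0 - %lb0 and %r1 = %ub1 - %lb1 (additionally divided by the
// step when it is not the constant one).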

// Return true if the value is obviously a constant "one".
static bool isConstantOne(Value value) {
  if (auto def = value.getDefiningOp<arith::ConstantIndexOp>())
    return def.value() == 1;
  return false;
}

// Collect ranges, bounds, steps and induction variables in preparation for
// mapping a loop nest of depth "numLoops" rooted at "forOp" to a GPU kernel.
// This may fail if the IR for computing loop bounds cannot be constructed, for
// example if an affine loop uses semi-affine maps. Return the last loop to be
// mapped on success, std::nullopt on failure.
std::optional<AffineForOp>
AffineLoopToGpuConverter::collectBounds(AffineForOp forOp, unsigned numLoops) {
  OpBuilder builder(forOp.getOperation());
  dims.reserve(numLoops);
  lbs.reserve(numLoops);
  ivs.reserve(numLoops);
  steps.reserve(numLoops);
  AffineForOp currentLoop = forOp;
  for (unsigned i = 0; i < numLoops; ++i) {
    Value lowerBound = getOrEmitLowerBound(currentLoop, builder);
    Value upperBound = getOrEmitUpperBound(currentLoop, builder);
    if (!lowerBound || !upperBound) {
      return std::nullopt;
    }

    Value range = builder.create<arith::SubIOp>(currentLoop.getLoc(),
                                                upperBound, lowerBound);
    Value step = getOrCreateStep(currentLoop, builder);
    if (!isConstantOne(step))
      range =
          builder.create<arith::DivSIOp>(currentLoop.getLoc(), range, step);
    dims.push_back(range);

    lbs.push_back(lowerBound);
    ivs.push_back(currentLoop.getInductionVar());
    steps.push_back(step);

    if (i != numLoops - 1)
      currentLoop = cast<AffineForOp>(&currentLoop.getBody()->front());
  }
  return currentLoop;
}

// Replace the loop nest rooted at "rootForOp" with a GPU launch operation.
// This expects "innermostForOp" to point to the last loop to be transformed
// to the kernel, and to have (numBlockDims + numThreadDims) perfectly nested
// loops between "rootForOp" and "innermostForOp".
void AffineLoopToGpuConverter::createLaunch(AffineForOp rootForOp,
                                            AffineForOp innermostForOp,
                                            unsigned numBlockDims,
                                            unsigned numThreadDims) {
  OpBuilder builder(rootForOp.getOperation());
  // Prepare the grid and block sizes for the launch operation. If there is
  // no loop mapped to a specific dimension, use constant "1" as its size.
  Value constOne =
      (numBlockDims < 3 || numThreadDims < 3)
          ? builder.create<arith::ConstantIndexOp>(rootForOp.getLoc(), 1)
          : nullptr;
  Value gridSizeX = numBlockDims > 0 ? dims[0] : constOne;
  Value gridSizeY = numBlockDims > 1 ? dims[1] : constOne;
  Value gridSizeZ = numBlockDims > 2 ? dims[2] : constOne;
  Value blockSizeX = numThreadDims > 0 ? dims[numBlockDims] : constOne;
  Value blockSizeY = numThreadDims > 1 ? dims[numBlockDims + 1] : constOne;
  Value blockSizeZ = numThreadDims > 2 ? dims[numBlockDims + 2] : constOne;

  // Create a launch op and move the body region of the innermost loop to the
  // launch op.
  auto launchOp = builder.create<gpu::LaunchOp>(
      rootForOp.getLoc(), gridSizeX, gridSizeY, gridSizeZ, blockSizeX,
      blockSizeY, blockSizeZ);
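
  // Note: the body region of the launch op consists of a single block whose
  // leading arguments provide the block and thread identifiers; they are
  // handed out as KernelDim3 triples by getBlockIds() / getThreadIds() below.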

  // Replace the loop terminator (loops contain only a single block) with the
  // gpu terminator and move the operations from the loop body block to the gpu
  // launch body block. Do not move the entire block because of the difference
  // in block arguments.
  Operation &terminator = innermostForOp.getBody()->back();
  Location terminatorLoc = terminator.getLoc();
  terminator.erase();
  builder.setInsertionPointToEnd(innermostForOp.getBody());
  builder.create<gpu::TerminatorOp>(terminatorLoc, std::nullopt);
  launchOp.getBody().front().getOperations().splice(
      launchOp.getBody().front().begin(),
      innermostForOp.getBody()->getOperations());

  // Remap the loop iterators to use block/thread identifiers instead. Loops
  // may iterate from LB with step S whereas GPU thread/block ids always
  // iterate from 0 to N with step 1. Therefore, loop induction variables are
  // replaced with (gpu-thread/block-id * S) + LB.
  builder.setInsertionPointToStart(&launchOp.getBody().front());
  auto *lbArgumentIt = lbs.begin();
  auto *stepArgumentIt = steps.begin();
  for (const auto &en : llvm::enumerate(ivs)) {
    Value id =
        en.index() < numBlockDims
            ? getDim3Value(launchOp.getBlockIds(), en.index())
            : getDim3Value(launchOp.getThreadIds(), en.index() - numBlockDims);
    Value step = steps[en.index()];
    if (!isConstantOne(step))
      id = builder.create<arith::MulIOp>(rootForOp.getLoc(), step, id);

    Value ivReplacement =
        builder.create<arith::AddIOp>(rootForOp.getLoc(), *lbArgumentIt, id);
    en.value().replaceAllUsesWith(ivReplacement);
    std::advance(lbArgumentIt, 1);
    std::advance(stepArgumentIt, 1);
  }

  // We are done and can erase the original outermost loop.
  rootForOp.erase();
}

// Generic loop to GPU kernel conversion function.
static LogicalResult convertAffineLoopNestToGPULaunch(AffineForOp forOp,
                                                      unsigned numBlockDims,
                                                      unsigned numThreadDims) {
  if (failed(checkAffineLoopNestMappable(forOp, numBlockDims, numThreadDims)))
    return failure();

  AffineLoopToGpuConverter converter;
  auto maybeInnerLoop =
      converter.collectBounds(forOp, numBlockDims + numThreadDims);
  if (!maybeInnerLoop)
    return failure();
  converter.createLaunch(forOp, *maybeInnerLoop, numBlockDims, numThreadDims);

  return success();
}

LogicalResult mlir::convertAffineLoopNestToGPULaunch(AffineForOp forOp,
                                                     unsigned numBlockDims,
                                                     unsigned numThreadDims) {
  return ::convertAffineLoopNestToGPULaunch(forOp, numBlockDims,
                                            numThreadDims);
}

namespace {
struct ParallelToGpuLaunchLowering : public OpRewritePattern<ParallelOp> {
  using OpRewritePattern<ParallelOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(ParallelOp parallelOp,
                                PatternRewriter &rewriter) const override;
};
} // namespace
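
// As an illustration of deriveStaticUpperBound below (a hand-written sketch,
// not from a test): given
//   %ub = affine.min affine_map<(d0) -> (d0, 64)>(%x)
// the map results are (d0, 64); the first constant result, 64, is
// materialized as an arith.constant and returned as the static bound.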

/// Tries to derive a static upper bound from the defining operation of
/// `upperBound`.
static Value deriveStaticUpperBound(Value upperBound,
                                    PatternRewriter &rewriter) {
  if (auto op = upperBound.getDefiningOp<arith::ConstantIndexOp>()) {
    return op;
  }

  if (auto minOp = upperBound.getDefiningOp<AffineMinOp>()) {
    for (const AffineExpr &result : minOp.getMap().getResults()) {
      if (auto constExpr = result.dyn_cast<AffineConstantExpr>()) {
        return rewriter.create<arith::ConstantIndexOp>(minOp.getLoc(),
                                                       constExpr.getValue());
      }
    }
  }

  if (auto minOp = upperBound.getDefiningOp<arith::MinSIOp>()) {
    for (Value operand : {minOp.getLhs(), minOp.getRhs()}) {
      if (auto staticBound = deriveStaticUpperBound(operand, rewriter))
        return staticBound;
    }
  }

  if (auto multiplyOp = upperBound.getDefiningOp<arith::MulIOp>()) {
    if (auto lhs = dyn_cast_or_null<arith::ConstantIndexOp>(
            deriveStaticUpperBound(multiplyOp.getOperand(0), rewriter)
                .getDefiningOp()))
      if (auto rhs = dyn_cast_or_null<arith::ConstantIndexOp>(
              deriveStaticUpperBound(multiplyOp.getOperand(1), rewriter)
                  .getDefiningOp())) {
        // Assumptions about the upper bound of minimum computations no longer
        // work if multiplied by mixed signs, so abort in this case.
        if ((lhs.value() < 0) != (rhs.value() < 0))
          return {};

        return rewriter.create<arith::ConstantIndexOp>(
            multiplyOp.getLoc(), lhs.value() * rhs.value());
      }
  }

  return {};
}

static bool isMappedToProcessor(gpu::Processor processor) {
  return processor != gpu::Processor::Sequential;
}

static unsigned getLaunchOpArgumentNum(gpu::Processor processor) {
  switch (processor) {
  case gpu::Processor::BlockX:
    return 0;
  case gpu::Processor::BlockY:
    return 1;
  case gpu::Processor::BlockZ:
    return 2;
  case gpu::Processor::ThreadX:
    return 3;
  case gpu::Processor::ThreadY:
    return 4;
  case gpu::Processor::ThreadZ:
    return 5;
  default:;
  }
  llvm_unreachable(
      "invalid processor type while retrieving launch op argument number");
}
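
// For orientation, a sketch (not from a test; attribute syntax abbreviated,
// SSA names invented) of the kind of input handled below:
//
//   scf.parallel (%i) = (%lb) to (%ub) step (%s) {
//     ...
//   } {mapping = [#gpu.loop_dim_map<processor = block_x,
//                                   map = (d0) -> (d0),
//                                   bound = (d0) -> (d0)>]}
//
// With this annotation, uses of %i are replaced by an affine.apply computing
// block_id_x * %s + %lb, and the x-dimension of the grid is set to
// (%ub - %lb) ceilDiv %s, provided %lb, %ub and %s are usable outside the
// launch.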

/// Modifies the current transformation state to capture the effect of the
/// given `scf.parallel` operation on index substitutions and the operations
/// to be inserted.
/// Specifically, if a dimension of a parallel loop is mapped to a hardware id,
/// this function will
/// - compute the loop index based on the hardware id and affine map from the
///   mapping and update `cloningMap` to substitute all uses.
/// - derive a new upper bound for the hardware id and augment the provided
///   `gpu.launch` operation accordingly.
/// - if the upper bound is imprecise, insert a conditional in the `gpu.launch`
///   and update the rewriter to insert into the conditional's body.
/// If the dimension is mapped to sequential,
/// - insert a for loop into the body and update the rewriter to insert into
///   the for loop's body.
/// - update the `cloningMap` to replace uses of the index with the index of
///   the new for loop.
/// In either case,
/// - append the instructions from the loop's body to the worklist, in reverse
///   order.
/// To note the end of the current scope in case a loop or conditional was
/// inserted, a sentinel (the `gpu.launch` operation) is inserted into the
/// worklist. This signals the processor of the worklist to pop the rewriter
/// one scope-level up.
static LogicalResult processParallelLoop(
    ParallelOp parallelOp, gpu::LaunchOp launchOp, IRMapping &cloningMap,
    SmallVectorImpl<Operation *> &worklist,
    DenseMap<gpu::Processor, Value> &bounds, PatternRewriter &rewriter) {
  // TODO: Verify that this is a valid GPU mapping.
  // processor ids: 0-2 -> block [x/y/z], 3-5 -> thread [x/y/z],
  // 6 -> sequential
  ArrayAttr mapping =
      parallelOp->getAttrOfType<ArrayAttr>(gpu::getMappingAttrName());

  // TODO: Support reductions.
  if (!mapping || parallelOp.getNumResults() != 0)
    return failure();

  Location loc = parallelOp.getLoc();

  auto launchIndependent = [&launchOp](Value val) {
    return val.getParentRegion()->isAncestor(launchOp->getParentRegion());
  };

  auto ensureLaunchIndependent = [&rewriter,
                                  launchIndependent](Value val) -> Value {
    if (launchIndependent(val))
      return val;
    if (auto constOp = val.getDefiningOp<arith::ConstantOp>())
      return rewriter.create<arith::ConstantOp>(constOp.getLoc(),
                                                constOp.getValue());
    return {};
  };

  for (auto config : llvm::zip(
           mapping, parallelOp.getInductionVars(), parallelOp.getLowerBound(),
           parallelOp.getUpperBound(), parallelOp.getStep())) {
    Attribute mappingAttribute;
    Value iv, lowerBound, upperBound, step;
    std::tie(mappingAttribute, iv, lowerBound, upperBound, step) = config;
    auto annotation =
        dyn_cast<gpu::ParallelLoopDimMappingAttr>(mappingAttribute);
    if (!annotation)
      return parallelOp.emitOpError()
             << "expected mapping attribute for lowering to GPU";
    Value newIndex;
    gpu::Processor processor = annotation.getProcessor();

    if (isMappedToProcessor(processor)) {
      // Use the corresponding thread/grid index as replacement for the loop
      // iv.
      Value operand =
          launchOp.getBody().getArgument(getLaunchOpArgumentNum(processor));
      // Take the index map and add the lower bound and step computations in.
      // This computes operand * step + lowerBound.
      // Use an affine map here so that it composes nicely with the provided
      // annotation.
      AffineMap lowerAndStep = AffineMap::get(
          1, 2,
          rewriter.getAffineDimExpr(0) * rewriter.getAffineSymbolExpr(0) +
              rewriter.getAffineSymbolExpr(1));
      newIndex = rewriter.create<AffineApplyOp>(
          loc, annotation.getMap().compose(lowerAndStep),
          ValueRange{operand, step, lowerBound});
      // If there was also a bound, insert that, too.
      // TODO: Check that we do not assign bounds twice.
      if (annotation.getBound()) {
        // We pass as the single operand to the bound-map the number of
        // iterations, which is (upperBound - lowerBound) ceilDiv step. To
        // support inner loops with dynamic upper bounds (as generated by e.g.
        // tiling), try to derive a max for the bounds. If the used bound for
        // the hardware id is imprecise, wrap the contained code into a
        // conditional. If the lower bound is constant or defined before the
        // launch, we can use it in the launch bounds. Otherwise fail.
        if (!launchIndependent(lowerBound) &&
            !isa_and_nonnull<arith::ConstantOp>(lowerBound.getDefiningOp()))
          return failure();
        // The step must also be constant or defined outside of the loop nest.
        if (!launchIndependent(step) &&
            !isa_and_nonnull<arith::ConstantOp>(step.getDefiningOp()))
          return failure();
        // If the upper bound is constant or defined before the launch, we can
        // use it in the launch bounds directly. Otherwise try to derive a
        // bound.
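        // For example (values invented): with lowerBound = 0, step = 4, and a
        // static upper bound of 64 derived below, the launch bound evaluates
        // to (64 - 0) ceilDiv 4 = 16, assuming an identity `bound` map.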
        bool boundIsPrecise =
            launchIndependent(upperBound) ||
            isa_and_nonnull<arith::ConstantOp>(upperBound.getDefiningOp());
        {
          PatternRewriter::InsertionGuard guard(rewriter);
          rewriter.setInsertionPoint(launchOp);
          if (!boundIsPrecise) {
            upperBound = deriveStaticUpperBound(upperBound, rewriter);
            if (!upperBound) {
              return rewriter.notifyMatchFailure(
                  parallelOp,
                  "cannot derive loop-invariant upper bound for number of "
                  "iterations");
            }
          }
          // Compute the number of iterations needed. We compute this as an
          // affine expression ceilDiv (upperBound - lowerBound) step. We use
          // affine.apply here so that it composes nicely with the provided
          // map.
          AffineMap stepMap = AffineMap::get(
              1, 2,
              ((rewriter.getAffineDimExpr(0) - rewriter.getAffineSymbolExpr(0))
                   .ceilDiv(rewriter.getAffineSymbolExpr(1))));
          Value launchBound = rewriter.create<AffineApplyOp>(
              loc, annotation.getBound().compose(stepMap),
              ValueRange{
                  ensureLaunchIndependent(
                      cloningMap.lookupOrDefault(upperBound)),
                  ensureLaunchIndependent(
                      cloningMap.lookupOrDefault(lowerBound)),
                  ensureLaunchIndependent(cloningMap.lookupOrDefault(step))});
          // todo(herhut,ravishankarm): Update the behavior of setMappingAttr
          // when this condition is relaxed.
          if (bounds.contains(processor)) {
            return rewriter.notifyMatchFailure(
                parallelOp, "cannot redefine the bound for processor " +
                                Twine(static_cast<int64_t>(processor)));
          }
          bounds[processor] = launchBound;
        }
        if (!boundIsPrecise) {
          // We are using an approximation, create a surrounding conditional.
          Value originalBound = std::get<3>(config);
          arith::CmpIOp pred = rewriter.create<arith::CmpIOp>(
              loc, arith::CmpIPredicate::slt, newIndex,
              cloningMap.lookupOrDefault(originalBound));
          scf::IfOp ifOp = rewriter.create<scf::IfOp>(loc, pred, false);
          rewriter.setInsertionPointToStart(&ifOp.getThenRegion().front());
          // Put a sentinel into the worklist so we know when to pop out of
          // the if body again. We use the launchOp here, as that cannot be
          // part of the body's instructions.
          worklist.push_back(launchOp.getOperation());
        }
      }
    } else {
      // Create a sequential for loop.
      auto loopOp = rewriter.create<scf::ForOp>(
          loc, cloningMap.lookupOrDefault(lowerBound),
          cloningMap.lookupOrDefault(upperBound),
          cloningMap.lookupOrDefault(step));
      newIndex = loopOp.getInductionVar();
      rewriter.setInsertionPointToStart(loopOp.getBody());
      // Put a sentinel into the worklist so we know when to pop out of the
      // loop body again. We use the launchOp here, as that cannot be part of
      // the body's instructions.
      worklist.push_back(launchOp.getOperation());
    }
    cloningMap.map(iv, newIndex);
  }

  // Propagate custom user-defined optional attributes that can be used at a
  // later stage, such as extension data for GPU kernel dispatch.
  for (const auto &namedAttr : parallelOp->getAttrs()) {
    if (namedAttr.getName() == gpu::getMappingAttrName() ||
        namedAttr.getName() == ParallelOp::getOperandSegmentSizeAttr())
      continue;
    launchOp->setAttr(namedAttr.getName(), namedAttr.getValue());
  }

  Block *body = parallelOp.getBody();
  worklist.reserve(worklist.size() + body->getOperations().size());
  for (Operation &op : llvm::reverse(body->without_terminator()))
    worklist.push_back(&op);
  return success();
}

/// Lower a `scf.parallel` operation into a corresponding `gpu.launch`
/// operation.
///
/// This essentially transforms a loop nest into a corresponding SIMT function.
/// The conversion is driven by mapping annotations on the `scf.parallel`
/// operations. The mapping is provided via a `DictionaryAttribute` named
/// `mapping`, which has three entries:
///  - processor: the hardware id to map to. 0-2 are block dimensions, 3-5 are
///    thread dimensions and 6 is sequential.
///  - map : An affine map that is used to pre-process hardware ids before
///    substitution.
///  - bound : An affine map that is used to compute the bound of the hardware
///    id based on an upper bound of the number of iterations.
/// If the `scf.parallel` contains nested `scf.parallel` operations, those
/// need to be annotated as well. Structurally, the transformation works by
/// splicing all operations from nested `scf.parallel` operations into a single
/// sequence. Indices mapped to hardware ids are substituted with those ids,
/// whereas sequential mappings result in a sequential for-loop. To have more
/// flexibility when mapping code to hardware ids, the transform supports two
/// affine maps. The first `map` is used to compute the actual index for
/// substitution from the hardware id. The second `bound` is used to compute
/// the launch dimension for the hardware id from the number of iterations the
/// mapped loop is performing. Note that the number of iterations might be
/// imprecise if the corresponding loop-bounds are loop-dependent. In such
/// cases, the hardware id might iterate over additional indices. The
/// transformation caters for this by predicating the created sequence of
/// instructions on the actual loop bound. This only works if a static upper
/// bound for the dynamic loop bound can be derived, currently via analyzing
/// `affine.min` operations.
LogicalResult
ParallelToGpuLaunchLowering::matchAndRewrite(ParallelOp parallelOp,
                                             PatternRewriter &rewriter) const {
  // Mark the operation as visited for the recursive legality check.
  parallelOp->setAttr(kVisitedAttrName, rewriter.getUnitAttr());

  // We can only transform starting at the outermost loop. Launches inside of
  // parallel loops are not supported.
  if (auto parentLoop = parallelOp->getParentOfType<ParallelOp>())
    return failure();
  // Create a launch operation. We start with bound one for all grid/block
  // sizes. Those will be refined later as we discover them from mappings.
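  // The skeleton created below looks roughly like this (sketch only; SSA
  // names invented):
  //   %c1 = arith.constant 1 : index
  //   gpu.launch blocks(%bx, %by, %bz) in (%gx = %c1, %gy = %c1, %gz = %c1)
  //              threads(%tx, %ty, %tz) in (%sx = %c1, %sy = %c1, %sz = %c1) {
  //     gpu.terminator
  //   }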
  Location loc = parallelOp.getLoc();
  Value constantOne =
      rewriter.create<arith::ConstantIndexOp>(parallelOp.getLoc(), 1);
  gpu::LaunchOp launchOp = rewriter.create<gpu::LaunchOp>(
      parallelOp.getLoc(), constantOne, constantOne, constantOne, constantOne,
      constantOne, constantOne);
  rewriter.setInsertionPointToEnd(&launchOp.getBody().front());
  rewriter.create<gpu::TerminatorOp>(loc);
  rewriter.setInsertionPointToStart(&launchOp.getBody().front());

  IRMapping cloningMap;
  llvm::DenseMap<gpu::Processor, Value> launchBounds;
  SmallVector<Operation *, 16> worklist;
  if (failed(processParallelLoop(parallelOp, launchOp, cloningMap, worklist,
                                 launchBounds, rewriter)))
    return failure();

  // Whether we have seen any side effects. Reset when leaving an inner scope.
  bool seenSideeffects = false;
  // Whether we have left a nesting scope (and hence are no longer innermost).
  bool leftNestingScope = false;
  while (!worklist.empty()) {
    Operation *op = worklist.pop_back_val();
    // Now walk over the body and clone it.
    // TODO: This is only correct if there either is no further scf.parallel
    //       nested or this code is side-effect free. Otherwise we might need
    //       predication. We are overly conservative for now and only allow
    //       side effects in the innermost scope.
    if (auto nestedParallel = dyn_cast<ParallelOp>(op)) {
      // Before entering a nested scope, make sure there have been no
      // side effects until now.
      if (seenSideeffects)
        return failure();
      // A nested scf.parallel needs insertion of code to compute indices.
      // Insert that now. This will also update the worklist with the loop's
      // body.
      if (failed(processParallelLoop(nestedParallel, launchOp, cloningMap,
                                     worklist, launchBounds, rewriter)))
        return failure();
    } else if (op == launchOp.getOperation()) {
      // Found our sentinel value. We have finished the operations from one
      // nesting level, pop one level back up.
      auto *parent = rewriter.getInsertionPoint()->getParentOp();
      rewriter.setInsertionPointAfter(parent);
      leftNestingScope = true;
      seenSideeffects = false;
    } else {
      // Otherwise we copy it over.
      Operation *clone = rewriter.clone(*op, cloningMap);
      cloningMap.map(op->getResults(), clone->getResults());
      // Check for side effects.
      // TODO: Handle region side effects properly.
      seenSideeffects |=
          !isMemoryEffectFree(clone) || clone->getNumRegions() != 0;
      // If we are no longer in the innermost scope, side effects are
      // disallowed.
      if (seenSideeffects && leftNestingScope)
        return failure();
    }
  }

  // Now that we have succeeded in creating the launch operation, also update
  // the bounds.
  for (auto bound : launchBounds)
    launchOp.setOperand(getLaunchOpArgumentNum(std::get<0>(bound)),
                        std::get<1>(bound));

  rewriter.eraseOp(parallelOp);
  return success();
}

void mlir::populateParallelLoopToGPUPatterns(RewritePatternSet &patterns) {
  patterns.add<ParallelToGpuLaunchLowering>(patterns.getContext());
}

void mlir::configureParallelLoopToGPULegality(ConversionTarget &target) {
  target.addLegalDialect<memref::MemRefDialect>();
  target.addDynamicallyLegalOp<scf::ParallelOp>([](scf::ParallelOp parallelOp) {
    return !parallelOp->hasAttr(gpu::getMappingAttrName()) ||
           parallelOp->hasAttr(kVisitedAttrName);
  });
}

void mlir::finalizeParallelLoopToGPUConversion(Operation *op) {
  op->walk([](scf::ParallelOp parallelOp) {
    parallelOp->removeAttr(kVisitedAttrName);
  });
}
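
// For reference, a driver sketch (hypothetical; error handling elided and not
// necessarily identical to the actual pass) would wire the three entry points
// above together roughly as follows:
//
//   RewritePatternSet patterns(op->getContext());
//   populateParallelLoopToGPUPatterns(patterns);
//   ConversionTarget target(*op->getContext());
//   target.markUnknownOpDynamicallyLegal([](Operation *) { return true; });
//   configureParallelLoopToGPULegality(target);
//   if (failed(applyPartialConversion(op, target, std::move(patterns))))
//     return failure();
//   finalizeParallelLoopToGPUConversion(op);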