//===- SCFToGPU.cpp - Convert an affine loop nest to a GPU kernel -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements a straightforward conversion of a loop nest into a GPU
// kernel. The caller is expected to guarantee that the conversion is correct
// or to further transform the kernel to ensure correctness.
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/SCFToGPU/SCFToGPU.h"

#include "mlir/Conversion/AffineToStandard/AffineToStandard.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/GPU/Transforms/ParallelLoopMapper.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/Passes.h"
#include "mlir/Transforms/RegionUtils.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/Support/Debug.h"
#include <optional>

#define DEBUG_TYPE "loops-to-gpu"

using namespace mlir;
using namespace mlir::scf;

// Name of the internal attribute used to mark visited operations during the
// conversion.
//
// NOTE: The conversion originally used the following legality criteria:
// `!parallelOp->hasAttr(gpu::getMappingAttrName())`
// But the provided pattern might reject some cases based on more detailed
// analysis of the `mapping` attribute.
// To avoid dialect conversion failure due to a non-converted illegal operation,
// we use this extra unit attribute as a marker that the operation was checked
// by the pattern and should be considered legal in the following legality
// checks. The `finalizeParallelLoopToGPUConversion` function performs a cleanup
// of these extra attributes and is supposed to be called after the dialect
// conversion.
//
// TODO: Implement a cleaner solution, factoring out the "matching" logic
// from the pattern and its callees into a separate function that can be called
// from both the pattern and the op legality check.
static constexpr StringLiteral kVisitedAttrName = "SCFToGPU_visited";

// Extract an indexed value from KernelDim3.
static Value getDim3Value(const gpu::KernelDim3 &dim3, unsigned pos) {
  switch (pos) {
  case 0:
    return dim3.x;
  case 1:
    return dim3.y;
  case 2:
    return dim3.z;
  default:
    llvm_unreachable("dim3 position out of bounds");
  }
  return nullptr;
}

// Get the lower bound-related operands of a loop operation.
static Operation::operand_range getLowerBoundOperands(AffineForOp forOp) {
  return forOp.getLowerBoundOperands();
}

// Get the upper bound-related operands of a loop operation.
static Operation::operand_range getUpperBoundOperands(AffineForOp forOp) {
  return forOp.getUpperBoundOperands();
}

// Get a Value that corresponds to the loop step. If the step is an attribute,
// materialize a corresponding constant using builder.
static Value getOrCreateStep(AffineForOp forOp, OpBuilder &builder) {
  return builder.create<arith::ConstantIndexOp>(forOp.getLoc(),
                                                forOp.getStep());
}

// Get a Value for the loop lower bound. If the value requires computation,
// materialize the instructions using builder.
static Value getOrEmitLowerBound(AffineForOp forOp, OpBuilder &builder) {
  return lowerAffineLowerBound(forOp, builder);
}

// Get a Value for the loop upper bound. If the value requires computation,
// materialize the instructions using builder.
static Value getOrEmitUpperBound(AffineForOp forOp, OpBuilder &builder) {
  return lowerAffineUpperBound(forOp, builder);
}

// Check the structure of the loop nest:
//   - there are enough loops to map to numDims;
//   - the loops are perfectly nested;
//   - the loop bounds can be computed above the outermost loop.
// This roughly corresponds to the "matcher" part of the pattern-based
// rewriting infrastructure.
static LogicalResult checkAffineLoopNestMappableImpl(AffineForOp forOp,
                                                     unsigned numDims) {
  Region &limit = forOp.getRegion();
  for (unsigned i = 0, e = numDims; i < e; ++i) {
    Operation *nested = &forOp.getBody()->front();
    if (!areValuesDefinedAbove(getLowerBoundOperands(forOp), limit) ||
        !areValuesDefinedAbove(getUpperBoundOperands(forOp), limit))
      return forOp.emitError(
          "loops with bounds depending on other mapped loops "
          "are not supported");

    // The innermost loop can have an arbitrary body, skip the perfect nesting
    // check for it.
    if (i == e - 1)
      break;

    auto begin = forOp.getBody()->begin(), end = forOp.getBody()->end();
    if (forOp.getBody()->empty() || std::next(begin, 2) != end)
      return forOp.emitError("expected perfectly nested loops in the body");

    if (!(forOp = dyn_cast<AffineForOp>(nested)))
      return nested->emitError("expected a nested loop");
  }
  return success();
}

static LogicalResult checkAffineLoopNestMappable(AffineForOp forOp,
                                                 unsigned numBlockDims,
                                                 unsigned numThreadDims) {
  if (numBlockDims < 1 || numThreadDims < 1) {
    LLVM_DEBUG(llvm::dbgs() << "nothing to map");
    return success();
  }

  if (numBlockDims > 3) {
    return forOp.emitError("cannot map to more than 3 block dimensions");
  }
  if (numThreadDims > 3) {
    return forOp.emitError("cannot map to more than 3 thread dimensions");
  }
  return checkAffineLoopNestMappableImpl(forOp, numBlockDims + numThreadDims);
}

namespace {
// Helper structure that holds common state of the loop to GPU kernel
// conversion.
struct AffineLoopToGpuConverter {
  std::optional<AffineForOp> collectBounds(AffineForOp forOp,
                                           unsigned numLoops);

  void createLaunch(AffineForOp rootForOp, AffineForOp innermostForOp,
                    unsigned numBlockDims, unsigned numThreadDims);

  // Ranges of the loops mapped to blocks or threads.
  SmallVector<Value, 6> dims;
  // Lower bounds of the loops mapped to blocks or threads.
  SmallVector<Value, 6> lbs;
  // Induction variables of the loops mapped to blocks or threads.
  SmallVector<Value, 6> ivs;
  // Steps of the loops mapped to blocks or threads.
  SmallVector<Value, 6> steps;
};
} // namespace

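// For illustration only (a conceptual sketch, not tied to a particular test):
// mapping a two-deep nest with one block and one thread dimension, e.g.
//
//   affine.for %i = %lbi to %ubi step 2 {
//     affine.for %j = %lbj to %ubj {
//       ... uses of %i and %j ...
//     }
//   }
//
// produces a gpu.launch whose grid/block sizes are the loop ranges
// ((%ubi - %lbi) / 2 and %ubj - %lbj) and whose body rebuilds the original
// induction variables from the hardware ids as
//   %i = %block_id_x * 2 + %lbi
//   %j = %thread_id_x + %lbj
// as implemented by collectBounds and createLaunch below.
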
// Return true if the value is obviously a constant "one".
static bool isConstantOne(Value value) {
  if (auto def = value.getDefiningOp<arith::ConstantIndexOp>())
    return def.value() == 1;
  return false;
}

// Collect ranges, bounds, steps and induction variables in preparation for
// mapping a loop nest of depth "numLoops" rooted at "forOp" to a GPU kernel.
// This may fail if the IR for computing loop bounds cannot be constructed, for
// example if an affine loop uses semi-affine maps. Return the last loop to be
// mapped on success, std::nullopt on failure.
std::optional<AffineForOp>
AffineLoopToGpuConverter::collectBounds(AffineForOp forOp, unsigned numLoops) {
  OpBuilder builder(forOp.getOperation());
  dims.reserve(numLoops);
  lbs.reserve(numLoops);
  ivs.reserve(numLoops);
  steps.reserve(numLoops);
  AffineForOp currentLoop = forOp;
  for (unsigned i = 0; i < numLoops; ++i) {
    Value lowerBound = getOrEmitLowerBound(currentLoop, builder);
    Value upperBound = getOrEmitUpperBound(currentLoop, builder);
    if (!lowerBound || !upperBound) {
      return std::nullopt;
    }

    Value range = builder.create<arith::SubIOp>(currentLoop.getLoc(),
                                                upperBound, lowerBound);
    Value step = getOrCreateStep(currentLoop, builder);
    if (!isConstantOne(step))
      range = builder.create<arith::DivSIOp>(currentLoop.getLoc(), range, step);
    dims.push_back(range);

    lbs.push_back(lowerBound);
    ivs.push_back(currentLoop.getInductionVar());
    steps.push_back(step);

    if (i != numLoops - 1)
      currentLoop = cast<AffineForOp>(&currentLoop.getBody()->front());
  }
  return currentLoop;
}

// Replace the loop nest rooted at "rootForOp" with a GPU launch operation.
// This expects "innermostForOp" to point to the last loop to be transformed
// to the kernel, and to have (numBlockDims + numThreadDims) perfectly nested
// loops between "rootForOp" and "innermostForOp".
void AffineLoopToGpuConverter::createLaunch(AffineForOp rootForOp,
                                            AffineForOp innermostForOp,
                                            unsigned numBlockDims,
                                            unsigned numThreadDims) {
  OpBuilder builder(rootForOp.getOperation());
  // Prepare the grid and block sizes for the launch operation. If there is
  // no loop mapped to a specific dimension, use constant "1" as its size.
  Value constOne =
      (numBlockDims < 3 || numThreadDims < 3)
          ? builder.create<arith::ConstantIndexOp>(rootForOp.getLoc(), 1)
          : nullptr;
  Value gridSizeX = numBlockDims > 0 ? dims[0] : constOne;
  Value gridSizeY = numBlockDims > 1 ? dims[1] : constOne;
  Value gridSizeZ = numBlockDims > 2 ? dims[2] : constOne;
  Value blockSizeX = numThreadDims > 0 ? dims[numBlockDims] : constOne;
  Value blockSizeY = numThreadDims > 1 ? dims[numBlockDims + 1] : constOne;
  Value blockSizeZ = numThreadDims > 2 ? dims[numBlockDims + 2] : constOne;

  // Create a launch op and move the body region of the innermost loop to the
  // launch op.
  auto launchOp = builder.create<gpu::LaunchOp>(
      rootForOp.getLoc(), gridSizeX, gridSizeY, gridSizeZ, blockSizeX,
      blockSizeY, blockSizeZ);

  // Replace the loop terminator (loops contain only a single block) with the
  // gpu terminator and move the operations from the loop body block to the gpu
  // launch body block. Do not move the entire block because of the difference
  // in block arguments.
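  // (The entry block of the launch region carries the implicit block/thread
  // id and size arguments, whereas the loop body block carries the induction
  // variable, so only the operation list is spliced over.)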
  Operation &terminator = innermostForOp.getBody()->back();
  Location terminatorLoc = terminator.getLoc();
  terminator.erase();
  builder.setInsertionPointToEnd(innermostForOp.getBody());
  builder.create<gpu::TerminatorOp>(terminatorLoc, std::nullopt);
  launchOp.getBody().front().getOperations().splice(
      launchOp.getBody().front().begin(),
      innermostForOp.getBody()->getOperations());

  // Remap the loop iterators to use block/thread identifiers instead. Loops
  // may iterate from LB with step S whereas GPU thread/block ids always
  // iterate from 0 to N with step 1. Therefore, loop induction variables are
  // replaced with (gpu-thread/block-id * S) + LB.
  builder.setInsertionPointToStart(&launchOp.getBody().front());
  auto *lbArgumentIt = lbs.begin();
  auto *stepArgumentIt = steps.begin();
  for (const auto &en : llvm::enumerate(ivs)) {
    Value id =
        en.index() < numBlockDims
            ? getDim3Value(launchOp.getBlockIds(), en.index())
            : getDim3Value(launchOp.getThreadIds(), en.index() - numBlockDims);
    Value step = steps[en.index()];
    if (!isConstantOne(step))
      id = builder.create<arith::MulIOp>(rootForOp.getLoc(), step, id);

    Value ivReplacement =
        builder.create<arith::AddIOp>(rootForOp.getLoc(), *lbArgumentIt, id);
    en.value().replaceAllUsesWith(ivReplacement);
    std::advance(lbArgumentIt, 1);
    std::advance(stepArgumentIt, 1);
  }

  // We are done and can erase the original outermost loop.
  rootForOp.erase();
}

// Generic loop to GPU kernel conversion function.
static LogicalResult convertAffineLoopNestToGPULaunch(AffineForOp forOp,
                                                      unsigned numBlockDims,
                                                      unsigned numThreadDims) {
  if (failed(checkAffineLoopNestMappable(forOp, numBlockDims, numThreadDims)))
    return failure();

  AffineLoopToGpuConverter converter;
  auto maybeInnerLoop =
      converter.collectBounds(forOp, numBlockDims + numThreadDims);
  if (!maybeInnerLoop)
    return failure();
  converter.createLaunch(forOp, *maybeInnerLoop, numBlockDims, numThreadDims);

  return success();
}

LogicalResult mlir::convertAffineLoopNestToGPULaunch(AffineForOp forOp,
                                                     unsigned numBlockDims,
                                                     unsigned numThreadDims) {
  return ::convertAffineLoopNestToGPULaunch(forOp, numBlockDims,
                                            numThreadDims);
}

namespace {
struct ParallelToGpuLaunchLowering : public OpRewritePattern<ParallelOp> {
  using OpRewritePattern<ParallelOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(ParallelOp parallelOp,
                                PatternRewriter &rewriter) const override;
};
} // namespace

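// For reference, a loop handled by this pattern carries per-dimension mapping
// annotations of the following shape (an illustrative example; see the GPU
// dialect documentation for the authoritative attribute syntax):
//
//   scf.parallel (%i, %j) = (%c0, %c0) to (%n, %m) step (%c1, %c1) {
//     ...
//   } {mapping = [
//       #gpu.loop_dim_map<processor = block_x, map = (d0) -> (d0),
//                         bound = (d0) -> (d0)>,
//       #gpu.loop_dim_map<processor = thread_x, map = (d0) -> (d0),
//                         bound = (d0) -> (d0)>]}
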
/// Tries to derive a static upper bound from the defining operation of
/// `upperBound`.
static Value deriveStaticUpperBound(Value upperBound,
                                    PatternRewriter &rewriter) {
  if (auto op = upperBound.getDefiningOp<arith::ConstantIndexOp>()) {
    return op;
  }

  if (auto minOp = upperBound.getDefiningOp<AffineMinOp>()) {
    for (const AffineExpr &result : minOp.getMap().getResults()) {
      if (auto constExpr = result.dyn_cast<AffineConstantExpr>()) {
        return rewriter.create<arith::ConstantIndexOp>(minOp.getLoc(),
                                                       constExpr.getValue());
      }
    }
  }

  if (auto minOp = upperBound.getDefiningOp<arith::MinSIOp>()) {
    for (Value operand : {minOp.getLhs(), minOp.getRhs()}) {
      if (auto staticBound = deriveStaticUpperBound(operand, rewriter))
        return staticBound;
    }
  }

  if (auto multiplyOp = upperBound.getDefiningOp<arith::MulIOp>()) {
    if (auto lhs = dyn_cast_or_null<arith::ConstantIndexOp>(
            deriveStaticUpperBound(multiplyOp.getOperand(0), rewriter)
                .getDefiningOp()))
      if (auto rhs = dyn_cast_or_null<arith::ConstantIndexOp>(
              deriveStaticUpperBound(multiplyOp.getOperand(1), rewriter)
                  .getDefiningOp())) {
        // Assumptions about the upper bound of minimum computations no longer
        // work if multiplied by mixed signs, so abort in this case.
        if ((lhs.value() < 0) != (rhs.value() < 0))
          return {};

        return rewriter.create<arith::ConstantIndexOp>(
            multiplyOp.getLoc(), lhs.value() * rhs.value());
      }
  }

  return {};
}

static bool isMappedToProcessor(gpu::Processor processor) {
  return processor != gpu::Processor::Sequential;
}

static unsigned getLaunchOpArgumentNum(gpu::Processor processor) {
  switch (processor) {
  case gpu::Processor::BlockX:
    return 0;
  case gpu::Processor::BlockY:
    return 1;
  case gpu::Processor::BlockZ:
    return 2;
  case gpu::Processor::ThreadX:
    return 3;
  case gpu::Processor::ThreadY:
    return 4;
  case gpu::Processor::ThreadZ:
    return 5;
  default:;
  }
  llvm_unreachable(
      "invalid processor type while retrieving launch op argument number");
}

/// Modifies the current transformation state to capture the effect of the
/// given `scf.parallel` operation on index substitutions and the operations
/// to be inserted.
/// Specifically, if a dimension of a parallel loop is mapped to a hardware id,
/// this function will
/// - compute the loop index based on the hardware id and the affine map from
///   the mapping and update `cloningMap` to substitute all uses.
/// - derive a new upper bound for the hardware id and augment the provided
///   `gpu.launch` operation accordingly.
/// - if the upper bound is imprecise, insert a conditional in the `gpu.launch`
///   and update the rewriter to insert into the conditional's body.
/// If the dimension is mapped to sequential,
/// - insert a for loop into the body and update the rewriter to insert into
///   the for loop's body.
/// - update the `cloningMap` to replace uses of the index with the index of
///   the new for loop.
/// In either case,
/// - append the operations from the loop's body to the worklist, in reverse
///   order.
/// To note the end of the current scope in case a loop or conditional was
/// inserted, a sentinel (the `gpu.launch` operation) is inserted into the
/// worklist. This signals the processor of the worklist to pop the rewriter
/// one scope-level up.
static LogicalResult processParallelLoop(
    ParallelOp parallelOp, gpu::LaunchOp launchOp, IRMapping &cloningMap,
    SmallVectorImpl<Operation *> &worklist,
    DenseMap<gpu::Processor, Value> &bounds, PatternRewriter &rewriter) {
  // TODO: Verify that this is a valid GPU mapping.
  // processor ids: 0-2 -> block [x/y/z], 3-5 -> thread [x/y/z], 6 -> sequential
  ArrayAttr mapping =
      parallelOp->getAttrOfType<ArrayAttr>(gpu::getMappingAttrName());

  // TODO: Support reductions.
  if (!mapping || parallelOp.getNumResults() != 0)
    return failure();

  Location loc = parallelOp.getLoc();

  auto launchIndependent = [&launchOp](Value val) {
    return val.getParentRegion()->isAncestor(launchOp->getParentRegion());
  };

  auto ensureLaunchIndependent = [&rewriter,
                                  launchIndependent](Value val) -> Value {
    if (launchIndependent(val))
      return val;
    if (auto constOp = val.getDefiningOp<arith::ConstantOp>())
      return rewriter.create<arith::ConstantOp>(constOp.getLoc(),
                                                constOp.getValue());
    return {};
  };

  for (auto config : llvm::zip(
           mapping, parallelOp.getInductionVars(), parallelOp.getLowerBound(),
           parallelOp.getUpperBound(), parallelOp.getStep())) {
    Attribute mappingAttribute;
    Value iv, lowerBound, upperBound, step;
    std::tie(mappingAttribute, iv, lowerBound, upperBound, step) = config;
    auto annotation =
        mappingAttribute.dyn_cast<gpu::ParallelLoopDimMappingAttr>();
    if (!annotation)
      return parallelOp.emitOpError()
             << "expected mapping attribute for lowering to GPU";
    Value newIndex;
    gpu::Processor processor = annotation.getProcessor();

    if (isMappedToProcessor(processor)) {
      // Use the corresponding thread/grid index as replacement for the loop
      // iv.
      Value operand =
          launchOp.getBody().getArgument(getLaunchOpArgumentNum(processor));
      // Take the index map and add the lower bound and step computations in.
      // This computes operand * step + lowerBound.
      // Use an affine map here so that it composes nicely with the provided
      // annotation.
      AffineMap lowerAndStep = AffineMap::get(
          1, 2,
          rewriter.getAffineDimExpr(0) * rewriter.getAffineSymbolExpr(0) +
              rewriter.getAffineSymbolExpr(1));
      newIndex = rewriter.create<AffineApplyOp>(
          loc, annotation.getMap().compose(lowerAndStep),
          ValueRange{operand, step, lowerBound});
      // If there was also a bound, insert that, too.
      // TODO: Check that we do not assign bounds twice.
      if (annotation.getBound()) {
        // We pass as the single operand to the bound-map the number of
        // iterations, which is (upperBound - lowerBound) ceilDiv step. To
        // support inner loops with dynamic upper bounds (as generated by e.g.
        // tiling), try to derive a max for the bounds. If the used bound for
        // the hardware id is imprecise, wrap the contained code into a
        // conditional. If the lower bound is constant or defined before the
        // launch, we can use it in the launch bounds. Otherwise fail.
        if (!launchIndependent(lowerBound) &&
            !isa_and_nonnull<arith::ConstantOp>(lowerBound.getDefiningOp()))
          return failure();
        // The step must also be constant or defined outside of the loop nest.
        if (!launchIndependent(step) &&
            !isa_and_nonnull<arith::ConstantOp>(step.getDefiningOp()))
          return failure();
        // If the upper bound is constant or defined before the launch, we can
        // use it in the launch bounds directly. Otherwise try to derive a
        // bound.
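        // As a hypothetical example: for lower bound 0, step 4, and an upper
        // bound %ub that depends on an outer induction variable (e.g. the
        // affine.min produced by tiling), the code below derives a static
        // bound C for %ub, uses (C - 0) ceildiv 4 as the launch size, and
        // predicates the cloned body on the original, precise %ub.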
        bool boundIsPrecise =
            launchIndependent(upperBound) ||
            isa_and_nonnull<arith::ConstantOp>(upperBound.getDefiningOp());
        {
          PatternRewriter::InsertionGuard guard(rewriter);
          rewriter.setInsertionPoint(launchOp);
          if (!boundIsPrecise) {
            upperBound = deriveStaticUpperBound(upperBound, rewriter);
            if (!upperBound) {
              return rewriter.notifyMatchFailure(
                  parallelOp,
                  "cannot derive loop-invariant upper bound for number of "
                  "iterations");
            }
          }
          // Compute the number of iterations needed. We compute this as the
          // affine expression (upperBound - lowerBound) ceilDiv step. We use
          // affine.apply here so that it composes nicely with the provided
          // map.
          AffineMap stepMap = AffineMap::get(
              1, 2,
              ((rewriter.getAffineDimExpr(0) - rewriter.getAffineSymbolExpr(0))
                   .ceilDiv(rewriter.getAffineSymbolExpr(1))));
          Value launchBound = rewriter.create<AffineApplyOp>(
              loc, annotation.getBound().compose(stepMap),
              ValueRange{
                  ensureLaunchIndependent(
                      cloningMap.lookupOrDefault(upperBound)),
                  ensureLaunchIndependent(
                      cloningMap.lookupOrDefault(lowerBound)),
                  ensureLaunchIndependent(cloningMap.lookupOrDefault(step))});
          // todo(herhut,ravishankarm): Update the behavior of setMappingAttr
          // when this condition is relaxed.
          if (bounds.contains(processor)) {
            return rewriter.notifyMatchFailure(
                parallelOp, "cannot redefine the bound for processor " +
                                Twine(static_cast<int64_t>(processor)));
          }
          bounds[processor] = launchBound;
        }
        if (!boundIsPrecise) {
          // We are using an approximation, create a surrounding conditional.
          Value originalBound = std::get<3>(config);
          arith::CmpIOp pred = rewriter.create<arith::CmpIOp>(
              loc, arith::CmpIPredicate::slt, newIndex,
              cloningMap.lookupOrDefault(originalBound));
          scf::IfOp ifOp = rewriter.create<scf::IfOp>(loc, pred, false);
          rewriter.setInsertionPointToStart(&ifOp.getThenRegion().front());
          // Put a sentinel into the worklist so we know when to pop out of
          // the if body again. We use the launchOp here, as that cannot be
          // part of the body's operations.
          worklist.push_back(launchOp.getOperation());
        }
      }
    } else {
      // Create a sequential for loop.
      auto loopOp = rewriter.create<scf::ForOp>(
          loc, cloningMap.lookupOrDefault(lowerBound),
          cloningMap.lookupOrDefault(upperBound),
          cloningMap.lookupOrDefault(step));
      newIndex = loopOp.getInductionVar();
      rewriter.setInsertionPointToStart(loopOp.getBody());
      // Put a sentinel into the worklist so we know when to pop out of the
      // loop body again. We use the launchOp here, as that cannot be part of
      // the body's operations.
      worklist.push_back(launchOp.getOperation());
    }
    cloningMap.map(iv, newIndex);
  }

  // Propagate custom user-defined optional attributes that can be used at a
  // later stage, such as extension data for GPU kernel dispatch.
  for (const auto &namedAttr : parallelOp->getAttrs()) {
    if (namedAttr.getName() == gpu::getMappingAttrName() ||
        namedAttr.getName() == ParallelOp::getOperandSegmentSizeAttr())
      continue;
    launchOp->setAttr(namedAttr.getName(), namedAttr.getValue());
  }

  Block *body = parallelOp.getBody();
  worklist.reserve(worklist.size() + body->getOperations().size());
  for (Operation &op : llvm::reverse(body->without_terminator()))
    worklist.push_back(&op);
  return success();
}

/// Lower a `scf.parallel` operation into a corresponding `gpu.launch`
/// operation.
///
/// This essentially transforms a loop nest into a corresponding SIMT function.
/// The conversion is driven by mapping annotations on the `scf.parallel`
/// operations. The mapping is provided via an array attribute named `mapping`
/// with one entry per loop dimension; each entry has three parameters:
/// - processor: the hardware id to map to. 0-2 are block dimensions, 3-5 are
///   thread dimensions and 6 is sequential.
/// - map: an affine map that is used to pre-process hardware ids before
///   substitution.
/// - bound: an affine map that is used to compute the bound of the hardware
///   id based on an upper bound of the number of iterations.
/// If the `scf.parallel` contains nested `scf.parallel` operations, those
/// need to be annotated as well. Structurally, the transformation works by
/// splicing all operations from nested `scf.parallel` operations into a single
/// sequence. Indices mapped to hardware ids are substituted with those ids,
/// whereas sequential mappings result in a sequential for-loop. To have more
/// flexibility when mapping code to hardware ids, the transform supports two
/// affine maps. The first, `map`, is used to compute the actual index for
/// substitution from the hardware id. The second, `bound`, is used to compute
/// the launch dimension for the hardware id from the number of iterations the
/// mapped loop is performing. Note that the number of iterations might be
/// imprecise if the corresponding loop-bounds are loop-dependent. In such
/// cases, the hardware id might iterate over additional indices. The
/// transformation caters for this by predicating the created sequence of
/// instructions on the actual loop bound. This only works if a static upper
/// bound for the dynamic loop bound can be derived, currently via analyzing
/// `affine.min` operations.
LogicalResult
ParallelToGpuLaunchLowering::matchAndRewrite(ParallelOp parallelOp,
                                             PatternRewriter &rewriter) const {
  // Mark the operation as visited for the recursive legality check.
  parallelOp->setAttr(kVisitedAttrName, rewriter.getUnitAttr());

  // We can only transform starting at the outer-most loop. Launches inside of
  // parallel loops are not supported.
  if (auto parentLoop = parallelOp->getParentOfType<ParallelOp>())
    return failure();
  // Create a launch operation. We start with bound one for all grid/block
  // sizes. Those will be refined later as we discover them from mappings.
  Location loc = parallelOp.getLoc();
  Value constantOne =
      rewriter.create<arith::ConstantIndexOp>(parallelOp.getLoc(), 1);
  gpu::LaunchOp launchOp = rewriter.create<gpu::LaunchOp>(
      parallelOp.getLoc(), constantOne, constantOne, constantOne, constantOne,
      constantOne, constantOne);
  rewriter.setInsertionPointToEnd(&launchOp.getBody().front());
  rewriter.create<gpu::TerminatorOp>(loc);
  rewriter.setInsertionPointToStart(&launchOp.getBody().front());

  IRMapping cloningMap;
  llvm::DenseMap<gpu::Processor, Value> launchBounds;
  SmallVector<Operation *, 16> worklist;
  if (failed(processParallelLoop(parallelOp, launchOp, cloningMap, worklist,
                                 launchBounds, rewriter)))
    return failure();

  // Whether we have seen any side effects. Reset when leaving an inner scope.
  bool seenSideeffects = false;
  // Whether we have left a nesting scope (and hence are no longer innermost).
  bool leftNestingScope = false;
  while (!worklist.empty()) {
    Operation *op = worklist.pop_back_val();
    // Now walk over the body and clone it.
    // TODO: This is only correct if there either is no further scf.parallel
    //       nested or this code is side-effect free. Otherwise we might need
    //       predication. We are overly conservative for now and only allow
    //       side effects in the innermost scope.
    if (auto nestedParallel = dyn_cast<ParallelOp>(op)) {
      // Before entering a nested scope, make sure there have been no side
      // effects until now.
      if (seenSideeffects)
        return failure();
      // A nested scf.parallel needs insertion of code to compute indices.
      // Insert that now. This will also update the worklist with the loop's
      // body.
      if (failed(processParallelLoop(nestedParallel, launchOp, cloningMap,
                                     worklist, launchBounds, rewriter)))
        return failure();
    } else if (op == launchOp.getOperation()) {
      // Found our sentinel value. We have finished the operations from one
      // nesting level, pop one level back up.
      auto *parent = rewriter.getInsertionPoint()->getParentOp();
      rewriter.setInsertionPointAfter(parent);
      leftNestingScope = true;
      seenSideeffects = false;
    } else {
      // Otherwise we copy it over.
      Operation *clone = rewriter.clone(*op, cloningMap);
      cloningMap.map(op->getResults(), clone->getResults());
      // Check for side effects.
      // TODO: Handle region side effects properly.
      seenSideeffects |=
          !isMemoryEffectFree(clone) || clone->getNumRegions() != 0;
      // If we are no longer in the innermost scope, side effects are
      // disallowed.
      if (seenSideeffects && leftNestingScope)
        return failure();
    }
  }

  // Now that we succeeded creating the launch operation, also update the
  // bounds.
  for (auto bound : launchBounds)
    launchOp.setOperand(getLaunchOpArgumentNum(std::get<0>(bound)),
                        std::get<1>(bound));

  rewriter.eraseOp(parallelOp);
  return success();
}

void mlir::populateParallelLoopToGPUPatterns(RewritePatternSet &patterns) {
  patterns.add<ParallelToGpuLaunchLowering>(patterns.getContext());
}

void mlir::configureParallelLoopToGPULegality(ConversionTarget &target) {
  target.addLegalDialect<memref::MemRefDialect>();
  target.addDynamicallyLegalOp<scf::ParallelOp>([](scf::ParallelOp parallelOp) {
    return !parallelOp->hasAttr(gpu::getMappingAttrName()) ||
           parallelOp->hasAttr(kVisitedAttrName);
  });
}

void mlir::finalizeParallelLoopToGPUConversion(Operation *op) {
  op->walk([](scf::ParallelOp parallelOp) {
    parallelOp->removeAttr(kVisitedAttrName);
  });
}
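
// A minimal sketch of how a conversion pass might drive these entry points
// (illustrative only; the actual pass definitions live elsewhere):
//
//   RewritePatternSet patterns(op->getContext());
//   populateParallelLoopToGPUPatterns(patterns);
//   ConversionTarget target(*op->getContext());
//   configureParallelLoopToGPULegality(target);
//   if (failed(applyPartialConversion(op, target, std::move(patterns))))
//     return signalPassFailure();
//   finalizeParallelLoopToGPUConversion(op);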