//===- SCFToGPU.cpp - Convert an affine loop nest to a GPU kernel -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements a straightforward conversion of a loop nest into a GPU
// kernel. The caller is expected to guarantee that the conversion is correct
// or to further transform the kernel to ensure correctness.
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/SCFToGPU/SCFToGPU.h"

#include "mlir/Conversion/AffineToStandard/AffineToStandard.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/GPU/Transforms/ParallelLoopMapper.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/Passes.h"
#include "mlir/Transforms/RegionUtils.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/Support/Debug.h"
#include <optional>

#define DEBUG_TYPE "loops-to-gpu"

using namespace mlir;
using namespace mlir::scf;

// Name of the internal attribute used to mark visited operations during the
// conversion.
//
// NOTE: The conversion originally used the following legality criteria:
//   `!parallelOp->hasAttr(gpu::getMappingAttrName())`
// But the provided pattern might reject some cases based on more detailed
// analysis of the `mapping` attribute.
// To avoid dialect conversion failure due to a non-converted illegal
// operation, we use this extra unit attribute as a marker that the operation
// was checked by the pattern and should be considered legal in the following
// legality checks. The `finalizeParallelLoopToGPUConversion` function performs
// the cleanup of these extra attributes and is supposed to be called after the
// dialect conversion.
//
// TODO: Implement a cleaner solution, factoring out the "matching" logic
// from the pattern and its callees into a separate function that can be called
// from both the pattern and the op legality check.
static constexpr StringLiteral kVisitedAttrName = "SCFToGPU_visited";

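// For illustration, a pass driving this conversion is expected to combine the
// public entry points defined below roughly as follows (a sketch, not a
// definitive implementation):
//
//   RewritePatternSet patterns(&getContext());
//   populateParallelLoopToGPUPatterns(patterns);
//   ConversionTarget target(getContext());
//   target.markUnknownOpDynamicallyLegal([](Operation *) { return true; });
//   configureParallelLoopToGPULegality(target);
//   if (failed(applyPartialConversion(getOperation(), target,
//                                     std::move(patterns))))
//     signalPassFailure();
//   finalizeParallelLoopToGPUConversion(getOperation());
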
// Extract an indexed value from KernelDim3.
static Value getDim3Value(const gpu::KernelDim3 &dim3, unsigned pos) {
  switch (pos) {
  case 0:
    return dim3.x;
  case 1:
    return dim3.y;
  case 2:
    return dim3.z;
  default:
    llvm_unreachable("dim3 position out of bounds");
  }
  return nullptr;
}

// Get the lower bound-related operands of a loop operation.
static Operation::operand_range getLowerBoundOperands(AffineForOp forOp) {
  return forOp.getLowerBoundOperands();
}

// Get the upper bound-related operands of a loop operation.
static Operation::operand_range getUpperBoundOperands(AffineForOp forOp) {
  return forOp.getUpperBoundOperands();
}

// Get a Value that corresponds to the loop step. If the step is an attribute,
// materialize a corresponding constant using builder.
static Value getOrCreateStep(AffineForOp forOp, OpBuilder &builder) {
  return builder.create<arith::ConstantIndexOp>(forOp.getLoc(),
                                                forOp.getStep());
}

// Get a Value for the loop lower bound. If the value requires computation,
// materialize the instructions using builder.
static Value getOrEmitLowerBound(AffineForOp forOp, OpBuilder &builder) {
  return lowerAffineLowerBound(forOp, builder);
}

// Get a Value for the loop upper bound. If the value requires computation,
// materialize the instructions using builder.
static Value getOrEmitUpperBound(AffineForOp forOp, OpBuilder &builder) {
  return lowerAffineUpperBound(forOp, builder);
}

// Check the structure of the loop nest:
//   - there are enough loops to map to numDims;
//   - the loops are perfectly nested;
//   - the loop bounds can be computed above the outermost loop.
// This roughly corresponds to the "matcher" part of the pattern-based
// rewriting infrastructure.
static LogicalResult checkAffineLoopNestMappableImpl(AffineForOp forOp,
                                                     unsigned numDims) {
  Region &limit = forOp.getRegion();
  for (unsigned i = 0, e = numDims; i < e; ++i) {
    Operation *nested = &forOp.getBody()->front();
    if (!areValuesDefinedAbove(getLowerBoundOperands(forOp), limit) ||
        !areValuesDefinedAbove(getUpperBoundOperands(forOp), limit))
      return forOp.emitError(
          "loops with bounds depending on other mapped loops "
          "are not supported");

    // The innermost loop can have an arbitrary body, skip the perfect nesting
    // check for it.
    if (i == e - 1)
      break;

    auto begin = forOp.getBody()->begin(), end = forOp.getBody()->end();
    if (forOp.getBody()->empty() || std::next(begin, 2) != end)
      return forOp.emitError("expected perfectly nested loops in the body");

    if (!(forOp = dyn_cast<AffineForOp>(nested)))
      return nested->emitError("expected a nested loop");
  }
  return success();
}

static LogicalResult checkAffineLoopNestMappable(AffineForOp forOp,
                                                 unsigned numBlockDims,
                                                 unsigned numThreadDims) {
  if (numBlockDims < 1 || numThreadDims < 1) {
    LLVM_DEBUG(llvm::dbgs() << "nothing to map");
    return success();
  }

  if (numBlockDims > 3) {
    return forOp.emitError("cannot map to more than 3 block dimensions");
  }
  if (numThreadDims > 3) {
    return forOp.emitError("cannot map to more than 3 thread dimensions");
  }
  return checkAffineLoopNestMappableImpl(forOp, numBlockDims + numThreadDims);
}

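// For example (illustrative only, not a verbatim test case): with
// numBlockDims = 1 and numThreadDims = 1, the following nest passes the
// checks above and is mapped to a gpu.launch whose grid size along x is
// %N - 0 and whose block size along x is %M - 0 (the remaining dimensions
// get a constant 1):
//
//   affine.for %i = 0 to %N {
//     affine.for %j = 0 to %M {
//       "compute"(%i, %j) : (index, index) -> ()
//     }
//   }
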
namespace {
// Helper structure that holds common state of the loop to GPU kernel
// conversion.
struct AffineLoopToGpuConverter {
  Optional<AffineForOp> collectBounds(AffineForOp forOp, unsigned numLoops);

  void createLaunch(AffineForOp rootForOp, AffineForOp innermostForOp,
                    unsigned numBlockDims, unsigned numThreadDims);

  // Ranges of the loops mapped to blocks or threads.
  SmallVector<Value, 6> dims;
  // Lower bounds of the loops mapped to blocks or threads.
  SmallVector<Value, 6> lbs;
  // Induction variables of the loops mapped to blocks or threads.
  SmallVector<Value, 6> ivs;
  // Steps of the loops mapped to blocks or threads.
  SmallVector<Value, 6> steps;
};
} // namespace

// Return true if the value is obviously a constant "one".
static bool isConstantOne(Value value) {
  if (auto def = value.getDefiningOp<arith::ConstantIndexOp>())
    return def.value() == 1;
  return false;
}

// Collect ranges, bounds, steps and induction variables in preparation for
// mapping a loop nest of depth "numLoops" rooted at "forOp" to a GPU kernel.
// This may fail if the IR for computing loop bounds cannot be constructed, for
// example if an affine loop uses semi-affine maps. Return the last loop to be
// mapped on success, std::nullopt on failure.
Optional<AffineForOp>
AffineLoopToGpuConverter::collectBounds(AffineForOp forOp, unsigned numLoops) {
  OpBuilder builder(forOp.getOperation());
  dims.reserve(numLoops);
  lbs.reserve(numLoops);
  ivs.reserve(numLoops);
  steps.reserve(numLoops);
  AffineForOp currentLoop = forOp;
  for (unsigned i = 0; i < numLoops; ++i) {
    Value lowerBound = getOrEmitLowerBound(currentLoop, builder);
    Value upperBound = getOrEmitUpperBound(currentLoop, builder);
    if (!lowerBound || !upperBound) {
      return std::nullopt;
    }

    Value range = builder.create<arith::SubIOp>(currentLoop.getLoc(),
                                                upperBound, lowerBound);
    Value step = getOrCreateStep(currentLoop, builder);
    if (!isConstantOne(step))
      range =
          builder.create<arith::DivSIOp>(currentLoop.getLoc(), range, step);
    dims.push_back(range);

    lbs.push_back(lowerBound);
    ivs.push_back(currentLoop.getInductionVar());
    steps.push_back(step);

    if (i != numLoops - 1)
      currentLoop = cast<AffineForOp>(&currentLoop.getBody()->front());
  }
  return currentLoop;
}

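// For example (an illustrative sketch): for a loop
// `affine.for %i = %lb to %ub step 2`, collectBounds records
//   range = arith.divsi(arith.subi(%ub, %lb), %c2)
//   lb = %lb, iv = %i, step = %c2 (materialized as an arith.constant)
// so that createLaunch can later size the launch and rewrite uses of %i.
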
// Replace the loop nest rooted at "rootForOp" with a GPU launch operation.
// This expects "innermostForOp" to point to the last loop to be transformed
// to the kernel, and to have (numBlockDims + numThreadDims) perfectly nested
// loops between "rootForOp" and "innermostForOp".
void AffineLoopToGpuConverter::createLaunch(AffineForOp rootForOp,
                                            AffineForOp innermostForOp,
                                            unsigned numBlockDims,
                                            unsigned numThreadDims) {
  OpBuilder builder(rootForOp.getOperation());
  // Prepare the grid and block sizes for the launch operation. If there is
  // no loop mapped to a specific dimension, use constant "1" as its size.
  Value constOne =
      (numBlockDims < 3 || numThreadDims < 3)
          ? builder.create<arith::ConstantIndexOp>(rootForOp.getLoc(), 1)
          : nullptr;
  Value gridSizeX = numBlockDims > 0 ? dims[0] : constOne;
  Value gridSizeY = numBlockDims > 1 ? dims[1] : constOne;
  Value gridSizeZ = numBlockDims > 2 ? dims[2] : constOne;
  Value blockSizeX = numThreadDims > 0 ? dims[numBlockDims] : constOne;
  Value blockSizeY = numThreadDims > 1 ? dims[numBlockDims + 1] : constOne;
  Value blockSizeZ = numThreadDims > 2 ? dims[numBlockDims + 2] : constOne;

  // Create a launch op and move the body region of the innermost loop to the
  // launch op.
  auto launchOp = builder.create<gpu::LaunchOp>(
      rootForOp.getLoc(), gridSizeX, gridSizeY, gridSizeZ, blockSizeX,
      blockSizeY, blockSizeZ);

  // Replace the loop terminator (loops contain only a single block) with the
  // gpu terminator and move the operations from the loop body block to the
  // gpu launch body block. Do not move the entire block because of the
  // difference in block arguments.
  Operation &terminator = innermostForOp.getBody()->back();
  Location terminatorLoc = terminator.getLoc();
  terminator.erase();
  builder.setInsertionPointToEnd(innermostForOp.getBody());
  builder.create<gpu::TerminatorOp>(terminatorLoc, std::nullopt);
  launchOp.getBody().front().getOperations().splice(
      launchOp.getBody().front().begin(),
      innermostForOp.getBody()->getOperations());

  // Remap the loop iterators to use block/thread identifiers instead. Loops
  // may iterate from LB with step S whereas GPU thread/block ids always
  // iterate from 0 to N with step 1. Therefore, loop induction variables are
  // replaced with (gpu-thread/block-id * S) + LB.
  builder.setInsertionPointToStart(&launchOp.getBody().front());
  auto *lbArgumentIt = lbs.begin();
  auto *stepArgumentIt = steps.begin();
  for (const auto &en : llvm::enumerate(ivs)) {
    Value id =
        en.index() < numBlockDims
            ? getDim3Value(launchOp.getBlockIds(), en.index())
            : getDim3Value(launchOp.getThreadIds(), en.index() - numBlockDims);
    Value step = steps[en.index()];
    if (!isConstantOne(step))
      id = builder.create<arith::MulIOp>(rootForOp.getLoc(), step, id);

    Value ivReplacement =
        builder.create<arith::AddIOp>(rootForOp.getLoc(), *lbArgumentIt, id);
    en.value().replaceAllUsesWith(ivReplacement);
    std::advance(lbArgumentIt, 1);
    std::advance(stepArgumentIt, 1);
  }

  // We are done and can erase the original outermost loop.
  rootForOp.erase();
}

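// As a concrete illustration (a sketch, not taken from an existing test): a
// loop `affine.for %i = %lb to %ub step 4` mapped to the x-dimension of the
// grid is rewritten so that, inside the launch body,
//   %scaled = arith.muli %c4, %blockIdX
//   %new_i  = arith.addi %lb, %scaled
// replaces every use of %i, where %blockIdX is the block id region argument
// of the gpu.launch.
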
// Generic loop to GPU kernel conversion function.
static LogicalResult convertAffineLoopNestToGPULaunch(AffineForOp forOp,
                                                      unsigned numBlockDims,
                                                      unsigned numThreadDims) {
  if (failed(checkAffineLoopNestMappable(forOp, numBlockDims, numThreadDims)))
    return failure();

  AffineLoopToGpuConverter converter;
  auto maybeInnerLoop =
      converter.collectBounds(forOp, numBlockDims + numThreadDims);
  if (!maybeInnerLoop)
    return failure();
  converter.createLaunch(forOp, *maybeInnerLoop, numBlockDims, numThreadDims);

  return success();
}

LogicalResult mlir::convertAffineLoopNestToGPULaunch(AffineForOp forOp,
                                                     unsigned numBlockDims,
                                                     unsigned numThreadDims) {
  return ::convertAffineLoopNestToGPULaunch(forOp, numBlockDims,
                                            numThreadDims);
}

namespace {
struct ParallelToGpuLaunchLowering : public OpRewritePattern<ParallelOp> {
  using OpRewritePattern<ParallelOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(ParallelOp parallelOp,
                                PatternRewriter &rewriter) const override;
};
} // namespace

/// Tries to derive a static upper bound from the defining operation of
/// `upperBound`.
static Value deriveStaticUpperBound(Value upperBound,
                                    PatternRewriter &rewriter) {
  if (auto op = upperBound.getDefiningOp<arith::ConstantIndexOp>()) {
    return op;
  }

  if (auto minOp = upperBound.getDefiningOp<AffineMinOp>()) {
    for (const AffineExpr &result : minOp.getMap().getResults()) {
      if (auto constExpr = result.dyn_cast<AffineConstantExpr>()) {
        return rewriter.create<arith::ConstantIndexOp>(minOp.getLoc(),
                                                       constExpr.getValue());
      }
    }
  }

  if (auto minOp = upperBound.getDefiningOp<arith::MinSIOp>()) {
    for (Value operand : {minOp.getLhs(), minOp.getRhs()}) {
      if (auto staticBound = deriveStaticUpperBound(operand, rewriter))
        return staticBound;
    }
  }

  if (auto multiplyOp = upperBound.getDefiningOp<arith::MulIOp>()) {
    if (auto lhs = dyn_cast_or_null<arith::ConstantIndexOp>(
            deriveStaticUpperBound(multiplyOp.getOperand(0), rewriter)
                .getDefiningOp()))
      if (auto rhs = dyn_cast_or_null<arith::ConstantIndexOp>(
              deriveStaticUpperBound(multiplyOp.getOperand(1), rewriter)
                  .getDefiningOp())) {
        // Assumptions about the upper bound of minimum computations no longer
        // work if multiplied by mixed signs, so abort in this case.
        if ((lhs.value() < 0) != (rhs.value() < 0))
          return {};

        return rewriter.create<arith::ConstantIndexOp>(
            multiplyOp.getLoc(), lhs.value() * rhs.value());
      }
  }

  return {};
}

static bool isMappedToProcessor(gpu::Processor processor) {
  return processor != gpu::Processor::Sequential;
}

static unsigned getLaunchOpArgumentNum(gpu::Processor processor) {
  switch (processor) {
  case gpu::Processor::BlockX:
    return 0;
  case gpu::Processor::BlockY:
    return 1;
  case gpu::Processor::BlockZ:
    return 2;
  case gpu::Processor::ThreadX:
    return 3;
  case gpu::Processor::ThreadY:
    return 4;
  case gpu::Processor::ThreadZ:
    return 5;
  default:;
  }
  llvm_unreachable(
      "invalid processor type while retrieving launch op argument number");
}

/// Modifies the current transformation state to capture the effect of the
/// given `scf.parallel` operation on index substitutions and the operations
/// to be inserted.
/// Specifically, if a dimension of a parallel loop is mapped to a hardware id,
/// this function will
/// - compute the loop index based on the hardware id and affine map from the
///   mapping and update `cloningMap` to substitute all uses.
/// - derive a new upper bound for the hardware id and augment the provided
///   `gpu.launch` operation accordingly.
/// - if the upper bound is imprecise, insert a conditional in the `gpu.launch`
///   and update the rewriter to insert into the conditional's body.
/// If the dimension is mapped to sequential,
/// - insert a for loop into the body and update the rewriter to insert into
///   the for loop's body.
/// - update the `cloningMap` to replace uses of the index with the index of
///   the new for loop.
/// In either case,
/// - append the instructions from the loop's body to the worklist, in reverse
///   order.
/// To note the end of the current scope in case a loop or conditional was
/// inserted, a sentinel (the `gpu.launch` operation) is inserted into the
/// worklist. This signals the processor of the worklist to pop the rewriter
/// one scope-level up.
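///
/// For illustration (a sketch of the generated expressions, not verbatim
/// compiler output): for a dimension with lower bound %lb, upper bound %ub,
/// step %s, index map `m` and bound map `b`, the loop index is replaced by
///   affine.apply m((hw_id * %s) + %lb)
/// and the corresponding launch dimension, inserted before the `gpu.launch`,
/// becomes
///   affine.apply b((%ub - %lb) ceildiv %s)
/// where `hw_id` stands for the mapped block or thread id argument.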
static LogicalResult processParallelLoop(
    ParallelOp parallelOp, gpu::LaunchOp launchOp, IRMapping &cloningMap,
    SmallVectorImpl<Operation *> &worklist,
    DenseMap<gpu::Processor, Value> &bounds, PatternRewriter &rewriter) {
  // TODO: Verify that this is a valid GPU mapping.
  // processor ids: 0-2 -> block [x/y/z], 3-5 -> thread [x/y/z], 6 -> sequential
  ArrayAttr mapping =
      parallelOp->getAttrOfType<ArrayAttr>(gpu::getMappingAttrName());

  // TODO: Support reductions.
  if (!mapping || parallelOp.getNumResults() != 0)
    return failure();

  Location loc = parallelOp.getLoc();

  auto launchIndependent = [&launchOp](Value val) {
    return val.getParentRegion()->isAncestor(launchOp->getParentRegion());
  };

  auto ensureLaunchIndependent = [&rewriter,
                                  launchIndependent](Value val) -> Value {
    if (launchIndependent(val))
      return val;
    if (auto constOp = val.getDefiningOp<arith::ConstantOp>())
      return rewriter.create<arith::ConstantOp>(constOp.getLoc(),
                                                constOp.getValue());
    return {};
  };

  for (auto config : llvm::zip(
           mapping, parallelOp.getInductionVars(), parallelOp.getLowerBound(),
           parallelOp.getUpperBound(), parallelOp.getStep())) {
    Attribute mappingAttribute;
    Value iv, lowerBound, upperBound, step;
    std::tie(mappingAttribute, iv, lowerBound, upperBound, step) = config;
    auto annotation =
        mappingAttribute.dyn_cast<gpu::ParallelLoopDimMappingAttr>();
    if (!annotation)
      return parallelOp.emitOpError()
             << "expected mapping attribute for lowering to GPU";
    Value newIndex;
    gpu::Processor processor = annotation.getProcessor();

    if (isMappedToProcessor(processor)) {
      // Use the corresponding thread/grid index as replacement for the loop
      // iv.
      Value operand =
          launchOp.getBody().getArgument(getLaunchOpArgumentNum(processor));
      // Take the index map and add the lower bound and step computations in.
      // This computes operand * step + lowerBound.
      // Use an affine map here so that it composes nicely with the provided
      // annotation.
      AffineMap lowerAndStep = AffineMap::get(
          1, 2,
          rewriter.getAffineDimExpr(0) * rewriter.getAffineSymbolExpr(0) +
              rewriter.getAffineSymbolExpr(1));
      newIndex = rewriter.create<AffineApplyOp>(
          loc, annotation.getMap().compose(lowerAndStep),
          ValueRange{operand, step, lowerBound});
      // If there was also a bound, insert that, too.
      // TODO: Check that we do not assign bounds twice.
      if (annotation.getBound()) {
        // We pass as the single operand to the bound-map the number of
        // iterations, which is (upperBound - lowerBound) ceilDiv step. To
        // support inner loops with dynamic upper bounds (as generated by e.g.
        // tiling), try to derive a max for the bounds. If the used bound for
        // the hardware id is imprecise, wrap the contained code into a
        // conditional. If the lower-bound is constant or defined before the
        // launch, we can use it in the launch bounds. Otherwise fail.
        if (!launchIndependent(lowerBound) &&
            !isa_and_nonnull<arith::ConstantOp>(lowerBound.getDefiningOp()))
          return failure();
        // The step must also be constant or defined outside of the loop nest.
        if (!launchIndependent(step) &&
            !isa_and_nonnull<arith::ConstantOp>(step.getDefiningOp()))
          return failure();
        // If the upper-bound is constant or defined before the launch, we can
        // use it in the launch bounds directly. Otherwise try to derive a
        // bound.
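        // For example (an illustrative sketch): a tiled inner loop whose
        // upper bound is `affine.min affine_map<(d0) -> (32, -d0 + 1024)>(%i)`
        // is not launch independent; deriveStaticUpperBound then returns the
        // constant 32 as a stand-in upper bound, and the cloned body is
        // guarded by an `scf.if` that compares the computed index against the
        // original, dynamic bound.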
        bool boundIsPrecise =
            launchIndependent(upperBound) ||
            isa_and_nonnull<arith::ConstantOp>(upperBound.getDefiningOp());
        {
          PatternRewriter::InsertionGuard guard(rewriter);
          rewriter.setInsertionPoint(launchOp);
          if (!boundIsPrecise) {
            upperBound = deriveStaticUpperBound(upperBound, rewriter);
            if (!upperBound) {
              return rewriter.notifyMatchFailure(
                  parallelOp,
                  "cannot derive loop-invariant upper bound for number of "
                  "iterations");
            }
          }
          // Compute the number of iterations needed. We compute this as an
          // affine expression (upperBound - lowerBound) ceilDiv step. We use
          // affine.apply here so that it composes nicely with the provided
          // map.
          AffineMap stepMap = AffineMap::get(
              1, 2,
              ((rewriter.getAffineDimExpr(0) - rewriter.getAffineSymbolExpr(0))
                   .ceilDiv(rewriter.getAffineSymbolExpr(1))));
          Value launchBound = rewriter.create<AffineApplyOp>(
              loc, annotation.getBound().compose(stepMap),
              ValueRange{
                  ensureLaunchIndependent(
                      cloningMap.lookupOrDefault(upperBound)),
                  ensureLaunchIndependent(
                      cloningMap.lookupOrDefault(lowerBound)),
                  ensureLaunchIndependent(cloningMap.lookupOrDefault(step))});
          // TODO(herhut, ravishankarm): Update the behavior of setMappingAttr
          // when this condition is relaxed.
          if (bounds.find(processor) != bounds.end()) {
            return rewriter.notifyMatchFailure(
                parallelOp, "cannot redefine the bound for processor " +
                                Twine(static_cast<int64_t>(processor)));
          }
          bounds[processor] = launchBound;
        }
        if (!boundIsPrecise) {
          // We are using an approximation, create a surrounding conditional.
          Value originalBound = std::get<3>(config);
          arith::CmpIOp pred = rewriter.create<arith::CmpIOp>(
              loc, arith::CmpIPredicate::slt, newIndex,
              cloningMap.lookupOrDefault(originalBound));
          scf::IfOp ifOp = rewriter.create<scf::IfOp>(loc, pred, false);
          rewriter.setInsertionPointToStart(&ifOp.getThenRegion().front());
          // Put a sentinel into the worklist so we know when to pop out of
          // the if body again. We use the launchOp here, as that cannot be
          // part of the body's instructions.
          worklist.push_back(launchOp.getOperation());
        }
      }
    } else {
      // Create a sequential for loop.
      auto loopOp = rewriter.create<scf::ForOp>(
          loc, cloningMap.lookupOrDefault(lowerBound),
          cloningMap.lookupOrDefault(upperBound),
          cloningMap.lookupOrDefault(step));
      newIndex = loopOp.getInductionVar();
      rewriter.setInsertionPointToStart(loopOp.getBody());
      // Put a sentinel into the worklist so we know when to pop out of the
      // loop body again. We use the launchOp here, as that cannot be part of
      // the body's instructions.
      worklist.push_back(launchOp.getOperation());
    }
    cloningMap.map(iv, newIndex);
  }

  // Propagate custom user-defined optional attributes that can be used at a
  // later stage, such as extension data for GPU kernel dispatch.
  for (const auto &namedAttr : parallelOp->getAttrs()) {
    if (namedAttr.getName() == gpu::getMappingAttrName() ||
        namedAttr.getName() == ParallelOp::getOperandSegmentSizeAttr())
      continue;
    launchOp->setAttr(namedAttr.getName(), namedAttr.getValue());
  }

  Block *body = parallelOp.getBody();
  worklist.reserve(worklist.size() + body->getOperations().size());
  for (Operation &op : llvm::reverse(body->without_terminator()))
    worklist.push_back(&op);
  return success();
}

/// Lower a `scf.parallel` operation into a corresponding `gpu.launch`
/// operation.
///
/// This essentially transforms a loop nest into a corresponding SIMT function.
/// The conversion is driven by mapping annotations on the `scf.parallel`
/// operations. The mapping is provided via an array attribute named
/// `mapping`, with one entry per mapped loop dimension; each entry has three
/// fields:
///   - processor: the hardware id to map to. 0-2 are block dimensions, 3-5
///     are thread dimensions and 6 is sequential.
///   - map: an affine map that is used to pre-process hardware ids before
///     substitution.
///   - bound: an affine map that is used to compute the bound of the hardware
///     id based on an upper bound of the number of iterations.
/// If the `scf.parallel` contains nested `scf.parallel` operations, those
/// need to be annotated as well. Structurally, the transformation works by
/// splicing all operations from nested `scf.parallel` operations into a single
/// sequence. Indices mapped to hardware ids are substituted with those ids,
/// whereas sequential mappings result in a sequential for-loop. To have more
/// flexibility when mapping code to hardware ids, the transform supports two
/// affine maps. The first, `map`, is used to compute the actual index for
/// substitution from the hardware id. The second, `bound`, is used to compute
/// the launch dimension for the hardware id from the number of iterations the
/// mapped loop is performing. Note that the number of iterations might be
/// imprecise if the corresponding loop-bounds are loop-dependent. In such
/// cases, the hardware id might iterate over additional indices. The
/// transformation caters for this by predicating the created sequence of
/// instructions on the actual loop bound. This only works if a static upper
/// bound for the dynamic loop bound can be derived, currently via analyzing
/// `affine.min` operations.
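///
/// For illustration (a hand-written sketch, not copied from a test), a fully
/// annotated one-dimensional loop could look like:
///
///   scf.parallel (%i) = (%c0) to (%n) step (%c1) {
///     ...
///   } {mapping = [#gpu.loop_dim_map<processor = block_x,
///                                    map = (d0) -> (d0),
///                                    bound = (d0) -> (d0)>]}
///
/// which maps the loop to the x-dimension of blocks with identity index and
/// bound maps.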
LogicalResult
ParallelToGpuLaunchLowering::matchAndRewrite(ParallelOp parallelOp,
                                             PatternRewriter &rewriter) const {
  // Mark the operation as visited for recursive legality check.
  parallelOp->setAttr(kVisitedAttrName, rewriter.getUnitAttr());

  // We can only transform starting at the outer-most loop. Launches inside of
  // parallel loops are not supported.
  if (auto parentLoop = parallelOp->getParentOfType<ParallelOp>())
    return failure();
  // Create a launch operation. We start with bound one for all grid/block
  // sizes. Those will be refined later as we discover them from mappings.
  Location loc = parallelOp.getLoc();
  Value constantOne =
      rewriter.create<arith::ConstantIndexOp>(parallelOp.getLoc(), 1);
  gpu::LaunchOp launchOp = rewriter.create<gpu::LaunchOp>(
      parallelOp.getLoc(), constantOne, constantOne, constantOne, constantOne,
      constantOne, constantOne);
  rewriter.setInsertionPointToEnd(&launchOp.getBody().front());
  rewriter.create<gpu::TerminatorOp>(loc);
  rewriter.setInsertionPointToStart(&launchOp.getBody().front());

  IRMapping cloningMap;
  llvm::DenseMap<gpu::Processor, Value> launchBounds;
  SmallVector<Operation *, 16> worklist;
  if (failed(processParallelLoop(parallelOp, launchOp, cloningMap, worklist,
                                 launchBounds, rewriter)))
    return failure();

  // Whether we have seen any side-effects. Reset when leaving an inner scope.
  bool seenSideeffects = false;
  // Whether we have left a nesting scope (and hence are no longer innermost).
  bool leftNestingScope = false;
  while (!worklist.empty()) {
    Operation *op = worklist.pop_back_val();
    // Now walk over the body and clone it.
    // TODO: This is only correct if there either is no further scf.parallel
    // nested or this code is side-effect free. Otherwise we might need
    // predication. We are overly conservative for now and only allow
    // side-effects in the innermost scope.
    if (auto nestedParallel = dyn_cast<ParallelOp>(op)) {
      // Before entering a nested scope, make sure there have been no
      // sideeffects until now.
      if (seenSideeffects)
        return failure();
      // A nested scf.parallel needs insertion of code to compute indices.
      // Insert that now. This will also update the worklist with the loop's
      // body.
      if (failed(processParallelLoop(nestedParallel, launchOp, cloningMap,
                                     worklist, launchBounds, rewriter)))
        return failure();
    } else if (op == launchOp.getOperation()) {
      // Found our sentinel value. We have finished the operations from one
      // nesting level, pop one level back up.
      auto *parent = rewriter.getInsertionPoint()->getParentOp();
      rewriter.setInsertionPointAfter(parent);
      leftNestingScope = true;
      seenSideeffects = false;
    } else {
      // Otherwise we copy it over.
      Operation *clone = rewriter.clone(*op, cloningMap);
      cloningMap.map(op->getResults(), clone->getResults());
      // Check for side effects.
      // TODO: Handle region side effects properly.
      seenSideeffects |=
          !isMemoryEffectFree(clone) || clone->getNumRegions() != 0;
      // If we are no longer in the innermost scope, sideeffects are
      // disallowed.
      if (seenSideeffects && leftNestingScope)
        return failure();
    }
  }

  // Now that we succeeded in creating the launch operation, also update the
  // bounds.
  for (auto bound : launchBounds)
    launchOp.setOperand(getLaunchOpArgumentNum(std::get<0>(bound)),
                        std::get<1>(bound));

  rewriter.eraseOp(parallelOp);
  return success();
}

void mlir::populateParallelLoopToGPUPatterns(RewritePatternSet &patterns) {
  patterns.add<ParallelToGpuLaunchLowering>(patterns.getContext());
}

void mlir::configureParallelLoopToGPULegality(ConversionTarget &target) {
  target.addLegalDialect<memref::MemRefDialect>();
  target.addDynamicallyLegalOp<scf::ParallelOp>([](scf::ParallelOp parallelOp) {
    return !parallelOp->hasAttr(gpu::getMappingAttrName()) ||
           parallelOp->hasAttr(kVisitedAttrName);
  });
}

void mlir::finalizeParallelLoopToGPUConversion(Operation *op) {
  op->walk([](scf::ParallelOp parallelOp) {
    parallelOp->removeAttr(kVisitedAttrName);
  });
}