xref: /llvm-project/mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp (revision 0c7890c844fdc7adb6d0cf58403e3fdd7407915d)
1 //===- SCFToGPU.cpp - Convert an affine loop nest to a GPU kernel -------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This implements a straightforward conversion of a loop nest into a GPU
10 // kernel.  The caller is expected to guarantee that the conversion is correct
11 // or to further transform the kernel to ensure correctness.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "mlir/Conversion/SCFToGPU/SCFToGPU.h"
16 
17 #include "mlir/Conversion/AffineToStandard/AffineToStandard.h"
18 #include "mlir/Dialect/Affine/IR/AffineOps.h"
19 #include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
20 #include "mlir/Dialect/GPU/GPUDialect.h"
21 #include "mlir/Dialect/GPU/ParallelLoopMapper.h"
22 #include "mlir/Dialect/MemRef/IR/MemRef.h"
23 #include "mlir/Dialect/SCF/SCF.h"
24 #include "mlir/Dialect/StandardOps/IR/Ops.h"
25 #include "mlir/IR/AffineExpr.h"
26 #include "mlir/IR/BlockAndValueMapping.h"
27 #include "mlir/IR/Builders.h"
28 #include "mlir/Pass/Pass.h"
29 #include "mlir/Transforms/DialectConversion.h"
30 #include "mlir/Transforms/LoopUtils.h"
31 #include "mlir/Transforms/Passes.h"
32 #include "mlir/Transforms/RegionUtils.h"
33 #include "llvm/ADT/Sequence.h"
34 #include "llvm/Support/Debug.h"
35 
36 #define DEBUG_TYPE "loops-to-gpu"
37 
38 using namespace mlir;
39 using namespace mlir::scf;
40 
41 // Name of internal attribute to mark visited operations during conversion.
42 //
43 // NOTE: The conversion originally used the following legality criteria:
44 //   `!parallelOp->hasAttr(gpu::getMappingAttrName())`
45 // But the provided pattern might reject some cases based on more detailed
46 // analysis of the `mapping` attribute.
47 // To avoid dialect conversion failures due to non-converted illegal operations,
48 // we use this extra unit attribute as a marker indicating that the operation
49 // was checked by the pattern and should be considered legal in the following
50 // legality checks. The `finalizeParallelLoopToGPUConversion` function cleans up
51 // these extra attributes and is supposed to be called after the dialect
52 // conversion.
53 //
54 // TODO: Implement a cleaner solution, factoring out the "matching" logic
55 // from the pattern and its callees into a separate function that can be called
56 // from both the pattern and the op legality check.
57 static constexpr StringLiteral kVisitedAttrName = "SCFToGPU_visited";
58 
59 // Extract an indexed value from KernelDim3.
60 static Value getDim3Value(const gpu::KernelDim3 &dim3, unsigned pos) {
61   switch (pos) {
62   case 0:
63     return dim3.x;
64   case 1:
65     return dim3.y;
66   case 2:
67     return dim3.z;
68   default:
69     llvm_unreachable("dim3 position out of bounds");
70   }
71   return nullptr;
72 }
73 
74 // Get the lower bound-related operands of a loop operation.
75 static Operation::operand_range getLowerBoundOperands(AffineForOp forOp) {
76   return forOp.getLowerBoundOperands();
77 }
78 
79 // Get the upper bound-related operands of a loop operation.
80 static Operation::operand_range getUpperBoundOperands(AffineForOp forOp) {
81   return forOp.getUpperBoundOperands();
82 }
83 
84 // Get a Value that corresponds to the loop step.  If the step is an attribute,
85 // materialize a corresponding constant using builder.
86 static Value getOrCreateStep(AffineForOp forOp, OpBuilder &builder) {
87   return builder.create<arith::ConstantIndexOp>(forOp.getLoc(),
88                                                 forOp.getStep());
89 }
90 
91 // Get a Value for the loop lower bound.  If the value requires computation,
92 // materialize the instructions using builder.
93 static Value getOrEmitLowerBound(AffineForOp forOp, OpBuilder &builder) {
94   return lowerAffineLowerBound(forOp, builder);
95 }
96 
97 // Get a Value for the loop upper bound.  If the value requires computation,
98 // materialize the instructions using builder.
99 static Value getOrEmitUpperBound(AffineForOp forOp, OpBuilder &builder) {
100   return lowerAffineUpperBound(forOp, builder);
101 }
102 
103 // Check the structure of the loop nest:
104 //   - there are enough loops to map to numDims;
105 //   - the loops are perfectly nested;
106 //   - the loop bounds can be computed above the outermost loop.
107 // This roughly corresponds to the "matcher" part of the pattern-based
108 // rewriting infrastructure.
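//
// For illustration (a hedged sketch, not taken from the in-tree tests), a nest
// like the following is mappable for numDims == 2: both bounds are defined
// above the outer loop, and the inner loop is the only operation in the outer
// body besides the implicit terminator.
//
//   affine.for %i = 0 to 256 {
//     affine.for %j = 0 to 512 {
//       // ... arbitrary body ...
//     }
//   }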
109 static LogicalResult checkAffineLoopNestMappableImpl(AffineForOp forOp,
110                                                      unsigned numDims) {
111   Region &limit = forOp.region();
112   for (unsigned i = 0, e = numDims; i < e; ++i) {
113     Operation *nested = &forOp.getBody()->front();
114     if (!areValuesDefinedAbove(getLowerBoundOperands(forOp), limit) ||
115         !areValuesDefinedAbove(getUpperBoundOperands(forOp), limit))
116       return forOp.emitError(
117           "loops with bounds depending on other mapped loops "
118           "are not supported");
119 
120     // The innermost loop can have an arbitrary body; skip the perfect nesting
121     // check for it.
122     if (i == e - 1)
123       break;
124 
125     auto begin = forOp.getBody()->begin(), end = forOp.getBody()->end();
126     if (forOp.getBody()->empty() || std::next(begin, 2) != end)
127       return forOp.emitError("expected perfectly nested loops in the body");
128 
129     if (!(forOp = dyn_cast<AffineForOp>(nested)))
130       return nested->emitError("expected a nested loop");
131   }
132   return success();
133 }
134 
135 static LogicalResult checkAffineLoopNestMappable(AffineForOp forOp,
136                                                  unsigned numBlockDims,
137                                                  unsigned numThreadDims) {
138   if (numBlockDims < 1 || numThreadDims < 1) {
139     LLVM_DEBUG(llvm::dbgs() << "nothing to map");
140     return success();
141   }
142 
143   if (numBlockDims > 3) {
144     return forOp.emitError("cannot map to more than 3 block dimensions");
145   }
146   if (numThreadDims > 3) {
147     return forOp.emitError("cannot map to more than 3 thread dimensions");
148   }
149   return checkAffineLoopNestMappableImpl(forOp, numBlockDims + numThreadDims);
150 }
151 
152 namespace {
153 // Helper structure that holds common state of the loop to GPU kernel
154 // conversion.
155 struct AffineLoopToGpuConverter {
156   Optional<AffineForOp> collectBounds(AffineForOp forOp, unsigned numLoops);
157 
158   void createLaunch(AffineForOp rootForOp, AffineForOp innermostForOp,
159                     unsigned numBlockDims, unsigned numThreadDims);
160 
161   // Ranges of the loops mapped to blocks or threads.
162   SmallVector<Value, 6> dims;
163   // Lower bounds of the loops mapped to blocks or threads.
164   SmallVector<Value, 6> lbs;
165   // Induction variables of the loops mapped to blocks or threads.
166   SmallVector<Value, 6> ivs;
167   // Steps of the loops mapped to blocks or threads.
168   SmallVector<Value, 6> steps;
169 };
170 } // namespace
171 
172 // Return true if the value is obviously a constant "one".
173 static bool isConstantOne(Value value) {
174   if (auto def = value.getDefiningOp<arith::ConstantIndexOp>())
175     return def.value() == 1;
176   return false;
177 }
178 
179 // Collect ranges, bounds, steps and induction variables in preparation for
180 // mapping a loop nest of depth "numLoops" rooted at "forOp" to a GPU kernel.
181 // This may fail if the IR for computing loop bounds cannot be constructed, for
182 // example if an affine loop uses semi-affine maps. Return the last loop to be
183 // mapped on success, llvm::None on failure.
184 Optional<AffineForOp>
185 AffineLoopToGpuConverter::collectBounds(AffineForOp forOp, unsigned numLoops) {
186   OpBuilder builder(forOp.getOperation());
187   dims.reserve(numLoops);
188   lbs.reserve(numLoops);
189   ivs.reserve(numLoops);
190   steps.reserve(numLoops);
191   AffineForOp currentLoop = forOp;
192   for (unsigned i = 0; i < numLoops; ++i) {
193     Value lowerBound = getOrEmitLowerBound(currentLoop, builder);
194     Value upperBound = getOrEmitUpperBound(currentLoop, builder);
195     if (!lowerBound || !upperBound) {
196       return llvm::None;
197     }
198 
199     Value range = builder.create<arith::SubIOp>(currentLoop.getLoc(),
200                                                 upperBound, lowerBound);
201     Value step = getOrCreateStep(currentLoop, builder);
202     if (!isConstantOne(step))
203       range = builder.create<arith::DivSIOp>(currentLoop.getLoc(), range, step);
204     dims.push_back(range);
205 
206     lbs.push_back(lowerBound);
207     ivs.push_back(currentLoop.getInductionVar());
208     steps.push_back(step);
209 
210     if (i != numLoops - 1)
211       currentLoop = cast<AffineForOp>(&currentLoop.getBody()->front());
212   }
213   return currentLoop;
214 }
215 
216 // Replace the loop nest rooted at "rootForOp" with a GPU launch operation.
217 // This expects "innermostForOp" to point to the last loop to be transformed to
218 // the kernel, and to have (numBlockDims + numThreadDims) perfectly nested loops
219 // between "rootForOp" and "innermostForOp".
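//
// Roughly (a simplified sketch; value names are illustrative, not the exact
// printed form), a nest mapped to one block and one thread dimension becomes:
//
//   gpu.launch blocks(%bx, %by, %bz) in (%gx = %range0, %gy = %c1, %gz = %c1)
//              threads(%tx, %ty, %tz) in (%sx = %range1, %sy = %c1, %sz = %c1) {
//     %i = %bx * %step0 + %lb0   // former induction variables, rebuilt below
//     %j = %tx * %step1 + %lb1
//     <moved innermost loop body>
//     gpu.terminator
//   }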
220 void AffineLoopToGpuConverter::createLaunch(AffineForOp rootForOp,
221                                             AffineForOp innermostForOp,
222                                             unsigned numBlockDims,
223                                             unsigned numThreadDims) {
224   OpBuilder builder(rootForOp.getOperation());
225   // Prepare the grid and block sizes for the launch operation.  If there is
226   // no loop mapped to a specific dimension, use constant "1" as its size.
227   Value constOne =
228       (numBlockDims < 3 || numThreadDims < 3)
229           ? builder.create<arith::ConstantIndexOp>(rootForOp.getLoc(), 1)
230           : nullptr;
231   Value gridSizeX = numBlockDims > 0 ? dims[0] : constOne;
232   Value gridSizeY = numBlockDims > 1 ? dims[1] : constOne;
233   Value gridSizeZ = numBlockDims > 2 ? dims[2] : constOne;
234   Value blockSizeX = numThreadDims > 0 ? dims[numBlockDims] : constOne;
235   Value blockSizeY = numThreadDims > 1 ? dims[numBlockDims + 1] : constOne;
236   Value blockSizeZ = numThreadDims > 2 ? dims[numBlockDims + 2] : constOne;
237 
238   // Create a launch op and move the body region of the innermost loop to the
239   // launch op.
240   auto launchOp = builder.create<gpu::LaunchOp>(
241       rootForOp.getLoc(), gridSizeX, gridSizeY, gridSizeZ, blockSizeX,
242       blockSizeY, blockSizeZ);
243 
244   // Replace the loop terminator (loops contain only a single block) with the
245   // gpu terminator and move the operations from the loop body block to the gpu
246   // launch body block.  Do not move the entire block because of the difference
247   // in block arguments.
248   Operation &terminator = innermostForOp.getBody()->back();
249   Location terminatorLoc = terminator.getLoc();
250   terminator.erase();
251   builder.setInsertionPointToEnd(innermostForOp.getBody());
252   builder.create<gpu::TerminatorOp>(terminatorLoc, llvm::None);
253   launchOp.body().front().getOperations().splice(
254       launchOp.body().front().begin(),
255       innermostForOp.getBody()->getOperations());
256 
257   // Remap the loop iterators to use block/thread identifiers instead.  Loops
258   // may iterate from LB with step S whereas GPU thread/block ids always iterate
259   // from 0 to N with step 1.  Therefore, loop induction variables are replaced
260   // with (gpu-thread/block-id * S) + LB.
261   builder.setInsertionPointToStart(&launchOp.body().front());
262   auto lbArgumentIt = lbs.begin();
263   auto stepArgumentIt = steps.begin();
264   for (auto en : llvm::enumerate(ivs)) {
265     Value id =
266         en.index() < numBlockDims
267             ? getDim3Value(launchOp.getBlockIds(), en.index())
268             : getDim3Value(launchOp.getThreadIds(), en.index() - numBlockDims);
269     Value step = steps[en.index()];
270     if (!isConstantOne(step))
271       id = builder.create<arith::MulIOp>(rootForOp.getLoc(), step, id);
272 
273     Value ivReplacement =
274         builder.create<arith::AddIOp>(rootForOp.getLoc(), *lbArgumentIt, id);
275     en.value().replaceAllUsesWith(ivReplacement);
276     std::advance(lbArgumentIt, 1);
277     std::advance(stepArgumentIt, 1);
278   }
279 
280   // We are done and can erase the original outermost loop.
281   rootForOp.erase();
282 }
283 
284 // Generic loop to GPU kernel conversion function.
285 static LogicalResult convertAffineLoopNestToGPULaunch(AffineForOp forOp,
286                                                       unsigned numBlockDims,
287                                                       unsigned numThreadDims) {
288   if (failed(checkAffineLoopNestMappable(forOp, numBlockDims, numThreadDims)))
289     return failure();
290 
291   AffineLoopToGpuConverter converter;
292   auto maybeInnerLoop =
293       converter.collectBounds(forOp, numBlockDims + numThreadDims);
294   if (!maybeInnerLoop)
295     return failure();
296   converter.createLaunch(forOp, *maybeInnerLoop, numBlockDims, numThreadDims);
297 
298   return success();
299 }
300 
301 LogicalResult mlir::convertAffineLoopNestToGPULaunch(AffineForOp forOp,
302                                                      unsigned numBlockDims,
303                                                      unsigned numThreadDims) {
304   return ::convertAffineLoopNestToGPULaunch(forOp, numBlockDims, numThreadDims);
305 }
306 
307 namespace {
308 struct ParallelToGpuLaunchLowering : public OpRewritePattern<ParallelOp> {
309   using OpRewritePattern<ParallelOp>::OpRewritePattern;
310 
311   LogicalResult matchAndRewrite(ParallelOp parallelOp,
312                                 PatternRewriter &rewriter) const override;
313 };
314 } // namespace
315 
316 /// Tries to derive a static upper bound from the defining operation of
317 /// `upperBound`.
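///
/// For example (a hedged illustration of the cases handled below), an upper
/// bound produced by tiling such as
///   %ub = affine.min affine_map<(d0) -> (d0, 64)>(%dim)
/// yields the constant 64, and a product like `%c32 * %ub` yields 32 * 64,
/// provided neither derived factor is negative. If no static bound can be
/// derived, an empty Value is returned.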
318 static Value deriveStaticUpperBound(Value upperBound,
319                                     PatternRewriter &rewriter) {
320   if (auto op = upperBound.getDefiningOp<arith::ConstantIndexOp>()) {
321     return op;
322   }
323 
324   if (auto minOp = upperBound.getDefiningOp<AffineMinOp>()) {
325     for (const AffineExpr &result : minOp.map().getResults()) {
326       if (auto constExpr = result.dyn_cast<AffineConstantExpr>()) {
327         return rewriter.create<arith::ConstantIndexOp>(minOp.getLoc(),
328                                                        constExpr.getValue());
329       }
330     }
331   }
332 
333   if (auto multiplyOp = upperBound.getDefiningOp<arith::MulIOp>()) {
334     if (auto lhs = dyn_cast_or_null<arith::ConstantIndexOp>(
335             deriveStaticUpperBound(multiplyOp.getOperand(0), rewriter)
336                 .getDefiningOp()))
337       if (auto rhs = dyn_cast_or_null<arith::ConstantIndexOp>(
338               deriveStaticUpperBound(multiplyOp.getOperand(1), rewriter)
339                   .getDefiningOp())) {
340         // Assumptions about the upper bound of minimum computations no longer
341         // work if multiplied by a negative value, so abort in this case.
342         if (lhs.value() < 0 || rhs.value() < 0)
343           return {};
344 
345         return rewriter.create<arith::ConstantIndexOp>(
346             multiplyOp.getLoc(), lhs.value() * rhs.value());
347       }
348   }
349 
350   return {};
351 }
352 
353 static bool isMappedToProcessor(gpu::Processor processor) {
354   return processor != gpu::Processor::Sequential;
355 }
356 
357 static unsigned getLaunchOpArgumentNum(gpu::Processor processor) {
358   switch (processor) {
359   case gpu::Processor::BlockX:
360     return 0;
361   case gpu::Processor::BlockY:
362     return 1;
363   case gpu::Processor::BlockZ:
364     return 2;
365   case gpu::Processor::ThreadX:
366     return 3;
367   case gpu::Processor::ThreadY:
368     return 4;
369   case gpu::Processor::ThreadZ:
370     return 5;
371   default:;
372   }
373   llvm_unreachable(
374       "invalid processor type while retrieving launch op argument number");
375 }
376 
377 /// Modifies the current transformation state to capture the effect of the given
378 /// `scf.parallel` operation on index substitutions and the operations to be
379 /// inserted.
380 /// Specifically, if a dimension of a parallel loop is mapped to a hardware id,
381 /// this function will
382 /// - compute the loop index based on the hardware id and affine map from the
383 ///   mapping and update `cloningMap` to substitute all uses.
384 /// - derive a new upper bound for the hardware id and augment the provided
385 ///   `gpu.launch operation` accordingly.
386 /// - if the upper bound is imprecise, insert a conditional in the `gpu.launch`
387 ///   and update the rewriter to insert into the conditional's body.
388 /// If the dimension is mapped to sequential,
389 /// - insert a for loop into the body and update the rewriter to insert into
390 ///   the for loop's body.
391 /// - update the `cloningMap` to replace uses of the index with the index of
392 ///   the new for loop.
393 /// In either case,
394 /// - append the instructions from the loop body to the worklist, in reverse order.
395 /// To note the end of the current scope in case a loop or conditional was
396 /// inserted, a sentinel (the `gpu.launch` operation) is inserted into the
397 /// worklist. This signals the processor of the worklist to pop the rewriter
398 /// one scope-level up.
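///
/// As a hedged illustration of the index substitution below, a dimension
/// mapped to block_x with the identity `map` produces roughly
///   %new_iv = affine.apply affine_map<(d0)[s0, s1] -> (d0 * s0 + s1)>
///                 (%block_id_x)[%step, %lowerBound]
/// and `cloningMap` then redirects all uses of the original induction
/// variable to %new_iv.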
399 static LogicalResult processParallelLoop(
400     ParallelOp parallelOp, gpu::LaunchOp launchOp,
401     BlockAndValueMapping &cloningMap, SmallVectorImpl<Operation *> &worklist,
402     DenseMap<gpu::Processor, Value> &bounds, PatternRewriter &rewriter) {
403   // TODO: Verify that this is a valid GPU mapping.
404   // Processor ids: 0-2 -> block [x/y/z], 3-5 -> thread [x/y/z], 6 -> sequential
405   ArrayAttr mapping =
406       parallelOp->getAttrOfType<ArrayAttr>(gpu::getMappingAttrName());
407 
408   // TODO: Support reductions.
409   if (!mapping || parallelOp.getNumResults() != 0)
410     return failure();
411 
412   Location loc = parallelOp.getLoc();
413 
414   auto launchIndependent = [&launchOp](Value val) {
415     return val.getParentRegion()->isAncestor(launchOp->getParentRegion());
416   };
417 
418   auto ensureLaunchIndependent = [&rewriter,
419                                   launchIndependent](Value val) -> Value {
420     if (launchIndependent(val))
421       return val;
422     if (auto constOp = val.getDefiningOp<arith::ConstantOp>())
423       return rewriter.create<arith::ConstantOp>(constOp.getLoc(),
424                                                 constOp.getValue());
425     return {};
426   };
427 
428   for (auto config : llvm::zip(mapping, parallelOp.getInductionVars(),
429                                parallelOp.lowerBound(), parallelOp.upperBound(),
430                                parallelOp.step())) {
431     Attribute mappingAttribute;
432     Value iv, lowerBound, upperBound, step;
433     std::tie(mappingAttribute, iv, lowerBound, upperBound, step) = config;
434     auto annotation = mappingAttribute.dyn_cast<gpu::ParallelLoopDimMapping>();
435     if (!annotation)
436       return parallelOp.emitOpError()
437              << "expected mapping attribute for lowering to GPU";
438     Value newIndex;
439     gpu::Processor processor = gpu::getProcessor(annotation);
440 
441     if (isMappedToProcessor(processor)) {
442       // Use the corresponding thread/grid index as replacement for the loop iv.
443       Value operand =
444           launchOp.body().getArgument(getLaunchOpArgumentNum(processor));
445       // Take the index map and add the lower bound and step computations in.
446       // This computes operand * step + lowerBound.
447       // Use an affine map here so that it composes nicely with the provided
448       // annotation.
449       AffineMap lowerAndStep = AffineMap::get(
450           1, 2,
451           rewriter.getAffineDimExpr(0) * rewriter.getAffineSymbolExpr(0) +
452               rewriter.getAffineSymbolExpr(1));
453       newIndex = rewriter.create<AffineApplyOp>(
454           loc, annotation.map().getValue().compose(lowerAndStep),
455           ValueRange{operand, step, lowerBound});
456       // If there was also a bound, insert that, too.
457       // TODO: Check that we do not assign bounds twice.
458       if (annotation.bound().getValue()) {
459         // We pass as the single operand to the bound-map the number of
460         // iterations, which is (upperBound - lowerBound) ceilDiv step. To
461         // support inner loops with dynamic upper bounds (as generated by e.g.
462         // tiling), try to derive a max for the bounds. If the used bound for
463         // the hardware id is imprecise, wrap the contained code into a
464         // conditional. If the lower-bound is constant or defined before the
465         // launch, we can use it in the launch bounds. Otherwise fail.
466         if (!launchIndependent(lowerBound) &&
467             !isa_and_nonnull<arith::ConstantOp>(lowerBound.getDefiningOp()))
468           return failure();
469         // The step must also be constant or defined outside of the loop nest.
470         if (!launchIndependent(step) &&
471             !isa_and_nonnull<arith::ConstantOp>(step.getDefiningOp()))
472           return failure();
473         // If the upper-bound is constant or defined before the launch, we can
474         // use it in the launch bounds directly. Otherwise, try to derive a bound.
475         bool boundIsPrecise =
476             launchIndependent(upperBound) ||
477             isa_and_nonnull<arith::ConstantOp>(upperBound.getDefiningOp());
478         {
479           PatternRewriter::InsertionGuard guard(rewriter);
480           rewriter.setInsertionPoint(launchOp);
481           if (!boundIsPrecise) {
482             upperBound = deriveStaticUpperBound(upperBound, rewriter);
483             if (!upperBound) {
484               return rewriter.notifyMatchFailure(
485                   parallelOp,
486                   "cannot derive loop-invariant upper bound for number of "
487                   "iterations");
488             }
489           }
490           // Compute the number of iterations needed. We compute this as an
491           // affine expression (upperBound - lowerBound) ceilDiv step. We use
492           // affine.apply here so that it composes nicely with the provided map.
493           AffineMap stepMap = AffineMap::get(
494               1, 2,
495               ((rewriter.getAffineDimExpr(0) - rewriter.getAffineSymbolExpr(0))
496                    .ceilDiv(rewriter.getAffineSymbolExpr(1))));
497           Value launchBound = rewriter.create<AffineApplyOp>(
498               loc, annotation.bound().getValue().compose(stepMap),
499               ValueRange{
500                   ensureLaunchIndependent(
501                       cloningMap.lookupOrDefault(upperBound)),
502                   ensureLaunchIndependent(
503                       cloningMap.lookupOrDefault(lowerBound)),
504                   ensureLaunchIndependent(cloningMap.lookupOrDefault(step))});
505           // TODO(herhut, ravishankarm): Update the behavior of setMappingAttr
506           // when this condition is relaxed.
507           if (bounds.find(processor) != bounds.end()) {
508             return rewriter.notifyMatchFailure(
509                 parallelOp, "cannot redefine the bound for processor " +
510                                 Twine(static_cast<int64_t>(processor)));
511           }
512           bounds[processor] = launchBound;
513         }
514         if (!boundIsPrecise) {
515           // We are using an approximation, create a surrounding conditional.
516           Value originalBound = std::get<3>(config);
517           arith::CmpIOp pred = rewriter.create<arith::CmpIOp>(
518               loc, arith::CmpIPredicate::slt, newIndex,
519               cloningMap.lookupOrDefault(originalBound));
520           scf::IfOp ifOp = rewriter.create<scf::IfOp>(loc, pred, false);
521           rewriter.setInsertionPointToStart(&ifOp.thenRegion().front());
522           // Put a sentinel into the worklist so we know when to pop out of the
523           // if body again. We use the launchOp here, as that cannot be part of
524           // the body's instructions.
525           worklist.push_back(launchOp.getOperation());
526         }
527       }
528     } else {
529       // Create a sequential for loop.
530       auto loopOp = rewriter.create<scf::ForOp>(
531           loc, cloningMap.lookupOrDefault(lowerBound),
532           cloningMap.lookupOrDefault(upperBound),
533           cloningMap.lookupOrDefault(step));
534       newIndex = loopOp.getInductionVar();
535       rewriter.setInsertionPointToStart(loopOp.getBody());
536       // Put a sentinel into the worklist so we know when to pop out of the loop
537       // body again. We use the launchOp here, as that cannot be part of the
538       // body's instructions.
539       worklist.push_back(launchOp.getOperation());
540     }
541     cloningMap.map(iv, newIndex);
542   }
543 
544   // Propagate custom user-defined optional attributes that can be used at a
545   // later stage, such as extension data for GPU kernel dispatch.
546   for (const auto &namedAttr : parallelOp->getAttrs()) {
547     if (namedAttr.getName() == gpu::getMappingAttrName() ||
548         namedAttr.getName() == ParallelOp::getOperandSegmentSizeAttr())
549       continue;
550     launchOp->setAttr(namedAttr.getName(), namedAttr.getValue());
551   }
552 
553   Block *body = parallelOp.getBody();
554   worklist.reserve(worklist.size() + body->getOperations().size());
555   for (Operation &op : llvm::reverse(body->without_terminator()))
556     worklist.push_back(&op);
557   return success();
558 }
559 
560 /// Lower a `scf.parallel` operation into a corresponding `gpu.launch`
561 /// operation.
562 ///
563 /// This essentially transforms a loop nest into a corresponding SIMT function.
564 /// The conversion is driven by mapping annotations on the `scf.parallel`
565 /// operations. The mapping is provided via an `ArrayAttr` named `mapping` that
566 /// contains one mapping attribute per loop dimension, each with three entries:
567 ///  - processor: the hardware id to map to. 0-2 are block dimensions, 3-5 are
568 ///               thread dimensions and 6 is sequential.
569 ///  - map : An affine map that is used to pre-process hardware ids before
570 ///          substitution.
571 ///  - bound : An affine map that is used to compute the bound of the hardware
572 ///            id based on an upper bound of the number of iterations.
573 /// If the `scf.parallel` contains nested `scf.parallel` operations, those
574 /// need to be annotated, as well. Structurally, the transformation works by
575 /// splicing all operations from nested `scf.parallel` operations into a single
576 /// sequence. Indices mapped to hardware ids are substituted with those ids,
577 /// whereas sequential mappings result in a sequential for-loop. To have more
578 /// flexibility when mapping code to hardware ids, the transform supports two
579 /// affine maps. The first `map` is used to compute the actual index for
580 /// substitution from the hardware id. The second `bound` is used to compute the
581 /// launch dimension for the hardware id from the number of iterations the
582 /// mapped loop is performing. Note that the number of iterations might be
583 /// imprecise if the corresponding loop bounds are loop-dependent. In such cases,
584 /// the hardware id might iterate over additional indices. The transformation
585 /// caters for this by predicating the created sequence of instructions on
586 /// the actual loop bound. This only works if a static upper bound for the
587 /// dynamic loop bound can be derived, currently via analyzing `affine.min`
588 /// operations.
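///
/// For reference, an annotated input looks roughly as follows (a sketch
/// modeled after the conversion tests; the exact attribute syntax may differ):
///
///   scf.parallel (%i, %j) = (%c0, %c0) to (%n, %m) step (%c1, %c1) {
///     ...
///   } {mapping = [
///        {processor = 0, map = affine_map<(d0) -> (d0)>,
///         bound = affine_map<(d0) -> (d0)>},
///        {processor = 3, map = affine_map<(d0) -> (d0)>,
///         bound = affine_map<(d0) -> (d0)>}]}
///
/// Here processor 0 maps %i to the x block id and processor 3 maps %j to the
/// x thread id.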
589 LogicalResult
590 ParallelToGpuLaunchLowering::matchAndRewrite(ParallelOp parallelOp,
591                                              PatternRewriter &rewriter) const {
592   // Mark the operation as visited for recursive legality check.
593   parallelOp->setAttr(kVisitedAttrName, rewriter.getUnitAttr());
594 
595   // We can only transform starting at the outer-most loop. Launches inside of
596   // We can only transform starting at the outermost loop. Launches inside of
597   if (auto parentLoop = parallelOp->getParentOfType<ParallelOp>())
598     return failure();
599   // Create a launch operation. We start with bound one for all grid/block
600   // sizes. Those will be refined later as we discover them from mappings.
601   Location loc = parallelOp.getLoc();
602   Value constantOne =
603       rewriter.create<arith::ConstantIndexOp>(parallelOp.getLoc(), 1);
604   gpu::LaunchOp launchOp = rewriter.create<gpu::LaunchOp>(
605       parallelOp.getLoc(), constantOne, constantOne, constantOne, constantOne,
606       constantOne, constantOne);
607   rewriter.setInsertionPointToEnd(&launchOp.body().front());
608   rewriter.create<gpu::TerminatorOp>(loc);
609   rewriter.setInsertionPointToStart(&launchOp.body().front());
610 
611   BlockAndValueMapping cloningMap;
612   llvm::DenseMap<gpu::Processor, Value> launchBounds;
613   SmallVector<Operation *, 16> worklist;
614   if (failed(processParallelLoop(parallelOp, launchOp, cloningMap, worklist,
615                                  launchBounds, rewriter)))
616     return failure();
617 
618   // Whether we have seen any side-effects. Reset when leaving an inner scope.
619   bool seenSideeffects = false;
620   // Whether we have left a nesting scope (and hence are no longer innermost).
621   bool leftNestingScope = false;
622   while (!worklist.empty()) {
623     Operation *op = worklist.pop_back_val();
624     // Now walk over the body and clone it.
625     // TODO: This is only correct if there either is no further scf.parallel
626     //       nested or this code is side-effect free. Otherwise we might need
627     //       predication. We are overly conservative for now and only allow
628     //       side-effects in the innermost scope.
629     if (auto nestedParallel = dyn_cast<ParallelOp>(op)) {
630       // Before entering a nested scope, make sure there have been no
631       // side effects until now.
632       if (seenSideeffects)
633         return failure();
634       // A nested scf.parallel needs insertion of code to compute indices.
635       // Insert that now. This will also update the worklist with the loops
636       // body.
637       if (failed(processParallelLoop(nestedParallel, launchOp, cloningMap,
638                                      worklist, launchBounds, rewriter)))
639         return failure();
640     } else if (op == launchOp.getOperation()) {
641       // Found our sentinel value. We have finished the operations from one
642       // nesting level, pop one level back up.
643       auto parent = rewriter.getInsertionPoint()->getParentOp();
644       rewriter.setInsertionPointAfter(parent);
645       leftNestingScope = true;
646       seenSideeffects = false;
647     } else {
648       // Otherwise we copy it over.
649       Operation *clone = rewriter.clone(*op, cloningMap);
650       cloningMap.map(op->getResults(), clone->getResults());
651       // Check for side effects.
652       // TODO: Handle region side effects properly.
653       seenSideeffects |= !MemoryEffectOpInterface::hasNoEffect(clone) ||
654                          clone->getNumRegions() != 0;
655       // If we are no longer in the innermost scope, side effects are disallowed.
656       if (seenSideeffects && leftNestingScope)
657         return failure();
658     }
659   }
660 
661   // Now that we succeeded creating the launch operation, also update the
662   // bounds.
663   for (auto bound : launchBounds)
664     launchOp.setOperand(getLaunchOpArgumentNum(std::get<0>(bound)),
665                         std::get<1>(bound));
666 
667   rewriter.eraseOp(parallelOp);
668   return success();
669 }
670 
671 void mlir::populateParallelLoopToGPUPatterns(RewritePatternSet &patterns) {
672   patterns.add<ParallelToGpuLaunchLowering>(patterns.getContext());
673 }
674 
675 void mlir::configureParallelLoopToGPULegality(ConversionTarget &target) {
676   target.addLegalDialect<memref::MemRefDialect>();
677   target.addDynamicallyLegalOp<scf::ParallelOp>([](scf::ParallelOp parallelOp) {
678     return !parallelOp->hasAttr(gpu::getMappingAttrName()) ||
679            parallelOp->hasAttr(kVisitedAttrName);
680   });
681 }
682 
683 void mlir::finalizeParallelLoopToGPUConversion(Operation *op) {
684   op->walk([](scf::ParallelOp parallelOp) {
685     parallelOp->removeAttr(kVisitedAttrName);
686   });
687 }
688 }
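
// The three public entry points above are intended to be driven together by a
// pass. A minimal sketch (assuming a conversion-style pass; this is not the
// in-tree pass definition, and `ctx`/`op` stand for the pass's context and
// root operation):
//
//   RewritePatternSet patterns(ctx);
//   populateParallelLoopToGPUPatterns(patterns);
//   ConversionTarget target(*ctx);
//   target.markUnknownOpDynamicallyLegal([](Operation *) { return true; });
//   configureParallelLoopToGPULegality(target);
//   if (failed(applyPartialConversion(op, target, std::move(patterns))))
//     return signalPassFailure();
//   finalizeParallelLoopToGPUConversion(op);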