//===- SCFToGPU.cpp - Convert an affine loop nest to a GPU kernel -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements a straightforward conversion of a loop nest into a GPU
// kernel. The caller is expected to guarantee that the conversion is correct
// or to further transform the kernel to ensure correctness.
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/SCFToGPU/SCFToGPU.h"

#include "mlir/Conversion/AffineToStandard/AffineToStandard.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/GPU/Transforms/ParallelLoopMapper.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Builders.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/Passes.h"
#include "mlir/Transforms/RegionUtils.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "loops-to-gpu"

using namespace mlir;
using namespace mlir::scf;

// Name of internal attribute to mark visited operations during conversion.
//
// NOTE: The conversion originally used the following legality criteria:
//   `!parallelOp->hasAttr(gpu::getMappingAttrName())`
// But the provided pattern might reject some cases based on more detailed
// analysis of the `mapping` attribute.
// To avoid dialect conversion failures due to non-converted illegal operations,
// we use this extra Unit attribute as a marker that the operation was checked
// by the pattern and should be considered legal in the following legality
// checks. The `finalizeParallelLoopToGPUConversion` function performs cleanup
// of these extra attributes and is supposed to be called after the dialect
// conversion.
//
// TODO: Implement a cleaner solution, factoring out the "matching" logic
// from the pattern and its callees into a separate function that can be called
// from both the pattern and the op legality check.
static constexpr StringLiteral kVisitedAttrName = "SCFToGPU_visited";

// Extract an indexed value from KernelDim3.
static Value getDim3Value(const gpu::KernelDim3 &dim3, unsigned pos) {
  switch (pos) {
  case 0:
    return dim3.x;
  case 1:
    return dim3.y;
  case 2:
    return dim3.z;
  default:
    llvm_unreachable("dim3 position out of bounds");
  }
  return nullptr;
}

// Get the lower bound-related operands of a loop operation.
static Operation::operand_range getLowerBoundOperands(AffineForOp forOp) {
  return forOp.getLowerBoundOperands();
}

// Get the upper bound-related operands of a loop operation.
static Operation::operand_range getUpperBoundOperands(AffineForOp forOp) {
  return forOp.getUpperBoundOperands();
}

// Get a Value that corresponds to the loop step. If the step is an attribute,
// materialize a corresponding constant using builder.
static Value getOrCreateStep(AffineForOp forOp, OpBuilder &builder) {
  return builder.create<arith::ConstantIndexOp>(forOp.getLoc(),
                                                forOp.getStep());
}

// Get a Value for the loop lower bound. If the value requires computation,
// materialize the instructions using builder.
static Value getOrEmitLowerBound(AffineForOp forOp, OpBuilder &builder) {
  return lowerAffineLowerBound(forOp, builder);
}

// Get a Value for the loop upper bound. If the value requires computation,
// materialize the instructions using builder.
static Value getOrEmitUpperBound(AffineForOp forOp, OpBuilder &builder) {
  return lowerAffineUpperBound(forOp, builder);
}

// Check the structure of the loop nest:
//   - there are enough loops to map to numDims;
//   - the loops are perfectly nested;
//   - the loop bounds can be computed above the outermost loop.
// This roughly corresponds to the "matcher" part of the pattern-based
// rewriting infrastructure.
static LogicalResult checkAffineLoopNestMappableImpl(AffineForOp forOp,
                                                     unsigned numDims) {
  Region &limit = forOp.getRegion();
  for (unsigned i = 0, e = numDims; i < e; ++i) {
    Operation *nested = &forOp.getBody()->front();
    if (!areValuesDefinedAbove(getLowerBoundOperands(forOp), limit) ||
        !areValuesDefinedAbove(getUpperBoundOperands(forOp), limit))
      return forOp.emitError(
          "loops with bounds depending on other mapped loops "
          "are not supported");

    // The innermost loop can have an arbitrary body, skip the perfect nesting
    // check for it.
    if (i == e - 1)
      break;

    auto begin = forOp.getBody()->begin(), end = forOp.getBody()->end();
    if (forOp.getBody()->empty() || std::next(begin, 2) != end)
      return forOp.emitError("expected perfectly nested loops in the body");

    if (!(forOp = dyn_cast<AffineForOp>(nested)))
      return nested->emitError("expected a nested loop");
  }
  return success();
}

static LogicalResult checkAffineLoopNestMappable(AffineForOp forOp,
                                                 unsigned numBlockDims,
                                                 unsigned numThreadDims) {
  if (numBlockDims < 1 || numThreadDims < 1) {
    LLVM_DEBUG(llvm::dbgs() << "nothing to map");
    return success();
  }

  if (numBlockDims > 3) {
    return forOp.emitError("cannot map to more than 3 block dimensions");
  }
  if (numThreadDims > 3) {
    return forOp.emitError("cannot map to more than 3 thread dimensions");
  }
  return checkAffineLoopNestMappableImpl(forOp, numBlockDims + numThreadDims);
}

namespace {
// Helper structure that holds common state of the loop to GPU kernel
// conversion.
struct AffineLoopToGpuConverter {
  Optional<AffineForOp> collectBounds(AffineForOp forOp, unsigned numLoops);

  void createLaunch(AffineForOp rootForOp, AffineForOp innermostForOp,
                    unsigned numBlockDims, unsigned numThreadDims);

  // Ranges of the loops mapped to blocks or threads.
  SmallVector<Value, 6> dims;
  // Lower bounds of the loops mapped to blocks or threads.
  SmallVector<Value, 6> lbs;
  // Induction variables of the loops mapped to blocks or threads.
  SmallVector<Value, 6> ivs;
  // Steps of the loops mapped to blocks or threads.
  SmallVector<Value, 6> steps;
};
} // namespace

// Return true if the value is obviously a constant "one".
static bool isConstantOne(Value value) {
  if (auto def = value.getDefiningOp<arith::ConstantIndexOp>())
    return def.value() == 1;
  return false;
}

// Collect ranges, bounds, steps and induction variables in preparation for
// mapping a loop nest of depth "numLoops" rooted at "forOp" to a GPU kernel.
// This may fail if the IR for computing loop bounds cannot be constructed, for
// example if an affine loop uses semi-affine maps. Return the last loop to be
// mapped on success, llvm::None on failure.
Optional<AffineForOp>
AffineLoopToGpuConverter::collectBounds(AffineForOp forOp, unsigned numLoops) {
  OpBuilder builder(forOp.getOperation());
  dims.reserve(numLoops);
  lbs.reserve(numLoops);
  ivs.reserve(numLoops);
  steps.reserve(numLoops);
  AffineForOp currentLoop = forOp;
  for (unsigned i = 0; i < numLoops; ++i) {
    Value lowerBound = getOrEmitLowerBound(currentLoop, builder);
    Value upperBound = getOrEmitUpperBound(currentLoop, builder);
    if (!lowerBound || !upperBound) {
      return llvm::None;
    }

    Value range = builder.create<arith::SubIOp>(currentLoop.getLoc(),
                                                upperBound, lowerBound);
    Value step = getOrCreateStep(currentLoop, builder);
    if (!isConstantOne(step))
      range = builder.create<arith::DivSIOp>(currentLoop.getLoc(), range, step);
    dims.push_back(range);

    lbs.push_back(lowerBound);
    ivs.push_back(currentLoop.getInductionVar());
    steps.push_back(step);

    if (i != numLoops - 1)
      currentLoop = cast<AffineForOp>(&currentLoop.getBody()->front());
  }
  return currentLoop;
}

// Replace the loop nest rooted at "rootForOp" with a GPU launch operation.
// This expects "innermostForOp" to point to the last loop to be transformed
// to the kernel, and to have (numBlockDims + numThreadDims) perfectly nested
// loops between "rootForOp" and "innermostForOp".
void AffineLoopToGpuConverter::createLaunch(AffineForOp rootForOp,
                                            AffineForOp innermostForOp,
                                            unsigned numBlockDims,
                                            unsigned numThreadDims) {
  OpBuilder builder(rootForOp.getOperation());
  // Prepare the grid and block sizes for the launch operation. If there is
  // no loop mapped to a specific dimension, use constant "1" as its size.
  Value constOne =
      (numBlockDims < 3 || numThreadDims < 3)
          ? builder.create<arith::ConstantIndexOp>(rootForOp.getLoc(), 1)
          : nullptr;
  Value gridSizeX = numBlockDims > 0 ? dims[0] : constOne;
  Value gridSizeY = numBlockDims > 1 ? dims[1] : constOne;
  Value gridSizeZ = numBlockDims > 2 ? dims[2] : constOne;
  Value blockSizeX = numThreadDims > 0 ? dims[numBlockDims] : constOne;
  Value blockSizeY = numThreadDims > 1 ? dims[numBlockDims + 1] : constOne;
  Value blockSizeZ = numThreadDims > 2 ? dims[numBlockDims + 2] : constOne;

  // Create a launch op and move the body region of the innermost loop to the
  // launch op.
  auto launchOp = builder.create<gpu::LaunchOp>(
      rootForOp.getLoc(), gridSizeX, gridSizeY, gridSizeZ, blockSizeX,
      blockSizeY, blockSizeZ);

  // Replace the loop terminator (loops contain only a single block) with the
  // gpu terminator and move the operations from the loop body block to the gpu
  // launch body block. Do not move the entire block because of the difference
  // in block arguments.
  Operation &terminator = innermostForOp.getBody()->back();
  Location terminatorLoc = terminator.getLoc();
  terminator.erase();
  builder.setInsertionPointToEnd(innermostForOp.getBody());
  builder.create<gpu::TerminatorOp>(terminatorLoc, llvm::None);
  launchOp.body().front().getOperations().splice(
      launchOp.body().front().begin(),
      innermostForOp.getBody()->getOperations());

  // Remap the loop iterators to use block/thread identifiers instead. Loops
  // may iterate from LB with step S whereas GPU thread/block ids always iterate
  // from 0 to N with step 1. Therefore, loop induction variables are replaced
  // with (gpu-thread/block-id * S) + LB.
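  // For example (an illustrative sketch, not taken from a specific test), an
  // induction variable of a loop with lower bound %lb and constant step 4 that
  // is mapped to the x-dimension of blocks is replaced by %bid_x * 4 + %lb.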
  builder.setInsertionPointToStart(&launchOp.body().front());
  auto *lbArgumentIt = lbs.begin();
  auto *stepArgumentIt = steps.begin();
  for (const auto &en : llvm::enumerate(ivs)) {
    Value id =
        en.index() < numBlockDims
            ? getDim3Value(launchOp.getBlockIds(), en.index())
            : getDim3Value(launchOp.getThreadIds(), en.index() - numBlockDims);
    Value step = steps[en.index()];
    if (!isConstantOne(step))
      id = builder.create<arith::MulIOp>(rootForOp.getLoc(), step, id);

    Value ivReplacement =
        builder.create<arith::AddIOp>(rootForOp.getLoc(), *lbArgumentIt, id);
    en.value().replaceAllUsesWith(ivReplacement);
    std::advance(lbArgumentIt, 1);
    std::advance(stepArgumentIt, 1);
  }

  // We are done and can erase the original outermost loop.
  rootForOp.erase();
}

// Generic loop to GPU kernel conversion function.
static LogicalResult convertAffineLoopNestToGPULaunch(AffineForOp forOp,
                                                      unsigned numBlockDims,
                                                      unsigned numThreadDims) {
  if (failed(checkAffineLoopNestMappable(forOp, numBlockDims, numThreadDims)))
    return failure();

  AffineLoopToGpuConverter converter;
  auto maybeInnerLoop =
      converter.collectBounds(forOp, numBlockDims + numThreadDims);
  if (!maybeInnerLoop)
    return failure();
  converter.createLaunch(forOp, *maybeInnerLoop, numBlockDims, numThreadDims);

  return success();
}

LogicalResult mlir::convertAffineLoopNestToGPULaunch(AffineForOp forOp,
                                                     unsigned numBlockDims,
                                                     unsigned numThreadDims) {
  return ::convertAffineLoopNestToGPULaunch(forOp, numBlockDims, numThreadDims);
}

namespace {
struct ParallelToGpuLaunchLowering : public OpRewritePattern<ParallelOp> {
  using OpRewritePattern<ParallelOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(ParallelOp parallelOp,
                                PatternRewriter &rewriter) const override;
};
} // namespace

/// Tries to derive a static upper bound from the defining operation of
/// `upperBound`.
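/// For instance (an illustrative sketch with hypothetical value names), the
/// result of `affine.min affine_map<(d0) -> (d0, 64)>(%n)` can be bounded
/// conservatively by the constant 64, and a product of two such derivable
/// values by the product of their bounds.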
static Value deriveStaticUpperBound(Value upperBound,
                                    PatternRewriter &rewriter) {
  if (auto op = upperBound.getDefiningOp<arith::ConstantIndexOp>()) {
    return op;
  }

  if (auto minOp = upperBound.getDefiningOp<AffineMinOp>()) {
    for (const AffineExpr &result : minOp.getMap().getResults()) {
      if (auto constExpr = result.dyn_cast<AffineConstantExpr>()) {
        return rewriter.create<arith::ConstantIndexOp>(minOp.getLoc(),
                                                       constExpr.getValue());
      }
    }
  }

  if (auto multiplyOp = upperBound.getDefiningOp<arith::MulIOp>()) {
    if (auto lhs = dyn_cast_or_null<arith::ConstantIndexOp>(
            deriveStaticUpperBound(multiplyOp.getOperand(0), rewriter)
                .getDefiningOp()))
      if (auto rhs = dyn_cast_or_null<arith::ConstantIndexOp>(
              deriveStaticUpperBound(multiplyOp.getOperand(1), rewriter)
                  .getDefiningOp())) {
        // Assumptions about the upper bound of minimum computations no longer
        // work if multiplied by a negative value, so abort in this case.
        if (lhs.value() < 0 || rhs.value() < 0)
          return {};

        return rewriter.create<arith::ConstantIndexOp>(
            multiplyOp.getLoc(), lhs.value() * rhs.value());
      }
  }

  return {};
}

static bool isMappedToProcessor(gpu::Processor processor) {
  return processor != gpu::Processor::Sequential;
}

static unsigned getLaunchOpArgumentNum(gpu::Processor processor) {
  switch (processor) {
  case gpu::Processor::BlockX:
    return 0;
  case gpu::Processor::BlockY:
    return 1;
  case gpu::Processor::BlockZ:
    return 2;
  case gpu::Processor::ThreadX:
    return 3;
  case gpu::Processor::ThreadY:
    return 4;
  case gpu::Processor::ThreadZ:
    return 5;
  default:;
  }
  llvm_unreachable(
      "invalid processor type while retrieving launch op argument number");
}

/// Modifies the current transformation state to capture the effect of the given
/// `scf.parallel` operation on index substitutions and the operations to be
/// inserted.
/// Specifically, if a dimension of a parallel loop is mapped to a hardware id,
/// this function will
/// - compute the loop index based on the hardware id and affine map from the
///   mapping and update `cloningMap` to substitute all uses.
/// - derive a new upper bound for the hardware id and augment the provided
///   `gpu.launch` operation accordingly.
/// - if the upper bound is imprecise, insert a conditional in the `gpu.launch`
///   and update the rewriter to insert into the conditional's body.
/// If the dimension is mapped to sequential,
/// - insert a for loop into the body and update the rewriter to insert into
///   the for loop's body.
/// - update the `cloningMap` to replace uses of the index with the index of
///   the new for loop.
/// In either case,
/// - append the instructions from the loop's body to the worklist, in reverse
///   order.
/// To note the end of the current scope in case a loop or conditional was
/// inserted, a sentinel (the `gpu.launch` operation) is inserted into the
/// worklist. This signals the processor of the worklist to pop the rewriter
/// one scope-level up.
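/// As an illustrative sketch (value names are hypothetical): for a dimension
/// mapped to thread_x with an identity `map`, the loop index is recomputed as
///   %new_iv = affine.apply affine_map<(d0)[s0, s1] -> (d0 * s0 + s1)>
///               (%thread_id_x)[%step, %lower_bound]
/// i.e. thread-id * step + lower-bound, as built below from `lowerAndStep`.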
static LogicalResult processParallelLoop(
    ParallelOp parallelOp, gpu::LaunchOp launchOp,
    BlockAndValueMapping &cloningMap, SmallVectorImpl<Operation *> &worklist,
    DenseMap<gpu::Processor, Value> &bounds, PatternRewriter &rewriter) {
  // TODO: Verify that this is a valid GPU mapping.
  // processor ids: 0-2 -> block [x/y/z], 3-5 -> thread [x/y/z], 6 -> sequential
  ArrayAttr mapping =
      parallelOp->getAttrOfType<ArrayAttr>(gpu::getMappingAttrName());

  // TODO: Support reductions.
  if (!mapping || parallelOp.getNumResults() != 0)
    return failure();

  Location loc = parallelOp.getLoc();

  auto launchIndependent = [&launchOp](Value val) {
    return val.getParentRegion()->isAncestor(launchOp->getParentRegion());
  };

  auto ensureLaunchIndependent = [&rewriter,
                                  launchIndependent](Value val) -> Value {
    if (launchIndependent(val))
      return val;
    if (auto constOp = val.getDefiningOp<arith::ConstantOp>())
      return rewriter.create<arith::ConstantOp>(constOp.getLoc(),
                                                constOp.getValue());
    return {};
  };

  for (auto config : llvm::zip(
           mapping, parallelOp.getInductionVars(), parallelOp.getLowerBound(),
           parallelOp.getUpperBound(), parallelOp.getStep())) {
    Attribute mappingAttribute;
    Value iv, lowerBound, upperBound, step;
    std::tie(mappingAttribute, iv, lowerBound, upperBound, step) = config;
    auto annotation =
        mappingAttribute.dyn_cast<gpu::ParallelLoopDimMappingAttr>();
    if (!annotation)
      return parallelOp.emitOpError()
             << "expected mapping attribute for lowering to GPU";
    Value newIndex;
    gpu::Processor processor = annotation.getProcessor();

    if (isMappedToProcessor(processor)) {
      // Use the corresponding thread/grid index as replacement for the loop iv.
      Value operand =
          launchOp.body().getArgument(getLaunchOpArgumentNum(processor));
      // Take the index map and add the lower bound and step computations in.
      // This computes operand * step + lowerBound.
      // Use an affine map here so that it composes nicely with the provided
      // annotation.
      AffineMap lowerAndStep = AffineMap::get(
          1, 2,
          rewriter.getAffineDimExpr(0) * rewriter.getAffineSymbolExpr(0) +
              rewriter.getAffineSymbolExpr(1));
      newIndex = rewriter.create<AffineApplyOp>(
          loc, annotation.getMap().compose(lowerAndStep),
          ValueRange{operand, step, lowerBound});
      // If there was also a bound, insert that, too.
      // TODO: Check that we do not assign bounds twice.
      if (annotation.getBound()) {
        // We pass as the single operand to the bound-map the number of
        // iterations, which is (upperBound - lowerBound) ceilDiv step. To
        // support inner loops with dynamic upper bounds (as generated by e.g.
        // tiling), try to derive a max for the bounds. If the used bound for
        // the hardware id is imprecise, wrap the contained code into a
        // conditional. If the lower-bound is constant or defined before the
        // launch, we can use it in the launch bounds. Otherwise fail.
        if (!launchIndependent(lowerBound) &&
            !isa_and_nonnull<arith::ConstantOp>(lowerBound.getDefiningOp()))
          return failure();
        // The step must also be constant or defined outside of the loop nest.
        if (!launchIndependent(step) &&
            !isa_and_nonnull<arith::ConstantOp>(step.getDefiningOp()))
          return failure();
        // If the upper-bound is constant or defined before the launch, we can
        // use it in the launch bounds directly. Otherwise try to derive a
        // bound.
        bool boundIsPrecise =
            launchIndependent(upperBound) ||
            isa_and_nonnull<arith::ConstantOp>(upperBound.getDefiningOp());
        {
          PatternRewriter::InsertionGuard guard(rewriter);
          rewriter.setInsertionPoint(launchOp);
          if (!boundIsPrecise) {
            upperBound = deriveStaticUpperBound(upperBound, rewriter);
            if (!upperBound) {
              return rewriter.notifyMatchFailure(
                  parallelOp,
                  "cannot derive loop-invariant upper bound for number of "
                  "iterations");
            }
          }
          // Compute the number of iterations needed. We compute this as the
          // affine expression (upperBound - lowerBound) ceilDiv step. We use
          // affine.apply here so that it composes nicely with the provided map.
          AffineMap stepMap = AffineMap::get(
              1, 2,
              ((rewriter.getAffineDimExpr(0) - rewriter.getAffineSymbolExpr(0))
                   .ceilDiv(rewriter.getAffineSymbolExpr(1))));
          Value launchBound = rewriter.create<AffineApplyOp>(
              loc, annotation.getBound().compose(stepMap),
              ValueRange{
                  ensureLaunchIndependent(
                      cloningMap.lookupOrDefault(upperBound)),
                  ensureLaunchIndependent(
                      cloningMap.lookupOrDefault(lowerBound)),
                  ensureLaunchIndependent(cloningMap.lookupOrDefault(step))});
          // TODO(herhut,ravishankarm): Update the behavior of setMappingAttr
          // when this condition is relaxed.
          if (bounds.find(processor) != bounds.end()) {
            return rewriter.notifyMatchFailure(
                parallelOp, "cannot redefine the bound for processor " +
                                Twine(static_cast<int64_t>(processor)));
          }
          bounds[processor] = launchBound;
        }
        if (!boundIsPrecise) {
          // We are using an approximation, create a surrounding conditional.
          Value originalBound = std::get<3>(config);
          arith::CmpIOp pred = rewriter.create<arith::CmpIOp>(
              loc, arith::CmpIPredicate::slt, newIndex,
              cloningMap.lookupOrDefault(originalBound));
          scf::IfOp ifOp = rewriter.create<scf::IfOp>(loc, pred, false);
          rewriter.setInsertionPointToStart(&ifOp.getThenRegion().front());
          // Put a sentinel into the worklist so we know when to pop out of the
          // if body again. We use the launchOp here, as that cannot be part of
          // the body's instructions.
          worklist.push_back(launchOp.getOperation());
        }
      }
    } else {
      // Create a sequential for loop.
      auto loopOp = rewriter.create<scf::ForOp>(
          loc, cloningMap.lookupOrDefault(lowerBound),
          cloningMap.lookupOrDefault(upperBound),
          cloningMap.lookupOrDefault(step));
      newIndex = loopOp.getInductionVar();
      rewriter.setInsertionPointToStart(loopOp.getBody());
      // Put a sentinel into the worklist so we know when to pop out of the loop
      // body again. We use the launchOp here, as that cannot be part of the
      // body's instructions.
      worklist.push_back(launchOp.getOperation());
    }
    cloningMap.map(iv, newIndex);
  }

  // Propagate custom user-defined optional attributes that can be used at
  // later stages, such as extension data for GPU kernel dispatch.
  for (const auto &namedAttr : parallelOp->getAttrs()) {
    if (namedAttr.getName() == gpu::getMappingAttrName() ||
        namedAttr.getName() == ParallelOp::getOperandSegmentSizeAttr())
      continue;
    launchOp->setAttr(namedAttr.getName(), namedAttr.getValue());
  }

  Block *body = parallelOp.getBody();
  worklist.reserve(worklist.size() + body->getOperations().size());
  for (Operation &op : llvm::reverse(body->without_terminator()))
    worklist.push_back(&op);
  return success();
}

/// Lower a `scf.parallel` operation into a corresponding `gpu.launch`
/// operation.
///
/// This essentially transforms a loop nest into a corresponding SIMT function.
/// The conversion is driven by mapping annotations on the `scf.parallel`
/// operations. The mapping is provided via a `DictionaryAttribute` named
/// `mapping`, which has three entries:
///  - processor: the hardware id to map to. 0-2 are block dimensions, 3-5 are
///    thread dimensions and 6 is sequential.
///  - map : An affine map that is used to pre-process hardware ids before
///    substitution.
///  - bound : An affine map that is used to compute the bound of the hardware
///    id based on an upper bound of the number of iterations.
/// If the `scf.parallel` contains nested `scf.parallel` operations, those
/// need to be annotated, as well. Structurally, the transformation works by
/// splicing all operations from nested `scf.parallel` operations into a single
/// sequence. Indices mapped to hardware ids are substituted with those ids,
/// whereas sequential mappings result in a sequential for-loop. To have more
/// flexibility when mapping code to hardware ids, the transform supports two
/// affine maps. The first `map` is used to compute the actual index for
/// substitution from the hardware id. The second `bound` is used to compute the
/// launch dimension for the hardware id from the number of iterations the
/// mapped loop is performing. Note that the number of iterations might be
/// imprecise if the corresponding loop-bounds are loop-dependent. In such
/// cases, the hardware id might iterate over additional indices. The
/// transformation caters for this by predicating the created sequence of
/// instructions on the actual loop bound. This only works if a static upper
/// bound for the dynamic loop bound can be derived, currently via analyzing
/// `affine.min` operations.
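///
/// As an illustrative sketch (value names and the exact attribute syntax are
/// hypothetical and may vary between versions), an annotated loop may look
/// like:
///
///   scf.parallel (%i) = (%c0) to (%n) step (%c1) {
///     ...
///   } {mapping = [{processor = 0, map = affine_map<(d0) -> (d0)>,
///                  bound = affine_map<(d0) -> (d0)>}]}
///
/// which maps the single parallel dimension to the x-dimension of blocks
/// (processor 0) with identity `map` and `bound` maps.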
LogicalResult
ParallelToGpuLaunchLowering::matchAndRewrite(ParallelOp parallelOp,
                                             PatternRewriter &rewriter) const {
  // Mark the operation as visited for recursive legality check.
  parallelOp->setAttr(kVisitedAttrName, rewriter.getUnitAttr());

  // We can only transform starting at the outer-most loop. Launches inside of
  // parallel loops are not supported.
  if (auto parentLoop = parallelOp->getParentOfType<ParallelOp>())
    return failure();
  // Create a launch operation. We start with bound one for all grid/block
  // sizes. Those will be refined later as we discover them from mappings.
  Location loc = parallelOp.getLoc();
  Value constantOne =
      rewriter.create<arith::ConstantIndexOp>(parallelOp.getLoc(), 1);
  gpu::LaunchOp launchOp = rewriter.create<gpu::LaunchOp>(
      parallelOp.getLoc(), constantOne, constantOne, constantOne, constantOne,
      constantOne, constantOne);
  rewriter.setInsertionPointToEnd(&launchOp.body().front());
  rewriter.create<gpu::TerminatorOp>(loc);
  rewriter.setInsertionPointToStart(&launchOp.body().front());

  BlockAndValueMapping cloningMap;
  llvm::DenseMap<gpu::Processor, Value> launchBounds;
  SmallVector<Operation *, 16> worklist;
  if (failed(processParallelLoop(parallelOp, launchOp, cloningMap, worklist,
                                 launchBounds, rewriter)))
    return failure();

  // Whether we have seen any side-effects. Reset when leaving an inner scope.
  bool seenSideeffects = false;
  // Whether we have left a nesting scope (and hence are no longer innermost).
  bool leftNestingScope = false;
  while (!worklist.empty()) {
    Operation *op = worklist.pop_back_val();
    // Now walk over the body and clone it.
    // TODO: This is only correct if there either is no further scf.parallel
    //       nested or this code is side-effect free. Otherwise we might need
    //       predication. We are overly conservative for now and only allow
    //       side-effects in the innermost scope.
    if (auto nestedParallel = dyn_cast<ParallelOp>(op)) {
      // Before entering a nested scope, make sure there have been no
      // sideeffects until now.
      if (seenSideeffects)
        return failure();
      // A nested scf.parallel needs insertion of code to compute indices.
      // Insert that now. This will also update the worklist with the loop's
      // body.
      if (failed(processParallelLoop(nestedParallel, launchOp, cloningMap,
                                     worklist, launchBounds, rewriter)))
        return failure();
    } else if (op == launchOp.getOperation()) {
      // Found our sentinel value. We have finished the operations from one
      // nesting level, pop one level back up.
      auto *parent = rewriter.getInsertionPoint()->getParentOp();
      rewriter.setInsertionPointAfter(parent);
      leftNestingScope = true;
      seenSideeffects = false;
    } else {
      // Otherwise we copy it over.
      Operation *clone = rewriter.clone(*op, cloningMap);
      cloningMap.map(op->getResults(), clone->getResults());
      // Check for side effects.
      // TODO: Handle region side effects properly.
      seenSideeffects |= !MemoryEffectOpInterface::hasNoEffect(clone) ||
                         clone->getNumRegions() != 0;
      // If we are no longer in the innermost scope, sideeffects are disallowed.
      if (seenSideeffects && leftNestingScope)
        return failure();
    }
  }

  // Now that we succeeded creating the launch operation, also update the
  // bounds.
  for (auto bound : launchBounds)
    launchOp.setOperand(getLaunchOpArgumentNum(std::get<0>(bound)),
                        std::get<1>(bound));

  rewriter.eraseOp(parallelOp);
  return success();
}

void mlir::populateParallelLoopToGPUPatterns(RewritePatternSet &patterns) {
  patterns.add<ParallelToGpuLaunchLowering>(patterns.getContext());
}

void mlir::configureParallelLoopToGPULegality(ConversionTarget &target) {
  target.addLegalDialect<memref::MemRefDialect>();
  target.addDynamicallyLegalOp<scf::ParallelOp>([](scf::ParallelOp parallelOp) {
    return !parallelOp->hasAttr(gpu::getMappingAttrName()) ||
           parallelOp->hasAttr(kVisitedAttrName);
  });
}

void mlir::finalizeParallelLoopToGPUConversion(Operation *op) {
  op->walk([](scf::ParallelOp parallelOp) {
    parallelOp->removeAttr(kVisitedAttrName);
  });
}