//===- Detensorize.cpp - Linalg transformations as patterns ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/StandardOps/Transforms/FuncConversions.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include <iterator>
#include <memory>

using namespace mlir;
using namespace mlir::linalg;

static Value sourceMaterializationCallback(OpBuilder &builder, Type type,
                                           ValueRange inputs, Location loc) {
  assert(inputs.size() == 1);
  if (inputs[0].getType().isa<TensorType>())
    return nullptr;

  // A detensored value is converted back by creating a new tensor from its
  // element(s).
  auto createNewTensorOp = builder.create<tensor::FromElementsOp>(
      loc, inputs[0].getType(), inputs[0]);

  // FromElementsOp results in a tensor<1xdtype>; we need to reshape that into
  // a tensor<dtype> instead.
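  // For example, for an i32 element the materialized IR is expected to look
  // like:
  //   %0 = tensor.from_elements(%elem) : (i32) -> tensor<1xi32>
  //   %1 = linalg.tensor_collapse_shape %0 [] : tensor<1xi32> into tensor<i32>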
  return builder.create<linalg::TensorCollapseShapeOp>(
      loc, type, createNewTensorOp, ArrayRef<ReassociationExprs>{});
}

namespace {
/// Defines the criteria a TensorType must follow in order to be considered
/// "detensorable".
///
/// NOTE: For now, only 0-D tensors are supported.
///
/// Returns true if tensorType can be detensored.
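///
/// For example, tensor<i32> can be detensored, while tensor<4xi32> and
/// tensor<?xi32> cannot.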
bool canBeDetensored(TensorType tensorType) {
  return tensorType.hasRank() && tensorType.getRank() == 0;
}

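/// Returns true if `op` is a linalg.generic all of whose input and output
/// operands have detensorable tensor types (e.g. tensor<i32>), i.e. the op
/// effectively operates on scalars wrapped in 0-D tensors.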
bool shouldBeDetensored(Operation *op, TypeConverter typeConverter) {
  GenericOp genericOp = dyn_cast_or_null<GenericOp>(op);
  return genericOp &&
         llvm::all_of(
             genericOp.getInputAndOutputOperands(), [&](OpOperand *opOperand) {
               return !typeConverter.isLegal(opOperand->get().getType());
             });
}

/// A conversion pattern for detensoring `linalg.generic` ops.
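///
/// The op's body is inlined into the enclosing block and the op's results are
/// replaced by its yielded values. As an illustrative sketch, a 0-D addition
///
///   %out = linalg.generic #attrs
///     ins(%a, %b : tensor<i32>, tensor<i32>) outs(%init : tensor<i32>) {
///     ^bb0(%lhs: i32, %rhs: i32, %acc: i32):
///       %sum = arith.addi %lhs, %rhs : i32
///       linalg.yield %sum : i32
///   } -> tensor<i32>
///
/// ends up as a plain `arith.addi` on the already-detensored (scalar)
/// operands.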
class DetensorizeGenericOp : public OpConversionPattern<GenericOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(GenericOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Block *originalBlock = op->getBlock();

    // Gather some information about the op before inlining its region.
    Block *opEntryBlock = &*op.region().begin();
    YieldOp yieldOp = dyn_cast<YieldOp>(op.region().back().getTerminator());

    // Split the block enclosing the op right before it. This way, we have a
    // clear insertion point in which the op's region can be inlined.
    Block *newBlock = originalBlock->splitBlock(op);
    rewriter.inlineRegionBefore(op.region(), newBlock);
    // Now that the op's region is inlined, the operands of its YieldOp are
    // mapped to the materialized target values. Therefore, we can replace the
    // op's results with its YieldOp's operands.
    rewriter.replaceOp(op, yieldOp->getOperands());

    // No need for these intermediate blocks; merge them into one.
    rewriter.mergeBlocks(opEntryBlock, originalBlock, adaptor.getOperands());
    rewriter.mergeBlocks(newBlock, originalBlock, {});

    rewriter.eraseOp(&*Block::iterator(yieldOp));

    return success();
  }
};

/// A conversion pattern for detensoring internal (non-entry) blocks within a
/// function.
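///
/// For example, assuming both arguments were selected for detensoring, a
/// block signature is rewritten from
///   ^bb1(%6: tensor<i32>, %9: tensor<i32>):
/// to
///   ^bb1(%6: i32, %9: i32):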
struct FunctionNonEntryBlockConversion : public ConversionPattern {
  FunctionNonEntryBlockConversion(StringRef functionLikeOpName,
                                  MLIRContext *ctx, TypeConverter &converter,
                                  DenseSet<BlockArgument> blockArgsToDetensor)
      : ConversionPattern(converter, functionLikeOpName, /*benefit=*/1, ctx),
        blockArgsToDetensor(blockArgsToDetensor) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    rewriter.startRootUpdate(op);
    Region &region = function_like_impl::getFunctionBody(op);
    SmallVector<TypeConverter::SignatureConversion, 2> conversions;

    for (Block &block : llvm::drop_begin(region, 1)) {
      conversions.emplace_back(block.getNumArguments());
      TypeConverter::SignatureConversion &back = conversions.back();

      for (BlockArgument blockArgument : block.getArguments()) {
        int idx = blockArgument.getArgNumber();

        if (blockArgsToDetensor.count(blockArgument))
          back.addInputs(idx, {getTypeConverter()->convertType(
                                  block.getArgumentTypes()[idx])});
        else
          back.addInputs(idx, {block.getArgumentTypes()[idx]});
      }
    }

    if (failed(rewriter.convertNonEntryRegionTypes(&region, *typeConverter,
                                                   conversions))) {
      rewriter.cancelRootUpdate(op);
      return failure();
    }

    rewriter.finalizeRootUpdate(op);
    return success();
  }

private:
  const DenseSet<BlockArgument> blockArgsToDetensor;
};

class DetensorizeTypeConverter : public TypeConverter {
public:
  DetensorizeTypeConverter() {
    addConversion([](Type type) { return type; });

    // A TensorType that can be detensored is converted to its underlying
    // element type.
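    // For example, tensor<i32> converts to i32, while tensor<4xi32> converts
    // to itself and therefore remains legal.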
    addConversion([](TensorType tensorType) -> Type {
      if (canBeDetensored(tensorType))
        return tensorType.getElementType();

      return tensorType;
    });

    // A tensor value is detensored by extracting its element(s).
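    // For a 0-D tensor, this materialization amounts to, e.g.:
    //   %element = tensor.extract %tensor[] : tensor<i32>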
    addTargetMaterialization([](OpBuilder &builder, Type type,
                                ValueRange inputs, Location loc) -> Value {
      return builder.create<tensor::ExtractOp>(loc, inputs[0], ValueRange{});
    });

    addSourceMaterialization(sourceMaterializationCallback);
    addArgumentMaterialization(sourceMaterializationCallback);
  }
};

/// Canonicalizes the pattern of the form
///
/// %tensor = tensor.from_elements(%element) : (i32) -> tensor<1xi32>
/// %reshaped_tensor = linalg.tensor_collapse_shape %tensor []
///     : tensor<1xi32> into tensor<i32>
/// %extracted_element = tensor.extract %reshaped_tensor[] : tensor<i32>
///
/// to just %element.
struct ExtractFromReshapeFromElements
    : public OpRewritePattern<tensor::ExtractOp> {
  using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(tensor::ExtractOp extract,
                                PatternRewriter &rewriter) const final {
    if (!extract.indices().empty())
      return failure();

    auto tensorReshape =
        extract.tensor().getDefiningOp<TensorCollapseShapeOp>();
    if (tensorReshape == nullptr)
      return failure();

    auto tensorFromElements =
        tensorReshape.getOperand()
            .getDefiningOp<mlir::tensor::FromElementsOp>();
    if (tensorFromElements == nullptr)
      return failure();

    rewriter.replaceOp(extract, tensorFromElements.getOperand(0));
    return success();
  }
};

/// @see LinalgDetensorize in Linalg/Passes.td for more details.
struct LinalgDetensorize : public LinalgDetensorizeBase<LinalgDetensorize> {
  LinalgDetensorize() = default;
  LinalgDetensorize(const LinalgDetensorize &pass)
      : LinalgDetensorizeBase<LinalgDetensorize>() {}

  class CostModel {
  public:
    virtual ~CostModel() = default;

    /// A cost model algorithm computes the following outputs:
    ///
    /// - opsToDetensor: the list of linalg ops that should be
    /// detensored.
    ///
    /// - blockArgsToDetensor: since the operands and results of detensored
    /// linalg ops can cross the BB boundary (e.g. a linalg op's input can come
    /// from a BB argument and a linalg op's output can be passed to successor
    /// BBs), we need to maintain the subset of arguments that should be
    /// detensored (i.e. converted by typeConverter) for each affected BB.
    ///
    /// Example:
    ///
    /// For the following snippet:
    /// ...
    /// ^bb1(%6: tensor<i32>, %9: tensor<i32>):
    ///   %7 = linalg.init_tensor [] : tensor<i32>
    ///   %8 = linalg.generic #attrs
    ///     ins(%6, %6 : tensor<i32>, tensor<i32>)
    ///     outs(%7 : tensor<i32>) {
    ///     ^bb0(%arg0: i32, %arg1: i32, %arg2: i32):
    ///       %9 = arith.addi %arg0, %arg1 : i32
    ///       linalg.yield %9 : i32
    ///   } -> tensor<i32>
    ///   %10 = "some.op"(%9)
    ///   br ^bb2(%8 : tensor<i32>)
    /// ...
    ///
    /// if the cost model decides that the linalg.generic op should be
    /// detensored, then:
    /// - opsToDetensor should be = {linalg.generic{add}}.
    /// - blockArgsToDetensor should be = {bb1 -> {0}, bb2 -> {0}}.
    virtual void compute(FuncOp func, DetensorizeTypeConverter typeConverter,
                         DenseSet<Operation *> &opsToDetensor,
                         DenseSet<BlockArgument> &blockArgsToDetensor) = 0;

    /// From the blockArgsToDetensor set computed by a CostModel
    /// implementation, this method computes the corresponding branch op
    /// detensoring. The result is a map from a branch op to a subset of indices
    /// of its operands. The indices specify which of the branch op's operands
    /// should be detensored.
    ///
    /// For the previous example, this method would compute: {bb2 -> {0}}.
    static DenseMap<Operation *, DenseSet<int>> computeBranchOpDetensoring(
        const DenseSet<BlockArgument> &blockArgsToDetensor) {
      DenseMap<Operation *, DenseSet<int>> detensorableBranchOps;

      for (auto blockArgumentElem : blockArgsToDetensor) {
        Block *block = blockArgumentElem.getOwner();

        for (PredecessorIterator pred = block->pred_begin();
             pred != block->pred_end(); ++pred) {
          BranchOpInterface terminator =
              dyn_cast<BranchOpInterface>((*pred)->getTerminator());
          auto blockOperands =
              terminator.getSuccessorOperands(pred.getSuccessorIndex());

          if (!blockOperands || blockOperands->empty())
            continue;

          detensorableBranchOps[terminator].insert(
              blockOperands->getBeginOperandIndex() +
              blockArgumentElem.getArgNumber());
        }
      }

      return detensorableBranchOps;
    }
  };

  /// Detensorize linalg ops involved in control-flow within a function.
  ///
  /// This model starts from BranchOps and CondBranchOps within a function. For
  /// each such branch, the model then walks the use-def chain for the branch's
  /// condition backwards in order to understand where the condition's value
  /// comes from. If the condition value is (indirectly) computed by a linalg op
  /// that can be detensored, the model then continues walking the use-def chain
  /// in order to understand where the linalg op's operands come from. This
  /// leads to discovering a "detensoring component". A detensoring component is
  /// the set of operations + block arguments that are involved in control-flow
  /// AND can be detensored.
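  ///
  /// As a purely illustrative sketch, in:
  ///
  ///   %cond_t = linalg.generic ... -> tensor<i1>
  ///   %cond = tensor.extract %cond_t[] : tensor<i1>
  ///   cond_br %cond, ^bb1(%t : tensor<i32>), ^bb2
  ///
  /// the model walks backwards from the cond_br operands through %cond and
  /// %cond_t to the producing linalg.generic, and keeps following %t (and any
  /// other values it reaches) to collect the rest of the detensoring
  /// component.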
  class ControlFlowDetectionModel : public CostModel {
  public:
    void compute(FuncOp func, DetensorizeTypeConverter typeConverter,
                 DenseSet<Operation *> &opsToDetensor,
                 DenseSet<BlockArgument> &blockArgsToDetensor) override {
      SmallVector<Value> workList;

      func.walk([&](CondBranchOp condBr) {
        for (auto operand : condBr.getOperands()) {
          workList.push_back(operand);
        }
      });

      func.walk([&](BranchOp br) {
        for (auto operand : br.getOperands()) {
          workList.push_back(operand);
        }
      });

      DenseSet<Value> visitedValues;
      DenseSet<Operation *> visitedOps;

      // For a (to-be-detensored) value, check if it "escapes" the block by
      // being passed to a terminator. If it does, then workList is updated
      // with the corresponding argument of the successor block.
      auto updateWorkListWithSuccessorArguments =
          [&](Value value, BranchOpInterface terminator) {
            if (!terminator)
              return;

            for (auto operandIdx :
                 llvm::seq<unsigned>(0, terminator->getOperands().size())) {
              Value operand = terminator->getOperand(operandIdx);

              if (operand == value) {
                auto succBlockArg =
                    terminator.getSuccessorBlockArgument(operandIdx);

                if (succBlockArg && !blockArgsToDetensor.count(*succBlockArg))
                  workList.push_back(*succBlockArg);
              }
            }
          };

      while (!workList.empty()) {
        Value currentItem = workList.pop_back_val();

        if (!visitedValues.insert(currentItem).second)
          continue;

        // 1   - Look forward:
        // 1.1 - If currentItem escapes to one or more successors, add
        // the corresponding successor arguments to workList.
        updateWorkListWithSuccessorArguments(
            currentItem, dyn_cast<BranchOpInterface>(
                             currentItem.getParentBlock()->getTerminator()));

        // 1.2 - For each user of currentItem, add the defined values to
        // workList. This way, the user ops can be inspected later if they are
        // detensorable and if so, their operands will be added to workList to
        // potentially discover other parts of the detensorable component.
        for (auto *user : currentItem.getUsers())
          for (Value result : user->getResults())
            workList.push_back(result);

        // 2   - Look backward:
        // 2.1 - The current item is defined by a block argument. If the owner
        // block is a non-entry one, then:
        //       * Add the argument to blockArgsToDetensor.
        //       * Walk the use-def chain backwards to add each predecessor's
        //       terminator-operands corresponding to currentItem to workList.
        if (currentItem.dyn_cast<BlockArgument>()) {
          BlockArgument currentItemBlockArgument =
              currentItem.cast<BlockArgument>();
          Block *ownerBlock = currentItemBlockArgument.getOwner();

          // Function arguments are not detensored/converted.
          if (&*ownerBlock->getParent()->begin() == ownerBlock)
            continue;

          // This inner-block argument is involved in control-flow; it should
          // be detensored.
          blockArgsToDetensor.insert(currentItemBlockArgument);

          for (PredecessorIterator pred = ownerBlock->pred_begin();
               pred != ownerBlock->pred_end(); ++pred) {
            BranchOpInterface predTerminator =
                dyn_cast<BranchOpInterface>((*pred)->getTerminator());

            // TODO: For now, we give up if any of the control-flow components
            // in a function is not detensorable. Fix that.
            if (!predTerminator) {
              opsToDetensor.clear();
              blockArgsToDetensor.clear();
              return;
            }

            auto ownerBlockOperands =
                predTerminator.getSuccessorOperands(pred.getSuccessorIndex());

            if (!ownerBlockOperands || ownerBlockOperands->empty())
              continue;

            // For each predecessor, add the value it passes to that argument to
            // workList to find out how it's computed.
            workList.push_back(
                ownerBlockOperands
                    .getValue()[currentItemBlockArgument.getArgNumber()]);
          }

          continue;
        }

        Operation *currentItemDefiningOp = currentItem.getDefiningOp();

        if (!visitedOps.insert(currentItemDefiningOp).second)
          continue;

        // 2.2 - The current item is computed by a GenericOp. If the op should
        // be detensored, then:
        //       * Add it to opsToDetensor.
        //       * Add its operands to workList to discover other parts of the
        //       potentially detensorable component.
        if (auto genericOp = dyn_cast<GenericOp>(currentItemDefiningOp)) {
          // The op was already encountered; no need to inspect it again.
          if (opsToDetensor.count(genericOp))
            continue;

          // The op should not be detensored; give up on it but continue
          // discovering the rest of the control-flow component.
          if (!shouldBeDetensored(genericOp, typeConverter)) {
            continue;
          }

          opsToDetensor.insert(genericOp);

          for (Value genericOpOperand : genericOp.inputs())
            workList.push_back(genericOpOperand);

          continue;
        }

        // 2.3 - The current item is the result of a FromElementsOp; it will be
        // trivially detensored later as part of the canonicalization patterns
        // applied at the end of detensoring.
        //
        // Note: No need to check whether the result type of this op is
        // detensorable; if it weren't, we wouldn't have reached this point in
        // the work list.
        if (dyn_cast<tensor::FromElementsOp>(currentItemDefiningOp))
          continue;

        // 2.4 - The current item is the result of a scalar op; add all its
        // operands to the work list.
        if (llvm::all_of(
                currentItemDefiningOp->getResultTypes(),
                [&](Type resultType) { return resultType.isIntOrFloat(); }))
          for (Value scalarOpOperand : currentItemDefiningOp->getOperands())
            workList.push_back(scalarOpOperand);
      }

      // Since the cost model gives up on some ops (see the details of step 2.2
      // above), block arguments that correspond to the values produced by those
      // ops should not be detensored either.

      DenseSet<BlockArgument> blockArgsToRemove;

      for (auto &blockArg : blockArgsToDetensor) {
        Block *block = blockArg.getParentBlock();

        // For the potentially detensorable block argument, find the
        // corresponding operands in predecessor blocks.
        for (PredecessorIterator pred = block->pred_begin();
             pred != block->pred_end(); ++pred) {
          BranchOpInterface terminator =
              dyn_cast<BranchOpInterface>((*pred)->getTerminator());
          auto blockOperands =
              terminator.getSuccessorOperands(pred.getSuccessorIndex());

          if (!blockOperands || blockOperands->empty())
            continue;

          Operation *definingOp =
              terminator
                  ->getOperand(blockOperands->getBeginOperandIndex() +
                               blockArg.getArgNumber())
                  .getDefiningOp();

          // If the operand is defined by a GenericOp that will not be
          // detensored, then do not detensor the corresponding block argument.
          if (dyn_cast_or_null<GenericOp>(definingOp) &&
              opsToDetensor.count(definingOp) == 0) {
            blockArgsToRemove.insert(blockArg);
            break;
          }
        }
      }

      for (auto &blockArg : blockArgsToRemove) {
        blockArgsToDetensor.erase(blockArg);
      }
    }
  };

  /// Detensorize everything that can be detensored.
  class AggressiveDetensoringModel : public CostModel {
  public:
    void compute(FuncOp func, DetensorizeTypeConverter typeConverter,
                 DenseSet<Operation *> &opsToDetensor,
                 DenseSet<BlockArgument> &blockArgsToDetensor) override {
      func.walk([&](GenericOp genericOp) {
        if (shouldBeDetensored(genericOp, typeConverter))
          opsToDetensor.insert(genericOp);
      });

      for (Block &block : llvm::drop_begin(func.getBody(), 1))
        for (BlockArgument blockArgument : block.getArguments())
          blockArgsToDetensor.insert(blockArgument);
    }
  };

  void runOnFunction() override {
    MLIRContext *context = &getContext();
    DetensorizeTypeConverter typeConverter;
    RewritePatternSet patterns(context);
    ConversionTarget target(*context);
    DenseSet<Operation *> opsToDetensor;
    DenseMap<Operation *, DenseSet<int>> detensorableBranchOps;
    DenseSet<BlockArgument> blockArgsToDetensor;

    if (aggressiveMode.getValue()) {
      AggressiveDetensoringModel costModel;
      costModel.compute(getFunction(), typeConverter, opsToDetensor,
                        blockArgsToDetensor);
    } else {
      ControlFlowDetectionModel costModel;
      costModel.compute(getFunction(), typeConverter, opsToDetensor,
                        blockArgsToDetensor);
    }

    detensorableBranchOps =
        CostModel::computeBranchOpDetensoring(blockArgsToDetensor);

    target.addDynamicallyLegalOp<GenericOp>(
        [&](GenericOp op) { return !opsToDetensor.count(op); });

    target.addDynamicallyLegalOp<FuncOp>([&](FuncOp op) {
      // A function is legal if all of its non-entry blocks are legal. We
      // don't legalize the entry block (i.e. the function's signature)
      // since detensoring can't happen along external calling convention
      // boundaries, which we conservatively approximate as all function
      // signatures.
      return llvm::all_of(llvm::drop_begin(op.getBody(), 1), [&](Block &block) {
        if (llvm::any_of(blockArgsToDetensor, [&](BlockArgument blockArgument) {
              return blockArgument.getOwner() == &block &&
                     !typeConverter.isLegal(blockArgument.getType());
            })) {
          return false;
        }
        return true;
      });
    });

    target.markUnknownOpDynamicallyLegal([&](Operation *op) {
      if (isNotBranchOpInterfaceOrReturnLikeOp(op) ||
          isLegalForReturnOpTypeConversionPattern(op, typeConverter,
                                                  /*returnOpAlwaysLegal*/ true))
        return true;

      if (auto branchOp = dyn_cast<BranchOpInterface>(op)) {
        if (!detensorableBranchOps.count(branchOp))
          return true;

        for (auto operandIdx : detensorableBranchOps[branchOp])
          if (!typeConverter.isLegal(
                  branchOp->getOperand(operandIdx).getType()))
            return false;

        return true;
      }

      return false;
    });

    patterns.insert<DetensorizeGenericOp>(typeConverter, context);
    patterns.insert<FunctionNonEntryBlockConversion>(FuncOp::getOperationName(),
                                                     context, typeConverter,
                                                     blockArgsToDetensor);
    // Since non-entry block arguments get detensorized, we also need to
    // update the control flow inside the function to reflect the correct
    // types.
    auto shouldConvertBranchOperand = [&](BranchOpInterface branchOp,
                                          int operandIdx) -> bool {
      return detensorableBranchOps.count(branchOp) &&
             detensorableBranchOps[branchOp].count(operandIdx);
    };

    populateBranchOpInterfaceTypeConversionPattern(patterns, typeConverter,
                                                   shouldConvertBranchOperand);

    if (failed(applyFullConversion(getFunction(), target, std::move(patterns))))
      signalPassFailure();

    RewritePatternSet canonPatterns(context);
    canonPatterns.add<ExtractFromReshapeFromElements>(context);
    if (failed(applyPatternsAndFoldGreedily(getFunction(),
                                            std::move(canonPatterns))))
      signalPassFailure();
  }

  Option<bool> aggressiveMode{
      *this, "aggressive-mode",
      llvm::cl::desc("Detensorize all ops that qualify for detensoring along "
                     "with branch operands and basic-block arguments.")};
};
} // namespace

std::unique_ptr<Pass> mlir::createLinalgDetensorizePass() {
  return std::make_unique<LinalgDetensorize>();
}