//===- Detensorize.cpp - Linalg detensorize pass --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/StandardOps/Transforms/FuncConversions.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include <iterator>
#include <memory>

using namespace mlir;
using namespace mlir::linalg;

static Value sourceMaterializationCallback(OpBuilder &builder, Type type,
                                           ValueRange inputs, Location loc) {
  assert(inputs.size() == 1);
  // A detensored value is converted back by creating a new tensor from its
  // element(s).
  auto createNewTensorOp = builder.create<tensor::FromElementsOp>(
      loc, inputs[0].getType(), inputs[0]);

  // FromElementsOp results in a tensor<1xdtype>, so we need to reshape that
  // into a tensor<dtype>.
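  //
  // As an illustrative sketch (SSA names are made up), materializing an `i32`
  // value back into a `tensor<i32>` produces IR along the lines of:
  //   %0 = tensor.from_elements(%elem) : (i32) -> tensor<1xi32>
  //   %1 = linalg.tensor_reshape %0 [] : tensor<1xi32> into tensor<i32>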
  return builder.create<linalg::TensorReshapeOp>(
      loc, type, createNewTensorOp, ArrayRef<ReassociationExprs>{});
}

namespace {
/// Defines the criteria a TensorType must follow in order to be considered
/// "detensorable".
///
/// NOTE: For now, only 0-D tensors are supported.
///
/// Returns true if tensorType can be detensored.
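///
/// For example, `tensor<f32>` can be detensored, while `tensor<4xf32>` and
/// unranked tensors cannot (a consequence of the 0-D restriction above).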
bool canBeDetensored(TensorType tensorType) {
  return tensorType.hasRank() && tensorType.getRank() == 0;
}

bool shouldBeDetensored(Operation *op, TypeConverter typeConverter) {
  GenericOp genericOp = dyn_cast_or_null<GenericOp>(op);
  return genericOp && llvm::all_of(genericOp.getShapedOperandTypes(),
                                   [&](ShapedType shapedType) {
                                     return !typeConverter.isLegal(shapedType);
                                   });
}

/// A conversion pattern for detensoring `linalg.generic` ops.
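///
/// As a sketch (illustrative only; names and attributes are made up), a 0-D
/// `linalg.generic` such as
///
///   %res = linalg.generic #attrs
///     ins(%a, %b : tensor<i32>, tensor<i32>)
///     outs(%init : tensor<i32>) {
///   ^bb0(%arg0: i32, %arg1: i32, %arg2: i32):
///     %sum = addi %arg0, %arg1 : i32
///     linalg.yield %sum : i32
///   } -> tensor<i32>
///
/// has its body inlined into the enclosing block, and all uses of %res are
/// replaced by the scalar %sum yielded by its terminator.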
class DetensorizeGenericOp : public OpConversionPattern<GenericOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(GenericOp op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    Block *originalBlock = op->getBlock();

    // Gather some information about the op before inlining its region.
    Block *opEntryBlock = &*op.region().begin();
    YieldOp yieldOp = dyn_cast<YieldOp>(op.region().back().getTerminator());

    // Split the op's region before the op. This way, we have a clear insertion
    // point in which the op can be inlined.
    Block *newBlock = originalBlock->splitBlock(op);
    rewriter.inlineRegionBefore(op.region(), newBlock);
    // Now that the op's region is inlined, the operands of its YieldOp are
    // mapped to the materialized target values. Therefore, we can replace the
    // uses of the op's results with its YieldOp's operands.
    rewriter.replaceOp(op, yieldOp->getOperands());

    // No need for these intermediate blocks; merge them into one.
    rewriter.mergeBlocks(opEntryBlock, originalBlock, operands);
    rewriter.mergeBlocks(newBlock, originalBlock, {});

    rewriter.eraseOp(yieldOp);

    return success();
  }
};

/// A conversion pattern for detensoring internal (non-entry) blocks within a
/// function.
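///
/// For each non-entry block, the arguments selected by `blockArgsToDetensor`
/// have their types converted (e.g. `tensor<i32>` becomes `i32`), while the
/// remaining arguments keep their original types.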
struct FunctionNonEntryBlockConversion : public ConversionPattern {
  FunctionNonEntryBlockConversion(StringRef functionLikeOpName,
                                  MLIRContext *ctx, TypeConverter &converter,
                                  DenseSet<BlockArgument> blockArgsToDetensor)
      : ConversionPattern(converter, functionLikeOpName, /*benefit=*/1, ctx),
        blockArgsToDetensor(blockArgsToDetensor) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    rewriter.startRootUpdate(op);
    Region &region = function_like_impl::getFunctionBody(op);
    SmallVector<TypeConverter::SignatureConversion, 2> conversions;

    for (Block &block : llvm::drop_begin(region, 1)) {
      conversions.emplace_back(block.getNumArguments());
      TypeConverter::SignatureConversion &back = conversions.back();

      for (BlockArgument blockArgument : block.getArguments()) {
        int idx = blockArgument.getArgNumber();

        if (blockArgsToDetensor.count(blockArgument))
          back.addInputs(idx, {getTypeConverter()->convertType(
                                  block.getArgumentTypes()[idx])});
        else
          back.addInputs(idx, {block.getArgumentTypes()[idx]});
      }
    }

    if (failed(rewriter.convertNonEntryRegionTypes(&region, *typeConverter,
                                                   conversions))) {
      rewriter.cancelRootUpdate(op);
      return failure();
    }

    rewriter.finalizeRootUpdate(op);
    return success();
  }

private:
  const DenseSet<BlockArgument> blockArgsToDetensor;
};

class DetensorizeTypeConverter : public TypeConverter {
public:
  DetensorizeTypeConverter() {
    addConversion([](Type type) { return type; });

    // A TensorType that can be detensored is converted to its underlying
    // element type.
    addConversion([](TensorType tensorType) -> Type {
      if (canBeDetensored(tensorType))
        return tensorType.getElementType();

      return tensorType;
    });

    // A tensor value is detensored by extracting its element(s).
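    //
    // Sketch of the materialization for a 0-D tensor (illustrative names):
    //   %elem = tensor.extract %t[] : tensor<i32>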
    addTargetMaterialization([](OpBuilder &builder, Type type,
                                ValueRange inputs, Location loc) -> Value {
      return builder.create<tensor::ExtractOp>(loc, inputs[0], ValueRange{});
    });

    addSourceMaterialization(sourceMaterializationCallback);
    addArgumentMaterialization(sourceMaterializationCallback);
  }
};

/// Canonicalizes the pattern of the form
///
/// %tensor = tensor.from_elements(%element) : (i32) -> tensor<1xi32>
/// %reshaped_tensor = linalg.tensor_reshape %tensor [] : tensor<1xi32> into
///   tensor<i32>
/// %extracted_element = tensor.extract %reshaped_tensor[] : tensor<i32>
///
/// to just %element.
struct ExtractFromReshapeFromElements
    : public OpRewritePattern<tensor::ExtractOp> {
  using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(tensor::ExtractOp extract,
                                PatternRewriter &rewriter) const final {
    if (extract.indices().size() != 0)
      return failure();

    auto tensorReshape = extract.tensor().getDefiningOp<TensorReshapeOp>();
    if (tensorReshape == nullptr)
      return failure();

    auto tensorFromElements =
        tensorReshape.getOperand()
            .getDefiningOp<mlir::tensor::FromElementsOp>();
    if (tensorFromElements == nullptr)
      return failure();

    rewriter.replaceOp(extract, tensorFromElements.getOperand(0));
    return success();
  }
};

/// @see LinalgDetensorize in Linalg/Passes.td for more details.
struct LinalgDetensorize : public LinalgDetensorizeBase<LinalgDetensorize> {
  LinalgDetensorize() = default;
  LinalgDetensorize(const LinalgDetensorize &pass) {}

  class CostModel {
  public:
    virtual ~CostModel() = default;

    /// A cost model algorithm computes the following outputs:
    ///
    /// - opsToDetensor: the set of linalg ops that should be detensored.
    ///
    /// - blockArgsToDetensor: since the operands and results of detensored
    /// linalg ops can cross the BB boundary (e.g. a linalg op's input can come
    /// from a BB argument and a linalg op's output can be passed to successor
    /// BBs), we need to maintain the subset of arguments that should be
    /// detensored (i.e. converted by typeConverter) for each affected BB.
    ///
    /// Example:
    ///
    /// For the following snippet:
    /// ...
    /// ^bb1(%6: tensor<i32>, %9: tensor<i32>):
    ///   %7 = linalg.init_tensor [] : tensor<i32>
    ///   %8 = linalg.generic #attrs
    ///     ins(%6, %6 : tensor<i32>, tensor<i32>)
    ///     outs(%7 : tensor<i32>) {
    ///     ^bb0(%arg0: i32, %arg1: i32, %arg2: i32):
    ///       %sum = addi %arg0, %arg1 : i32
    ///       linalg.yield %sum : i32
    ///   } -> tensor<i32>
    ///   %10 = "some.op"(%9)
    ///   br ^bb2(%8 : tensor<i32>)
    /// ...
    ///
    /// if the cost model decides that the linalg.generic op should be
    /// detensored, then:
    /// - opsToDetensor should be {linalg.generic{add}}.
    /// - blockArgsToDetensor should be {bb1 -> {0}, bb2 -> {0}}.
    virtual void compute(FuncOp func, DetensorizeTypeConverter typeConverter,
                         DenseSet<Operation *> &opsToDetensor,
                         DenseSet<BlockArgument> &blockArgsToDetensor) = 0;

    /// From the blockArgsToDetensor set computed by a CostModel
    /// implementation, this method computes the corresponding branch op
    /// detensoring. The result is a map from a branch op to a subset of indices
    /// of its operands. The indices specify which of the branch op's operands
    /// should be detensored.
    ///
    /// For the previous example, this method would compute: {bb2 -> {0}}.
    static DenseMap<Operation *, DenseSet<int>> computeBranchOpDetensoring(
        const DenseSet<BlockArgument> &blockArgsToDetensor) {
      DenseMap<Operation *, DenseSet<int>> detensorableBranchOps;

      for (auto blockArgumentElem : blockArgsToDetensor) {
        Block *block = blockArgumentElem.getOwner();

        for (PredecessorIterator pred = block->pred_begin();
             pred != block->pred_end(); ++pred) {
          BranchOpInterface terminator =
              dyn_cast<BranchOpInterface>((*pred)->getTerminator());
          auto blockOperands =
              terminator.getSuccessorOperands(pred.getSuccessorIndex());

          if (!blockOperands || blockOperands->empty())
            continue;

          detensorableBranchOps[terminator].insert(
              blockOperands->getBeginOperandIndex() +
              blockArgumentElem.getArgNumber());
        }
      }

      return detensorableBranchOps;
    }
  };

  /// Detensorize linalg ops involved in control-flow within a function.
  ///
  /// This model starts from CondBranchOps within a function. For each cond_br,
  /// the model then walks the use-def chain for the branch's condition
  /// backwards in order to understand where the condition's value comes from.
  /// If the condition value is (indirectly) computed by a linalg op that can be
  /// detensored, the model then continues walking the use-def chain in order to
  /// understand where the linalg op's operands come from. This leads to
  /// discovering a "detensoring component". A detensoring component is the set
  /// of operations + block arguments that are involved in control-flow AND can
  /// be detensored.
  ///
  /// For examples where this model succeeds in discovering a detensoring
  /// component, see:
  /// - test/Dialect/Linalg/detensorize_while.mlir
  /// - test/Dialect/Linalg/detensorize_while_pure_cf.mlir
  ///
  /// For an example where this model marks control-flow as "non-detensorable",
  /// see:
  /// - test/Dialect/Linalg/detensorize_while_failure.mlir
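  ///
  /// A sketch of the kind of IR this model starts from (illustrative; names
  /// and attributes are made up):
  ///
  ///   %cmp = linalg.generic #attrs
  ///     ins(%val, %bound : tensor<i32>, tensor<i32>)
  ///     outs(%init : tensor<i1>) { ... } -> tensor<i1>
  ///   %cond = tensor.extract %cmp[] : tensor<i1>
  ///   cond_br %cond, ^bb1(%val : tensor<i32>), ^bb2(%val : tensor<i32>)
  ///
  /// Starting from %cond, the walk visits the tensor.extract, then the
  /// linalg.generic, and finally the block arguments and branch operands that
  /// feed it, which together form the detensoring component.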
  class PureControlFlowDetectionModel : public CostModel {
  public:
    void compute(FuncOp func, DetensorizeTypeConverter typeConverter,
                 DenseSet<Operation *> &opsToDetensor,
                 DenseSet<BlockArgument> &blockArgsToDetensor) override {
      SmallVector<Value> workList;

      func.walk(
          [&](CondBranchOp condBr) { workList.push_back(condBr.condition()); });

      DenseSet<Value> visitedValues;
      DenseSet<Operation *> visitedOps;

      // For a (to-be-detensored) value, check if it "escapes" the block by
      // being passed to the terminator. If it does, then workList is updated
      // with the corresponding argument of the successor block.
      auto updateWorkListWithSuccessorArguments =
          [&](Value value, BranchOpInterface terminator) {
            if (!terminator)
              return;

            for (auto operandIdx :
                 llvm::seq<unsigned>(0, terminator->getOperands().size())) {
              Value operand = terminator->getOperand(operandIdx);

              if (operand == value) {
                auto succBlockArg =
                    terminator.getSuccessorBlockArgument(operandIdx);

                if (succBlockArg && !blockArgsToDetensor.count(*succBlockArg))
                  workList.push_back(*succBlockArg);
              }
            }
          };

      while (!workList.empty()) {
        Value currentItem = workList.pop_back_val();

        if (!visitedValues.insert(currentItem).second)
          continue;

        // 1   - Look forward:
        // 1.1 - If currentItem escapes to one or more successors, add
        // the corresponding successor arguments to workList.
        updateWorkListWithSuccessorArguments(
            currentItem, dyn_cast<BranchOpInterface>(
                             currentItem.getParentBlock()->getTerminator()));

        // 1.2 - Add the results of each user of currentItem to workList. This
        // way, those user ops can later be inspected for detensorability; if
        // they qualify, their operands are added to workList to potentially
        // discover other parts of the detensorable component.
        for (auto *user : currentItem.getUsers())
          for (Value result : user->getResults())
            workList.push_back(result);

        // 2   - Look backward:
        // 2.1 - The current item is defined by a block argument. If the owner
        // block is a non-entry one, then:
        //       * Add the argument to blockArgsToDetensor.
        //       * Walk the use-def chain backwards to add each predecessor's
        //       terminator-operands corresponding to currentItem to workList.
        if (auto currentItemBlockArgument =
                currentItem.dyn_cast<BlockArgument>()) {
          Block *ownerBlock = currentItemBlockArgument.getOwner();

          // Function arguments are not detensored/converted.
          if (&*ownerBlock->getParent()->begin() == ownerBlock)
            continue;

          // This non-entry block argument is involved in control-flow, so it
          // should be detensored.
          blockArgsToDetensor.insert(currentItemBlockArgument);

          for (PredecessorIterator pred = ownerBlock->pred_begin();
               pred != ownerBlock->pred_end(); ++pred) {
            BranchOpInterface terminator =
                dyn_cast<BranchOpInterface>((*pred)->getTerminator());

            // TODO: For now, we give up if any of the control-flow components
            // in a function is not detensorable. Fix that.
            if (!terminator) {
              opsToDetensor.clear();
              blockArgsToDetensor.clear();
              return;
            }

            auto ownerBlockOperands =
                terminator.getSuccessorOperands(pred.getSuccessorIndex());

            if (!ownerBlockOperands || ownerBlockOperands->empty())
              continue;

            // For each predecessor, add the value it passes to that argument to
            // workList to find out how it's computed.
            workList.push_back(
                ownerBlockOperands
                    .getValue()[currentItemBlockArgument.getArgNumber()]);
          }

          continue;
        }

        Operation *currentItemDefiningOp = currentItem.getDefiningOp();

        if (!visitedOps.insert(currentItemDefiningOp).second)
          continue;

        // 2.2 - The current item is computed by a GenericOp. If the op should
        // be detensored, then:
        //       * Add it to opsToDetensor.
        //       * Add its operands to workList to discover other parts of the
        //       potentially detensorable component.
        if (auto genericOp = dyn_cast<GenericOp>(currentItemDefiningOp)) {
          // The op was encountered already, no need to inspect it again.
          if (opsToDetensor.count(genericOp))
            continue;

          // TODO: For now, we give up if any of the control-flow components
          // in a function is not detensorable. Fix that.
          if (!shouldBeDetensored(genericOp, typeConverter)) {
            opsToDetensor.clear();
            blockArgsToDetensor.clear();
            return;
          }

          opsToDetensor.insert(genericOp);

          for (Value genericOpOperand : genericOp.inputs())
            workList.push_back(genericOpOperand);

          continue;
        }

        // 2.3 - The current item is the result of a FromElementsOp; it will be
        // trivially detensored later by the canonicalization patterns applied
        // at the end of detensoring.
        //
        // Note: There is no need to check whether the result type of this op
        // is detensorable; if it weren't, we wouldn't have reached this point
        // in the work list.
        if (isa<tensor::FromElementsOp>(currentItemDefiningOp))
          continue;

        // 2.4 - The current item is the result of a scalar op, add all its
        // operands to the work list.
        if (llvm::all_of(
                currentItemDefiningOp->getResultTypes(),
                [&](Type resultType) { return resultType.isIntOrFloat(); }))
          for (Value scalarOpOperand : currentItemDefiningOp->getOperands())
            workList.push_back(scalarOpOperand);
      }
    }
  };

  /// Detensorize everything that can be detensored.
  class AggressiveDetensoringModel : public CostModel {
  public:
    void compute(FuncOp func, DetensorizeTypeConverter typeConverter,
                 DenseSet<Operation *> &opsToDetensor,
                 DenseSet<BlockArgument> &blockArgsToDetensor) override {
      func.walk([&](GenericOp genericOp) {
        if (shouldBeDetensored(genericOp, typeConverter))
          opsToDetensor.insert(genericOp);
      });

      for (Block &block : llvm::drop_begin(func.getBody(), 1))
        for (BlockArgument blockArgument : block.getArguments())
          blockArgsToDetensor.insert(blockArgument);
    }
  };

  void runOnFunction() override {
    MLIRContext *context = &getContext();
    DetensorizeTypeConverter typeConverter;
    RewritePatternSet patterns(context);
    ConversionTarget target(*context);
    DenseSet<Operation *> opsToDetensor;
    DenseMap<Operation *, DenseSet<int>> detensorableBranchOps;
    DenseSet<BlockArgument> blockArgsToDetensor;

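    // Overall flow: (1) run a cost model to pick the ops and block arguments
    // to detensor, (2) derive the affected branch operands from those block
    // arguments, (3) set up the conversion target and patterns accordingly,
    // and (4) clean up any leftover from_elements/reshape/extract chains with
    // a canonicalization pattern.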
    if (aggressiveMode.getValue()) {
      AggressiveDetensoringModel costModel;
      costModel.compute(getFunction(), typeConverter, opsToDetensor,
                        blockArgsToDetensor);

    } else {
      PureControlFlowDetectionModel costModel;
      costModel.compute(getFunction(), typeConverter, opsToDetensor,
                        blockArgsToDetensor);
    }

    detensorableBranchOps =
        CostModel::computeBranchOpDetensoring(blockArgsToDetensor);

    target.addDynamicallyLegalOp<GenericOp>(
        [&](GenericOp op) { return !opsToDetensor.count(op); });

    target.addDynamicallyLegalOp<FuncOp>([&](FuncOp op) {
      // A function is legal if all of its non-entry blocks are legal. We
      // don't legalize the entry block (i.e. the function's signature)
      // since detensoring can't happen along external calling convention
      // boundaries, which we conservatively approximate as all function
      // signatures.
      return llvm::all_of(llvm::drop_begin(op.getBody(), 1), [&](Block &block) {
        if (llvm::any_of(blockArgsToDetensor, [&](BlockArgument blockArgument) {
              return blockArgument.getOwner() == &block &&
                     !typeConverter.isLegal(blockArgument.getType());
            })) {
          return false;
        }
        return true;
      });
    });

    target.markUnknownOpDynamicallyLegal([&](Operation *op) {
      if (isNotBranchOpInterfaceOrReturnLikeOp(op) ||
          isLegalForReturnOpTypeConversionPattern(op, typeConverter,
                                                  /*returnOpAlwaysLegal=*/true))
        return true;

      if (auto branchOp = dyn_cast<BranchOpInterface>(op)) {
        if (!detensorableBranchOps.count(branchOp))
          return true;

        for (auto operandIdx : detensorableBranchOps[branchOp])
          if (!typeConverter.isLegal(
                  branchOp->getOperand(operandIdx).getType()))
            return false;

        return true;
      }

      return false;
    });

    patterns.insert<DetensorizeGenericOp>(typeConverter, context);
    patterns.insert<FunctionNonEntryBlockConversion>(FuncOp::getOperationName(),
                                                     context, typeConverter,
                                                     blockArgsToDetensor);
    // Since non-entry block arguments get detensorized, we also need to
    // update the control flow inside the function to reflect the correct
    // types.
    auto shouldConvertBranchOperand = [&](BranchOpInterface branchOp,
                                          int operandIdx) -> bool {
      return detensorableBranchOps.count(branchOp) &&
             detensorableBranchOps[branchOp].count(operandIdx);
    };

    populateBranchOpInterfaceTypeConversionPattern(patterns, typeConverter,
                                                   shouldConvertBranchOperand);

    if (failed(applyFullConversion(getFunction(), target, std::move(patterns))))
      signalPassFailure();

    RewritePatternSet canonPatterns(context);
    canonPatterns.add<ExtractFromReshapeFromElements>(context);
    if (failed(applyPatternsAndFoldGreedily(getFunction(),
                                            std::move(canonPatterns))))
      signalPassFailure();
  }

  Option<bool> aggressiveMode{
      *this, "aggressive-mode",
      llvm::cl::desc("Detensorize all ops that qualify for detensoring along "
                     "with branch operands and basic-block arguments.")};
};
} // namespace

std::unique_ptr<Pass> mlir::createLinalgDetensorizePass() {
  return std::make_unique<LinalgDetensorize>();
}