1 //===- Detensorize.cpp - Linalg transformations as patterns ----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "PassDetail.h"
10 #include "mlir/Dialect/Linalg/IR/Linalg.h"
11 #include "mlir/Dialect/Linalg/Passes.h"
12 #include "mlir/Dialect/StandardOps/Transforms/FuncConversions.h"
13 #include "mlir/Dialect/Tensor/IR/Tensor.h"
14 #include "mlir/IR/OpDefinition.h"
15 #include "mlir/Transforms/DialectConversion.h"
16 #include "mlir/Transforms/GreedyPatternRewriteDriver.h"
17 #include <iterator>
18 #include <memory>
19 #include <utility>
20 
21 using namespace mlir;
22 using namespace mlir::linalg;
23 
24 static Value sourceMaterializationCallback(OpBuilder &builder, Type type,
25                                            ValueRange inputs, Location loc) {
26   assert(inputs.size() == 1);
27   if (inputs[0].getType().isa<TensorType>())
28     return nullptr;
29 
30   // A detensored value is converted back by creating a new tensor from its
31   // element(s).
32   auto createNewTensorOp =
33       builder.create<tensor::FromElementsOp>(loc, inputs[0]);
34 
35   // FromElementsOp results in a tensor<1xdtype>, we need to reshape that to
36   // a tensor<dtype> instead.
37   return builder.create<tensor::CollapseShapeOp>(
38       loc, type, createNewTensorOp, ArrayRef<ReassociationExprs>{});
39 }
40 
41 namespace {
42 /// Defines the criteria a TensorType must follow in order to be considered
43 /// "detensorable".
44 ///
45 /// NOTE: For now, only 0-D tensors are supported.
46 ///
47 /// Returns true if tensorType can be detensored.
48 bool canBeDetensored(TensorType tensorType) {
49   return tensorType.hasRank() && tensorType.getRank() == 0;
50 }
51 
52 bool shouldBeDetensored(Operation *op, TypeConverter typeConverter) {
53   GenericOp genericOp = dyn_cast_or_null<GenericOp>(op);
54   return genericOp &&
55          llvm::all_of(
56              genericOp.getInputAndOutputOperands(), [&](OpOperand *opOperand) {
57                return !typeConverter.isLegal(opOperand->get().getType());
58              });
59 }
60 
/// A conversion pattern for detensoring `linalg.generic` ops. The op's body
/// already computes on scalars, so detensoring amounts to inlining that body
/// in place of the op itself.
class DetensorizeGenericOp : public OpConversionPattern<GenericOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(GenericOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Block *originalBlock = op->getBlock();

    // Gather some information about the op before inlining its region.
    Block *opEntryBlock = &*op.region().begin();
    YieldOp yieldOp = dyn_cast<YieldOp>(op.region().back().getTerminator());

    // Split the op's region before the op. This way, we have a clear insertion
    // point in which the op can be inlined.
    Block *newBlock = rewriter.splitBlock(originalBlock, Block::iterator(op));
    rewriter.inlineRegionBefore(op.region(), newBlock);
    // Now that op's region is inlined, the operands of its YieldOp are mapped
    // to the materialized target values. Therefore, we can replace the op's
    // uses with those of its YieldOp's operands.
    rewriter.replaceOp(op, yieldOp->getOperands());

    // No need for these intermediate blocks, merge them into 1. The entry
    // block's arguments are substituted by the adaptor's (already-detensored)
    // operands.
    rewriter.mergeBlocks(opEntryBlock, originalBlock, adaptor.getOperands());
    rewriter.mergeBlocks(newBlock, originalBlock, {});

    // The inlined `linalg.yield` is meaningless outside the op's region.
    rewriter.eraseOp(&*Block::iterator(yieldOp));

    return success();
  }
};
92 
/// A conversion pattern for detensoring internal (non-entry) blocks within a
/// function. Rewrites the argument types of every non-entry block; the entry
/// block (the function signature) is deliberately left untouched.
struct FunctionNonEntryBlockConversion : public ConversionPattern {
  FunctionNonEntryBlockConversion(MLIRContext *ctx, TypeConverter &converter,
                                  DenseSet<BlockArgument> blockArgsToDetensor)
      : ConversionPattern(converter, MatchTraitOpTypeTag(),
                          TypeID::get<OpTrait::FunctionLike>(), /*benefit=*/1,
                          ctx),
        blockArgsToDetensor(std::move(blockArgsToDetensor)) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    // The function op is updated in place, so wrap the block-signature
    // rewrites in a root update that can be rolled back on failure.
    rewriter.startRootUpdate(op);
    Region &region = function_like_impl::getFunctionBody(op);
    SmallVector<TypeConverter::SignatureConversion, 2> conversions;

    // Build one signature conversion per non-entry block.
    for (Block &block : llvm::drop_begin(region, 1)) {
      conversions.emplace_back(block.getNumArguments());
      TypeConverter::SignatureConversion &back = conversions.back();

      for (BlockArgument blockArgument : block.getArguments()) {
        int idx = blockArgument.getArgNumber();

        // Only arguments selected by the cost model get the converted
        // (detensored) type; all other arguments keep their original type.
        if (blockArgsToDetensor.count(blockArgument))
          back.addInputs(idx, {getTypeConverter()->convertType(
                                  block.getArgumentTypes()[idx])});
        else
          back.addInputs(idx, {block.getArgumentTypes()[idx]});
      }
    }

    if (failed(rewriter.convertNonEntryRegionTypes(&region, *typeConverter,
                                                   conversions))) {
      rewriter.cancelRootUpdate(op);
      return failure();
    }

    rewriter.finalizeRootUpdate(op);
    return success();
  }

private:
  // The set of non-entry block arguments (chosen by the cost model) whose
  // types must be converted.
  const DenseSet<BlockArgument> blockArgsToDetensor;
};
138 
/// Type converter driving detensorization: detensorable tensor types map to
/// their element type, everything else is left unchanged.
class DetensorizeTypeConverter : public TypeConverter {
public:
  DetensorizeTypeConverter() {
    // Identity fallback for all types. Note: conversions registered later
    // take precedence, so the tensor-specific rule below is consulted first.
    addConversion([](Type type) { return type; });

    // A TensorType that can be detensored, is converted to the underlying
    // element type.
    addConversion([](TensorType tensorType) -> Type {
      if (canBeDetensored(tensorType))
        return tensorType.getElementType();

      return tensorType;
    });

    // A tensor value is detensored by extracting its element(s). The empty
    // index list reads the sole element of a 0-D tensor.
    addTargetMaterialization([](OpBuilder &builder, Type type,
                                ValueRange inputs, Location loc) -> Value {
      return builder.create<tensor::ExtractOp>(loc, inputs[0], ValueRange{});
    });

    // Going back from a detensored scalar to a tensor is handled by wrapping
    // the element in a new tensor (see sourceMaterializationCallback).
    addSourceMaterialization(sourceMaterializationCallback);
    addArgumentMaterialization(sourceMaterializationCallback);
  }
};
163 
164 /// Canonicalizes the pattern of the form
165 ///
166 /// %tensor = tensor.from_elements(%element) : (i32) -> tensor<1xi32>
167 /// %reshaped_tensor = tensor.collapse_shape %tensor []
168 ///     : tensor<1xi32> into tensor<i32>
169 /// %extracted_element = tensor.extract %reshaped_tensor[] : tensor<i32>
170 ///
171 /// to just %element.
172 struct ExtractFromReshapeFromElements
173     : public OpRewritePattern<tensor::ExtractOp> {
174   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
175 
176   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
177                                 PatternRewriter &rewriter) const final {
178     if (!extract.indices().empty())
179       return failure();
180 
181     auto tensorReshape =
182         extract.tensor().getDefiningOp<tensor::CollapseShapeOp>();
183     if (tensorReshape == nullptr)
184       return failure();
185 
186     auto tensorFromElements =
187         tensorReshape.getOperand()
188             .getDefiningOp<mlir::tensor::FromElementsOp>();
189     if (tensorFromElements == nullptr)
190       return failure();
191 
192     rewriter.replaceOp(extract, tensorFromElements.getOperand(0));
193     return success();
194   }
195 };
196 
/// @see LinalgDetensorize in Linalg/Passes.td for more details.
struct LinalgDetensorize : public LinalgDetensorizeBase<LinalgDetensorize> {
  LinalgDetensorize() = default;

  /// Abstract interface deciding *what* gets detensored. Concrete models fill
  /// in the two output sets consumed by runOnOperation() below.
  class CostModel {
  public:
    virtual ~CostModel() = default;

    /// A cost model algorithm computes the following outputs:
    ///
    /// - opsToDetensor: the list of linalg ops that should be
    /// detensored.
    ///
    /// - blockArgsToDetensor: since the operands and results of detensored
    /// linalg ops can cross the BB boundary (e.g. a linalg op's input can come
    /// from a BB argument and a linalg op's output can be passed to successor
    /// BBs), we need to maintain the sub-set of arguments that should be
    /// detensored (i.e. converted by typeConverter) for each affected BB.
    ///
    /// Example:
    ///
    /// For the following snippet:
    /// ...
    /// ^bb1(%6: tensor<i32>, %9: tensor<i32>):
    ///   %7 = linalg.init_tensor [] : tensor<i32>
    ///   %8 = linalg.generic #attrs
    ///     ins(%6, %6 : tensor<i32>, tensor<i32>)
    ///     outs(%7 : tensor<i32>) {
    ///     ^bb0(%arg0: i32, %arg1: i32, %arg2: i32):
    ///       %9 = arith.addi %arg0, %arg1 : i32
    ///       linalg.yield %9 : i32
    ///   } -> tensor<i32>
    ///   %10 = "some.op"(%9)
    ///   br ^bb2(%8 : tensor<i32>)
    /// ...
    ///
    /// if the cost model decides that the linalg.generic op should be
    /// detensored, then:
    /// - opsToDetensor should be = {linalg.generic{add}}.
    /// - blockArgsToDetensor should be = {bb1 -> {0}, bb2 -> {0}}.
    virtual void compute(Operation *func,
                         DetensorizeTypeConverter typeConverter,
                         DenseSet<Operation *> &opsToDetensor,
                         DenseSet<BlockArgument> &blockArgsToDetensor) = 0;

    /// From the blockArgsToDetensor set computed by a CostModel
    /// implementation, this method computes the corresponding branch op
    /// detensoring. The result is a map from a branch op to a subset of indices
    /// of its operands. The indices specify which of the branch op's operands
    /// should be detensored.
    ///
    /// For the previous example, this method would compute: {bb2 -> {0}}.
    static DenseMap<Operation *, DenseSet<int>> computeBranchOpDetensoring(
        const DenseSet<BlockArgument> &blockArgsToDetensor) {
      DenseMap<Operation *, DenseSet<int>> detensorableBranchOps;

      for (auto blockArgumentElem : blockArgsToDetensor) {
        Block *block = blockArgumentElem.getOwner();

        for (PredecessorIterator pred = block->pred_begin();
             pred != block->pred_end(); ++pred) {
          // NOTE(review): if a predecessor's terminator does not implement
          // BranchOpInterface, `terminator` is null and the call below would
          // misbehave. The control-flow model never selects such arguments,
          // but the aggressive model might — confirm.
          BranchOpInterface terminator =
              dyn_cast<BranchOpInterface>((*pred)->getTerminator());
          auto blockOperands =
              terminator.getSuccessorOperands(pred.getSuccessorIndex());

          if (!blockOperands || blockOperands->empty())
            continue;

          // Translate the block-argument index into the terminator's operand
          // index space.
          detensorableBranchOps[terminator].insert(
              blockOperands->getBeginOperandIndex() +
              blockArgumentElem.getArgNumber());
        }
      }

      return detensorableBranchOps;
    }
  };

  /// Detensorize linalg ops involved in control-flow within a function.
  ///
  /// This model starts from BranchOps and CondBranchOps within a function. For
  /// each such branch, the model then walks the use-def chain for the branch's
  /// condition backwards in order to understand where the condition's value
  /// comes from. If the condition value is (indirectly) computed by a linalg op
  /// that can be detensored, the model then continues walking the use-def chain
  /// in order to understand where the linalg op's operands come from. This
  /// leads to discovering a "detensoring component". A detensoring component is
  /// the set of operations + block arguments that are involved in control-flow
  /// AND can be detensored.
  class ControlFlowDetectionModel : public CostModel {
  public:
    void compute(Operation *func, DetensorizeTypeConverter typeConverter,
                 DenseSet<Operation *> &opsToDetensor,
                 DenseSet<BlockArgument> &blockArgsToDetensor) override {
      SmallVector<Value> workList;

      // Seed the work list with all operands of branch ops: these are the
      // values involved in control flow.
      func->walk([&](CondBranchOp condBr) {
        for (auto operand : condBr.getOperands()) {
          workList.push_back(operand);
        }
      });

      func->walk([&](BranchOp br) {
        for (auto operand : br.getOperands()) {
          workList.push_back(operand);
        }
      });

      DenseSet<Value> visitedValues;
      DenseSet<Operation *> visitedOps;

      // For a (to-be-detensored) value, check if it "escapes" the block by
      // being passed to terminator. If it does, then workList is updated with
      // the corresponding argument to the successor block.
      auto updateWorkListWithSuccessorArguments =
          [&](Value value, BranchOpInterface terminator) {
            if (!terminator)
              return;

            for (auto operandIdx :
                 llvm::seq<unsigned>(0, terminator->getOperands().size())) {
              Value operand = terminator->getOperand(operandIdx);

              if (operand == value) {
                auto succBlockArg =
                    terminator.getSuccessorBlockArgument(operandIdx);

                if (succBlockArg && !blockArgsToDetensor.count(*succBlockArg))
                  workList.push_back(*succBlockArg);
              }
            }
          };

      // Fixpoint loop: grow the detensorable component until no new values
      // are discovered.
      while (!workList.empty()) {
        Value currentItem = workList.pop_back_val();

        if (!visitedValues.insert(currentItem).second)
          continue;

        // 1   - Look forward:
        // 1.1 - If currentItem escapes to one or more successors, add
        // the corresponding successor arguments to workList.
        updateWorkListWithSuccessorArguments(
            currentItem, dyn_cast<BranchOpInterface>(
                             currentItem.getParentBlock()->getTerminator()));

        // 1.2 - For each user of currentItem, add the defined values to
        // workList. This way, the user ops can be inspected later if they are
        // detensorable and if so, their operands will be added to workList to
        // potentially discover other parts of the detensorable component.
        for (auto *user : currentItem.getUsers())
          for (Value result : user->getResults())
            workList.push_back(result);

        // 2   - Look backward:
        // 2.1 - The current item is defined by a block argument. If the owner
        // block is a non-entry one, then:
        //       * Add the argument to blockArgsToDetensor.
        //       * Walk the use-def chain backwards to add each predecessor's
        //       terminator-operands corresponding to currentItem to workList.
        if (currentItem.dyn_cast<BlockArgument>()) {
          BlockArgument currentItemBlockArgument =
              currentItem.cast<BlockArgument>();
          Block *ownerBlock = currentItemBlockArgument.getOwner();

          // Function arguments are not detensored/converted.
          if (&*ownerBlock->getParent()->begin() == ownerBlock)
            continue;

          // This inner-block argument is involved in control-flow, it should be
          // detensored.
          blockArgsToDetensor.insert(currentItemBlockArgument);

          for (PredecessorIterator pred = ownerBlock->pred_begin();
               pred != ownerBlock->pred_end(); ++pred) {
            BranchOpInterface predTerminator =
                dyn_cast<BranchOpInterface>((*pred)->getTerminator());

            // TODO: For now, we give up if any of the control-flow components
            // in a function is not detensorable. Fix that.
            if (!predTerminator) {
              opsToDetensor.clear();
              blockArgsToDetensor.clear();
              return;
            }

            auto ownerBlockOperands =
                predTerminator.getSuccessorOperands(pred.getSuccessorIndex());

            if (!ownerBlockOperands || ownerBlockOperands->empty())
              continue;

            // For each predecessor, add the value it passes to that argument to
            // workList to find out how it's computed.
            workList.push_back(
                ownerBlockOperands
                    .getValue()[currentItemBlockArgument.getArgNumber()]);
          }

          continue;
        }

        Operation *currentItemDefiningOp = currentItem.getDefiningOp();

        // Don't inspect the same defining op twice.
        if (!visitedOps.insert(currentItemDefiningOp).second)
          continue;

        // 2.2 - The current item is computed by a GenericOp. If the op should
        // be detensored, then:
        //       * Add it to opsToDetensor.
        //       * Add its operands to workList to discover other parts of the
        //       potentially detensorable component.
        if (auto genericOp = dyn_cast<GenericOp>(currentItemDefiningOp)) {
          // The op was encountered already, no need to inspect it again.
          if (opsToDetensor.count(genericOp))
            continue;

          // The op should not be detensored, give up on it but continue with
          // discovering the rest of the control-flow component.
          if (!shouldBeDetensored(genericOp, typeConverter)) {
            continue;
          }

          opsToDetensor.insert(genericOp);

          for (Value genericOpOperand : genericOp.inputs())
            workList.push_back(genericOpOperand);

          continue;
        }

        // 2.3 - The current item is the result of a FromElementsOp, it will be
        // trivially detensored later as part of canonicalization patterns
        // applied at the end of detensoring.
        //
        // Note: No need to check whether the result type of this op is
        // detensorable since if it wasn't we wouldn't reach that point in the
        // work list.
        if (dyn_cast<tensor::FromElementsOp>(currentItemDefiningOp))
          continue;

        // 2.4 - The current item is the result of a scalar op, add all its
        // operands to the work list.
        if (llvm::all_of(
                currentItemDefiningOp->getResultTypes(),
                [&](Type resultType) { return resultType.isIntOrFloat(); }))
          for (Value scalarOpOperand : currentItemDefiningOp->getOperands())
            workList.push_back(scalarOpOperand);
      }

      // Since the cost model gives up on some ops (see the details of step 2.2
      // above), block arguments that correspond to the values produced by those
      // ops should not be detensored as well.

      DenseSet<BlockArgument> blockArgsToRemove;

      for (auto &blockArg : blockArgsToDetensor) {
        Block *block = blockArg.getParentBlock();

        // For the potentially detensorable block argument, find the
        // corresponding operands in predecessor blocks.
        for (PredecessorIterator pred = block->pred_begin();
             pred != block->pred_end(); ++pred) {
          // All predecessors' terminators are branch-like here: a non-branch
          // predecessor would have made the model give up above.
          BranchOpInterface terminator =
              dyn_cast<BranchOpInterface>((*pred)->getTerminator());
          auto blockOperands =
              terminator.getSuccessorOperands(pred.getSuccessorIndex());

          if (!blockOperands || blockOperands->empty())
            continue;

          Operation *definingOp =
              terminator
                  ->getOperand(blockOperands->getBeginOperandIndex() +
                               blockArg.getArgNumber())
                  .getDefiningOp();

          // If the operand is defined by a GenericOp that will not be
          // detensored, then do not detensor the corresponding block argument.
          if (dyn_cast_or_null<GenericOp>(definingOp) &&
              opsToDetensor.count(definingOp) == 0) {
            blockArgsToRemove.insert(blockArg);
            break;
          }
        }
      }

      for (auto &blockArg : blockArgsToRemove) {
        blockArgsToDetensor.erase(blockArg);
      }
    }
  };

  /// Detensorize everything that can be detensored.
  class AggressiveDetensoringModel : public CostModel {
  public:
    void compute(Operation *func, DetensorizeTypeConverter typeConverter,
                 DenseSet<Operation *> &opsToDetensor,
                 DenseSet<BlockArgument> &blockArgsToDetensor) override {
      // Every detensorable generic op is selected...
      func->walk([&](GenericOp genericOp) {
        if (shouldBeDetensored(genericOp, typeConverter))
          opsToDetensor.insert(genericOp);
      });

      // ...along with every non-entry block argument (entry-block arguments,
      // i.e. the function signature, are never converted).
      for (Block &block :
           llvm::drop_begin(function_like_impl::getFunctionBody(func), 1))
        for (BlockArgument blockArgument : block.getArguments())
          blockArgsToDetensor.insert(blockArgument);
    }
  };

  void runOnOperation() override {
    assert(getOperation()->hasTrait<OpTrait::FunctionLike>() &&
           "DetensorizePass can only be run on FunctionLike operations");
    MLIRContext *context = &getContext();
    DetensorizeTypeConverter typeConverter;
    RewritePatternSet patterns(context);
    ConversionTarget target(*context);
    DenseSet<Operation *> opsToDetensor;
    DenseMap<Operation *, DenseSet<int>> detensorableBranchOps;
    DenseSet<BlockArgument> blockArgsToDetensor;

    // Phase 1: let the selected cost model decide what to detensor.
    if (aggressiveMode.getValue()) {
      AggressiveDetensoringModel costModel;
      costModel.compute(getOperation(), typeConverter, opsToDetensor,
                        blockArgsToDetensor);

    } else {
      ControlFlowDetectionModel costModel;
      costModel.compute(getOperation(), typeConverter, opsToDetensor,
                        blockArgsToDetensor);
    }

    detensorableBranchOps =
        CostModel::computeBranchOpDetensoring(blockArgsToDetensor);

    // Phase 2: configure the conversion target. A generic op is illegal iff
    // the cost model selected it.
    target.addDynamicallyLegalOp<GenericOp>(
        [&](GenericOp op) { return !opsToDetensor.count(op); });

    target.markUnknownOpDynamicallyLegal([&](Operation *op) {
      // A function is legal if all of its non-entry blocks are legal. We
      // don't legalize the entry block (i.e. the function's signature)
      // since detensoring can't happen along external calling convention
      // boundaries, which we conservatively approximate as all function
      // signatures.
      if (op->hasTrait<OpTrait::FunctionLike>()) {
        auto &body = function_like_impl::getFunctionBody(op);
        return llvm::all_of(llvm::drop_begin(body, 1), [&](Block &block) {
          return !llvm::any_of(
              blockArgsToDetensor, [&](BlockArgument blockArgument) {
                return blockArgument.getOwner() == &block &&
                       !typeConverter.isLegal(blockArgument.getType());
              });
        });
      }

      // Ops that are neither branches nor return-like (or whose return-like
      // conversion is already satisfied) don't need rewriting.
      if (isNotBranchOpInterfaceOrReturnLikeOp(op) ||
          isLegalForReturnOpTypeConversionPattern(op, typeConverter,
                                                  /*returnOpAlwaysLegal*/ true))
        return true;

      // A branch op is legal once none of its detensorable operands still
      // carries an illegal (tensor) type.
      if (auto branchOp = dyn_cast<BranchOpInterface>(op)) {
        if (!detensorableBranchOps.count(branchOp))
          return true;

        for (auto operandIdx : detensorableBranchOps[branchOp])
          if (!typeConverter.isLegal(
                  branchOp->getOperand(operandIdx).getType()))
            return false;

        return true;
      }

      return false;
    });

    // Phase 3: populate the conversion patterns and run the full conversion.
    patterns.insert<DetensorizeGenericOp>(typeConverter, context);
    patterns.insert<FunctionNonEntryBlockConversion>(context, typeConverter,
                                                     blockArgsToDetensor);
    // Since non-entry block arguments get detensorized, we also need to
    // update the control flow inside the function to reflect the correct
    // types.
    auto shouldConvertBranchOperand = [&](BranchOpInterface branchOp,
                                          int operandIdx) -> bool {
      return detensorableBranchOps.count(branchOp) &&
             detensorableBranchOps[branchOp].count(operandIdx);
    };

    populateBranchOpInterfaceTypeConversionPattern(patterns, typeConverter,
                                                   shouldConvertBranchOperand);

    if (failed(
            applyFullConversion(getOperation(), target, std::move(patterns))))
      signalPassFailure();

    // Phase 4: clean up the from_elements/collapse_shape/extract round-trips
    // that materializations introduced.
    RewritePatternSet canonPatterns(context);
    canonPatterns.add<ExtractFromReshapeFromElements>(context);
    if (failed(applyPatternsAndFoldGreedily(getOperation(),
                                            std::move(canonPatterns))))
      signalPassFailure();
  }
};
600 } // namespace
601 
602 std::unique_ptr<Pass> mlir::createLinalgDetensorizePass() {
603   return std::make_unique<LinalgDetensorize>();
604 }
605