1 //===- LowerMatrixIntrinsics.cpp -  Lower matrix intrinsics -----*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Lower matrix intrinsics to vector operations.
10 //
11 // TODO:
12 //  * Improve fusion:
13 //   * Support more cases, e.g. multiply-add, multiply-sub, operands/results
14 //     transposed.
//   * Improve cost-modeling, e.g. choose a different number of rows/columns
//     for tiles, consider the cost of copies on alias.
17 //
18 //===----------------------------------------------------------------------===//
19 
20 #include "llvm/Transforms/Scalar/LowerMatrixIntrinsics.h"
21 #include "llvm/ADT/GraphTraits.h"
22 #include "llvm/ADT/PostOrderIterator.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/Analysis/AliasAnalysis.h"
25 #include "llvm/Analysis/DomTreeUpdater.h"
26 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
27 #include "llvm/Analysis/TargetTransformInfo.h"
28 #include "llvm/Analysis/ValueTracking.h"
29 #include "llvm/Analysis/VectorUtils.h"
30 #include "llvm/IR/CFG.h"
31 #include "llvm/IR/DataLayout.h"
32 #include "llvm/IR/DebugInfoMetadata.h"
33 #include "llvm/IR/Function.h"
34 #include "llvm/IR/IRBuilder.h"
35 #include "llvm/IR/Instructions.h"
36 #include "llvm/IR/IntrinsicInst.h"
37 #include "llvm/IR/PatternMatch.h"
38 #include "llvm/InitializePasses.h"
39 #include "llvm/Pass.h"
40 #include "llvm/Support/Debug.h"
41 #include "llvm/Transforms/Scalar.h"
42 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
43 
44 using namespace llvm;
45 using namespace PatternMatch;
46 
47 #define DEBUG_TYPE "lower-matrix-intrinsics"
48 
49 static cl::opt<bool> EnableShapePropagation(
50     "matrix-propagate-shape", cl::init(true), cl::Hidden,
51     cl::desc("Enable/disable shape propagation from matrix intrinsics to other "
52              "instructions."));
53 
54 static cl::opt<bool>
55     FuseMatrix("fuse-matrix", cl::init(true), cl::Hidden,
56                cl::desc("Enable/disable fusing matrix instructions."));
57 // TODO: Allow and use non-square tiles.
58 static cl::opt<unsigned> TileSize(
59     "fuse-matrix-tile-size", cl::init(4), cl::Hidden,
60     cl::desc(
61         "Tile size for matrix instruction fusion using square-shaped tiles."));
62 static cl::opt<bool> ForceFusion(
63     "force-fuse-matrix", cl::init(false), cl::Hidden,
64     cl::desc("Force matrix instruction fusion even if not profitable."));
65 static cl::opt<bool> AllowContractEnabled(
66     "matrix-allow-contract", cl::init(false), cl::Hidden,
    cl::desc("Allow the use of FMAs if available and profitable. This may "
             "change the results due to reduced rounding error."));
69 
70 enum class MatrixLayoutTy { ColumnMajor, RowMajor };
71 
72 static cl::opt<MatrixLayoutTy> MatrixLayout(
73     "matrix-default-layout", cl::init(MatrixLayoutTy::ColumnMajor),
74     cl::desc("Sets the default matrix layout"),
75     cl::values(clEnumValN(MatrixLayoutTy::ColumnMajor, "column-major",
76                           "Use column-major layout"),
77                clEnumValN(MatrixLayoutTy::RowMajor, "row-major",
78                           "Use row-major layout")));
79 
/// Helper function to either return \p Scope, if it is a subprogram, or the
/// subprogram attached to the local scope otherwise.
82 static DISubprogram *getSubprogram(DIScope *Scope) {
83   if (auto *Subprogram = dyn_cast<DISubprogram>(Scope))
84     return Subprogram;
85   return cast<DILocalScope>(Scope)->getSubprogram();
86 }
87 
88 namespace {
89 
// Given an element pointer \p BasePtr to the start of a (sub) matrix, compute
// the start address of vector \p VecIdx with type (\p EltType x \p NumElements),
// assuming \p Stride elements between the starts of two consecutive vectors.
// \p Stride must be >= \p NumElements.
// For column-major matrices, the function computes the address of a column
// vector and \p NumElements must be set to the number of elements in a column
// (= number of rows of the matrix). For row-major matrices, the function
// computes the address of a row vector and \p NumElements must be set to the
// number of elements in a row (= number of columns of the matrix).
//
// Consider a 4x4 matrix in column-major layout like below:
101 //
102 //      0       1      2      3
103 // 0   v_0_0  v_0_1  v_0_2  v_0_3
104 // 1   v_1_0  v_1_1  v_1_2  v_1_3
105 // 2   v_2_0  v_2_1  v_2_2  v_2_3
106 // 3   v_3_0  v_3_1  v_3_2  v_3_3
107 
108 // To compute the column addresses for a 2x3 sub-matrix at row 1 and column 1,
109 // we need a pointer to the first element of the submatrix as base pointer.
110 // Then we can use computeVectorAddr to compute the addresses for the columns
111 // of the sub-matrix.
112 //
113 // Column 0: computeVectorAddr(Base, 0 (column), 4 (stride), 2 (num rows), ..)
114 //           -> just returns Base
115 // Column 1: computeVectorAddr(Base, 1 (column), 4 (stride), 2 (num rows), ..)
116 //           -> returns Base + (1 * 4)
117 // Column 2: computeVectorAddr(Base, 2 (column), 4 (stride), 2 (num rows), ..)
118 //           -> returns Base + (2 * 4)
119 //
// The graphic below illustrates the number of elements in a column (marked
// with |) and the number of skipped elements (marked with {).
122 //
123 //         v_0_0  v_0_1 {v_0_2 {v_0_3
124 //                Base   Col 1  Col 2
125 //                  |     |      |
126 //         v_1_0 |v_1_1 |v_1_2 |v_1_3
127 //         v_2_0 |v_2_1 |v_2_2 |v_2_3
128 //         v_3_0 {v_3_1 {v_3_2  v_3_3
129 //
130 Value *computeVectorAddr(Value *BasePtr, Value *VecIdx, Value *Stride,
131                          unsigned NumElements, Type *EltType,
132                          IRBuilder<> &Builder) {
133 
134   assert((!isa<ConstantInt>(Stride) ||
135           cast<ConstantInt>(Stride)->getZExtValue() >= NumElements) &&
136          "Stride must be >= the number of elements in the result vector.");
137   unsigned AS = cast<PointerType>(BasePtr->getType())->getAddressSpace();
138 
139   // Compute the start of the vector with index VecIdx as VecIdx * Stride.
140   Value *VecStart = Builder.CreateMul(VecIdx, Stride, "vec.start");
141 
142   // Get pointer to the start of the selected vector. Skip GEP creation,
143   // if we select vector 0.
144   if (isa<ConstantInt>(VecStart) && cast<ConstantInt>(VecStart)->isZero())
145     VecStart = BasePtr;
146   else
147     VecStart = Builder.CreateGEP(EltType, BasePtr, VecStart, "vec.gep");
148 
149   // Cast elementwise vector start pointer to a pointer to a vector
150   // (EltType x NumElements)*.
151   Type *VecType = VectorType::get(EltType, NumElements);
152   Type *VecPtrType = PointerType::get(VecType, AS);
153   return Builder.CreatePointerCast(VecStart, VecPtrType, "vec.cast");
154 }
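
// For illustration only, a rough sketch of the IR the helper above emits for
// the "Column 1" call in the example (value names are made up; with constant
// operands the multiply is folded away by the IRBuilder):
//
//   %vec.start = mul i32 1, 4
//   %vec.gep   = getelementptr double, double* %base, i32 %vec.start
//   %vec.cast  = bitcast double* %vec.gep to <2 x double>*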
155 
156 /// LowerMatrixIntrinsics contains the methods used to lower matrix intrinsics.
157 ///
158 /// Currently, the lowering for each matrix intrinsic is done as follows:
159 /// 1. Propagate the shape information from intrinsics to connected
160 /// instructions.
161 /// 2. Lower instructions with shape information (assuming column-major layout).
162 ///  The lowering works similarly using row-major layout.
163 ///  2.1. Get column vectors for each argument. If we already lowered the
164 ///       definition of an argument, use the produced column vectors directly.
165 ///       If not, split the operand vector containing an embedded matrix into
///       a set of column vectors.
///  2.2. Lower the instruction in terms of columnwise operations, which yields
///       a set of column vectors containing the result matrix. Note that we
///       lower all instructions that have shape information. Besides the
///       intrinsics, this includes stores, for example.
171 ///  2.3. Update uses of the lowered instruction. If we have shape information
172 ///       for a user, there is nothing to do, as we will look up the result
173 ///       column matrix when lowering the user. For other uses, we embed the
174 ///       result matrix in a flat vector and update the use.
///  2.4. Cache the result column matrix for the instruction we lowered.
176 /// 3. After we lowered all instructions in a function, remove the now
177 ///    obsolete instructions.
178 ///
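/// For illustration only, a schematic example (intrinsic mangling and value
/// names elided/made up):
///
///   %c = call <4 x double> @llvm.matrix.multiply(<4 x double> %a,
///                                                <4 x double> %b,
///                                                i32 2, i32 2, i32 2)
///   %d = fadd <4 x double> %c, %e
///
/// is conceptually lowered by splitting %a, %b and %e into two <2 x double>
/// column vectors each, emitting vectorized multiply/add sequences per result
/// column, and only embedding the columns back into a flat <4 x double> if a
/// user without shape information remains.
///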
179 class LowerMatrixIntrinsics {
180   Function &Func;
181   const DataLayout &DL;
182   const TargetTransformInfo &TTI;
183   AliasAnalysis &AA;
184   DominatorTree &DT;
185   LoopInfo &LI;
186   OptimizationRemarkEmitter &ORE;
187 
  /// Contains estimates of the number of operations (loads, stores, compute)
  /// required to lower a matrix operation.
189   struct OpInfoTy {
190     /// Number of stores emitted to generate this matrix.
191     unsigned NumStores = 0;
192     /// Number of loads emitted to generate this matrix.
193     unsigned NumLoads = 0;
194     /// Number of compute operations emitted to generate this matrix.
195     unsigned NumComputeOps = 0;
196 
197     OpInfoTy &operator+=(const OpInfoTy &RHS) {
198       NumStores += RHS.NumStores;
199       NumLoads += RHS.NumLoads;
200       NumComputeOps += RHS.NumComputeOps;
201       return *this;
202     }
203   };
204 
205   /// Wrapper class representing a matrix as a set of vectors, either in row or
206   /// column major layout. All vectors must have the same vector type.
207   class MatrixTy {
208     SmallVector<Value *, 16> Vectors;
209 
210     OpInfoTy OpInfo;
211 
212     bool IsColumnMajor = true;
213 
214   public:
215     MatrixTy()
216         : Vectors(),
217           IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {}
218     MatrixTy(ArrayRef<Value *> Vectors)
219         : Vectors(Vectors.begin(), Vectors.end()),
220           IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {}
221     MatrixTy(unsigned NumRows, unsigned NumColumns, Type *EltTy)
222         : IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {
223 
224       unsigned D = isColumnMajor() ? NumColumns : NumRows;
225       for (unsigned J = 0; J < D; ++J)
226         addVector(UndefValue::get(
227             VectorType::get(EltTy, isColumnMajor() ? NumRows : NumColumns)));
228     }
229 
230     Value *getVector(unsigned i) const { return Vectors[i]; }
231     Value *getColumn(unsigned i) const {
232       assert(isColumnMajor() && "only supported for column-major matrixes");
233       return Vectors[i];
234     }
235     Value *getRow(unsigned i) const {
236       assert(!isColumnMajor() && "only supported for row-major matrixes");
237       return Vectors[i];
238     }
239 
240     void setVector(unsigned i, Value *V) { Vectors[i] = V; }
241 
242     Type *getElementType() { return getVectorTy()->getElementType(); }
243 
244     unsigned getNumVectors() const {
245       if (isColumnMajor())
246         return getNumColumns();
247       return getNumRows();
248     }
249 
250     unsigned getNumColumns() const {
251       if (isColumnMajor())
252         return Vectors.size();
253       else {
        assert(Vectors.size() > 0 &&
               "Cannot call getNumColumns without vectors");
255         return cast<VectorType>(Vectors[0]->getType())->getNumElements();
256       }
257     }
258     unsigned getNumRows() const {
259       if (isColumnMajor()) {
260         assert(Vectors.size() > 0 && "Cannot call getNumRows without columns");
261         return cast<VectorType>(Vectors[0]->getType())->getNumElements();
262       } else
263         return Vectors.size();
264     }
265 
266     void addVector(Value *V) { Vectors.push_back(V); }
267     VectorType *getColumnTy() {
268       assert(isColumnMajor() && "only supported for column-major matrixes");
269       return getVectorTy();
270     }
271 
272     VectorType *getVectorTy() {
273       return cast<VectorType>(Vectors[0]->getType());
274     }
275 
    iterator_range<SmallVector<Value *, 16>::iterator> columns() {
277       assert(isColumnMajor() &&
278              "columns() only supported for column-major matrixes");
279       return make_range(Vectors.begin(), Vectors.end());
280     }
281 
    iterator_range<SmallVector<Value *, 16>::iterator> vectors() {
283       return make_range(Vectors.begin(), Vectors.end());
284     }
285 
286     /// Embed the vectors of the matrix into a flat vector by concatenating
287     /// them.
288     Value *embedInVector(IRBuilder<> &Builder) const {
289       return Vectors.size() == 1 ? Vectors[0]
290                                  : concatenateVectors(Builder, Vectors);
291     }
292 
293     MatrixTy &addNumLoads(unsigned N) {
294       OpInfo.NumLoads += N;
295       return *this;
296     }
297 
298     void setNumLoads(unsigned N) { OpInfo.NumLoads = N; }
299 
300     MatrixTy &addNumStores(unsigned N) {
301       OpInfo.NumStores += N;
302       return *this;
303     }
304 
305     MatrixTy &addNumComputeOps(unsigned N) {
306       OpInfo.NumComputeOps += N;
307       return *this;
308     }
309 
310     unsigned getNumStores() const { return OpInfo.NumStores; }
311     unsigned getNumLoads() const { return OpInfo.NumLoads; }
312     unsigned getNumComputeOps() const { return OpInfo.NumComputeOps; }
313 
314     const OpInfoTy &getOpInfo() const { return OpInfo; }
315 
316     bool isColumnMajor() const { return IsColumnMajor; }
317 
318     unsigned getStride() const {
319       if (isColumnMajor())
320         return getNumRows();
321       return getNumColumns();
322     }
323 
324     /// Extract a vector of \p NumElts starting at index (\p I, \p J). If the
325     /// matrix is column-major, the result vector is extracted from a column
326     /// vector, otherwise from a row vector.
327     Value *extractVector(unsigned I, unsigned J, unsigned NumElts,
328                          IRBuilder<> &Builder) const {
329       Value *Vec = isColumnMajor() ? getColumn(J) : getRow(I);
330       Value *Undef = UndefValue::get(Vec->getType());
331       return Builder.CreateShuffleVector(
332           Vec, Undef, createSequentialMask(isColumnMajor() ? I : J, NumElts, 0),
333           "block");
334     }
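
    // For illustration only (hypothetical values): on a column-major 4x3
    // matrix, extractVector(2, 1, 2, Builder) takes column 1 and extracts its
    // elements 2 and 3 via a shufflevector with mask <2, 3>, yielding a
    // 2-element block vector.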
335   };
336 
337   struct ShapeInfo {
338     unsigned NumRows;
339     unsigned NumColumns;
340 
341     bool IsColumnMajor;
342 
343     ShapeInfo(unsigned NumRows = 0, unsigned NumColumns = 0)
344         : NumRows(NumRows), NumColumns(NumColumns),
345           IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {}
346 
347     ShapeInfo(Value *NumRows, Value *NumColumns)
348         : ShapeInfo(cast<ConstantInt>(NumRows)->getZExtValue(),
349                     cast<ConstantInt>(NumColumns)->getZExtValue()) {}
350 
351     bool operator==(const ShapeInfo &other) {
352       return NumRows == other.NumRows && NumColumns == other.NumColumns;
353     }
354     bool operator!=(const ShapeInfo &other) { return !(*this == other); }
355 
356     /// Returns true if shape-information is defined, meaning both dimensions
357     /// are != 0.
358     operator bool() const {
359       assert(NumRows == 0 || NumColumns != 0);
360       return NumRows != 0;
361     }
362 
363     unsigned getStride() const {
364       if (IsColumnMajor)
365         return NumRows;
366       return NumColumns;
367     }
368 
369     unsigned getNumVectors() const {
370       if (IsColumnMajor)
371         return NumColumns;
372       return NumRows;
373     }
374   };
375 
  /// Maps instructions to their shape information. The shape information
  /// describes the shape to be used while lowering. This matches the shape of
  /// the result value of the instruction, with the only exceptions being store
  /// instructions and the matrix_columnwise_store intrinsic. For those, the
  /// shape information describes the value being stored and indicates that
  /// they should be lowered using shape information as well.
382   DenseMap<Value *, ShapeInfo> ShapeMap;
383 
  /// List of instructions to remove. While lowering, we do not replace all
  /// users of a lowered instruction if shape information is available; those
  /// lowered instructions need to be removed after we have finished lowering.
387   SmallVector<Instruction *, 16> ToRemove;
388 
389   /// Map from instructions to their produced column matrix.
390   MapVector<Value *, MatrixTy> Inst2ColumnMatrix;
391 
392 public:
393   LowerMatrixIntrinsics(Function &F, TargetTransformInfo &TTI,
394                         AliasAnalysis &AA, DominatorTree &DT, LoopInfo &LI,
395                         OptimizationRemarkEmitter &ORE)
396       : Func(F), DL(F.getParent()->getDataLayout()), TTI(TTI), AA(AA), DT(DT),
397         LI(LI), ORE(ORE) {}
398 
  /// Return the estimated number of vector ops required for an operation on
  /// the vector type \p VT.
  unsigned getNumOps(Type *VT) {
    assert(isa<VectorType>(VT) && "Expected vector type");
    return getNumOps(VT->getScalarType(),
                     cast<VectorType>(VT)->getNumElements());
  }

  /// Return the estimated number of vector ops required for an operation on
  /// \p N elements of scalar type \p ST.
  unsigned getNumOps(Type *ST, unsigned N) {
409     return std::ceil((ST->getPrimitiveSizeInBits() * N).getFixedSize() /
410                      double(TTI.getRegisterBitWidth(true)));
411   }
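
  // For illustration only, assuming a hypothetical target with 256-bit vector
  // registers: getNumOps(double, 4) = ceil(4 * 64 / 256.0) = 1 vector op,
  // while getNumOps(double, 8) = ceil(8 * 64 / 256.0) = 2 vector ops.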
412 
413   /// Return the set of vectors that a matrix value is lowered to.
414   ///
  /// If we lowered \p MatrixVal, just return the cached result matrix.
  /// Otherwise split the flat vector \p MatrixVal containing a matrix with
  /// shape \p SI into vectors.
418   MatrixTy getMatrix(Value *MatrixVal, const ShapeInfo &SI,
419                      IRBuilder<> &Builder) {
420     VectorType *VType = dyn_cast<VectorType>(MatrixVal->getType());
421     assert(VType && "MatrixVal must be a vector type");
422     assert(VType->getNumElements() == SI.NumRows * SI.NumColumns &&
423            "The vector size must match the number of matrix elements");
424 
425     // Check if we lowered MatrixVal using shape information. In that case,
426     // return the existing matrix, if it matches the requested shape
    // information. If there is a mismatch, embed the result in a flat
428     // vector and split it later.
429     auto Found = Inst2ColumnMatrix.find(MatrixVal);
430     if (Found != Inst2ColumnMatrix.end()) {
431       MatrixTy &M = Found->second;
432       // Return the found matrix, if its shape matches the requested shape
433       // information
434       if (SI.NumRows == M.getNumRows() && SI.NumColumns == M.getNumColumns())
435         return M;
436 
437       MatrixVal = M.embedInVector(Builder);
438     }
439 
440     // Otherwise split MatrixVal.
441     SmallVector<Value *, 16> SplitVecs;
442     Value *Undef = UndefValue::get(VType);
443     for (unsigned MaskStart = 0; MaskStart < VType->getNumElements();
444          MaskStart += SI.getStride()) {
445       Value *V = Builder.CreateShuffleVector(
446           MatrixVal, Undef, createSequentialMask(MaskStart, SI.getStride(), 0),
447           "split");
448       SplitVecs.push_back(V);
449     }
450 
451     return {SplitVecs};
452   }
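
  // For illustration only (value names made up): splitting a flat
  // <8 x double> %flat that holds a column-major 4x2 matrix produces one
  // shuffle per column:
  //
  //   %split  = shufflevector <8 x double> %flat, <8 x double> undef,
  //                           <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  //   %split1 = shufflevector <8 x double> %flat, <8 x double> undef,
  //                           <4 x i32> <i32 4, i32 5, i32 6, i32 7>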
453 
454   /// If \p V already has a known shape return false.  Otherwise set the shape
455   /// for instructions that support it.
456   bool setShapeInfo(Value *V, ShapeInfo Shape) {
457     assert(Shape && "Shape not set");
458     if (isa<UndefValue>(V) || !supportsShapeInfo(V))
459       return false;
460 
461     auto SIter = ShapeMap.find(V);
462     if (SIter != ShapeMap.end()) {
463       LLVM_DEBUG(dbgs() << "  not overriding existing shape: "
464                         << SIter->second.NumRows << " "
465                         << SIter->second.NumColumns << " for " << *V << "\n");
466       return false;
467     }
468 
469     ShapeMap.insert({V, Shape});
470     LLVM_DEBUG(dbgs() << "  " << Shape.NumRows << " x " << Shape.NumColumns
471                       << " for " << *V << "\n");
472     return true;
473   }
474 
475   bool isUniformShape(Value *V) {
476     Instruction *I = dyn_cast<Instruction>(V);
477     if (!I)
478       return true;
479 
480     switch (I->getOpcode()) {
481     case Instruction::FAdd:
482     case Instruction::FSub:
483     case Instruction::FMul: // Scalar multiply.
484     case Instruction::Add:
485     case Instruction::Mul:
486     case Instruction::Sub:
487       return true;
488     default:
489       return false;
490     }
491   }
492 
493   /// Returns true if shape information can be used for \p V. The supported
494   /// instructions must match the instructions that can be lowered by this pass.
495   bool supportsShapeInfo(Value *V) {
496     Instruction *Inst = dyn_cast<Instruction>(V);
497     if (!Inst)
498       return false;
499 
500     IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
501     if (II)
502       switch (II->getIntrinsicID()) {
503       case Intrinsic::matrix_multiply:
504       case Intrinsic::matrix_transpose:
505       case Intrinsic::matrix_columnwise_load:
506       case Intrinsic::matrix_columnwise_store:
507         return true;
508       default:
509         return false;
510       }
511     return isUniformShape(V) || isa<StoreInst>(V) || isa<LoadInst>(V);
512   }
513 
514   /// Propagate the shape information of instructions to their users.
515   /// The work list contains instructions for which we can compute the shape,
516   /// either based on the information provided by matrix intrinsics or known
517   /// shapes of operands.
518   SmallVector<Instruction *, 32>
519   propagateShapeForward(SmallVectorImpl<Instruction *> &WorkList) {
520     SmallVector<Instruction *, 32> NewWorkList;
    // Pop an element for which we are guaranteed to know at least one of the
    // operand shapes.  Add the shape for this element and then add its users
    // to the work list.
524     LLVM_DEBUG(dbgs() << "Forward-propagate shapes:\n");
525     while (!WorkList.empty()) {
526       Instruction *Inst = WorkList.back();
527       WorkList.pop_back();
528 
529       // New entry, set the value and insert operands
530       bool Propagate = false;
531 
532       Value *MatrixA;
533       Value *MatrixB;
534       Value *M;
535       Value *N;
536       Value *K;
537       if (match(Inst, m_Intrinsic<Intrinsic::matrix_multiply>(
538                           m_Value(MatrixA), m_Value(MatrixB), m_Value(M),
539                           m_Value(N), m_Value(K)))) {
540         Propagate = setShapeInfo(Inst, {M, K});
541       } else if (match(Inst, m_Intrinsic<Intrinsic::matrix_transpose>(
542                                  m_Value(MatrixA), m_Value(M), m_Value(N)))) {
543         // Flip dimensions.
544         Propagate = setShapeInfo(Inst, {N, M});
545       } else if (match(Inst, m_Intrinsic<Intrinsic::matrix_columnwise_store>(
546                                  m_Value(MatrixA), m_Value(), m_Value(),
547                                  m_Value(M), m_Value(N)))) {
548         Propagate = setShapeInfo(Inst, {N, M});
549       } else if (match(Inst,
550                        m_Intrinsic<Intrinsic::matrix_columnwise_load>(
551                            m_Value(), m_Value(), m_Value(M), m_Value(N)))) {
552         Propagate = setShapeInfo(Inst, {M, N});
553       } else if (match(Inst, m_Store(m_Value(MatrixA), m_Value()))) {
554         auto OpShape = ShapeMap.find(MatrixA);
555         if (OpShape != ShapeMap.end())
556           setShapeInfo(Inst, OpShape->second);
557         continue;
558       } else if (isUniformShape(Inst)) {
559         // Find the first operand that has a known shape and use that.
560         for (auto &Op : Inst->operands()) {
561           auto OpShape = ShapeMap.find(Op.get());
562           if (OpShape != ShapeMap.end()) {
563             Propagate |= setShapeInfo(Inst, OpShape->second);
564             break;
565           }
566         }
567       }
568 
569       if (Propagate) {
570         NewWorkList.push_back(Inst);
571         for (auto *User : Inst->users())
572           if (ShapeMap.count(User) == 0)
573             WorkList.push_back(cast<Instruction>(User));
574       }
575     }
576 
577     return NewWorkList;
578   }
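
  // For illustration only (schematic IR, intrinsic mangling elided): given
  //
  //   %c = call <4 x double> @llvm.matrix.multiply(..., i32 2, i32 2, i32 2)
  //   %d = fadd <4 x double> %c, %e
  //
  // the multiply seeds the worklist and gets shape 2x2. Forward propagation
  // then visits the fadd, which has uniform shape, and records 2x2 for %d as
  // well.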
579 
  /// Propagate the shape to operands of instructions with shape information.
  /// \p WorkList contains the instructions for which we already know the
  /// shape.
582   SmallVector<Instruction *, 32>
583   propagateShapeBackward(SmallVectorImpl<Instruction *> &WorkList) {
584     SmallVector<Instruction *, 32> NewWorkList;
585 
586     auto pushInstruction = [](Value *V,
587                               SmallVectorImpl<Instruction *> &WorkList) {
588       Instruction *I = dyn_cast<Instruction>(V);
589       if (I)
590         WorkList.push_back(I);
591     };
    // Pop an element with known shape.  Traverse the operands; if an operand's
    // shape derives from the result shape and is not known yet, set it and add
    // the operand to the worklist.
595     LLVM_DEBUG(dbgs() << "Backward-propagate shapes:\n");
596     while (!WorkList.empty()) {
597       Value *V = WorkList.back();
598       WorkList.pop_back();
599 
600       size_t BeforeProcessingV = WorkList.size();
601       if (!isa<Instruction>(V))
602         continue;
603 
604       Value *MatrixA;
605       Value *MatrixB;
606       Value *M;
607       Value *N;
608       Value *K;
609       if (match(V, m_Intrinsic<Intrinsic::matrix_multiply>(
610                        m_Value(MatrixA), m_Value(MatrixB), m_Value(M),
611                        m_Value(N), m_Value(K)))) {
612         if (setShapeInfo(MatrixA, {M, N}))
613           pushInstruction(MatrixA, WorkList);
614 
615         if (setShapeInfo(MatrixB, {N, K}))
616           pushInstruction(MatrixB, WorkList);
617 
618       } else if (match(V, m_Intrinsic<Intrinsic::matrix_transpose>(
619                               m_Value(MatrixA), m_Value(M), m_Value(N)))) {
620         // Flip dimensions.
621         if (setShapeInfo(MatrixA, {M, N}))
622           pushInstruction(MatrixA, WorkList);
623       } else if (match(V, m_Intrinsic<Intrinsic::matrix_columnwise_store>(
624                               m_Value(MatrixA), m_Value(), m_Value(),
625                               m_Value(M), m_Value(N)))) {
626         if (setShapeInfo(MatrixA, {M, N})) {
627           pushInstruction(MatrixA, WorkList);
628         }
629       } else if (isa<LoadInst>(V) ||
630                  match(V, m_Intrinsic<Intrinsic::matrix_columnwise_load>())) {
631         // Nothing to do, no matrix input.
632       } else if (isa<StoreInst>(V)) {
633         // Nothing to do.  We forward-propagated to this so we would just
634         // backward propagate to an instruction with an already known shape.
635       } else if (isUniformShape(V)) {
636         // Propagate to all operands.
637         ShapeInfo Shape = ShapeMap[V];
638         for (Use &U : cast<Instruction>(V)->operands()) {
639           if (setShapeInfo(U.get(), Shape))
640             pushInstruction(U.get(), WorkList);
641         }
642       }
643       // After we discovered new shape info for new instructions in the
644       // worklist, we use their users as seeds for the next round of forward
645       // propagation.
646       for (size_t I = BeforeProcessingV; I != WorkList.size(); I++)
647         for (User *U : WorkList[I]->users())
648           if (isa<Instruction>(U) && V != U)
649             NewWorkList.push_back(cast<Instruction>(U));
650     }
651     return NewWorkList;
652   }
653 
654   bool Visit() {
655     if (EnableShapePropagation) {
656       SmallVector<Instruction *, 32> WorkList;
657 
658       // Initially only the shape of matrix intrinsics is known.
659       // Initialize the work list with ops carrying shape information.
660       for (BasicBlock &BB : Func)
661         for (Instruction &Inst : BB) {
662           IntrinsicInst *II = dyn_cast<IntrinsicInst>(&Inst);
663           if (!II)
664             continue;
665 
666           switch (II->getIntrinsicID()) {
667           case Intrinsic::matrix_multiply:
668           case Intrinsic::matrix_transpose:
669           case Intrinsic::matrix_columnwise_load:
670           case Intrinsic::matrix_columnwise_store:
671             WorkList.push_back(&Inst);
672             break;
673           default:
674             break;
675           }
676         }
677       // Propagate shapes until nothing changes any longer.
678       while (!WorkList.empty()) {
679         WorkList = propagateShapeForward(WorkList);
680         WorkList = propagateShapeBackward(WorkList);
681       }
682     }
683 
684     bool Changed = false;
685     SmallVector<CallInst *, 16> MaybeFusableInsts;
686     SmallVector<Instruction *, 16> MatrixInsts;
687 
688     // First, collect all instructions with shape information and candidates for
689     // fusion (currently only matrix multiplies).
690     ReversePostOrderTraversal<Function *> RPOT(&Func);
691     for (auto *BB : RPOT)
692       for (Instruction &I : *BB) {
693         if (ShapeMap.find(&I) == ShapeMap.end())
694           continue;
695         if (match(&I, m_Intrinsic<Intrinsic::matrix_multiply>()))
696           MaybeFusableInsts.push_back(cast<CallInst>(&I));
697         MatrixInsts.push_back(&I);
698       }
699 
700     // Second, try to fuse candidates.
701     SmallPtrSet<Instruction *, 16> FusedInsts;
702     for (CallInst *CI : MaybeFusableInsts)
703       LowerMatrixMultiplyFused(CI, FusedInsts);
704     Changed = !FusedInsts.empty();
705 
706     // Third, lower remaining instructions with shape information.
707     for (Instruction *Inst : MatrixInsts) {
708       if (FusedInsts.find(Inst) != FusedInsts.end())
709         continue;
710 
711       IRBuilder<> Builder(Inst);
712 
713       if (CallInst *CInst = dyn_cast<CallInst>(Inst))
714         Changed |= VisitCallInst(CInst);
715 
716       Value *Op1;
717       Value *Op2;
718       if (auto *BinOp = dyn_cast<BinaryOperator>(Inst))
719         Changed |= VisitBinaryOperator(BinOp);
720       if (match(Inst, m_Load(m_Value(Op1))))
721         Changed |= VisitLoad(Inst, Op1, Builder);
722       else if (match(Inst, m_Store(m_Value(Op1), m_Value(Op2))))
723         Changed |= VisitStore(Inst, Op1, Op2, Builder);
724     }
725 
726     RemarkGenerator RemarkGen(Inst2ColumnMatrix, ORE, Func);
727     RemarkGen.emitRemarks();
728 
729     for (Instruction *Inst : reverse(ToRemove))
730       Inst->eraseFromParent();
731 
732     return Changed;
733   }
734 
735   LoadInst *createVectorLoad(Value *ColumnPtr, Type *EltType,
736                              IRBuilder<> &Builder) {
    return Builder.CreateAlignedLoad(ColumnPtr, DL.getABITypeAlign(EltType),
                                     "col.load");
739   }
740 
741   StoreInst *createVectorStore(Value *ColumnValue, Value *ColumnPtr,
742                                Type *EltType, IRBuilder<> &Builder) {
743     return Builder.CreateAlignedStore(ColumnValue, ColumnPtr,
744                                       DL.getABITypeAlign(EltType));
745   }
746 
747   /// Turns \p BasePtr into an elementwise pointer to \p EltType.
748   Value *createElementPtr(Value *BasePtr, Type *EltType, IRBuilder<> &Builder) {
749     unsigned AS = cast<PointerType>(BasePtr->getType())->getAddressSpace();
750     Type *EltPtrType = PointerType::get(EltType, AS);
751     return Builder.CreatePointerCast(BasePtr, EltPtrType);
752   }
753 
  /// Replace intrinsic calls, if \p Inst is a supported matrix intrinsic.
755   bool VisitCallInst(CallInst *Inst) {
756     if (!Inst->getCalledFunction() || !Inst->getCalledFunction()->isIntrinsic())
757       return false;
758 
759     switch (Inst->getCalledFunction()->getIntrinsicID()) {
760     case Intrinsic::matrix_multiply:
761       LowerMultiply(Inst);
762       break;
763     case Intrinsic::matrix_transpose:
764       LowerTranspose(Inst);
765       break;
766     case Intrinsic::matrix_columnwise_load:
767       LowerColumnwiseLoad(Inst);
768       break;
769     case Intrinsic::matrix_columnwise_store:
770       LowerColumnwiseStore(Inst);
771       break;
772     default:
773       return false;
774     }
775     return true;
776   }
777 
778   /// Load a matrix with \p Shape starting at \p Ptr and using \p Stride between
779   /// vectors.
780   MatrixTy loadMatrix(Type *Ty, Value *Ptr, Value *Stride, ShapeInfo Shape,
781                       IRBuilder<> &Builder) {
782     auto VType = cast<VectorType>(Ty);
783     Value *EltPtr = createElementPtr(Ptr, VType->getElementType(), Builder);
784     MatrixTy Result;
785     for (unsigned I = 0, E = Shape.getNumVectors(); I < E; ++I) {
786       Value *GEP = computeVectorAddr(EltPtr, Builder.getInt32(I), Stride,
787                                      Shape.getStride(), VType->getElementType(),
788                                      Builder);
789       Value *Vector = createVectorLoad(GEP, VType->getElementType(), Builder);
790       Result.addVector(Vector);
791     }
792     return Result.addNumLoads(getNumOps(Result.getVectorTy()) *
793                               Result.getNumVectors());
794   }
795 
  /// Loads a sub-matrix with shape \p ResultShape from the matrix at
  /// \p MatrixPtr with shape \p MatrixShape, starting at \p MatrixPtr[I][J].
798   MatrixTy loadMatrix(Value *MatrixPtr, ShapeInfo MatrixShape, Value *I,
799                       Value *J, ShapeInfo ResultShape, Type *EltTy,
800                       IRBuilder<> &Builder) {
801 
802     Value *Offset = Builder.CreateAdd(
803         Builder.CreateMul(J, Builder.getInt32(MatrixShape.getStride())), I);
804 
805     unsigned AS = cast<PointerType>(MatrixPtr->getType())->getAddressSpace();
806     Value *EltPtr =
807         Builder.CreatePointerCast(MatrixPtr, PointerType::get(EltTy, AS));
808     Value *TileStart = Builder.CreateGEP(EltTy, EltPtr, Offset);
809     Type *TileTy =
810         VectorType::get(EltTy, ResultShape.NumRows * ResultShape.NumColumns);
811     Type *TilePtrTy = PointerType::get(TileTy, AS);
812     Value *TilePtr =
813         Builder.CreatePointerCast(TileStart, TilePtrTy, "col.cast");
814 
815     return loadMatrix(TileTy, TilePtr,
816                       Builder.getInt32(MatrixShape.getStride()), ResultShape,
817                       Builder);
818   }
819 
820   /// Lower a load instruction with shape information.
821   void LowerLoad(Instruction *Inst, Value *Ptr, Value *Stride,
822                  ShapeInfo Shape) {
823     IRBuilder<> Builder(Inst);
824     finalizeLowering(Inst,
825                      loadMatrix(Inst->getType(), Ptr, Stride, Shape, Builder),
826                      Builder);
827   }
828 
829   /// Lowers llvm.matrix.columnwise.load.
830   ///
831   /// The intrinsic loads a matrix from memory using a stride between columns.
832   void LowerColumnwiseLoad(CallInst *Inst) {
833     assert(MatrixLayout == MatrixLayoutTy::ColumnMajor &&
834            "Intrinsic only supports column-major layout!");
835     Value *Ptr = Inst->getArgOperand(0);
836     Value *Stride = Inst->getArgOperand(1);
837     LowerLoad(Inst, Ptr, Stride,
838               {Inst->getArgOperand(2), Inst->getArgOperand(3)});
839   }
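
  // For illustration only (schematic IR, intrinsic mangling elided): a call
  //
  //   call <6 x double> @llvm.matrix.columnwise.load(double* %p, i32 8,
  //                                                  i32 3, i32 2)
  //
  // loads a 3x2 matrix whose columns start 8 elements apart, i.e. it becomes
  // two <3 x double> loads, from %p and from %p + 8.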
840 
  /// Stores the sub-matrix \p StoreVal into the matrix at \p MatrixPtr with
  /// shape \p MatrixShape, starting at \p MatrixPtr[I][J].
843   void storeMatrix(const MatrixTy &StoreVal, Value *MatrixPtr,
844                    ShapeInfo MatrixShape, Value *I, Value *J, Type *EltTy,
845                    IRBuilder<> &Builder) {
846     Value *Offset = Builder.CreateAdd(
847         Builder.CreateMul(J, Builder.getInt32(MatrixShape.getStride())), I);
848 
849     unsigned AS = cast<PointerType>(MatrixPtr->getType())->getAddressSpace();
850     Value *EltPtr =
851         Builder.CreatePointerCast(MatrixPtr, PointerType::get(EltTy, AS));
852     Value *TileStart = Builder.CreateGEP(EltTy, EltPtr, Offset);
853     Type *TileTy = VectorType::get(EltTy, StoreVal.getNumRows() *
854                                               StoreVal.getNumColumns());
855     Type *TilePtrTy = PointerType::get(TileTy, AS);
856     Value *TilePtr =
857         Builder.CreatePointerCast(TileStart, TilePtrTy, "col.cast");
858 
859     storeMatrix(TileTy, StoreVal, TilePtr,
860                 Builder.getInt32(MatrixShape.getStride()), Builder);
861   }
862 
863   /// Store matrix \p StoreVal starting at \p Ptr and using \p Stride between
864   /// vectors.
865   MatrixTy storeMatrix(Type *Ty, MatrixTy StoreVal, Value *Ptr, Value *Stride,
866                        IRBuilder<> &Builder) {
867     auto VType = cast<VectorType>(Ty);
868     Value *EltPtr = createElementPtr(Ptr, VType->getElementType(), Builder);
869     for (auto Vec : enumerate(StoreVal.vectors())) {
870       Value *GEP = computeVectorAddr(EltPtr, Builder.getInt32(Vec.index()),
871                                      Stride, StoreVal.getStride(),
872                                      VType->getElementType(), Builder);
873       createVectorStore(Vec.value(), GEP, VType->getElementType(), Builder);
874     }
875     return MatrixTy().addNumStores(getNumOps(StoreVal.getVectorTy()) *
876                                    StoreVal.getNumVectors());
877   }
878 
879   /// Lower a store instruction with shape information.
880   void LowerStore(Instruction *Inst, Value *Matrix, Value *Ptr, Value *Stride,
881                   ShapeInfo Shape) {
882     IRBuilder<> Builder(Inst);
883     auto StoreVal = getMatrix(Matrix, Shape, Builder);
884     finalizeLowering(
885         Inst, storeMatrix(Matrix->getType(), StoreVal, Ptr, Stride, Builder),
886         Builder);
887   }
888 
889   /// Lowers llvm.matrix.columnwise.store.
890   ///
  /// The intrinsic stores a matrix back to memory using a stride between
  /// columns.
892   void LowerColumnwiseStore(CallInst *Inst) {
893     assert(MatrixLayout == MatrixLayoutTy::ColumnMajor &&
894            "Intrinsic only supports column-major layout!");
895     Value *Matrix = Inst->getArgOperand(0);
896     Value *Ptr = Inst->getArgOperand(1);
897     Value *Stride = Inst->getArgOperand(2);
898     LowerStore(Inst, Matrix, Ptr, Stride,
899                {Inst->getArgOperand(3), Inst->getArgOperand(4)});
900   }
901 
  /// Set elements I..I+N-1 of \p Col to \p Block, where N is the number of
  /// elements in \p Block.
903   Value *insertVector(Value *Col, unsigned I, Value *Block,
904                       IRBuilder<> &Builder) {
905 
906     // First, bring Block to the same size as Col
907     unsigned BlockNumElts =
908         cast<VectorType>(Block->getType())->getNumElements();
909     unsigned NumElts = cast<VectorType>(Col->getType())->getNumElements();
910     assert(NumElts >= BlockNumElts && "Too few elements for current block");
911 
912     Value *Undef = UndefValue::get(Block->getType());
913     Block = Builder.CreateShuffleVector(
914         Block, Undef,
915         createSequentialMask(0, BlockNumElts, NumElts - BlockNumElts));
916 
917     // If Col is 7 long and I is 2 and BlockNumElts is 2 the mask is: 0, 1, 7,
918     // 8, 4, 5, 6
919     SmallVector<int, 16> Mask;
920     unsigned i;
921     for (i = 0; i < I; i++)
922       Mask.push_back(i);
923 
924     unsigned VecNumElts = cast<VectorType>(Col->getType())->getNumElements();
925     for (; i < I + BlockNumElts; i++)
926       Mask.push_back(i - I + VecNumElts);
927 
928     for (; i < VecNumElts; i++)
929       Mask.push_back(i);
930 
931     return Builder.CreateShuffleVector(Col, Block, Mask);
932   }
933 
934   Value *createMulAdd(Value *Sum, Value *A, Value *B, bool UseFPOp,
935                       IRBuilder<> &Builder, bool AllowContraction,
936                       unsigned &NumComputeOps) {
937     NumComputeOps += getNumOps(A->getType());
938     if (!Sum)
939       return UseFPOp ? Builder.CreateFMul(A, B) : Builder.CreateMul(A, B);
940 
941     if (UseFPOp) {
942       if (AllowContraction) {
943         // Use fmuladd for floating point operations and let the backend decide
944         // if that's profitable.
945         Function *FMulAdd = Intrinsic::getDeclaration(
946             Func.getParent(), Intrinsic::fmuladd, A->getType());
947         return Builder.CreateCall(FMulAdd, {A, B, Sum});
948       }
949       NumComputeOps += getNumOps(A->getType());
950       Value *Mul = Builder.CreateFMul(A, B);
951       return Builder.CreateFAdd(Sum, Mul);
952     }
953 
954     NumComputeOps += getNumOps(A->getType());
955     Value *Mul = Builder.CreateMul(A, B);
956     return Builder.CreateAdd(Sum, Mul);
957   }
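
  // For illustration only (value names made up): with a running sum and
  // floating-point operands, createMulAdd emits either
  //
  //   %mul = fmul <4 x double> %a, %b
  //   %sum = fadd <4 x double> %acc, %mul
  //
  // or, when contraction is allowed,
  //
  //   %sum = call <4 x double> @llvm.fmuladd.v4f64(<4 x double> %a,
  //                                                <4 x double> %b,
  //                                                <4 x double> %acc)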
958 
959   /// Cache \p Matrix as result of \p Inst and update the uses of \p Inst. For
  /// users with shape information, there's nothing to do: they will use the
961   /// cached value when they are lowered. For other users, \p Matrix is
962   /// flattened and the uses are updated to use it. Also marks \p Inst for
963   /// deletion.
964   void finalizeLowering(Instruction *Inst, MatrixTy Matrix,
965                         IRBuilder<> &Builder) {
966     Inst2ColumnMatrix.insert(std::make_pair(Inst, Matrix));
967 
968     ToRemove.push_back(Inst);
969     Value *Flattened = nullptr;
970     for (auto I = Inst->use_begin(), E = Inst->use_end(); I != E;) {
971       Use &U = *I++;
972       if (ShapeMap.find(U.getUser()) == ShapeMap.end()) {
973         if (!Flattened)
974           Flattened = Matrix.embedInVector(Builder);
975         U.set(Flattened);
976       }
977     }
978   }
979 
980   /// Compute \p Result += \p A * \p B for input matrices with left-associating
981   /// addition.
982   void emitMatrixMultiply(MatrixTy &Result, const MatrixTy &A,
983                           const MatrixTy &B, bool AllowContraction,
984                           IRBuilder<> &Builder, bool isTiled) {
985     const unsigned VF = std::max<unsigned>(
986         TTI.getRegisterBitWidth(true) /
987             Result.getElementType()->getPrimitiveSizeInBits().getFixedSize(),
988         1U);
989     unsigned R = Result.getNumRows();
990     unsigned C = Result.getNumColumns();
991     unsigned M = A.getNumColumns();
992 
993     bool IsFP = Result.getElementType()->isFloatingPointTy();
994     assert(A.isColumnMajor() == B.isColumnMajor() &&
995            Result.isColumnMajor() == A.isColumnMajor() &&
996            "operands must agree on matrix layout");
997     unsigned NumComputeOps = 0;
998     if (A.isColumnMajor()) {
999       // Multiply columns from the first operand with scalars from the second
      // operand. Then move along the K axis and accumulate the columns.  With
1001       // this the adds can be vectorized without reassociation.
1002       for (unsigned J = 0; J < C; ++J) {
1003         unsigned BlockSize = VF;
1004         // If Result is zero, we don't need to accumulate in the K==0 iteration.
1005         bool isSumZero = isa<ConstantAggregateZero>(Result.getColumn(J));
1006 
1007         for (unsigned I = 0; I < R; I += BlockSize) {
1008           // Gradually lower the vectorization factor to cover the remainder.
1009           while (I + BlockSize > R)
1010             BlockSize /= 2;
1011 
1012           Value *Sum = isTiled ? Result.extractVector(I, J, BlockSize, Builder)
1013                                : nullptr;
1014           for (unsigned K = 0; K < M; ++K) {
1015             Value *L = A.extractVector(I, K, BlockSize, Builder);
1016             Value *RH = Builder.CreateExtractElement(B.getColumn(J), K);
1017             Value *Splat = Builder.CreateVectorSplat(BlockSize, RH, "splat");
1018             Sum = createMulAdd(isSumZero && K == 0 ? nullptr : Sum, L, Splat,
1019                                Result.getElementType()->isFloatingPointTy(),
1020                                Builder, AllowContraction, NumComputeOps);
1021           }
1022           Result.setVector(J,
1023                            insertVector(Result.getVector(J), I, Sum, Builder));
1024         }
1025       }
1026     } else {
1027       // Multiply rows from the second operand with scalars from the first
      // operand. Then move along the K axis and accumulate the rows.  With this
1029       // the adds can be vectorized without reassociation.
1030       for (unsigned I = 0; I < R; ++I) {
1031         unsigned BlockSize = VF;
1032         bool isSumZero = isa<ConstantAggregateZero>(Result.getRow(I));
1033         for (unsigned J = 0; J < C; J += BlockSize) {
1034           // Gradually lower the vectorization factor to cover the remainder.
1035           while (J + BlockSize > C)
1036             BlockSize /= 2;
1037 
1038           Value *Sum = nullptr;
1039           for (unsigned K = 0; K < M; ++K) {
1040             Value *R = B.extractVector(K, J, BlockSize, Builder);
1041             Value *LH = Builder.CreateExtractElement(A.getVector(I), K);
1042             Value *Splat = Builder.CreateVectorSplat(BlockSize, LH, "splat");
1043             Sum = createMulAdd(isSumZero && K == 0 ? nullptr : Sum, Splat, R,
1044                                IsFP, Builder, AllowContraction, NumComputeOps);
1045           }
1046           Result.setVector(I,
1047                            insertVector(Result.getVector(I), J, Sum, Builder));
1048         }
1049       }
1050     }
1051     Result.addNumComputeOps(NumComputeOps);
1052   }
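
  // For illustration only: for a column-major 2x2 * 2x2 multiply with VF >= 2,
  // result column J is computed as
  //
  //   Result[:, J] = A[:, 0] * splat(B[0, J]) + A[:, 1] * splat(B[1, J])
  //
  // i.e. one extractelement, splat and (f)mul/(f)add (or fmuladd) per K step,
  // with the adds accumulating left-to-right so that no reassociation is
  // needed to keep them vectorized.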
1053 
  /// Ensure that the memory in \p Load does not alias \p Store by potentially
  /// copying it to a new location. Returns the pointer to the new location if
  /// a copy was made, otherwise the original pointer.
1057   Value *getNonAliasingPointer(LoadInst *Load, StoreInst *Store,
1058                                CallInst *MatMul) {
1059     MemoryLocation StoreLoc = MemoryLocation::get(Store);
1060     MemoryLocation LoadLoc = MemoryLocation::get(Load);
1061 
1062     AliasResult LdAliased = AA.alias(LoadLoc, StoreLoc);
1063 
1064     // If we can statically determine noalias we're good.
1065     if (!LdAliased)
1066       return Load->getPointerOperand();
1067 
1068     // Create code to check if the memory locations of the Load and Store
1069     // overlap and if they do, copy Load's operand to a new buffer.
1070 
    // First, create new blocks for the second part of the check and the copy.
1072     BasicBlock *Check0 = MatMul->getParent();
1073     // FIXME: Use lazy DTU and update SplitBlock to accept a DTU instead of a
1074     // DT. Manually collect dominator tree updates, to avoid unnecessary work,
1075     // as we adjust Check0 and Check1's branches.
1076     SmallVector<DominatorTree::UpdateType, 4> DTUpdates;
1077     for (BasicBlock *Succ : successors(Check0))
1078       DTUpdates.push_back({DT.Delete, Check0, Succ});
1079 
1080     BasicBlock *Check1 = SplitBlock(MatMul->getParent(), MatMul, nullptr, &LI,
1081                                     nullptr, "alias_cont");
1082     BasicBlock *Copy =
1083         SplitBlock(MatMul->getParent(), MatMul, nullptr, &LI, nullptr, "copy");
1084     BasicBlock *Fusion = SplitBlock(MatMul->getParent(), MatMul, nullptr, &LI,
1085                                     nullptr, "no_alias");
1086 
1087     // Check if the loaded memory location begins before the end of the store
1088     // location. If the condition holds, they might overlap, otherwise they are
1089     // guaranteed to not overlap.
1090     IRBuilder<> Builder(MatMul);
1091     Check0->getTerminator()->eraseFromParent();
1092     Builder.SetInsertPoint(Check0);
1093     Type *IntPtrTy = Builder.getIntPtrTy(Load->getModule()->getDataLayout());
1094     Value *StoreBegin = Builder.CreatePtrToInt(
1095         const_cast<Value *>(StoreLoc.Ptr), IntPtrTy, "store.begin");
1096     Value *StoreEnd = Builder.CreateAdd(
1097         StoreBegin, ConstantInt::get(IntPtrTy, StoreLoc.Size.getValue()),
1098         "store.end", true, true);
1099     Value *LoadBegin = Builder.CreatePtrToInt(const_cast<Value *>(LoadLoc.Ptr),
1100                                               IntPtrTy, "load.begin");
1101     Builder.CreateCondBr(Builder.CreateICmpULT(LoadBegin, StoreEnd), Check1,
1102                          Fusion);
1103 
1104     // Check if the store begins before the end of the load location. If the
1105     // condition holds, they alias, otherwise they are guaranteed to not
1106     // overlap.
1107     Check1->getTerminator()->eraseFromParent();
1108     Builder.SetInsertPoint(Check1, Check1->begin());
1109     Value *LoadEnd = Builder.CreateAdd(
1110         LoadBegin, ConstantInt::get(IntPtrTy, LoadLoc.Size.getValue()),
1111         "load.end", true, true);
1112     Builder.CreateCondBr(Builder.CreateICmpULT(StoreBegin, LoadEnd), Copy,
1113                          Fusion);
1114 
1115     // Copy load operand to new alloca.
1116     Builder.SetInsertPoint(Copy, Copy->begin());
1117     AllocaInst *NewLd =
1118         Builder.CreateAlloca(Load->getType(), Load->getPointerAddressSpace());
1119     Builder.CreateMemCpy(NewLd, NewLd->getAlign(),
1120                          Load->getPointerOperand(), Load->getAlign(),
1121                          LoadLoc.Size.getValue());
1122     Builder.SetInsertPoint(Fusion, Fusion->begin());
1123     PHINode *PHI = Builder.CreatePHI(Load->getPointerOperandType(), 3);
1124     PHI->addIncoming(Load->getPointerOperand(), Check0);
1125     PHI->addIncoming(Load->getPointerOperand(), Check1);
1126     PHI->addIncoming(NewLd, Copy);
1127 
1128     // Adjust DT.
1129     DTUpdates.push_back({DT.Insert, Check0, Check1});
1130     DTUpdates.push_back({DT.Insert, Check0, Fusion});
1131     DTUpdates.push_back({DT.Insert, Check1, Copy});
1132     DTUpdates.push_back({DT.Insert, Check1, Fusion});
1133     DT.applyUpdates(DTUpdates);
1134     return PHI;
1135   }
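
  // For illustration only, the CFG produced by getNonAliasingPointer, using
  // the block names created above (Check0 is the original block of MatMul):
  //
  //   Check0 -> alias_cont -> copy -> no_alias
  //      \          \__________________^
  //       \____________________________^
  //
  // no_alias starts with a phi that selects the original load pointer on the
  // two no-overlap edges and the freshly copied alloca on the edge from copy.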
1136 
1137   bool isFusionProfitable(CallInst *MatMul) {
1138     if (ForceFusion)
1139       return true;
1140 
1141     ShapeInfo LShape(MatMul->getArgOperand(2), MatMul->getArgOperand(3));
1142     ShapeInfo RShape(MatMul->getArgOperand(3), MatMul->getArgOperand(4));
1143 
1144     const unsigned R = LShape.NumRows;
1145     const unsigned C = RShape.NumColumns;
1146     const unsigned M = LShape.NumColumns;
1147     auto *EltType = cast<VectorType>(MatMul->getType())->getElementType();
1148 
1149     const unsigned VF =
1150         std::max<unsigned>(TTI.getRegisterBitWidth(true) /
1151                                EltType->getPrimitiveSizeInBits().getFixedSize(),
1152                            1U);
1153 
1154     // Cost model for tiling
1155     //
1156     // For tiling to be beneficial, we need reuse either along the R or
1157     // the C axis.  We vectorize along the R axis so that means at least
1158     // 3 elements.
1159     // TODO: Also consider cost of copying if operands alias.
1160     if (R <= VF && C == 1)
1161       return false;
1162     // Then we need enough elements to exceed the number of vector
1163     // registers we have.  Note that this is an oversimplification since
1164     // fusing also takes some extra loads which may exceed the number of
1165     // reloads necessary.
1166     unsigned Op0Regs = (R + VF - 1) / VF * M;
1167     unsigned Op1Regs = (M + VF - 1) / VF * C;
1168     return Op0Regs + Op1Regs > TTI.getNumberOfRegisters(true);
1169   }
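
  // For illustration only, assuming a hypothetical target with VF = 4 and 16
  // vector registers: a 16x16 * 16x16 multiply needs Op0Regs = Op1Regs =
  // (16 / 4) * 16 = 64 registers per operand, far more than 16, so fusion is
  // considered profitable; a result with R <= VF and C == 1 is rejected early
  // because there is no reuse to exploit.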
1170 
1171   MatrixTy getZeroMatrix(Type *EltType, unsigned R, unsigned C) {
1172     MatrixTy Res;
    Type *ColumnType = VectorType::get(EltType, R);
    for (unsigned I = 0; I < C; ++I)
      Res.addVector(ConstantAggregateZero::get(ColumnType));
1176     return Res;
1177   }
1178 
1179   void emitSIMDTiling(CallInst *MatMul, LoadInst *LoadOp0, LoadInst *LoadOp1,
1180                       StoreInst *Store,
1181                       SmallPtrSetImpl<Instruction *> &FusedInsts) {
1182     assert(MatrixLayout == MatrixLayoutTy::ColumnMajor &&
1183            "Tiling only supported for column-major matrixes at the moment!");
1184     if (!isFusionProfitable(MatMul))
1185       return;
1186 
1187     ShapeInfo LShape(MatMul->getArgOperand(2), MatMul->getArgOperand(3));
1188     ShapeInfo RShape(MatMul->getArgOperand(3), MatMul->getArgOperand(4));
1189 
1190     const unsigned R = LShape.NumRows;
1191     const unsigned C = RShape.NumColumns;
1192     const unsigned M = LShape.NumColumns;
1193     auto *EltType = cast<VectorType>(MatMul->getType())->getElementType();
1194 
1195     Value *APtr = getNonAliasingPointer(LoadOp0, Store, MatMul);
1196     Value *BPtr = getNonAliasingPointer(LoadOp1, Store, MatMul);
1197     Value *CPtr = Store->getPointerOperand();
1198 
1199     bool AllowContract = AllowContractEnabled || (isa<FPMathOperator>(MatMul) &&
1200                                                   MatMul->hasAllowContract());
1201     IRBuilder<> Builder(Store);
1202     for (unsigned J = 0; J < C; J += TileSize)
1203       for (unsigned I = 0; I < R; I += TileSize) {
1204         const unsigned TileR = std::min(R - I, unsigned(TileSize));
1205         const unsigned TileC = std::min(C - J, unsigned(TileSize));
1206         MatrixTy Res = getZeroMatrix(EltType, TileR, TileC);
1207 
1208         for (unsigned K = 0; K < M; K += TileSize) {
1209           const unsigned TileM = std::min(M - K, unsigned(TileSize));
1210           MatrixTy A =
1211               loadMatrix(APtr, LShape, Builder.getInt32(I), Builder.getInt32(K),
1212                          {TileR, TileM}, EltType, Builder);
1213           MatrixTy B =
1214               loadMatrix(BPtr, RShape, Builder.getInt32(K), Builder.getInt32(J),
1215                          {TileM, TileC}, EltType, Builder);
1216           emitMatrixMultiply(Res, A, B, AllowContract, Builder, true);
1217         }
1218         storeMatrix(Res, CPtr, {R, M}, Builder.getInt32(I), Builder.getInt32(J),
1219                     EltType, Builder);
1220       }
1221 
1222     // Mark eliminated instructions as fused and remove them.
1223     FusedInsts.insert(Store);
1224     FusedInsts.insert(MatMul);
1225     Store->eraseFromParent();
1226     MatMul->eraseFromParent();
1227     if (LoadOp0->hasNUses(0)) {
1228       FusedInsts.insert(LoadOp0);
1229       LoadOp0->eraseFromParent();
1230     }
1231     if (LoadOp1->hasNUses(0)) {
1232       FusedInsts.insert(LoadOp1);
1233       LoadOp1->eraseFromParent();
1234     }
1235   }
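
  // For illustration only: with the default TileSize of 4, an 8x8 * 8x8
  // multiply is emitted as a 2x2 grid of 4x4 result tiles. Each tile
  // accumulates two 4x4 * 4x4 partial products (K = 0 and K = 4), loading the
  // operand tiles directly from the (possibly copied) source pointers and
  // storing the finished tile straight into the destination of the original
  // store.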
1236 
1237   /// Try to lower matrix multiply chains by fusing operations.
1238   ///
1239   /// Currently we only lower {ld, ld} -> matmul -> st chains.
  ///
1241   /// No need to return a MatrixTy object for the result of the operation, since
1242   /// the single store user will be lowered as part of this. Instructions that
1243   /// are completely eliminated by fusion are added to \p FusedInsts.
1244   void LowerMatrixMultiplyFused(CallInst *MatMul,
1245                                 SmallPtrSetImpl<Instruction *> &FusedInsts) {
1246     if (!FuseMatrix || !MatMul->hasOneUse() ||
1247         MatrixLayout != MatrixLayoutTy::ColumnMajor)
1248       return;
1249 
1250     auto *LoadOp0 = dyn_cast<LoadInst>(MatMul->getOperand(0));
1251     auto *LoadOp1 = dyn_cast<LoadInst>(MatMul->getOperand(1));
1252     auto *Store = dyn_cast<StoreInst>(*MatMul->user_begin());
1253     if (LoadOp0 && LoadOp1 && Store) {
1254       // The store address must dominate the MatMul instruction, otherwise
1255       // we create invalid IR.
1256       // FIXME: See if we can hoist the store address computation.
1257       auto *AddrI = dyn_cast<Instruction>(Store->getOperand(1));
1258       if (AddrI && (!DT.dominates(AddrI, MatMul)))
1259         return;
1260 
1261       emitSIMDTiling(MatMul, LoadOp0, LoadOp1, Store, FusedInsts);
1262       return;
1263     }
1264   }
1265 
1266   /// Lowers llvm.matrix.multiply.
1267   void LowerMultiply(CallInst *MatMul) {
1268     IRBuilder<> Builder(MatMul);
1269     auto *EltType = cast<VectorType>(MatMul->getType())->getElementType();
1270     ShapeInfo LShape(MatMul->getArgOperand(2), MatMul->getArgOperand(3));
1271     ShapeInfo RShape(MatMul->getArgOperand(3), MatMul->getArgOperand(4));
1272 
1273     const MatrixTy &Lhs = getMatrix(MatMul->getArgOperand(0), LShape, Builder);
1274     const MatrixTy &Rhs = getMatrix(MatMul->getArgOperand(1), RShape, Builder);
1275 
1276     const unsigned R = LShape.NumRows;
1277     const unsigned C = RShape.NumColumns;
1278     assert(LShape.NumColumns == RShape.NumRows);
1279 
1280     // Initialize the output
1281     MatrixTy Result(R, C, EltType);
1282 
1283     bool AllowContract = AllowContractEnabled || (isa<FPMathOperator>(MatMul) &&
1284                                                   MatMul->hasAllowContract());
1285     emitMatrixMultiply(Result, Lhs, Rhs, AllowContract, Builder, false);
1286     finalizeLowering(MatMul, Result, Builder);
1287   }
1288 
1289   /// Lowers llvm.matrix.transpose.
1290   void LowerTranspose(CallInst *Inst) {
1291     MatrixTy Result;
1292     IRBuilder<> Builder(Inst);
1293     Value *InputVal = Inst->getArgOperand(0);
1294     VectorType *VectorTy = cast<VectorType>(InputVal->getType());
1295     ShapeInfo ArgShape(Inst->getArgOperand(1), Inst->getArgOperand(2));
1296     MatrixTy InputMatrix = getMatrix(InputVal, ArgShape, Builder);
1297     assert(InputMatrix.isColumnMajor() &&
1298            "Row-major code-gen not supported yet!");
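    // For each row of the input we build one column of the result by
    // collecting that row's element from every input column. E.g. a 2x3 input
    // stored as the columns (a,d), (b,e), (c,f) becomes a 3x2 result stored
    // as the columns (a,b,c), (d,e,f).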
1299 
1300     for (unsigned Row = 0; Row < ArgShape.NumRows; ++Row) {
1301       // Build a single column vector for this row. First initialize it.
1302       Value *ResultColumn = UndefValue::get(
1303           VectorType::get(VectorTy->getElementType(), ArgShape.NumColumns));
1304 
      // Go through the elements of this row and insert them into the resulting
      // column vector.
1307       for (auto C : enumerate(InputMatrix.columns())) {
1308         Value *Elt = Builder.CreateExtractElement(C.value(), Row);
1309         // We insert at index Column since that is the row index after the
1310         // transpose.
1311         ResultColumn =
1312             Builder.CreateInsertElement(ResultColumn, Elt, C.index());
1313       }
1314       Result.addVector(ResultColumn);
1315     }
1316 
1317     // TODO: Improve estimate of operations needed for transposes. Currently we
1318     // just count the insertelement/extractelement instructions, but do not
1319     // account for later simplifications/combines.
1320     finalizeLowering(
1321         Inst,
1322         Result.addNumComputeOps(2 * ArgShape.NumRows * ArgShape.NumColumns),
1323         Builder);
1324   }
1325 
1326   /// Lower load instructions, if shape information is available.
1327   bool VisitLoad(Instruction *Inst, Value *Ptr, IRBuilder<> &Builder) {
1328     auto I = ShapeMap.find(Inst);
1329     if (I == ShapeMap.end())
1330       return false;
1331 
1332     LowerLoad(Inst, Ptr, Builder.getInt32(I->second.getStride()), I->second);
1333     return true;
1334   }
1335 
1336   bool VisitStore(Instruction *Inst, Value *StoredVal, Value *Ptr,
1337                   IRBuilder<> &Builder) {
1338     auto I = ShapeMap.find(StoredVal);
1339     if (I == ShapeMap.end())
1340       return false;
1341 
1342     LowerStore(Inst, StoredVal, Ptr, Builder.getInt32(I->second.getStride()),
1343                I->second);
1344     return true;
1345   }
1346 
1347   /// Lower binary operators, if shape information is available.
1348   bool VisitBinaryOperator(BinaryOperator *Inst) {
1349     auto I = ShapeMap.find(Inst);
1350     if (I == ShapeMap.end())
1351       return false;
1352 
1353     Value *Lhs = Inst->getOperand(0);
1354     Value *Rhs = Inst->getOperand(1);
1355 
1356     IRBuilder<> Builder(Inst);
1357     ShapeInfo &Shape = I->second;
1358 
1359     MatrixTy Result;
1360     MatrixTy A = getMatrix(Lhs, Shape, Builder);
1361     MatrixTy B = getMatrix(Rhs, Shape, Builder);
1362     assert(A.isColumnMajor() == B.isColumnMajor() &&
1363            Result.isColumnMajor() == A.isColumnMajor() &&
1364            "operands must agree on matrix layout");
1365 
1366     // Helper to perform binary op on vectors.
1367     auto BuildVectorOp = [&Builder, Inst](Value *LHS, Value *RHS) {
1368       switch (Inst->getOpcode()) {
1369       case Instruction::Add:
1370         return Builder.CreateAdd(LHS, RHS);
1371       case Instruction::Mul:
1372         return Builder.CreateMul(LHS, RHS);
1373       case Instruction::Sub:
1374         return Builder.CreateSub(LHS, RHS);
1375       case Instruction::FAdd:
1376         return Builder.CreateFAdd(LHS, RHS);
1377       case Instruction::FMul:
1378         return Builder.CreateFMul(LHS, RHS);
1379       case Instruction::FSub:
1380         return Builder.CreateFSub(LHS, RHS);
1381       default:
1382         llvm_unreachable("Unsupported binary operator for matrix");
1383       }
1384     };
1385 
1386     for (unsigned I = 0; I < Shape.getNumVectors(); ++I)
1387       Result.addVector(BuildVectorOp(A.getVector(I), B.getVector(I)));
1388 
1389     finalizeLowering(Inst,
1390                      Result.addNumComputeOps(getNumOps(Result.getVectorTy()) *
1391                                              Result.getNumVectors()),
1392                      Builder);
1393     return true;
1394   }
1395 
1396   /// Helper to linearize a matrix expression tree into a string. Currently
  /// matrix expressions are linearized by starting at an expression leaf and
1398   /// linearizing bottom up.
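  ///
  /// For example, a transpose of a loaded 2x3 matrix of doubles is rendered
  /// as (illustrative; %A is a placeholder pointer):
  ///
  ///   transpose.2x3.double(load(addr %A))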
1399   struct ExprLinearizer {
1400     unsigned LengthToBreak = 100;
1401     std::string Str;
1402     raw_string_ostream Stream;
1403     unsigned LineLength = 0;
1404     const DataLayout &DL;
1405 
    /// Mapping from instructions to matrices. It is used to identify
1407     /// matrix instructions.
1408     const MapVector<Value *, MatrixTy> &Inst2Matrix;
1409 
1410     /// Mapping from values to the leaves of all expressions that the value is
1411     /// part of.
1412     const DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared;
1413 
1414     /// Set of matrix expressions in the scope of a given DISubprogram.
1415     const SmallSetVector<Value *, 32> &ExprsInSubprogram;
1416 
1417     /// Leaf node of the expression to linearize.
1418     Value *Leaf;
1419 
1420     /// Used to keep track of sub-expressions that get reused while linearizing
1421     /// the expression. Re-used sub-expressions are marked as (reused).
1422     SmallPtrSet<Value *, 8> ReusedExprs;
1423 
1424     ExprLinearizer(const DataLayout &DL,
1425                    const MapVector<Value *, MatrixTy> &Inst2Matrix,
1426                    const DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared,
1427                    const SmallSetVector<Value *, 32> &ExprsInSubprogram,
1428                    Value *Leaf)
1429         : Str(), Stream(Str), DL(DL), Inst2Matrix(Inst2Matrix), Shared(Shared),
1430           ExprsInSubprogram(ExprsInSubprogram), Leaf(Leaf) {}
1431 
1432     void indent(unsigned N) {
1433       LineLength += N;
1434       for (unsigned i = 0; i < N; i++)
1435         Stream << " ";
1436     }
1437 
1438     void lineBreak() {
1439       Stream << "\n";
1440       LineLength = 0;
1441     }
1442 
1443     void maybeIndent(unsigned Indent) {
1444       if (LineLength >= LengthToBreak)
1445         lineBreak();
1446 
1447       if (LineLength == 0)
1448         indent(Indent);
1449     }
1450 
1451     void write(StringRef S) {
1452       LineLength += S.size();
1453       Stream << S;
1454     }
1455 
1456     Value *getUnderlyingObjectThroughLoads(Value *V) {
1457       if (Value *Ptr = getPointerOperand(V))
1458         return getUnderlyingObjectThroughLoads(Ptr);
1459       else if (V->getType()->isPointerTy())
1460         return GetUnderlyingObject(V, DL);
1461       return V;
1462     }
1463 
1464     /// Returns true if \p V is a matrix value in the given subprogram.
1465     bool isMatrix(Value *V) const { return ExprsInSubprogram.count(V); }
1466 
    /// If \p V is a matrix value, print its shape as NumRows x NumColumns to
1468     /// \p SS.
1469     void prettyPrintMatrixType(Value *V, raw_string_ostream &SS) {
1470       auto M = Inst2Matrix.find(V);
1471       if (M == Inst2Matrix.end())
1472         SS << "unknown";
1473       else {
1474         SS << M->second.getNumRows();
1475         SS << "x";
1476         SS << M->second.getNumColumns();
1477       }
1478     }
1479 
1480     /// Write the called function name. Handles calls to llvm.matrix.*
1481     /// specially: we write the name, followed by the dimensions of the input
    /// matrices, followed by the scalar type name.
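    /// For a multiply of a 2x3 and a 3x2 double matrix this produces, e.g.,
    /// "multiply.2x3.3x2.double".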
1483     void writeFnName(CallInst *CI) {
1484       if (!CI->getCalledFunction())
1485         write("<no called fn>");
1486       else {
1487         StringRef Name = CI->getCalledFunction()->getName();
1488         if (!Name.startswith("llvm.matrix")) {
1489           write(Name);
1490           return;
1491         }
1492         IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
1493         write(StringRef(Intrinsic::getName(II->getIntrinsicID(), {}))
1494                   .drop_front(StringRef("llvm.matrix.").size()));
1495         write(".");
1496         std::string Tmp = "";
1497         raw_string_ostream SS(Tmp);
1498 
1499         switch (II->getIntrinsicID()) {
1500         case Intrinsic::matrix_multiply:
1501           prettyPrintMatrixType(II->getOperand(0), SS);
1502           SS << ".";
1503           prettyPrintMatrixType(II->getOperand(1), SS);
1504           SS << "." << *II->getType()->getScalarType();
1505           break;
1506         case Intrinsic::matrix_transpose:
1507           prettyPrintMatrixType(II->getOperand(0), SS);
1508           SS << "." << *II->getType()->getScalarType();
1509           break;
1510         case Intrinsic::matrix_columnwise_load:
1511           prettyPrintMatrixType(II, SS);
1512           SS << "." << *II->getType()->getScalarType();
1513           break;
1514         case Intrinsic::matrix_columnwise_store:
1515           prettyPrintMatrixType(II->getOperand(0), SS);
1516           SS << "." << *II->getOperand(0)->getType()->getScalarType();
1517           break;
1518         default:
1519           llvm_unreachable("Unhandled case");
1520         }
1521         SS.flush();
1522         write(Tmp);
1523       }
1524     }
1525 
1526     unsigned getNumShapeArgs(CallInst *CI) const {
1527       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI)) {
1528         switch (II->getIntrinsicID()) {
1529         case Intrinsic::matrix_multiply:
1530           return 3;
1531         case Intrinsic::matrix_transpose:
1532         case Intrinsic::matrix_columnwise_load:
1533         case Intrinsic::matrix_columnwise_store:
1534           return 2;
1535         default:
1536           return 0;
1537         }
1538       }
1539       return 0;
1540     }
1541 
    /// Special printing for values: for pointers, we print whether they refer
    /// to a (function-)external address or a stack address; for other values
    /// we either print the constant or "scalar"/"matrix".
1545     void write(Value *V) {
1546       V = getUnderlyingObjectThroughLoads(V);
1547       if (V->getType()->isPointerTy()) {
1548         if (isa<AllocaInst>(V)) {
1549           Stream << "stack addr";
1550           LineLength += StringRef("stack addr").size();
1551         } else {
1552           Stream << "addr";
1553           LineLength += StringRef("addr").size();
1554         }
1555         if (!V->getName().empty()) {
          Stream << " %" << V->getName();
1557           LineLength += V->getName().size() + 2;
1558         }
1559         return;
1560       }
1561 
1562       std::string Tmp;
1563       raw_string_ostream TmpStream(Tmp);
1564 
1565       if (auto *CI = dyn_cast<ConstantInt>(V))
1566         TmpStream << CI->getValue();
1567       else if (isa<Constant>(V))
1568         TmpStream << "constant";
1569       else {
1570         if (isMatrix(V))
1571           TmpStream << "matrix";
1572         else
1573           TmpStream << "scalar";
1574       }
1575       TmpStream.flush();
1576       Tmp = std::string(StringRef(Tmp).trim());
1577       LineLength += Tmp.size();
1578       Stream << Tmp;
1579     }
1580 
1581     /// Linearize expression \p Expr starting at an indentation of \p Indent.
1582     /// Expressions that are re-used multiple times are prefixed with (reused)
1583     /// at the re-used root instruction.
1584     void linearizeExpr(Value *Expr, unsigned Indent, bool ParentReused,
1585                        bool ParentShared) {
1586       auto *I = cast<Instruction>(Expr);
1587       maybeIndent(Indent);
1588       SmallVector<Value *, 8> Ops;
1589 
1590       // Is Expr shared with other expression leaves?
1591       bool ExprShared = false;
1592 
1593       // Deal with shared subtrees. Mark them as shared, if required.
1594       if (!ParentShared) {
1595         auto SI = Shared.find(Expr);
1596         assert(SI != Shared.end() && SI->second.find(Leaf) != SI->second.end());
1597 
1598         for (Value *S : SI->second) {
1599           if (S == Leaf)
1600             continue;
1601           DebugLoc DL = cast<Instruction>(S)->getDebugLoc();
1602           write("shared with remark at line " + std::to_string(DL.getLine()) +
1603                 " column " + std::to_string(DL.getCol()) + " (");
1604         }
1605         ExprShared = SI->second.size() > 1;
1606       }
1607 
1608       bool Reused = !ReusedExprs.insert(Expr).second;
1609       if (Reused && !ParentReused)
1610         write("(reused) ");
1611 
1612       if (auto *CI = dyn_cast<CallInst>(I)) {
1613         writeFnName(CI);
1614 
1615         Ops.append(CI->arg_begin(), CI->arg_end() - getNumShapeArgs(CI));
1616       } else if (isa<BitCastInst>(Expr)) {
        // Special case bitcasts, which are used to materialize matrices from
1618         // non-matrix ops.
1619         write("matrix");
1620         return;
1621       } else {
1622         Ops.append(I->value_op_begin(), I->value_op_end());
1623         write(std::string(I->getOpcodeName()));
1624       }
1625 
1626       write(std::string("("));
1627 
1628       unsigned NumOpsToBreak = 1;
1629       if (match(Expr, m_Intrinsic<Intrinsic::matrix_columnwise_load>()))
1630         NumOpsToBreak = 2;
1631 
1632       for (Value *Op : Ops) {
1633         if (Ops.size() > NumOpsToBreak)
1634           lineBreak();
1635 
1636         maybeIndent(Indent + 1);
1637         if (isMatrix(Op))
1638           linearizeExpr(Op, Indent + 1, Reused, ExprShared);
1639         else
1640           write(Op);
1641         if (Op != Ops.back())
1642           write(", ");
1643       }
1644 
1645       write(")");
1646     }
1647 
1648     const std::string &getResult() {
1649       Stream.flush();
1650       return Str;
1651     }
1652   };
1653 
1654   /// Generate remarks for matrix operations in a function. To generate remarks
1655   /// for matrix expressions, the following approach is used:
1656   /// 1. Use the inlined-at debug information to group matrix operations to the
1657   ///    DISubprograms they are contained in.
1658   /// 2. Collect leaves of matrix expressions (done in
  ///    RemarkGenerator::getExpressionLeaves) for each subprogram-to-expression
  ///    mapping. Leaves are lowered matrix instructions without other matrix
  ///    users (like stores) in the current subprogram.
  /// 3. For each leaf, create a remark containing a linearized version of the
1663   ///    matrix expression. The expression is linearized by a recursive
1664   ///    bottom-up traversal of the matrix operands, starting at a leaf. Note
1665   ///    that multiple leaves can share sub-expressions. Shared subexpressions
1666   ///    are explicitly marked as shared().
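  ///
  /// For a single multiply-store chain the emitted remark may look like the
  /// following (illustrative; the shapes, the pointer names %A/%B/%C and the
  /// counts <n>/<m>/<k> are placeholders):
  ///
  ///   Lowered with <n> stores, <m> loads, <k> compute ops
  ///   store(
  ///    multiply.2x6.6x2.double(
  ///     load(addr %A),
  ///     load(addr %B)),
  ///    addr %C)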
1667   struct RemarkGenerator {
1668     const MapVector<Value *, MatrixTy> &Inst2Matrix;
1669     OptimizationRemarkEmitter &ORE;
1670     Function &Func;
1671     const DataLayout &DL;
1672 
1673     RemarkGenerator(const MapVector<Value *, MatrixTy> &Inst2Matrix,
1674                     OptimizationRemarkEmitter &ORE, Function &Func)
1675         : Inst2Matrix(Inst2Matrix), ORE(ORE), Func(Func),
1676           DL(Func.getParent()->getDataLayout()) {}
1677 
1678     /// Return all leaves of the expressions in \p ExprsInSubprogram. Those are
1679     /// instructions in Inst2Matrix returning void or without any users in
1680     /// \p ExprsInSubprogram. Currently that should only include stores.
1681     SmallVector<Value *, 4>
1682     getExpressionLeaves(const SmallSetVector<Value *, 32> &ExprsInSubprogram) {
1683       SmallVector<Value *, 4> Leaves;
1684       for (auto *Expr : ExprsInSubprogram)
1685         if (Expr->getType()->isVoidTy() ||
1686             !any_of(Expr->users(), [&ExprsInSubprogram](User *U) {
1687               return ExprsInSubprogram.count(U);
1688             }))
1689           Leaves.push_back(Expr);
1690       return Leaves;
1691     }
1692 
1693     /// Recursively traverse expression \p V starting at \p Leaf and add \p Leaf
1694     /// to all visited expressions in \p Shared. Limit the matrix operations to
1695     /// the ones in \p ExprsInSubprogram.
1696     void collectSharedInfo(Value *Leaf, Value *V,
1697                            const SmallSetVector<Value *, 32> &ExprsInSubprogram,
1698                            DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared) {
1699 
1700       if (!ExprsInSubprogram.count(V))
1701         return;
1702 
1703       auto I = Shared.insert({V, {}});
1704       I.first->second.insert(Leaf);
1705 
1706       for (Value *Op : cast<Instruction>(V)->operand_values())
1707         collectSharedInfo(Leaf, Op, ExprsInSubprogram, Shared);
1708       return;
1709     }
1710 
    /// Calculate the exclusive and shared op counts for the expression
    /// starting at \p V. Expressions used multiple times are counted once.
1713     /// Limit the matrix operations to the ones in \p ExprsInSubprogram.
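    /// For example, an operation reachable from two different leaf
    /// expressions contributes to the shared counts, while one reachable from
    /// a single leaf contributes to the exclusive counts.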
1714     std::pair<OpInfoTy, OpInfoTy>
1715     sumOpInfos(Value *Root, SmallPtrSetImpl<Value *> &ReusedExprs,
1716                const SmallSetVector<Value *, 32> &ExprsInSubprogram,
1717                DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared) const {
1718       if (!ExprsInSubprogram.count(Root))
1719         return {};
1720 
1721       // Already counted this expression. Stop.
1722       if (!ReusedExprs.insert(Root).second)
1723         return {};
1724 
1725       OpInfoTy SharedCount;
1726       OpInfoTy Count;
1727 
1728       auto I = Shared.find(Root);
1729       auto CM = Inst2Matrix.find(Root);
1730       if (I->second.size() == 1)
1731         Count = CM->second.getOpInfo();
1732       else
1733         SharedCount = CM->second.getOpInfo();
1734 
1735       for (Value *Op : cast<Instruction>(Root)->operand_values()) {
1736         auto C = sumOpInfos(Op, ReusedExprs, ExprsInSubprogram, Shared);
1737         Count += C.first;
1738         SharedCount += C.second;
1739       }
1740       return {Count, SharedCount};
1741     }
1742 
1743     void emitRemarks() {
1744       if (!ORE.allowExtraAnalysis(DEBUG_TYPE))
1745         return;
1746 
      // Map matrix operations to their containing subprograms by traversing
1748       // the inlinedAt chain. If the function does not have a DISubprogram, we
1749       // only map them to the containing function.
1750       MapVector<DISubprogram *, SmallVector<Value *, 8>> Subprog2Exprs;
1751       for (auto &KV : Inst2Matrix) {
1752         if (Func.getSubprogram()) {
1753           auto *I = cast<Instruction>(KV.first);
1754           DILocation *Context = I->getDebugLoc();
1755           while (Context) {
1756             auto I =
1757                 Subprog2Exprs.insert({getSubprogram(Context->getScope()), {}});
1758             I.first->second.push_back(KV.first);
1759             Context = DebugLoc(Context).getInlinedAt();
1760           }
1761         } else {
1762           auto I = Subprog2Exprs.insert({nullptr, {}});
1763           I.first->second.push_back(KV.first);
1764         }
1765       }
1766       for (auto &KV : Subprog2Exprs) {
1767         SmallSetVector<Value *, 32> ExprsInSubprogram(KV.second.begin(),
1768                                                       KV.second.end());
1769         auto Leaves = getExpressionLeaves(ExprsInSubprogram);
1770 
1771         DenseMap<Value *, SmallPtrSet<Value *, 2>> Shared;
1772         for (Value *Leaf : Leaves)
1773           collectSharedInfo(Leaf, Leaf, ExprsInSubprogram, Shared);
1774 
1775         // Generate remarks for each leaf.
1776         for (auto *L : Leaves) {
1777 
1778           DebugLoc Loc = cast<Instruction>(L)->getDebugLoc();
1779           DILocation *Context = cast<Instruction>(L)->getDebugLoc();
1780           while (Context) {
1781             if (getSubprogram(Context->getScope()) == KV.first) {
1782               Loc = Context;
1783               break;
1784             }
1785             Context = DebugLoc(Context).getInlinedAt();
1786           }
1787 
1788           SmallPtrSet<Value *, 8> ReusedExprs;
1789           OpInfoTy Counts, SharedCounts;
1790           std::tie(Counts, SharedCounts) =
1791               sumOpInfos(L, ReusedExprs, ExprsInSubprogram, Shared);
1792 
1793           OptimizationRemark Rem(DEBUG_TYPE, "matrix-lowered", Loc,
1794                                  cast<Instruction>(L)->getParent());
1795 
1796           Rem << "Lowered with ";
1797           Rem << ore::NV("NumStores", Counts.NumStores) << " stores, "
1798               << ore::NV("NumLoads", Counts.NumLoads) << " loads, "
1799               << ore::NV("NumComputeOps", Counts.NumComputeOps)
1800               << " compute ops";
1801 
1802           if (SharedCounts.NumStores > 0 || SharedCounts.NumLoads > 0 ||
1803               SharedCounts.NumComputeOps > 0) {
1804             Rem << ",\nadditionally "
1805                 << ore::NV("NumStores", SharedCounts.NumStores) << " stores, "
1806                 << ore::NV("NumLoads", SharedCounts.NumLoads) << " loads, "
1807                 << ore::NV("NumFPOps", SharedCounts.NumComputeOps)
1808                 << " compute ops"
1809                 << " are shared with other expressions";
1810           }
1811 
1812           Rem << ("\n" + linearize(L, Shared, ExprsInSubprogram, DL));
1813           ORE.emit(Rem);
1814         }
1815       }
1816     }
1817 
1818     std::string
1819     linearize(Value *L,
1820               const DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared,
1821               const SmallSetVector<Value *, 32> &ExprsInSubprogram,
1822               const DataLayout &DL) {
1823       ExprLinearizer Lin(DL, Inst2Matrix, Shared, ExprsInSubprogram, L);
1824       Lin.linearizeExpr(L, 0, false, false);
1825       return Lin.getResult();
1826     }
1827   };
1828 };
1829 } // namespace
1830 
1831 PreservedAnalyses LowerMatrixIntrinsicsPass::run(Function &F,
1832                                                  FunctionAnalysisManager &AM) {
1833   auto &TTI = AM.getResult<TargetIRAnalysis>(F);
1834   auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
1835   auto &AA = AM.getResult<AAManager>(F);
1836   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
1837   auto &LI = AM.getResult<LoopAnalysis>(F);
1838 
1839   LowerMatrixIntrinsics LMT(F, TTI, AA, DT, LI, ORE);
1840   if (LMT.Visit()) {
1841     PreservedAnalyses PA;
1842     PA.preserveSet<CFGAnalyses>();
1843     return PA;
1844   }
1845   return PreservedAnalyses::all();
1846 }
1847 
1848 namespace {
1849 
1850 class LowerMatrixIntrinsicsLegacyPass : public FunctionPass {
1851 public:
1852   static char ID;
1853 
1854   LowerMatrixIntrinsicsLegacyPass() : FunctionPass(ID) {
1855     initializeLowerMatrixIntrinsicsLegacyPassPass(
1856         *PassRegistry::getPassRegistry());
1857   }
1858 
1859   bool runOnFunction(Function &F) override {
1860     auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1861     auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
1862     auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
1863     auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1864     auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
1865     LowerMatrixIntrinsics LMT(F, TTI, AA, DT, LI, ORE);
    return LMT.Visit();
1868   }
1869 
1870   void getAnalysisUsage(AnalysisUsage &AU) const override {
1871     AU.addRequired<TargetTransformInfoWrapperPass>();
1872     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
1873     AU.addRequired<AAResultsWrapperPass>();
1874     AU.addRequired<DominatorTreeWrapperPass>();
1875     AU.addPreserved<DominatorTreeWrapperPass>();
1876     AU.addRequired<LoopInfoWrapperPass>();
1877     AU.addPreserved<LoopInfoWrapperPass>();
1878   }
1879 };
1880 } // namespace
1881 
1882 static const char pass_name[] = "Lower the matrix intrinsics";
1883 char LowerMatrixIntrinsicsLegacyPass::ID = 0;
1884 INITIALIZE_PASS_BEGIN(LowerMatrixIntrinsicsLegacyPass, DEBUG_TYPE, pass_name,
1885                       false, false)
1886 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
1887 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
1888 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1889 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
1890 INITIALIZE_PASS_END(LowerMatrixIntrinsicsLegacyPass, DEBUG_TYPE, pass_name,
1891                     false, false)
1892 
1893 Pass *llvm::createLowerMatrixIntrinsicsPass() {
1894   return new LowerMatrixIntrinsicsLegacyPass();
1895 }
1896