1 //===- LowerMatrixIntrinsics.cpp -  Lower matrix intrinsics -----*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Lower matrix intrinsics to vector operations.
10 //
11 // TODO:
12 //  * Improve fusion:
13 //   * Support more cases, e.g. multiply-add, multiply-sub, operands/results
14 //     transposed.
15 //   * Improve cost-modeling, e.g. choose different number of rows/columns
16 //     columns for tiles, consider cost of copies on alias.
17 //
18 //===----------------------------------------------------------------------===//
19 
20 #include "llvm/Transforms/Scalar/LowerMatrixIntrinsics.h"
21 #include "llvm/ADT/GraphTraits.h"
22 #include "llvm/ADT/PostOrderIterator.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/Analysis/AliasAnalysis.h"
25 #include "llvm/Analysis/DomTreeUpdater.h"
26 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
27 #include "llvm/Analysis/TargetTransformInfo.h"
28 #include "llvm/Analysis/ValueTracking.h"
29 #include "llvm/Analysis/VectorUtils.h"
30 #include "llvm/IR/CFG.h"
31 #include "llvm/IR/DataLayout.h"
32 #include "llvm/IR/DebugInfoMetadata.h"
33 #include "llvm/IR/Function.h"
34 #include "llvm/IR/IRBuilder.h"
35 #include "llvm/IR/Instructions.h"
36 #include "llvm/IR/IntrinsicInst.h"
37 #include "llvm/IR/PatternMatch.h"
38 #include "llvm/InitializePasses.h"
39 #include "llvm/Pass.h"
40 #include "llvm/Support/Alignment.h"
41 #include "llvm/Support/CommandLine.h"
42 #include "llvm/Support/Debug.h"
43 #include "llvm/Transforms/Scalar.h"
44 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
45 #include "llvm/Transforms/Utils/LoopUtils.h"
46 #include "llvm/Transforms/Utils/MatrixUtils.h"
47 
48 using namespace llvm;
49 using namespace PatternMatch;
50 
51 #define DEBUG_TYPE "lower-matrix-intrinsics"
52 
53 static cl::opt<bool> EnableShapePropagation(
54     "matrix-propagate-shape", cl::init(true), cl::Hidden,
55     cl::desc("Enable/disable shape propagation from matrix intrinsics to other "
56              "instructions."));
57 
58 static cl::opt<bool>
59     FuseMatrix("fuse-matrix", cl::init(true), cl::Hidden,
60                cl::desc("Enable/disable fusing matrix instructions."));
61 // TODO: Allow and use non-square tiles.
62 static cl::opt<unsigned> TileSize(
63     "fuse-matrix-tile-size", cl::init(4), cl::Hidden,
64     cl::desc(
65         "Tile size for matrix instruction fusion using square-shaped tiles."));
66 static cl::opt<bool> TileUseLoops("fuse-matrix-use-loops", cl::init(false),
67                                   cl::Hidden,
68                                   cl::desc("Generate loop nest for tiling."));
69 static cl::opt<bool> ForceFusion(
70     "force-fuse-matrix", cl::init(false), cl::Hidden,
71     cl::desc("Force matrix instruction fusion even if not profitable."));
72 static cl::opt<bool> AllowContractEnabled(
73     "matrix-allow-contract", cl::init(false), cl::Hidden,
74     cl::desc("Allow the use of FMAs if available and profitable. This may "
75              "result in different results, due to less rounding error."));
76 
77 enum class MatrixLayoutTy { ColumnMajor, RowMajor };
78 
79 static cl::opt<MatrixLayoutTy> MatrixLayout(
80     "matrix-default-layout", cl::init(MatrixLayoutTy::ColumnMajor),
81     cl::desc("Sets the default matrix layout"),
82     cl::values(clEnumValN(MatrixLayoutTy::ColumnMajor, "column-major",
83                           "Use column-major layout"),
84                clEnumValN(MatrixLayoutTy::RowMajor, "row-major",
85                           "Use row-major layout")));
86 
87 /// Helper function to either return Scope, if it is a subprogram or the
88 /// attached subprogram for a local scope.
89 static DISubprogram *getSubprogram(DIScope *Scope) {
90   if (auto *Subprogram = dyn_cast<DISubprogram>(Scope))
91     return Subprogram;
92   return cast<DILocalScope>(Scope)->getSubprogram();
93 }
94 
95 namespace {
96 
97 // Given an element pointer \p BasePtr to the start of a (sub) matrix, compute
98 // the start address of vector \p VecIdx with type (\p EltType x \p NumElements)
99 // assuming \p Stride elements between start two consecutive vectors.
100 // \p Stride must be >= \p NumElements.
101 // For column-major matrixes, the function computes the address of a column
102 // vectors and \p NumElements must be set to the number of elements in a column
103 // (= number of rows of the matrix). For row-major matrixes, the function
104 // computes the address of a row vector and \p NumElements must be set to the
105 // number of elements in a column (= number of columns of the matrix).
106 //
107 // Consider a 4x4 matrix in column-mjaor layout like below
108 //
109 //      0       1      2      3
110 // 0   v_0_0  v_0_1  v_0_2  v_0_3
111 // 1   v_1_0  v_1_1  v_1_2  v_1_3
112 // 2   v_2_0  v_2_1  v_2_2  v_2_3
113 // 3   v_3_0  v_3_1  v_3_2  v_3_3
114 
115 // To compute the column addresses for a 2x3 sub-matrix at row 1 and column 1,
116 // we need a pointer to the first element of the submatrix as base pointer.
117 // Then we can use computeVectorAddr to compute the addresses for the columns
118 // of the sub-matrix.
119 //
120 // Column 0: computeVectorAddr(Base, 0 (column), 4 (stride), 2 (num rows), ..)
121 //           -> just returns Base
122 // Column 1: computeVectorAddr(Base, 1 (column), 4 (stride), 2 (num rows), ..)
123 //           -> returns Base + (1 * 4)
124 // Column 2: computeVectorAddr(Base, 2 (column), 4 (stride), 2 (num rows), ..)
125 //           -> returns Base + (2 * 4)
126 //
127 // The graphic below illustrates the number of elements in a column (marked
128 // with |) and the number of skipped elements (marked with }).
129 //
130 //         v_0_0  v_0_1 {v_0_2 {v_0_3
131 //                Base   Col 1  Col 2
132 //                  |     |      |
133 //         v_1_0 |v_1_1 |v_1_2 |v_1_3
134 //         v_2_0 |v_2_1 |v_2_2 |v_2_3
135 //         v_3_0 {v_3_1 {v_3_2  v_3_3
136 //
137 Value *computeVectorAddr(Value *BasePtr, Value *VecIdx, Value *Stride,
138                          unsigned NumElements, Type *EltType,
139                          IRBuilder<> &Builder) {
140 
141   assert((!isa<ConstantInt>(Stride) ||
142           cast<ConstantInt>(Stride)->getZExtValue() >= NumElements) &&
143          "Stride must be >= the number of elements in the result vector.");
144   unsigned AS = cast<PointerType>(BasePtr->getType())->getAddressSpace();
145 
146   // Compute the start of the vector with index VecIdx as VecIdx * Stride.
147   Value *VecStart = Builder.CreateMul(VecIdx, Stride, "vec.start");
148 
149   // Get pointer to the start of the selected vector. Skip GEP creation,
150   // if we select vector 0.
151   if (isa<ConstantInt>(VecStart) && cast<ConstantInt>(VecStart)->isZero())
152     VecStart = BasePtr;
153   else
154     VecStart = Builder.CreateGEP(EltType, BasePtr, VecStart, "vec.gep");
155 
156   // Cast elementwise vector start pointer to a pointer to a vector
157   // (EltType x NumElements)*.
158   auto *VecType = FixedVectorType::get(EltType, NumElements);
159   Type *VecPtrType = PointerType::get(VecType, AS);
160   return Builder.CreatePointerCast(VecStart, VecPtrType, "vec.cast");
161 }
162 
163 /// LowerMatrixIntrinsics contains the methods used to lower matrix intrinsics.
164 ///
165 /// Currently, the lowering for each matrix intrinsic is done as follows:
166 /// 1. Propagate the shape information from intrinsics to connected
167 /// instructions.
168 /// 2. Lower instructions with shape information (assuming column-major layout).
169 ///  The lowering works similarly using row-major layout.
170 ///  2.1. Get column vectors for each argument. If we already lowered the
171 ///       definition of an argument, use the produced column vectors directly.
172 ///       If not, split the operand vector containing an embedded matrix into
173 ///       a set of column vectors,
174 ///  2.2. Lower the instruction in terms of column major operations, which
175 ///       yields a set of column vectors containing result matrix. Note that we
176 ///       lower all instructions that have shape information. Besides the
177 ///       intrinsics, this includes stores for example.
178 ///  2.3. Update uses of the lowered instruction. If we have shape information
179 ///       for a user, there is nothing to do, as we will look up the result
180 ///       column matrix when lowering the user. For other uses, we embed the
181 ///       result matrix in a flat vector and update the use.
182 ///  2.4. Cache the result column matrix for the instruction we lowered
183 /// 3. After we lowered all instructions in a function, remove the now
184 ///    obsolete instructions.
185 ///
186 class LowerMatrixIntrinsics {
187   Function &Func;
188   const DataLayout &DL;
189   const TargetTransformInfo &TTI;
190   AliasAnalysis *AA;
191   DominatorTree *DT;
192   LoopInfo *LI;
193   OptimizationRemarkEmitter *ORE;
194 
195   /// Contains estimates of the number of operations (loads, stores, compute) required to lower a matrix operation.
196   struct OpInfoTy {
197     /// Number of stores emitted to generate this matrix.
198     unsigned NumStores = 0;
199     /// Number of loads emitted to generate this matrix.
200     unsigned NumLoads = 0;
201     /// Number of compute operations emitted to generate this matrix.
202     unsigned NumComputeOps = 0;
203 
204     OpInfoTy &operator+=(const OpInfoTy &RHS) {
205       NumStores += RHS.NumStores;
206       NumLoads += RHS.NumLoads;
207       NumComputeOps += RHS.NumComputeOps;
208       return *this;
209     }
210   };
211 
212   /// Wrapper class representing a matrix as a set of vectors, either in row or
213   /// column major layout. All vectors must have the same vector type.
214   class MatrixTy {
215     SmallVector<Value *, 16> Vectors;
216 
217     OpInfoTy OpInfo;
218 
219     bool IsColumnMajor = true;
220 
221   public:
222     MatrixTy()
223         : Vectors(),
224           IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {}
225     MatrixTy(ArrayRef<Value *> Vectors)
226         : Vectors(Vectors.begin(), Vectors.end()),
227           IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {}
228     MatrixTy(unsigned NumRows, unsigned NumColumns, Type *EltTy)
229         : IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {
230 
231       unsigned D = isColumnMajor() ? NumColumns : NumRows;
232       for (unsigned J = 0; J < D; ++J)
233         addVector(UndefValue::get(FixedVectorType::get(
234             EltTy, isColumnMajor() ? NumRows : NumColumns)));
235     }
236 
237     Value *getVector(unsigned i) const { return Vectors[i]; }
238     Value *getColumn(unsigned i) const {
239       assert(isColumnMajor() && "only supported for column-major matrixes");
240       return Vectors[i];
241     }
242     Value *getRow(unsigned i) const {
243       assert(!isColumnMajor() && "only supported for row-major matrixes");
244       return Vectors[i];
245     }
246 
247     void setVector(unsigned i, Value *V) { Vectors[i] = V; }
248 
249     Type *getElementType() const { return getVectorTy()->getElementType(); }
250 
251     unsigned getNumVectors() const {
252       if (isColumnMajor())
253         return getNumColumns();
254       return getNumRows();
255     }
256 
257     unsigned getNumColumns() const {
258       if (isColumnMajor())
259         return Vectors.size();
260       else {
261         assert(Vectors.size() > 0 && "Cannot call getNumRows without columns");
262         return cast<FixedVectorType>(Vectors[0]->getType())->getNumElements();
263       }
264     }
265     unsigned getNumRows() const {
266       if (isColumnMajor()) {
267         assert(Vectors.size() > 0 && "Cannot call getNumRows without columns");
268         return cast<FixedVectorType>(Vectors[0]->getType())->getNumElements();
269       } else
270         return Vectors.size();
271     }
272 
273     void addVector(Value *V) { Vectors.push_back(V); }
274     VectorType *getColumnTy() {
275       assert(isColumnMajor() && "only supported for column-major matrixes");
276       return getVectorTy();
277     }
278 
279     VectorType *getVectorTy() const {
280       return cast<VectorType>(Vectors[0]->getType());
281     }
282 
283     iterator_range<SmallVector<Value *, 8>::iterator> columns() {
284       assert(isColumnMajor() &&
285              "columns() only supported for column-major matrixes");
286       return make_range(Vectors.begin(), Vectors.end());
287     }
288 
289     iterator_range<SmallVector<Value *, 8>::iterator> vectors() {
290       return make_range(Vectors.begin(), Vectors.end());
291     }
292 
293     /// Embed the vectors of the matrix into a flat vector by concatenating
294     /// them.
295     Value *embedInVector(IRBuilder<> &Builder) const {
296       return Vectors.size() == 1 ? Vectors[0]
297                                  : concatenateVectors(Builder, Vectors);
298     }
299 
300     MatrixTy &addNumLoads(unsigned N) {
301       OpInfo.NumLoads += N;
302       return *this;
303     }
304 
305     void setNumLoads(unsigned N) { OpInfo.NumLoads = N; }
306 
307     MatrixTy &addNumStores(unsigned N) {
308       OpInfo.NumStores += N;
309       return *this;
310     }
311 
312     MatrixTy &addNumComputeOps(unsigned N) {
313       OpInfo.NumComputeOps += N;
314       return *this;
315     }
316 
317     unsigned getNumStores() const { return OpInfo.NumStores; }
318     unsigned getNumLoads() const { return OpInfo.NumLoads; }
319     unsigned getNumComputeOps() const { return OpInfo.NumComputeOps; }
320 
321     const OpInfoTy &getOpInfo() const { return OpInfo; }
322 
323     bool isColumnMajor() const { return IsColumnMajor; }
324 
325     unsigned getStride() const {
326       if (isColumnMajor())
327         return getNumRows();
328       return getNumColumns();
329     }
330 
331     /// Extract a vector of \p NumElts starting at index (\p I, \p J). If the
332     /// matrix is column-major, the result vector is extracted from a column
333     /// vector, otherwise from a row vector.
334     Value *extractVector(unsigned I, unsigned J, unsigned NumElts,
335                          IRBuilder<> &Builder) const {
336       Value *Vec = isColumnMajor() ? getColumn(J) : getRow(I);
337       return Builder.CreateShuffleVector(
338           Vec, createSequentialMask(isColumnMajor() ? I : J, NumElts, 0),
339           "block");
340     }
341   };
342 
343   struct ShapeInfo {
344     unsigned NumRows;
345     unsigned NumColumns;
346 
347     bool IsColumnMajor;
348 
349     ShapeInfo(unsigned NumRows = 0, unsigned NumColumns = 0)
350         : NumRows(NumRows), NumColumns(NumColumns),
351           IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {}
352 
353     ShapeInfo(Value *NumRows, Value *NumColumns)
354         : ShapeInfo(cast<ConstantInt>(NumRows)->getZExtValue(),
355                     cast<ConstantInt>(NumColumns)->getZExtValue()) {}
356 
357     bool operator==(const ShapeInfo &other) {
358       return NumRows == other.NumRows && NumColumns == other.NumColumns;
359     }
360     bool operator!=(const ShapeInfo &other) { return !(*this == other); }
361 
362     /// Returns true if shape-information is defined, meaning both dimensions
363     /// are != 0.
364     operator bool() const {
365       assert(NumRows == 0 || NumColumns != 0);
366       return NumRows != 0;
367     }
368 
369     unsigned getStride() const {
370       if (IsColumnMajor)
371         return NumRows;
372       return NumColumns;
373     }
374 
375     unsigned getNumVectors() const {
376       if (IsColumnMajor)
377         return NumColumns;
378       return NumRows;
379     }
380   };
381 
382   /// Maps instructions to their shape information. The shape information
383   /// describes the shape to be used while lowering. This matches the shape of
384   /// the result value of the instruction, with the only exceptions being store
385   /// instructions and the matrix_column_major_store intrinsics. For those, the
386   /// shape information indicates that those instructions should be lowered
387   /// using shape information as well.
388   DenseMap<Value *, ShapeInfo> ShapeMap;
389 
390   /// List of instructions to remove. While lowering, we are not replacing all
391   /// users of a lowered instruction, if shape information is available and
392   /// those need to be removed after we finished lowering.
393   SmallVector<Instruction *, 16> ToRemove;
394 
395   /// Map from instructions to their produced column matrix.
396   MapVector<Value *, MatrixTy> Inst2ColumnMatrix;
397 
398 public:
399   LowerMatrixIntrinsics(Function &F, TargetTransformInfo &TTI,
400                         AliasAnalysis *AA, DominatorTree *DT, LoopInfo *LI,
401                         OptimizationRemarkEmitter *ORE)
402       : Func(F), DL(F.getParent()->getDataLayout()), TTI(TTI), AA(AA), DT(DT),
403         LI(LI), ORE(ORE) {}
404 
405   unsigned getNumOps(Type *VT) {
406     assert(isa<VectorType>(VT) && "Expected vector type");
407     return getNumOps(VT->getScalarType(),
408                      cast<FixedVectorType>(VT)->getNumElements());
409   }
410 
411   //
412   /// Return the estimated number of vector ops required for an operation on
413   /// \p VT * N.
414   unsigned getNumOps(Type *ST, unsigned N) {
415     return std::ceil((ST->getPrimitiveSizeInBits() * N).getFixedSize() /
416                      double(TTI.getRegisterBitWidth(true)));
417   }
418 
419   /// Return the set of vectors that a matrix value is lowered to.
420   ///
421   /// If we lowered \p MatrixVal, just return the cache result matrix. Otherwise
422   /// split the flat vector \p MatrixVal containing a matrix with shape \p SI
423   /// into vectors.
424   MatrixTy getMatrix(Value *MatrixVal, const ShapeInfo &SI,
425                      IRBuilder<> &Builder) {
426     VectorType *VType = dyn_cast<VectorType>(MatrixVal->getType());
427     assert(VType && "MatrixVal must be a vector type");
428     assert(cast<FixedVectorType>(VType)->getNumElements() ==
429                SI.NumRows * SI.NumColumns &&
430            "The vector size must match the number of matrix elements");
431 
432     // Check if we lowered MatrixVal using shape information. In that case,
433     // return the existing matrix, if it matches the requested shape
434     // information. If there is a mis-match, embed the result in a flat
435     // vector and split it later.
436     auto Found = Inst2ColumnMatrix.find(MatrixVal);
437     if (Found != Inst2ColumnMatrix.end()) {
438       MatrixTy &M = Found->second;
439       // Return the found matrix, if its shape matches the requested shape
440       // information
441       if (SI.NumRows == M.getNumRows() && SI.NumColumns == M.getNumColumns())
442         return M;
443 
444       MatrixVal = M.embedInVector(Builder);
445     }
446 
447     // Otherwise split MatrixVal.
448     SmallVector<Value *, 16> SplitVecs;
449     for (unsigned MaskStart = 0;
450          MaskStart < cast<FixedVectorType>(VType)->getNumElements();
451          MaskStart += SI.getStride()) {
452       Value *V = Builder.CreateShuffleVector(
453           MatrixVal, createSequentialMask(MaskStart, SI.getStride(), 0),
454           "split");
455       SplitVecs.push_back(V);
456     }
457 
458     return {SplitVecs};
459   }
460 
461   /// If \p V already has a known shape return false.  Otherwise set the shape
462   /// for instructions that support it.
463   bool setShapeInfo(Value *V, ShapeInfo Shape) {
464     assert(Shape && "Shape not set");
465     if (isa<UndefValue>(V) || !supportsShapeInfo(V))
466       return false;
467 
468     auto SIter = ShapeMap.find(V);
469     if (SIter != ShapeMap.end()) {
470       LLVM_DEBUG(dbgs() << "  not overriding existing shape: "
471                         << SIter->second.NumRows << " "
472                         << SIter->second.NumColumns << " for " << *V << "\n");
473       return false;
474     }
475 
476     ShapeMap.insert({V, Shape});
477     LLVM_DEBUG(dbgs() << "  " << Shape.NumRows << " x " << Shape.NumColumns
478                       << " for " << *V << "\n");
479     return true;
480   }
481 
482   bool isUniformShape(Value *V) {
483     Instruction *I = dyn_cast<Instruction>(V);
484     if (!I)
485       return true;
486 
487     switch (I->getOpcode()) {
488     case Instruction::FAdd:
489     case Instruction::FSub:
490     case Instruction::FMul: // Scalar multiply.
491     case Instruction::Add:
492     case Instruction::Mul:
493     case Instruction::Sub:
494       return true;
495     default:
496       return false;
497     }
498   }
499 
500   /// Returns true if shape information can be used for \p V. The supported
501   /// instructions must match the instructions that can be lowered by this pass.
502   bool supportsShapeInfo(Value *V) {
503     Instruction *Inst = dyn_cast<Instruction>(V);
504     if (!Inst)
505       return false;
506 
507     IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
508     if (II)
509       switch (II->getIntrinsicID()) {
510       case Intrinsic::matrix_multiply:
511       case Intrinsic::matrix_transpose:
512       case Intrinsic::matrix_column_major_load:
513       case Intrinsic::matrix_column_major_store:
514         return true;
515       default:
516         return false;
517       }
518     return isUniformShape(V) || isa<StoreInst>(V) || isa<LoadInst>(V);
519   }
520 
521   /// Propagate the shape information of instructions to their users.
522   /// The work list contains instructions for which we can compute the shape,
523   /// either based on the information provided by matrix intrinsics or known
524   /// shapes of operands.
525   SmallVector<Instruction *, 32>
526   propagateShapeForward(SmallVectorImpl<Instruction *> &WorkList) {
527     SmallVector<Instruction *, 32> NewWorkList;
528     // Pop an element for which we guaranteed to have at least one of the
529     // operand shapes.  Add the shape for this and then add users to the work
530     // list.
531     LLVM_DEBUG(dbgs() << "Forward-propagate shapes:\n");
532     while (!WorkList.empty()) {
533       Instruction *Inst = WorkList.back();
534       WorkList.pop_back();
535 
536       // New entry, set the value and insert operands
537       bool Propagate = false;
538 
539       Value *MatrixA;
540       Value *MatrixB;
541       Value *M;
542       Value *N;
543       Value *K;
544       if (match(Inst, m_Intrinsic<Intrinsic::matrix_multiply>(
545                           m_Value(MatrixA), m_Value(MatrixB), m_Value(M),
546                           m_Value(N), m_Value(K)))) {
547         Propagate = setShapeInfo(Inst, {M, K});
548       } else if (match(Inst, m_Intrinsic<Intrinsic::matrix_transpose>(
549                                  m_Value(MatrixA), m_Value(M), m_Value(N)))) {
550         // Flip dimensions.
551         Propagate = setShapeInfo(Inst, {N, M});
552       } else if (match(Inst, m_Intrinsic<Intrinsic::matrix_column_major_store>(
553                                  m_Value(MatrixA), m_Value(), m_Value(),
554                                  m_Value(), m_Value(M), m_Value(N)))) {
555         Propagate = setShapeInfo(Inst, {N, M});
556       } else if (match(Inst, m_Intrinsic<Intrinsic::matrix_column_major_load>(
557                                  m_Value(), m_Value(), m_Value(), m_Value(M),
558                                  m_Value(N)))) {
559         Propagate = setShapeInfo(Inst, {M, N});
560       } else if (match(Inst, m_Store(m_Value(MatrixA), m_Value()))) {
561         auto OpShape = ShapeMap.find(MatrixA);
562         if (OpShape != ShapeMap.end())
563           setShapeInfo(Inst, OpShape->second);
564         continue;
565       } else if (isUniformShape(Inst)) {
566         // Find the first operand that has a known shape and use that.
567         for (auto &Op : Inst->operands()) {
568           auto OpShape = ShapeMap.find(Op.get());
569           if (OpShape != ShapeMap.end()) {
570             Propagate |= setShapeInfo(Inst, OpShape->second);
571             break;
572           }
573         }
574       }
575 
576       if (Propagate) {
577         NewWorkList.push_back(Inst);
578         for (auto *User : Inst->users())
579           if (ShapeMap.count(User) == 0)
580             WorkList.push_back(cast<Instruction>(User));
581       }
582     }
583 
584     return NewWorkList;
585   }
586 
587   /// Propagate the shape to operands of instructions with shape information.
588   /// \p Worklist contains the instruction for which we already know the shape.
589   SmallVector<Instruction *, 32>
590   propagateShapeBackward(SmallVectorImpl<Instruction *> &WorkList) {
591     SmallVector<Instruction *, 32> NewWorkList;
592 
593     auto pushInstruction = [](Value *V,
594                               SmallVectorImpl<Instruction *> &WorkList) {
595       Instruction *I = dyn_cast<Instruction>(V);
596       if (I)
597         WorkList.push_back(I);
598     };
599     // Pop an element with known shape.  Traverse the operands, if their shape
600     // derives from the result shape and is unknown, add it and add them to the
601     // worklist.
602     LLVM_DEBUG(dbgs() << "Backward-propagate shapes:\n");
603     while (!WorkList.empty()) {
604       Value *V = WorkList.back();
605       WorkList.pop_back();
606 
607       size_t BeforeProcessingV = WorkList.size();
608       if (!isa<Instruction>(V))
609         continue;
610 
611       Value *MatrixA;
612       Value *MatrixB;
613       Value *M;
614       Value *N;
615       Value *K;
616       if (match(V, m_Intrinsic<Intrinsic::matrix_multiply>(
617                        m_Value(MatrixA), m_Value(MatrixB), m_Value(M),
618                        m_Value(N), m_Value(K)))) {
619         if (setShapeInfo(MatrixA, {M, N}))
620           pushInstruction(MatrixA, WorkList);
621 
622         if (setShapeInfo(MatrixB, {N, K}))
623           pushInstruction(MatrixB, WorkList);
624 
625       } else if (match(V, m_Intrinsic<Intrinsic::matrix_transpose>(
626                               m_Value(MatrixA), m_Value(M), m_Value(N)))) {
627         // Flip dimensions.
628         if (setShapeInfo(MatrixA, {M, N}))
629           pushInstruction(MatrixA, WorkList);
630       } else if (match(V, m_Intrinsic<Intrinsic::matrix_column_major_store>(
631                               m_Value(MatrixA), m_Value(), m_Value(), m_Value(),
632                               m_Value(M), m_Value(N)))) {
633         if (setShapeInfo(MatrixA, {M, N})) {
634           pushInstruction(MatrixA, WorkList);
635         }
636       } else if (isa<LoadInst>(V) ||
637                  match(V, m_Intrinsic<Intrinsic::matrix_column_major_load>())) {
638         // Nothing to do, no matrix input.
639       } else if (isa<StoreInst>(V)) {
640         // Nothing to do.  We forward-propagated to this so we would just
641         // backward propagate to an instruction with an already known shape.
642       } else if (isUniformShape(V)) {
643         // Propagate to all operands.
644         ShapeInfo Shape = ShapeMap[V];
645         for (Use &U : cast<Instruction>(V)->operands()) {
646           if (setShapeInfo(U.get(), Shape))
647             pushInstruction(U.get(), WorkList);
648         }
649       }
650       // After we discovered new shape info for new instructions in the
651       // worklist, we use their users as seeds for the next round of forward
652       // propagation.
653       for (size_t I = BeforeProcessingV; I != WorkList.size(); I++)
654         for (User *U : WorkList[I]->users())
655           if (isa<Instruction>(U) && V != U)
656             NewWorkList.push_back(cast<Instruction>(U));
657     }
658     return NewWorkList;
659   }
660 
661   bool Visit() {
662     if (EnableShapePropagation) {
663       SmallVector<Instruction *, 32> WorkList;
664 
665       // Initially only the shape of matrix intrinsics is known.
666       // Initialize the work list with ops carrying shape information.
667       for (BasicBlock &BB : Func)
668         for (Instruction &Inst : BB) {
669           IntrinsicInst *II = dyn_cast<IntrinsicInst>(&Inst);
670           if (!II)
671             continue;
672 
673           switch (II->getIntrinsicID()) {
674           case Intrinsic::matrix_multiply:
675           case Intrinsic::matrix_transpose:
676           case Intrinsic::matrix_column_major_load:
677           case Intrinsic::matrix_column_major_store:
678             WorkList.push_back(&Inst);
679             break;
680           default:
681             break;
682           }
683         }
684       // Propagate shapes until nothing changes any longer.
685       while (!WorkList.empty()) {
686         WorkList = propagateShapeForward(WorkList);
687         WorkList = propagateShapeBackward(WorkList);
688       }
689     }
690 
691     bool Changed = false;
692     SmallVector<CallInst *, 16> MaybeFusableInsts;
693     SmallVector<Instruction *, 16> MatrixInsts;
694 
695     // First, collect all instructions with shape information and candidates for
696     // fusion (currently only matrix multiplies).
697     ReversePostOrderTraversal<Function *> RPOT(&Func);
698     for (auto *BB : RPOT)
699       for (Instruction &I : *BB) {
700         if (ShapeMap.find(&I) == ShapeMap.end())
701           continue;
702         if (match(&I, m_Intrinsic<Intrinsic::matrix_multiply>()))
703           MaybeFusableInsts.push_back(cast<CallInst>(&I));
704         MatrixInsts.push_back(&I);
705       }
706 
707     // Second, try to fuse candidates.
708     SmallPtrSet<Instruction *, 16> FusedInsts;
709     for (CallInst *CI : MaybeFusableInsts)
710       LowerMatrixMultiplyFused(CI, FusedInsts);
711     Changed = !FusedInsts.empty();
712 
713     // Third, lower remaining instructions with shape information.
714     for (Instruction *Inst : MatrixInsts) {
715       if (FusedInsts.count(Inst))
716         continue;
717 
718       IRBuilder<> Builder(Inst);
719 
720       if (CallInst *CInst = dyn_cast<CallInst>(Inst))
721         Changed |= VisitCallInst(CInst);
722 
723       Value *Op1;
724       Value *Op2;
725       if (auto *BinOp = dyn_cast<BinaryOperator>(Inst))
726         Changed |= VisitBinaryOperator(BinOp);
727       if (match(Inst, m_Load(m_Value(Op1))))
728         Changed |= VisitLoad(cast<LoadInst>(Inst), Op1, Builder);
729       else if (match(Inst, m_Store(m_Value(Op1), m_Value(Op2))))
730         Changed |= VisitStore(cast<StoreInst>(Inst), Op1, Op2, Builder);
731     }
732 
733     if (ORE) {
734       RemarkGenerator RemarkGen(Inst2ColumnMatrix, *ORE, Func);
735       RemarkGen.emitRemarks();
736     }
737 
738     for (Instruction *Inst : reverse(ToRemove))
739       Inst->eraseFromParent();
740 
741     return Changed;
742   }
743 
744   /// Turns \p BasePtr into an elementwise pointer to \p EltType.
745   Value *createElementPtr(Value *BasePtr, Type *EltType, IRBuilder<> &Builder) {
746     unsigned AS = cast<PointerType>(BasePtr->getType())->getAddressSpace();
747     Type *EltPtrType = PointerType::get(EltType, AS);
748     return Builder.CreatePointerCast(BasePtr, EltPtrType);
749   }
750 
751   /// Replace intrinsic calls
752   bool VisitCallInst(CallInst *Inst) {
753     if (!Inst->getCalledFunction() || !Inst->getCalledFunction()->isIntrinsic())
754       return false;
755 
756     switch (Inst->getCalledFunction()->getIntrinsicID()) {
757     case Intrinsic::matrix_multiply:
758       LowerMultiply(Inst);
759       break;
760     case Intrinsic::matrix_transpose:
761       LowerTranspose(Inst);
762       break;
763     case Intrinsic::matrix_column_major_load:
764       LowerColumnMajorLoad(Inst);
765       break;
766     case Intrinsic::matrix_column_major_store:
767       LowerColumnMajorStore(Inst);
768       break;
769     default:
770       return false;
771     }
772     return true;
773   }
774 
775   /// Compute the alignment for a column/row \p Idx with \p Stride between them.
776   /// The address at \p Idx == 0 has alignment \p A. If \p Stride is a
777   /// ConstantInt, reduce the initial alignment based on the byte offset. For
778   /// non-ConstantInt strides, return the common alignment of the initial
779   /// alignment and the element size in bytes.
780   Align getAlignForIndex(unsigned Idx, Value *Stride, Type *ElementTy,
781                          MaybeAlign A) const {
782     Align InitialAlign = DL.getValueOrABITypeAlignment(A, ElementTy);
783     if (Idx == 0)
784       return InitialAlign;
785 
786     TypeSize ElementSizeInBits = DL.getTypeSizeInBits(ElementTy);
787     if (auto *ConstStride = dyn_cast<ConstantInt>(Stride)) {
788       uint64_t StrideInBytes =
789           ConstStride->getZExtValue() * ElementSizeInBits / 8;
790       return commonAlignment(InitialAlign, Idx * StrideInBytes);
791     }
792     return commonAlignment(InitialAlign, ElementSizeInBits / 8);
793   }
794 
795   /// Load a matrix with \p Shape starting at \p Ptr and using \p Stride between
796   /// vectors.
797   MatrixTy loadMatrix(Type *Ty, Value *Ptr, MaybeAlign MAlign, Value *Stride,
798                       bool IsVolatile, ShapeInfo Shape, IRBuilder<> &Builder) {
799     auto VType = cast<VectorType>(Ty);
800     Value *EltPtr = createElementPtr(Ptr, VType->getElementType(), Builder);
801     MatrixTy Result;
802     for (unsigned I = 0, E = Shape.getNumVectors(); I < E; ++I) {
803       Value *GEP = computeVectorAddr(EltPtr, Builder.getInt64(I), Stride,
804                                      Shape.getStride(), VType->getElementType(),
805                                      Builder);
806       Value *Vector = Builder.CreateAlignedLoad(
807           GEP, getAlignForIndex(I, Stride, VType->getElementType(), MAlign),
808           IsVolatile, "col.load");
809 
810       Result.addVector(Vector);
811     }
812     return Result.addNumLoads(getNumOps(Result.getVectorTy()) *
813                               Result.getNumVectors());
814   }
815 
816   /// Loads a sub-matrix with shape \p ResultShape from a \p R x \p C matrix,
817   /// starting at \p MatrixPtr[I][J].
818   MatrixTy loadMatrix(Value *MatrixPtr, MaybeAlign Align, bool IsVolatile,
819                       ShapeInfo MatrixShape, Value *I, Value *J,
820                       ShapeInfo ResultShape, Type *EltTy,
821                       IRBuilder<> &Builder) {
822 
823     Value *Offset = Builder.CreateAdd(
824         Builder.CreateMul(J, Builder.getInt64(MatrixShape.getStride())), I);
825 
826     unsigned AS = cast<PointerType>(MatrixPtr->getType())->getAddressSpace();
827     Value *EltPtr =
828         Builder.CreatePointerCast(MatrixPtr, PointerType::get(EltTy, AS));
829     Value *TileStart = Builder.CreateGEP(EltTy, EltPtr, Offset);
830     auto *TileTy = FixedVectorType::get(EltTy, ResultShape.NumRows *
831                                                    ResultShape.NumColumns);
832     Type *TilePtrTy = PointerType::get(TileTy, AS);
833     Value *TilePtr =
834         Builder.CreatePointerCast(TileStart, TilePtrTy, "col.cast");
835 
836     return loadMatrix(TileTy, TilePtr, Align,
837                       Builder.getInt64(MatrixShape.getStride()), IsVolatile,
838                       ResultShape, Builder);
839   }
840 
841   /// Lower a load instruction with shape information.
842   void LowerLoad(Instruction *Inst, Value *Ptr, MaybeAlign Align, Value *Stride,
843                  bool IsVolatile, ShapeInfo Shape) {
844     IRBuilder<> Builder(Inst);
845     finalizeLowering(Inst,
846                      loadMatrix(Inst->getType(), Ptr, Align, Stride, IsVolatile,
847                                 Shape, Builder),
848                      Builder);
849   }
850 
851   /// Lowers llvm.matrix.column.major.load.
852   ///
853   /// The intrinsic loads a matrix from memory using a stride between columns.
854   void LowerColumnMajorLoad(CallInst *Inst) {
855     assert(MatrixLayout == MatrixLayoutTy::ColumnMajor &&
856            "Intrinsic only supports column-major layout!");
857     Value *Ptr = Inst->getArgOperand(0);
858     Value *Stride = Inst->getArgOperand(1);
859     LowerLoad(Inst, Ptr, Inst->getParamAlign(0), Stride,
860               cast<ConstantInt>(Inst->getArgOperand(2))->isOne(),
861               {Inst->getArgOperand(3), Inst->getArgOperand(4)});
862   }
863 
864   /// Stores a sub-matrix \p StoreVal into the \p R x \p C matrix starting at \p
865   /// MatrixPtr[I][J].
866   void storeMatrix(const MatrixTy &StoreVal, Value *MatrixPtr,
867                    MaybeAlign MAlign, bool IsVolatile, ShapeInfo MatrixShape,
868                    Value *I, Value *J, Type *EltTy, IRBuilder<> &Builder) {
869     Value *Offset = Builder.CreateAdd(
870         Builder.CreateMul(J, Builder.getInt64(MatrixShape.getStride())), I);
871 
872     unsigned AS = cast<PointerType>(MatrixPtr->getType())->getAddressSpace();
873     Value *EltPtr =
874         Builder.CreatePointerCast(MatrixPtr, PointerType::get(EltTy, AS));
875     Value *TileStart = Builder.CreateGEP(EltTy, EltPtr, Offset);
876     auto *TileTy = FixedVectorType::get(EltTy, StoreVal.getNumRows() *
877                                                    StoreVal.getNumColumns());
878     Type *TilePtrTy = PointerType::get(TileTy, AS);
879     Value *TilePtr =
880         Builder.CreatePointerCast(TileStart, TilePtrTy, "col.cast");
881 
882     storeMatrix(TileTy, StoreVal, TilePtr, MAlign,
883                 Builder.getInt64(MatrixShape.getStride()), IsVolatile, Builder);
884   }
885 
886   /// Store matrix \p StoreVal starting at \p Ptr and using \p Stride between
887   /// vectors.
888   MatrixTy storeMatrix(Type *Ty, MatrixTy StoreVal, Value *Ptr,
889                        MaybeAlign MAlign, Value *Stride, bool IsVolatile,
890                        IRBuilder<> &Builder) {
891     auto VType = cast<VectorType>(Ty);
892     Value *EltPtr = createElementPtr(Ptr, VType->getElementType(), Builder);
893     for (auto Vec : enumerate(StoreVal.vectors())) {
894       Value *GEP = computeVectorAddr(EltPtr, Builder.getInt64(Vec.index()),
895                                      Stride, StoreVal.getStride(),
896                                      VType->getElementType(), Builder);
897       Builder.CreateAlignedStore(Vec.value(), GEP,
898                                  getAlignForIndex(Vec.index(), Stride,
899                                                   VType->getElementType(),
900                                                   MAlign),
901                                  IsVolatile);
902     }
903     return MatrixTy().addNumStores(getNumOps(StoreVal.getVectorTy()) *
904                                    StoreVal.getNumVectors());
905   }
906 
907   /// Lower a store instruction with shape information.
908   void LowerStore(Instruction *Inst, Value *Matrix, Value *Ptr, MaybeAlign A,
909                   Value *Stride, bool IsVolatile, ShapeInfo Shape) {
910     IRBuilder<> Builder(Inst);
911     auto StoreVal = getMatrix(Matrix, Shape, Builder);
912     finalizeLowering(Inst,
913                      storeMatrix(Matrix->getType(), StoreVal, Ptr, A, Stride,
914                                  IsVolatile, Builder),
915                      Builder);
916   }
917 
918   /// Lowers llvm.matrix.column.major.store.
919   ///
920   /// The intrinsic store a matrix back memory using a stride between columns.
921   void LowerColumnMajorStore(CallInst *Inst) {
922     assert(MatrixLayout == MatrixLayoutTy::ColumnMajor &&
923            "Intrinsic only supports column-major layout!");
924     Value *Matrix = Inst->getArgOperand(0);
925     Value *Ptr = Inst->getArgOperand(1);
926     Value *Stride = Inst->getArgOperand(2);
927     LowerStore(Inst, Matrix, Ptr, Inst->getParamAlign(1), Stride,
928                cast<ConstantInt>(Inst->getArgOperand(3))->isOne(),
929                {Inst->getArgOperand(4), Inst->getArgOperand(5)});
930   }
931 
932   // Set elements I..I+NumElts-1 to Block
933   Value *insertVector(Value *Col, unsigned I, Value *Block,
934                       IRBuilder<> &Builder) {
935 
936     // First, bring Block to the same size as Col
937     unsigned BlockNumElts =
938         cast<FixedVectorType>(Block->getType())->getNumElements();
939     unsigned NumElts = cast<FixedVectorType>(Col->getType())->getNumElements();
940     assert(NumElts >= BlockNumElts && "Too few elements for current block");
941 
942     Block = Builder.CreateShuffleVector(
943         Block, createSequentialMask(0, BlockNumElts, NumElts - BlockNumElts));
944 
945     // If Col is 7 long and I is 2 and BlockNumElts is 2 the mask is: 0, 1, 7,
946     // 8, 4, 5, 6
947     SmallVector<int, 16> Mask;
948     unsigned i;
949     for (i = 0; i < I; i++)
950       Mask.push_back(i);
951 
952     unsigned VecNumElts =
953         cast<FixedVectorType>(Col->getType())->getNumElements();
954     for (; i < I + BlockNumElts; i++)
955       Mask.push_back(i - I + VecNumElts);
956 
957     for (; i < VecNumElts; i++)
958       Mask.push_back(i);
959 
960     return Builder.CreateShuffleVector(Col, Block, Mask);
961   }
962 
963   Value *createMulAdd(Value *Sum, Value *A, Value *B, bool UseFPOp,
964                       IRBuilder<> &Builder, bool AllowContraction,
965                       unsigned &NumComputeOps) {
966     NumComputeOps += getNumOps(A->getType());
967     if (!Sum)
968       return UseFPOp ? Builder.CreateFMul(A, B) : Builder.CreateMul(A, B);
969 
970     if (UseFPOp) {
971       if (AllowContraction) {
972         // Use fmuladd for floating point operations and let the backend decide
973         // if that's profitable.
974         Function *FMulAdd = Intrinsic::getDeclaration(
975             Func.getParent(), Intrinsic::fmuladd, A->getType());
976         return Builder.CreateCall(FMulAdd, {A, B, Sum});
977       }
978       NumComputeOps += getNumOps(A->getType());
979       Value *Mul = Builder.CreateFMul(A, B);
980       return Builder.CreateFAdd(Sum, Mul);
981     }
982 
983     NumComputeOps += getNumOps(A->getType());
984     Value *Mul = Builder.CreateMul(A, B);
985     return Builder.CreateAdd(Sum, Mul);
986   }
987 
988   /// Cache \p Matrix as result of \p Inst and update the uses of \p Inst. For
989   /// users with shape information, there's nothing to do: the will use the
990   /// cached value when they are lowered. For other users, \p Matrix is
991   /// flattened and the uses are updated to use it. Also marks \p Inst for
992   /// deletion.
993   void finalizeLowering(Instruction *Inst, MatrixTy Matrix,
994                         IRBuilder<> &Builder) {
995     Inst2ColumnMatrix.insert(std::make_pair(Inst, Matrix));
996 
997     ToRemove.push_back(Inst);
998     Value *Flattened = nullptr;
999     for (auto I = Inst->use_begin(), E = Inst->use_end(); I != E;) {
1000       Use &U = *I++;
1001       if (ShapeMap.find(U.getUser()) == ShapeMap.end()) {
1002         if (!Flattened)
1003           Flattened = Matrix.embedInVector(Builder);
1004         U.set(Flattened);
1005       }
1006     }
1007   }
1008 
1009   /// Compute \p Result += \p A * \p B for input matrices with left-associating
1010   /// addition.
1011   void emitMatrixMultiply(MatrixTy &Result, const MatrixTy &A,
1012                           const MatrixTy &B, bool AllowContraction,
1013                           IRBuilder<> &Builder, bool isTiled) {
1014     const unsigned VF = std::max<unsigned>(
1015         TTI.getRegisterBitWidth(true) /
1016             Result.getElementType()->getPrimitiveSizeInBits().getFixedSize(),
1017         1U);
1018     unsigned R = Result.getNumRows();
1019     unsigned C = Result.getNumColumns();
1020     unsigned M = A.getNumColumns();
1021 
1022     bool IsFP = Result.getElementType()->isFloatingPointTy();
1023     assert(A.isColumnMajor() == B.isColumnMajor() &&
1024            Result.isColumnMajor() == A.isColumnMajor() &&
1025            "operands must agree on matrix layout");
1026     unsigned NumComputeOps = 0;
1027     if (A.isColumnMajor()) {
1028       // Multiply columns from the first operand with scalars from the second
1029       // operand. Then move along the K axes and accumulate the columns.  With
1030       // this the adds can be vectorized without reassociation.
1031       for (unsigned J = 0; J < C; ++J) {
1032         unsigned BlockSize = VF;
1033         // If Result is zero, we don't need to accumulate in the K==0 iteration.
1034         bool isSumZero = isa<ConstantAggregateZero>(Result.getColumn(J));
1035 
1036         for (unsigned I = 0; I < R; I += BlockSize) {
1037           // Gradually lower the vectorization factor to cover the remainder.
1038           while (I + BlockSize > R)
1039             BlockSize /= 2;
1040 
1041           Value *Sum = isTiled ? Result.extractVector(I, J, BlockSize, Builder)
1042                                : nullptr;
1043           for (unsigned K = 0; K < M; ++K) {
1044             Value *L = A.extractVector(I, K, BlockSize, Builder);
1045             Value *RH = Builder.CreateExtractElement(B.getColumn(J), K);
1046             Value *Splat = Builder.CreateVectorSplat(BlockSize, RH, "splat");
1047             Sum = createMulAdd(isSumZero && K == 0 ? nullptr : Sum, L, Splat,
1048                                Result.getElementType()->isFloatingPointTy(),
1049                                Builder, AllowContraction, NumComputeOps);
1050           }
1051           Result.setVector(J,
1052                            insertVector(Result.getVector(J), I, Sum, Builder));
1053         }
1054       }
1055     } else {
1056       // Multiply rows from the second operand with scalars from the first
1057       // operand. Then move along the K axes and accumulate the rows.  With this
1058       // the adds can be vectorized without reassociation.
1059       for (unsigned I = 0; I < R; ++I) {
1060         unsigned BlockSize = VF;
1061         bool isSumZero = isa<ConstantAggregateZero>(Result.getRow(I));
1062         for (unsigned J = 0; J < C; J += BlockSize) {
1063           // Gradually lower the vectorization factor to cover the remainder.
1064           while (J + BlockSize > C)
1065             BlockSize /= 2;
1066 
1067           Value *Sum = nullptr;
1068           for (unsigned K = 0; K < M; ++K) {
1069             Value *R = B.extractVector(K, J, BlockSize, Builder);
1070             Value *LH = Builder.CreateExtractElement(A.getVector(I), K);
1071             Value *Splat = Builder.CreateVectorSplat(BlockSize, LH, "splat");
1072             Sum = createMulAdd(isSumZero && K == 0 ? nullptr : Sum, Splat, R,
1073                                IsFP, Builder, AllowContraction, NumComputeOps);
1074           }
1075           Result.setVector(I,
1076                            insertVector(Result.getVector(I), J, Sum, Builder));
1077         }
1078       }
1079     }
1080     Result.addNumComputeOps(NumComputeOps);
1081   }
1082 
1083   /// Ensure that the memory in \p Load does not alias \p Store by potentially
1084   /// copying it to a new location.  This new or otherwise the original location
1085   /// is returned.
1086   Value *getNonAliasingPointer(LoadInst *Load, StoreInst *Store,
1087                                CallInst *MatMul) {
1088     MemoryLocation StoreLoc = MemoryLocation::get(Store);
1089     MemoryLocation LoadLoc = MemoryLocation::get(Load);
1090 
1091     AliasResult LdAliased = AA->alias(LoadLoc, StoreLoc);
1092 
1093     // If we can statically determine noalias we're good.
1094     if (!LdAliased)
1095       return Load->getPointerOperand();
1096 
1097     // Create code to check if the memory locations of the Load and Store
1098     // overlap and if they do, copy Load's operand to a new buffer.
1099 
1100     // First, create  new blocks for 2n part of the check and the copy.
1101     BasicBlock *Check0 = MatMul->getParent();
1102     // FIXME: Use lazy DTU and update SplitBlock to accept a DTU instead of a
1103     // DT. Manually collect dominator tree updates, to avoid unnecessary work,
1104     // as we adjust Check0 and Check1's branches.
1105     SmallVector<DominatorTree::UpdateType, 4> DTUpdates;
1106     for (BasicBlock *Succ : successors(Check0))
1107       DTUpdates.push_back({DT->Delete, Check0, Succ});
1108 
1109     BasicBlock *Check1 = SplitBlock(MatMul->getParent(), MatMul, nullptr, LI,
1110                                     nullptr, "alias_cont");
1111     BasicBlock *Copy =
1112         SplitBlock(MatMul->getParent(), MatMul, nullptr, LI, nullptr, "copy");
1113     BasicBlock *Fusion = SplitBlock(MatMul->getParent(), MatMul, nullptr, LI,
1114                                     nullptr, "no_alias");
1115 
1116     // Check if the loaded memory location begins before the end of the store
1117     // location. If the condition holds, they might overlap, otherwise they are
1118     // guaranteed to not overlap.
1119     IRBuilder<> Builder(MatMul);
1120     Check0->getTerminator()->eraseFromParent();
1121     Builder.SetInsertPoint(Check0);
1122     Type *IntPtrTy = Builder.getIntPtrTy(Load->getModule()->getDataLayout());
1123     Value *StoreBegin = Builder.CreatePtrToInt(
1124         const_cast<Value *>(StoreLoc.Ptr), IntPtrTy, "store.begin");
1125     Value *StoreEnd = Builder.CreateAdd(
1126         StoreBegin, ConstantInt::get(IntPtrTy, StoreLoc.Size.getValue()),
1127         "store.end", true, true);
1128     Value *LoadBegin = Builder.CreatePtrToInt(const_cast<Value *>(LoadLoc.Ptr),
1129                                               IntPtrTy, "load.begin");
1130     Builder.CreateCondBr(Builder.CreateICmpULT(LoadBegin, StoreEnd), Check1,
1131                          Fusion);
1132 
1133     // Check if the store begins before the end of the load location. If the
1134     // condition holds, they alias, otherwise they are guaranteed to not
1135     // overlap.
1136     Check1->getTerminator()->eraseFromParent();
1137     Builder.SetInsertPoint(Check1, Check1->begin());
1138     Value *LoadEnd = Builder.CreateAdd(
1139         LoadBegin, ConstantInt::get(IntPtrTy, LoadLoc.Size.getValue()),
1140         "load.end", true, true);
1141     Builder.CreateCondBr(Builder.CreateICmpULT(StoreBegin, LoadEnd), Copy,
1142                          Fusion);
1143 
1144     // Copy load operand to new alloca.
1145     Builder.SetInsertPoint(Copy, Copy->begin());
1146     AllocaInst *NewLd =
1147         Builder.CreateAlloca(Load->getType(), Load->getPointerAddressSpace());
1148     Builder.CreateMemCpy(NewLd, NewLd->getAlign(),
1149                          Load->getPointerOperand(), Load->getAlign(),
1150                          LoadLoc.Size.getValue());
1151     Builder.SetInsertPoint(Fusion, Fusion->begin());
1152     PHINode *PHI = Builder.CreatePHI(Load->getPointerOperandType(), 3);
1153     PHI->addIncoming(Load->getPointerOperand(), Check0);
1154     PHI->addIncoming(Load->getPointerOperand(), Check1);
1155     PHI->addIncoming(NewLd, Copy);
1156 
1157     // Adjust DT.
1158     DTUpdates.push_back({DT->Insert, Check0, Check1});
1159     DTUpdates.push_back({DT->Insert, Check0, Fusion});
1160     DTUpdates.push_back({DT->Insert, Check1, Copy});
1161     DTUpdates.push_back({DT->Insert, Check1, Fusion});
1162     DT->applyUpdates(DTUpdates);
1163     return PHI;
1164   }
1165 
1166   bool isFusionProfitable(CallInst *MatMul) {
1167     if (ForceFusion)
1168       return true;
1169 
1170     ShapeInfo LShape(MatMul->getArgOperand(2), MatMul->getArgOperand(3));
1171     ShapeInfo RShape(MatMul->getArgOperand(3), MatMul->getArgOperand(4));
1172 
1173     const unsigned R = LShape.NumRows;
1174     const unsigned C = RShape.NumColumns;
1175     const unsigned M = LShape.NumColumns;
1176     auto *EltType = cast<VectorType>(MatMul->getType())->getElementType();
1177 
1178     const unsigned VF =
1179         std::max<unsigned>(TTI.getRegisterBitWidth(true) /
1180                                EltType->getPrimitiveSizeInBits().getFixedSize(),
1181                            1U);
1182 
1183     // Cost model for tiling
1184     //
1185     // For tiling to be beneficial, we need reuse either along the R or
1186     // the C axis.  We vectorize along the R axis so that means at least
1187     // 3 elements.
1188     // TODO: Also consider cost of copying if operands alias.
1189     if (R <= VF && C == 1)
1190       return false;
1191     // Then we need enough elements to exceed the number of vector
1192     // registers we have.  Note that this is an oversimplification since
1193     // fusing also takes some extra loads which may exceed the number of
1194     // reloads necessary.
1195     unsigned Op0Regs = (R + VF - 1) / VF * M;
1196     unsigned Op1Regs = (M + VF - 1) / VF * C;
1197     return Op0Regs + Op1Regs > TTI.getNumberOfRegisters(true);
1198   }
1199 
1200   MatrixTy getZeroMatrix(Type *EltType, unsigned R, unsigned C) {
1201     MatrixTy Res;
1202     auto *ColumType = FixedVectorType::get(EltType, R);
1203     for (unsigned I = 0; I < C; ++I)
1204       Res.addVector(ConstantAggregateZero::get(ColumType));
1205     return Res;
1206   }
1207 
1208   void createTiledLoops(CallInst *MatMul, Value *LPtr, ShapeInfo LShape,
1209                         Value *RPtr, ShapeInfo RShape, StoreInst *Store,
1210                         bool AllowContract) {
1211     auto *EltType = cast<VectorType>(MatMul->getType())->getElementType();
1212 
1213     // Create the main tiling loop nest.
1214     TileInfo TI(LShape.NumRows, RShape.NumColumns, LShape.NumColumns, TileSize);
1215     DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
1216     Instruction *InsertI = cast<Instruction>(MatMul);
1217     BasicBlock *Start = InsertI->getParent();
1218     BasicBlock *End =
1219         SplitBlock(InsertI->getParent(), InsertI, DT, LI, nullptr, "continue");
1220     IRBuilder<> Builder(MatMul);
1221     BasicBlock *InnerBody = TI.CreateTiledLoops(Start, End, Builder, DTU, *LI);
1222 
1223     Type *TileVecTy =
1224         FixedVectorType::get(MatMul->getType()->getScalarType(), TileSize);
1225     MatrixTy TileResult;
1226     // Insert in the inner loop header.
1227     Builder.SetInsertPoint(TI.InnerLoopHeader->getTerminator());
1228     // Create PHI nodes for the result columns to accumulate across iterations.
1229     SmallVector<PHINode *, 4> ColumnPhis;
1230     for (unsigned I = 0; I < TileSize; I++) {
1231       auto *Phi = Builder.CreatePHI(TileVecTy, 2, "result.vec." + Twine(I));
1232       Phi->addIncoming(ConstantAggregateZero::get(TileVecTy),
1233                        TI.RowLoopHeader->getSingleSuccessor());
1234       TileResult.addVector(Phi);
1235       ColumnPhis.push_back(Phi);
1236     }
1237 
1238     // Insert in the inner loop body, which computes
1239     //   Res += Load(CurrentRow, K) * Load(K, CurrentColumn)
1240     Builder.SetInsertPoint(InnerBody->getTerminator());
1241     // Load tiles of the operands.
1242     MatrixTy A = loadMatrix(LPtr, {}, false, LShape, TI.CurrentRow, TI.CurrentK,
1243                             {TileSize, TileSize}, EltType, Builder);
1244     MatrixTy B = loadMatrix(RPtr, {}, false, RShape, TI.CurrentK, TI.CurrentCol,
1245                             {TileSize, TileSize}, EltType, Builder);
1246     emitMatrixMultiply(TileResult, A, B, AllowContract, Builder, true);
1247     // Store result after the inner loop is done.
1248     Builder.SetInsertPoint(TI.RowLoopLatch->getTerminator());
1249     storeMatrix(TileResult, Store->getPointerOperand(), Store->getAlign(),
1250                 Store->isVolatile(), {LShape.NumRows, RShape.NumColumns},
1251                 TI.CurrentRow, TI.CurrentCol, EltType, Builder);
1252 
1253     for (unsigned I = 0; I < TileResult.getNumVectors(); I++)
1254       ColumnPhis[I]->addIncoming(TileResult.getVector(I), TI.InnerLoopLatch);
1255 
1256     // Force unrolling of a few iterations of the inner loop, to make sure there
1257     // is enough work per iteration.
1258     // FIXME: The unroller should make this decision directly instead, but
1259     // currently the cost-model is not up to the task.
1260     unsigned InnerLoopUnrollCount = std::min(10u, LShape.NumColumns / TileSize);
1261     addStringMetadataToLoop(LI->getLoopFor(TI.InnerLoopHeader),
1262                             "llvm.loop.unroll.count", InnerLoopUnrollCount);
1263   }
1264 
1265   void emitSIMDTiling(CallInst *MatMul, LoadInst *LoadOp0, LoadInst *LoadOp1,
1266                       StoreInst *Store,
1267                       SmallPtrSetImpl<Instruction *> &FusedInsts) {
1268     assert(MatrixLayout == MatrixLayoutTy::ColumnMajor &&
1269            "Tiling only supported for column-major matrixes at the moment!");
1270     if (!isFusionProfitable(MatMul))
1271       return;
1272 
1273     ShapeInfo LShape(MatMul->getArgOperand(2), MatMul->getArgOperand(3));
1274     ShapeInfo RShape(MatMul->getArgOperand(3), MatMul->getArgOperand(4));
1275 
1276     const unsigned R = LShape.NumRows;
1277     const unsigned C = RShape.NumColumns;
1278     const unsigned M = LShape.NumColumns;
1279     auto *EltType = cast<VectorType>(MatMul->getType())->getElementType();
1280 
1281     Value *APtr = getNonAliasingPointer(LoadOp0, Store, MatMul);
1282     Value *BPtr = getNonAliasingPointer(LoadOp1, Store, MatMul);
1283     Value *CPtr = Store->getPointerOperand();
1284 
1285     bool AllowContract = AllowContractEnabled || (isa<FPMathOperator>(MatMul) &&
1286                                                   MatMul->hasAllowContract());
1287     if (TileUseLoops && (R % TileSize == 0 && C % TileSize == 0))
1288       createTiledLoops(MatMul, APtr, LShape, BPtr, RShape, Store,
1289                        AllowContract);
1290     else {
1291       IRBuilder<> Builder(Store);
1292       for (unsigned J = 0; J < C; J += TileSize)
1293         for (unsigned I = 0; I < R; I += TileSize) {
1294           const unsigned TileR = std::min(R - I, unsigned(TileSize));
1295           const unsigned TileC = std::min(C - J, unsigned(TileSize));
1296           MatrixTy Res = getZeroMatrix(EltType, TileR, TileC);
1297 
1298           for (unsigned K = 0; K < M; K += TileSize) {
1299             const unsigned TileM = std::min(M - K, unsigned(TileSize));
1300             MatrixTy A =
1301                 loadMatrix(APtr, LoadOp0->getAlign(), LoadOp0->isVolatile(),
1302                            LShape, Builder.getInt64(I), Builder.getInt64(K),
1303                            {TileR, TileM}, EltType, Builder);
1304             MatrixTy B =
1305                 loadMatrix(BPtr, LoadOp1->getAlign(), LoadOp1->isVolatile(),
1306                            RShape, Builder.getInt64(K), Builder.getInt64(J),
1307                            {TileM, TileC}, EltType, Builder);
1308             emitMatrixMultiply(Res, A, B, AllowContract, Builder, true);
1309           }
1310           storeMatrix(Res, CPtr, Store->getAlign(), Store->isVolatile(), {R, M},
1311                       Builder.getInt64(I), Builder.getInt64(J), EltType,
1312                       Builder);
1313         }
1314     }
1315 
1316     // Mark eliminated instructions as fused and remove them.
1317     FusedInsts.insert(Store);
1318     FusedInsts.insert(MatMul);
1319     Store->eraseFromParent();
1320     MatMul->eraseFromParent();
1321     if (LoadOp0->hasNUses(0)) {
1322       FusedInsts.insert(LoadOp0);
1323       LoadOp0->eraseFromParent();
1324     }
1325     if (LoadOp1->hasNUses(0)) {
1326       FusedInsts.insert(LoadOp1);
1327       LoadOp1->eraseFromParent();
1328     }
1329   }
1330 
1331   /// Try to lower matrix multiply chains by fusing operations.
1332   ///
1333   /// Currently we only lower {ld, ld} -> matmul -> st chains.
1334   //
1335   /// No need to return a MatrixTy object for the result of the operation, since
1336   /// the single store user will be lowered as part of this. Instructions that
1337   /// are completely eliminated by fusion are added to \p FusedInsts.
1338   void LowerMatrixMultiplyFused(CallInst *MatMul,
1339                                 SmallPtrSetImpl<Instruction *> &FusedInsts) {
1340     if (!FuseMatrix || !MatMul->hasOneUse() ||
1341         MatrixLayout != MatrixLayoutTy::ColumnMajor || !DT)
1342       return;
1343 
1344     assert(AA && LI && "Analyses should be available");
1345 
1346     auto *LoadOp0 = dyn_cast<LoadInst>(MatMul->getOperand(0));
1347     auto *LoadOp1 = dyn_cast<LoadInst>(MatMul->getOperand(1));
1348     auto *Store = dyn_cast<StoreInst>(*MatMul->user_begin());
1349     if (LoadOp0 && LoadOp1 && Store) {
1350       // The store address must dominate the MatMul instruction, otherwise
1351       // we create invalid IR.
1352       // FIXME: See if we can hoist the store address computation.
1353       auto *AddrI = dyn_cast<Instruction>(Store->getOperand(1));
1354       if (AddrI && (!DT->dominates(AddrI, MatMul)))
1355         return;
1356 
1357       emitSIMDTiling(MatMul, LoadOp0, LoadOp1, Store, FusedInsts);
1358       return;
1359     }
1360   }
1361 
1362   /// Lowers llvm.matrix.multiply.
1363   void LowerMultiply(CallInst *MatMul) {
1364     IRBuilder<> Builder(MatMul);
1365     auto *EltType = cast<VectorType>(MatMul->getType())->getElementType();
1366     ShapeInfo LShape(MatMul->getArgOperand(2), MatMul->getArgOperand(3));
1367     ShapeInfo RShape(MatMul->getArgOperand(3), MatMul->getArgOperand(4));
1368 
1369     const MatrixTy &Lhs = getMatrix(MatMul->getArgOperand(0), LShape, Builder);
1370     const MatrixTy &Rhs = getMatrix(MatMul->getArgOperand(1), RShape, Builder);
1371     assert(Lhs.getElementType() == Rhs.getElementType() &&
1372            "Matrix multiply argument element types do not match.");
1373 
1374     const unsigned R = LShape.NumRows;
1375     const unsigned C = RShape.NumColumns;
1376     assert(LShape.NumColumns == RShape.NumRows);
1377 
1378     // Initialize the output
1379     MatrixTy Result(R, C, EltType);
1380     assert(Lhs.getElementType() == Result.getElementType() &&
1381            "Matrix multiply result element type does not match arguments.");
1382 
1383     bool AllowContract = AllowContractEnabled || (isa<FPMathOperator>(MatMul) &&
1384                                                   MatMul->hasAllowContract());
1385     emitMatrixMultiply(Result, Lhs, Rhs, AllowContract, Builder, false);
1386     finalizeLowering(MatMul, Result, Builder);
1387   }
1388 
1389   /// Lowers llvm.matrix.transpose.
1390   void LowerTranspose(CallInst *Inst) {
1391     MatrixTy Result;
1392     IRBuilder<> Builder(Inst);
1393     Value *InputVal = Inst->getArgOperand(0);
1394     VectorType *VectorTy = cast<VectorType>(InputVal->getType());
1395     ShapeInfo ArgShape(Inst->getArgOperand(1), Inst->getArgOperand(2));
1396     MatrixTy InputMatrix = getMatrix(InputVal, ArgShape, Builder);
1397 
1398     const unsigned NewNumVecs =
1399         InputMatrix.isColumnMajor() ? ArgShape.NumRows : ArgShape.NumColumns;
1400     const unsigned NewNumElts =
1401         InputMatrix.isColumnMajor() ? ArgShape.NumColumns : ArgShape.NumRows;
1402 
1403     for (unsigned I = 0; I < NewNumVecs; ++I) {
1404       // Build a single result vector. First initialize it.
1405       Value *ResultVector = UndefValue::get(
1406           FixedVectorType::get(VectorTy->getElementType(), NewNumElts));
1407       // Go through the old elements and insert it into the resulting vector.
1408       for (auto J : enumerate(InputMatrix.vectors())) {
1409         Value *Elt = Builder.CreateExtractElement(J.value(), I);
1410         // Row and column indices are transposed.
1411         ResultVector =
1412             Builder.CreateInsertElement(ResultVector, Elt, J.index());
1413       }
1414       Result.addVector(ResultVector);
1415     }
1416 
1417     // TODO: Improve estimate of operations needed for transposes. Currently we
1418     // just count the insertelement/extractelement instructions, but do not
1419     // account for later simplifications/combines.
1420     finalizeLowering(
1421         Inst,
1422         Result.addNumComputeOps(2 * ArgShape.NumRows * ArgShape.NumColumns),
1423         Builder);
1424   }
1425 
1426   /// Lower load instructions, if shape information is available.
1427   bool VisitLoad(LoadInst *Inst, Value *Ptr, IRBuilder<> &Builder) {
1428     auto I = ShapeMap.find(Inst);
1429     if (I == ShapeMap.end())
1430       return false;
1431 
1432     LowerLoad(Inst, Ptr, Inst->getAlign(),
1433               Builder.getInt64(I->second.getStride()), Inst->isVolatile(),
1434               I->second);
1435     return true;
1436   }
1437 
1438   bool VisitStore(StoreInst *Inst, Value *StoredVal, Value *Ptr,
1439                   IRBuilder<> &Builder) {
1440     auto I = ShapeMap.find(StoredVal);
1441     if (I == ShapeMap.end())
1442       return false;
1443 
1444     LowerStore(Inst, StoredVal, Ptr, Inst->getAlign(),
1445                Builder.getInt64(I->second.getStride()), Inst->isVolatile(),
1446                I->second);
1447     return true;
1448   }
1449 
1450   /// Lower binary operators, if shape information is available.
1451   bool VisitBinaryOperator(BinaryOperator *Inst) {
1452     auto I = ShapeMap.find(Inst);
1453     if (I == ShapeMap.end())
1454       return false;
1455 
1456     Value *Lhs = Inst->getOperand(0);
1457     Value *Rhs = Inst->getOperand(1);
1458 
1459     IRBuilder<> Builder(Inst);
1460     ShapeInfo &Shape = I->second;
1461 
1462     MatrixTy Result;
1463     MatrixTy A = getMatrix(Lhs, Shape, Builder);
1464     MatrixTy B = getMatrix(Rhs, Shape, Builder);
1465     assert(A.isColumnMajor() == B.isColumnMajor() &&
1466            Result.isColumnMajor() == A.isColumnMajor() &&
1467            "operands must agree on matrix layout");
1468 
1469     // Helper to perform binary op on vectors.
1470     auto BuildVectorOp = [&Builder, Inst](Value *LHS, Value *RHS) {
1471       switch (Inst->getOpcode()) {
1472       case Instruction::Add:
1473         return Builder.CreateAdd(LHS, RHS);
1474       case Instruction::Mul:
1475         return Builder.CreateMul(LHS, RHS);
1476       case Instruction::Sub:
1477         return Builder.CreateSub(LHS, RHS);
1478       case Instruction::FAdd:
1479         return Builder.CreateFAdd(LHS, RHS);
1480       case Instruction::FMul:
1481         return Builder.CreateFMul(LHS, RHS);
1482       case Instruction::FSub:
1483         return Builder.CreateFSub(LHS, RHS);
1484       default:
1485         llvm_unreachable("Unsupported binary operator for matrix");
1486       }
1487     };
1488 
1489     for (unsigned I = 0; I < Shape.getNumVectors(); ++I)
1490       Result.addVector(BuildVectorOp(A.getVector(I), B.getVector(I)));
1491 
1492     finalizeLowering(Inst,
1493                      Result.addNumComputeOps(getNumOps(Result.getVectorTy()) *
1494                                              Result.getNumVectors()),
1495                      Builder);
1496     return true;
1497   }
1498 
1499   /// Helper to linearize a matrix expression tree into a string. Currently
1500   /// matrix expressions are linarized by starting at an expression leaf and
1501   /// linearizing bottom up.
1502   struct ExprLinearizer {
1503     unsigned LengthToBreak = 100;
1504     std::string Str;
1505     raw_string_ostream Stream;
1506     unsigned LineLength = 0;
1507     const DataLayout &DL;
1508 
1509     /// Mapping from instructions to matrixes. It is used to identify
1510     /// matrix instructions.
1511     const MapVector<Value *, MatrixTy> &Inst2Matrix;
1512 
1513     /// Mapping from values to the leaves of all expressions that the value is
1514     /// part of.
1515     const DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared;
1516 
1517     /// Set of matrix expressions in the scope of a given DISubprogram.
1518     const SmallSetVector<Value *, 32> &ExprsInSubprogram;
1519 
1520     /// Leaf node of the expression to linearize.
1521     Value *Leaf;
1522 
1523     /// Used to keep track of sub-expressions that get reused while linearizing
1524     /// the expression. Re-used sub-expressions are marked as (reused).
1525     SmallPtrSet<Value *, 8> ReusedExprs;
1526 
1527     ExprLinearizer(const DataLayout &DL,
1528                    const MapVector<Value *, MatrixTy> &Inst2Matrix,
1529                    const DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared,
1530                    const SmallSetVector<Value *, 32> &ExprsInSubprogram,
1531                    Value *Leaf)
1532         : Str(), Stream(Str), DL(DL), Inst2Matrix(Inst2Matrix), Shared(Shared),
1533           ExprsInSubprogram(ExprsInSubprogram), Leaf(Leaf) {}
1534 
1535     void indent(unsigned N) {
1536       LineLength += N;
1537       for (unsigned i = 0; i < N; i++)
1538         Stream << " ";
1539     }
1540 
1541     void lineBreak() {
1542       Stream << "\n";
1543       LineLength = 0;
1544     }
1545 
1546     void maybeIndent(unsigned Indent) {
1547       if (LineLength >= LengthToBreak)
1548         lineBreak();
1549 
1550       if (LineLength == 0)
1551         indent(Indent);
1552     }
1553 
1554     void write(StringRef S) {
1555       LineLength += S.size();
1556       Stream << S;
1557     }
1558 
1559     Value *getUnderlyingObjectThroughLoads(Value *V) {
1560       if (Value *Ptr = getPointerOperand(V))
1561         return getUnderlyingObjectThroughLoads(Ptr);
1562       else if (V->getType()->isPointerTy())
1563         return getUnderlyingObject(V);
1564       return V;
1565     }
1566 
1567     /// Returns true if \p V is a matrix value in the given subprogram.
1568     bool isMatrix(Value *V) const { return ExprsInSubprogram.count(V); }
1569 
1570     /// If \p V is a matrix value, print its shape as as NumRows x NumColumns to
1571     /// \p SS.
1572     void prettyPrintMatrixType(Value *V, raw_string_ostream &SS) {
1573       auto M = Inst2Matrix.find(V);
1574       if (M == Inst2Matrix.end())
1575         SS << "unknown";
1576       else {
1577         SS << M->second.getNumRows();
1578         SS << "x";
1579         SS << M->second.getNumColumns();
1580       }
1581     }
1582 
1583     /// Write the called function name. Handles calls to llvm.matrix.*
1584     /// specially: we write the name, followed by the dimensions of the input
1585     /// matrixes, followed by the scalar type name.
1586     void writeFnName(CallInst *CI) {
1587       if (!CI->getCalledFunction())
1588         write("<no called fn>");
1589       else {
1590         StringRef Name = CI->getCalledFunction()->getName();
1591         if (!Name.startswith("llvm.matrix")) {
1592           write(Name);
1593           return;
1594         }
1595         IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
1596         write(StringRef(Intrinsic::getName(II->getIntrinsicID(), {}))
1597                   .drop_front(StringRef("llvm.matrix.").size()));
1598         write(".");
1599         std::string Tmp;
1600         raw_string_ostream SS(Tmp);
1601 
1602         switch (II->getIntrinsicID()) {
1603         case Intrinsic::matrix_multiply:
1604           prettyPrintMatrixType(II->getOperand(0), SS);
1605           SS << ".";
1606           prettyPrintMatrixType(II->getOperand(1), SS);
1607           SS << "." << *II->getType()->getScalarType();
1608           break;
1609         case Intrinsic::matrix_transpose:
1610           prettyPrintMatrixType(II->getOperand(0), SS);
1611           SS << "." << *II->getType()->getScalarType();
1612           break;
1613         case Intrinsic::matrix_column_major_load:
1614           prettyPrintMatrixType(II, SS);
1615           SS << "." << *II->getType()->getScalarType();
1616           break;
1617         case Intrinsic::matrix_column_major_store:
1618           prettyPrintMatrixType(II->getOperand(0), SS);
1619           SS << "." << *II->getOperand(0)->getType()->getScalarType();
1620           break;
1621         default:
1622           llvm_unreachable("Unhandled case");
1623         }
1624         SS.flush();
1625         write(Tmp);
1626       }
1627     }
1628 
1629     unsigned getNumShapeArgs(CallInst *CI) const {
1630       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI)) {
1631         switch (II->getIntrinsicID()) {
1632         case Intrinsic::matrix_multiply:
1633           return 3;
1634         case Intrinsic::matrix_transpose:
1635           return 2;
1636         case Intrinsic::matrix_column_major_load:
1637         case Intrinsic::matrix_column_major_store:
1638           return 3;
1639         default:
1640           return 0;
1641         }
1642       }
1643       return 0;
1644     }
1645 
1646     /// Special printing for values: for pointers, we print if they refer to an
1647     /// (function) external address or a stack address, for other values we
1648     /// either print the constant or "scalar"/"matrix" for other values.
1649     void write(Value *V) {
1650       V = getUnderlyingObjectThroughLoads(V);
1651       if (V->getType()->isPointerTy()) {
1652         if (isa<AllocaInst>(V)) {
1653           Stream << "stack addr";
1654           LineLength += StringRef("stack addr").size();
1655         } else {
1656           Stream << "addr";
1657           LineLength += StringRef("addr").size();
1658         }
1659         if (!V->getName().empty()) {
1660           Stream << " %" << V->getName() << "";
1661           LineLength += V->getName().size() + 2;
1662         }
1663         return;
1664       }
1665 
1666       std::string Tmp;
1667       raw_string_ostream TmpStream(Tmp);
1668 
1669       if (auto *CI = dyn_cast<ConstantInt>(V))
1670         TmpStream << CI->getValue();
1671       else if (isa<Constant>(V))
1672         TmpStream << "constant";
1673       else {
1674         if (isMatrix(V))
1675           TmpStream << "matrix";
1676         else
1677           TmpStream << "scalar";
1678       }
1679       TmpStream.flush();
1680       Tmp = std::string(StringRef(Tmp).trim());
1681       LineLength += Tmp.size();
1682       Stream << Tmp;
1683     }
1684 
1685     /// Linearize expression \p Expr starting at an indentation of \p Indent.
1686     /// Expressions that are re-used multiple times are prefixed with (reused)
1687     /// at the re-used root instruction.
1688     void linearizeExpr(Value *Expr, unsigned Indent, bool ParentReused,
1689                        bool ParentShared) {
1690       auto *I = cast<Instruction>(Expr);
1691       maybeIndent(Indent);
1692       SmallVector<Value *, 8> Ops;
1693 
1694       // Is Expr shared with other expression leaves?
1695       bool ExprShared = false;
1696 
1697       // Deal with shared subtrees. Mark them as shared, if required.
1698       if (!ParentShared) {
1699         auto SI = Shared.find(Expr);
1700         assert(SI != Shared.end() && SI->second.count(Leaf));
1701 
1702         for (Value *S : SI->second) {
1703           if (S == Leaf)
1704             continue;
1705           DebugLoc DL = cast<Instruction>(S)->getDebugLoc();
1706           write("shared with remark at line " + std::to_string(DL.getLine()) +
1707                 " column " + std::to_string(DL.getCol()) + " (");
1708         }
1709         ExprShared = SI->second.size() > 1;
1710       }
1711 
1712       bool Reused = !ReusedExprs.insert(Expr).second;
1713       if (Reused && !ParentReused)
1714         write("(reused) ");
1715 
1716       if (auto *CI = dyn_cast<CallInst>(I)) {
1717         writeFnName(CI);
1718 
1719         Ops.append(CI->arg_begin(), CI->arg_end() - getNumShapeArgs(CI));
1720       } else if (isa<BitCastInst>(Expr)) {
1721         // Special case bitcasts, which are used to materialize matrixes from
1722         // non-matrix ops.
1723         write("matrix");
1724         return;
1725       } else {
1726         Ops.append(I->value_op_begin(), I->value_op_end());
1727         write(std::string(I->getOpcodeName()));
1728       }
1729 
1730       write(std::string("("));
1731 
1732       unsigned NumOpsToBreak = 1;
1733       if (match(Expr, m_Intrinsic<Intrinsic::matrix_column_major_load>()))
1734         NumOpsToBreak = 2;
1735 
1736       for (Value *Op : Ops) {
1737         if (Ops.size() > NumOpsToBreak)
1738           lineBreak();
1739 
1740         maybeIndent(Indent + 1);
1741         if (isMatrix(Op))
1742           linearizeExpr(Op, Indent + 1, Reused, ExprShared);
1743         else
1744           write(Op);
1745         if (Op != Ops.back())
1746           write(", ");
1747       }
1748 
1749       write(")");
1750     }
1751 
1752     const std::string &getResult() {
1753       Stream.flush();
1754       return Str;
1755     }
1756   };
1757 
1758   /// Generate remarks for matrix operations in a function. To generate remarks
1759   /// for matrix expressions, the following approach is used:
1760   /// 1. Use the inlined-at debug information to group matrix operations to the
1761   ///    DISubprograms they are contained in.
1762   /// 2. Collect leaves of matrix expressions (done in
1763   ///    RemarkGenerator::getExpressionLeaves) for each subprogram - expression
1764   //     mapping.  Leaves are lowered matrix instructions without other matrix
1765   //     users (like stores) in the current subprogram.
1766   /// 3. For each leaf, create a remark containing a linearizied version of the
1767   ///    matrix expression. The expression is linearized by a recursive
1768   ///    bottom-up traversal of the matrix operands, starting at a leaf. Note
1769   ///    that multiple leaves can share sub-expressions. Shared subexpressions
1770   ///    are explicitly marked as shared().
1771   struct RemarkGenerator {
1772     const MapVector<Value *, MatrixTy> &Inst2Matrix;
1773     OptimizationRemarkEmitter &ORE;
1774     Function &Func;
1775     const DataLayout &DL;
1776 
1777     RemarkGenerator(const MapVector<Value *, MatrixTy> &Inst2Matrix,
1778                     OptimizationRemarkEmitter &ORE, Function &Func)
1779         : Inst2Matrix(Inst2Matrix), ORE(ORE), Func(Func),
1780           DL(Func.getParent()->getDataLayout()) {}
1781 
1782     /// Return all leaves of the expressions in \p ExprsInSubprogram. Those are
1783     /// instructions in Inst2Matrix returning void or without any users in
1784     /// \p ExprsInSubprogram. Currently that should only include stores.
1785     SmallVector<Value *, 4>
1786     getExpressionLeaves(const SmallSetVector<Value *, 32> &ExprsInSubprogram) {
1787       SmallVector<Value *, 4> Leaves;
1788       for (auto *Expr : ExprsInSubprogram)
1789         if (Expr->getType()->isVoidTy() ||
1790             !any_of(Expr->users(), [&ExprsInSubprogram](User *U) {
1791               return ExprsInSubprogram.count(U);
1792             }))
1793           Leaves.push_back(Expr);
1794       return Leaves;
1795     }
1796 
1797     /// Recursively traverse expression \p V starting at \p Leaf and add \p Leaf
1798     /// to all visited expressions in \p Shared. Limit the matrix operations to
1799     /// the ones in \p ExprsInSubprogram.
1800     void collectSharedInfo(Value *Leaf, Value *V,
1801                            const SmallSetVector<Value *, 32> &ExprsInSubprogram,
1802                            DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared) {
1803 
1804       if (!ExprsInSubprogram.count(V))
1805         return;
1806 
1807       auto I = Shared.insert({V, {}});
1808       I.first->second.insert(Leaf);
1809 
1810       for (Value *Op : cast<Instruction>(V)->operand_values())
1811         collectSharedInfo(Leaf, Op, ExprsInSubprogram, Shared);
1812     }
1813 
1814     /// Calculate the number of exclusive and shared op counts for expression
1815     /// starting at \p V. Expressions used multiple times are counted once.
1816     /// Limit the matrix operations to the ones in \p ExprsInSubprogram.
1817     std::pair<OpInfoTy, OpInfoTy>
1818     sumOpInfos(Value *Root, SmallPtrSetImpl<Value *> &ReusedExprs,
1819                const SmallSetVector<Value *, 32> &ExprsInSubprogram,
1820                DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared) const {
1821       if (!ExprsInSubprogram.count(Root))
1822         return {};
1823 
1824       // Already counted this expression. Stop.
1825       if (!ReusedExprs.insert(Root).second)
1826         return {};
1827 
1828       OpInfoTy SharedCount;
1829       OpInfoTy Count;
1830 
1831       auto I = Shared.find(Root);
1832       auto CM = Inst2Matrix.find(Root);
1833       if (I->second.size() == 1)
1834         Count = CM->second.getOpInfo();
1835       else
1836         SharedCount = CM->second.getOpInfo();
1837 
1838       for (Value *Op : cast<Instruction>(Root)->operand_values()) {
1839         auto C = sumOpInfos(Op, ReusedExprs, ExprsInSubprogram, Shared);
1840         Count += C.first;
1841         SharedCount += C.second;
1842       }
1843       return {Count, SharedCount};
1844     }
1845 
1846     void emitRemarks() {
1847       if (!ORE.allowExtraAnalysis(DEBUG_TYPE))
1848         return;
1849 
1850       // Map matrix operations to their containting subprograms, by traversing
1851       // the inlinedAt chain. If the function does not have a DISubprogram, we
1852       // only map them to the containing function.
1853       MapVector<DISubprogram *, SmallVector<Value *, 8>> Subprog2Exprs;
1854       for (auto &KV : Inst2Matrix) {
1855         if (Func.getSubprogram()) {
1856           auto *I = cast<Instruction>(KV.first);
1857           DILocation *Context = I->getDebugLoc();
1858           while (Context) {
1859             auto I =
1860                 Subprog2Exprs.insert({getSubprogram(Context->getScope()), {}});
1861             I.first->second.push_back(KV.first);
1862             Context = DebugLoc(Context).getInlinedAt();
1863           }
1864         } else {
1865           auto I = Subprog2Exprs.insert({nullptr, {}});
1866           I.first->second.push_back(KV.first);
1867         }
1868       }
1869       for (auto &KV : Subprog2Exprs) {
1870         SmallSetVector<Value *, 32> ExprsInSubprogram(KV.second.begin(),
1871                                                       KV.second.end());
1872         auto Leaves = getExpressionLeaves(ExprsInSubprogram);
1873 
1874         DenseMap<Value *, SmallPtrSet<Value *, 2>> Shared;
1875         for (Value *Leaf : Leaves)
1876           collectSharedInfo(Leaf, Leaf, ExprsInSubprogram, Shared);
1877 
1878         // Generate remarks for each leaf.
1879         for (auto *L : Leaves) {
1880 
1881           DebugLoc Loc = cast<Instruction>(L)->getDebugLoc();
1882           DILocation *Context = cast<Instruction>(L)->getDebugLoc();
1883           while (Context) {
1884             if (getSubprogram(Context->getScope()) == KV.first) {
1885               Loc = Context;
1886               break;
1887             }
1888             Context = DebugLoc(Context).getInlinedAt();
1889           }
1890 
1891           SmallPtrSet<Value *, 8> ReusedExprs;
1892           OpInfoTy Counts, SharedCounts;
1893           std::tie(Counts, SharedCounts) =
1894               sumOpInfos(L, ReusedExprs, ExprsInSubprogram, Shared);
1895 
1896           OptimizationRemark Rem(DEBUG_TYPE, "matrix-lowered", Loc,
1897                                  cast<Instruction>(L)->getParent());
1898 
1899           Rem << "Lowered with ";
1900           Rem << ore::NV("NumStores", Counts.NumStores) << " stores, "
1901               << ore::NV("NumLoads", Counts.NumLoads) << " loads, "
1902               << ore::NV("NumComputeOps", Counts.NumComputeOps)
1903               << " compute ops";
1904 
1905           if (SharedCounts.NumStores > 0 || SharedCounts.NumLoads > 0 ||
1906               SharedCounts.NumComputeOps > 0) {
1907             Rem << ",\nadditionally "
1908                 << ore::NV("NumStores", SharedCounts.NumStores) << " stores, "
1909                 << ore::NV("NumLoads", SharedCounts.NumLoads) << " loads, "
1910                 << ore::NV("NumFPOps", SharedCounts.NumComputeOps)
1911                 << " compute ops"
1912                 << " are shared with other expressions";
1913           }
1914 
1915           Rem << ("\n" + linearize(L, Shared, ExprsInSubprogram, DL));
1916           ORE.emit(Rem);
1917         }
1918       }
1919     }
1920 
1921     std::string
1922     linearize(Value *L,
1923               const DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared,
1924               const SmallSetVector<Value *, 32> &ExprsInSubprogram,
1925               const DataLayout &DL) {
1926       ExprLinearizer Lin(DL, Inst2Matrix, Shared, ExprsInSubprogram, L);
1927       Lin.linearizeExpr(L, 0, false, false);
1928       return Lin.getResult();
1929     }
1930   };
1931 };
1932 } // namespace
1933 
1934 PreservedAnalyses LowerMatrixIntrinsicsPass::run(Function &F,
1935                                                  FunctionAnalysisManager &AM) {
1936   auto &TTI = AM.getResult<TargetIRAnalysis>(F);
1937   OptimizationRemarkEmitter *ORE = nullptr;
1938   AAResults *AA = nullptr;
1939   DominatorTree *DT = nullptr;
1940   LoopInfo *LI = nullptr;
1941 
1942   if (!Minimal) {
1943     ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
1944     AA = &AM.getResult<AAManager>(F);
1945     DT = &AM.getResult<DominatorTreeAnalysis>(F);
1946     LI = &AM.getResult<LoopAnalysis>(F);
1947   }
1948 
1949   LowerMatrixIntrinsics LMT(F, TTI, AA, DT, LI, ORE);
1950   if (LMT.Visit()) {
1951     PreservedAnalyses PA;
1952     if (!Minimal) {
1953       PA.preserve<LoopAnalysis>();
1954       PA.preserve<DominatorTreeAnalysis>();
1955     }
1956     return PA;
1957   }
1958   return PreservedAnalyses::all();
1959 }
1960 
1961 namespace {
1962 
1963 class LowerMatrixIntrinsicsLegacyPass : public FunctionPass {
1964 public:
1965   static char ID;
1966 
1967   LowerMatrixIntrinsicsLegacyPass() : FunctionPass(ID) {
1968     initializeLowerMatrixIntrinsicsLegacyPassPass(
1969         *PassRegistry::getPassRegistry());
1970   }
1971 
1972   bool runOnFunction(Function &F) override {
1973     auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1974     auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
1975     auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
1976     auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1977     auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
1978     LowerMatrixIntrinsics LMT(F, TTI, &AA, &DT, &LI, &ORE);
1979     bool C = LMT.Visit();
1980     return C;
1981   }
1982 
1983   void getAnalysisUsage(AnalysisUsage &AU) const override {
1984     AU.addRequired<TargetTransformInfoWrapperPass>();
1985     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
1986     AU.addRequired<AAResultsWrapperPass>();
1987     AU.addRequired<DominatorTreeWrapperPass>();
1988     AU.addPreserved<DominatorTreeWrapperPass>();
1989     AU.addRequired<LoopInfoWrapperPass>();
1990     AU.addPreserved<LoopInfoWrapperPass>();
1991   }
1992 };
1993 } // namespace
1994 
1995 static const char pass_name[] = "Lower the matrix intrinsics";
1996 char LowerMatrixIntrinsicsLegacyPass::ID = 0;
1997 INITIALIZE_PASS_BEGIN(LowerMatrixIntrinsicsLegacyPass, DEBUG_TYPE, pass_name,
1998                       false, false)
1999 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
2000 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
2001 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
2002 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
2003 INITIALIZE_PASS_END(LowerMatrixIntrinsicsLegacyPass, DEBUG_TYPE, pass_name,
2004                     false, false)
2005 
2006 Pass *llvm::createLowerMatrixIntrinsicsPass() {
2007   return new LowerMatrixIntrinsicsLegacyPass();
2008 }
2009 
2010 namespace {
2011 
2012 /// A lightweight version of the matrix lowering pass that only requires TTI.
2013 /// Advanced features that require DT, AA or ORE like tiling are disabled. This
2014 /// is used to lower matrix intrinsics if the main lowering pass is not run, for
2015 /// example with -O0.
2016 class LowerMatrixIntrinsicsMinimalLegacyPass : public FunctionPass {
2017 public:
2018   static char ID;
2019 
2020   LowerMatrixIntrinsicsMinimalLegacyPass() : FunctionPass(ID) {
2021     initializeLowerMatrixIntrinsicsMinimalLegacyPassPass(
2022         *PassRegistry::getPassRegistry());
2023   }
2024 
2025   bool runOnFunction(Function &F) override {
2026     auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2027     LowerMatrixIntrinsics LMT(F, TTI, nullptr, nullptr, nullptr, nullptr);
2028     bool C = LMT.Visit();
2029     return C;
2030   }
2031 
2032   void getAnalysisUsage(AnalysisUsage &AU) const override {
2033     AU.addRequired<TargetTransformInfoWrapperPass>();
2034     AU.setPreservesCFG();
2035   }
2036 };
2037 } // namespace
2038 
2039 static const char pass_name_minimal[] = "Lower the matrix intrinsics (minimal)";
2040 char LowerMatrixIntrinsicsMinimalLegacyPass::ID = 0;
2041 INITIALIZE_PASS_BEGIN(LowerMatrixIntrinsicsMinimalLegacyPass,
2042                       "lower-matrix-intrinsics-minimal", pass_name_minimal,
2043                       false, false)
2044 INITIALIZE_PASS_END(LowerMatrixIntrinsicsMinimalLegacyPass,
2045                     "lower-matrix-intrinsics-minimal", pass_name_minimal, false,
2046                     false)
2047 
2048 Pass *llvm::createLowerMatrixIntrinsicsMinimalPass() {
2049   return new LowerMatrixIntrinsicsMinimalLegacyPass();
2050 }
2051