//===- LowerMatrixIntrinsics.cpp - Lower matrix intrinsics -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Lower matrix intrinsics to vector operations.
//
// TODO:
//  * Improve fusion:
//    * Support more cases, e.g. multiply-add, multiply-sub, operands/results
//      transposed.
//    * Improve cost-modeling, e.g. choose different number of rows/columns
//      for tiles, consider cost of copies on alias.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LowerMatrixIntrinsics.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/MatrixUtils.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "lower-matrix-intrinsics"

static cl::opt<bool> EnableShapePropagation(
    "matrix-propagate-shape", cl::init(true), cl::Hidden,
    cl::desc("Enable/disable shape propagation from matrix intrinsics to other "
             "instructions."));

static cl::opt<bool>
    FuseMatrix("fuse-matrix", cl::init(true), cl::Hidden,
               cl::desc("Enable/disable fusing matrix instructions."));
// TODO: Allow and use non-square tiles.
static cl::opt<unsigned> TileSize(
    "fuse-matrix-tile-size", cl::init(4), cl::Hidden,
    cl::desc(
        "Tile size for matrix instruction fusion using square-shaped tiles."));
static cl::opt<bool> TileUseLoops("fuse-matrix-use-loops", cl::init(false),
                                  cl::Hidden,
                                  cl::desc("Generate loop nest for tiling."));
static cl::opt<bool> ForceFusion(
    "force-fuse-matrix", cl::init(false), cl::Hidden,
    cl::desc("Force matrix instruction fusion even if not profitable."));
static cl::opt<bool> AllowContractEnabled(
    "matrix-allow-contract", cl::init(false), cl::Hidden,
    cl::desc("Allow the use of FMAs if available and profitable. This may "
             "result in different results, due to less rounding error."));

enum class MatrixLayoutTy { ColumnMajor, RowMajor };

static cl::opt<MatrixLayoutTy> MatrixLayout(
    "matrix-default-layout", cl::init(MatrixLayoutTy::ColumnMajor),
    cl::desc("Sets the default matrix layout"),
    cl::values(clEnumValN(MatrixLayoutTy::ColumnMajor, "column-major",
                          "Use column-major layout"),
               clEnumValN(MatrixLayoutTy::RowMajor, "row-major",
                          "Use row-major layout")));

/// Helper function to either return Scope, if it is a subprogram or the
/// attached subprogram for a local scope.
static DISubprogram *getSubprogram(DIScope *Scope) {
  if (auto *Subprogram = dyn_cast<DISubprogram>(Scope))
    return Subprogram;
  return cast<DILocalScope>(Scope)->getSubprogram();
}

namespace {

// Given an element pointer \p BasePtr to the start of a (sub) matrix, compute
// the start address of vector \p VecIdx with type (\p EltType x \p NumElements)
// assuming \p Stride elements between the starts of two consecutive vectors.
// \p Stride must be >= \p NumElements.
// For column-major matrixes, the function computes the address of a column
// vector and \p NumElements must be set to the number of elements in a column
// (= number of rows of the matrix). For row-major matrixes, the function
// computes the address of a row vector and \p NumElements must be set to the
// number of elements in a row (= number of columns of the matrix).
//
// Consider a 4x4 matrix in column-major layout like below
//
//      0      1      2      3
// 0   v_0_0  v_0_1  v_0_2  v_0_3
// 1   v_1_0  v_1_1  v_1_2  v_1_3
// 2   v_2_0  v_2_1  v_2_2  v_2_3
// 3   v_3_0  v_3_1  v_3_2  v_3_3

// To compute the column addresses for a 2x3 sub-matrix at row 1 and column 1,
// we need a pointer to the first element of the submatrix as base pointer.
// Then we can use computeVectorAddr to compute the addresses for the columns
// of the sub-matrix.
//
// Column 0: computeVectorAddr(Base, 0 (column), 4 (stride), 2 (num rows), ..)
//           -> just returns Base
// Column 1: computeVectorAddr(Base, 1 (column), 4 (stride), 2 (num rows), ..)
//           -> returns Base + (1 * 4)
// Column 2: computeVectorAddr(Base, 2 (column), 4 (stride), 2 (num rows), ..)
//           -> returns Base + (2 * 4)
//
// The graphic below illustrates the number of elements in a column (marked
// with |) and the number of skipped elements (marked with }).
//
//         v_0_0  v_0_1 {v_0_2 {v_0_3
//                Base   Col 1  Col 2
//                  |     |      |
//         v_1_0 |v_1_1 |v_1_2 |v_1_3
//         v_2_0 |v_2_1 |v_2_2 |v_2_3
//         v_3_0 {v_3_1 {v_3_2  v_3_3
//
Value *computeVectorAddr(Value *BasePtr, Value *VecIdx, Value *Stride,
                         unsigned NumElements, Type *EltType,
                         IRBuilder<> &Builder) {

  assert((!isa<ConstantInt>(Stride) ||
          cast<ConstantInt>(Stride)->getZExtValue() >= NumElements) &&
         "Stride must be >= the number of elements in the result vector.");
  unsigned AS = cast<PointerType>(BasePtr->getType())->getAddressSpace();

  // Compute the start of the vector with index VecIdx as VecIdx * Stride.
  Value *VecStart = Builder.CreateMul(VecIdx, Stride, "vec.start");

  // Get pointer to the start of the selected vector. Skip GEP creation,
  // if we select vector 0.
151 if (isa<ConstantInt>(VecStart) && cast<ConstantInt>(VecStart)->isZero()) 152 VecStart = BasePtr; 153 else 154 VecStart = Builder.CreateGEP(EltType, BasePtr, VecStart, "vec.gep"); 155 156 // Cast elementwise vector start pointer to a pointer to a vector 157 // (EltType x NumElements)*. 158 auto *VecType = FixedVectorType::get(EltType, NumElements); 159 Type *VecPtrType = PointerType::get(VecType, AS); 160 return Builder.CreatePointerCast(VecStart, VecPtrType, "vec.cast"); 161 } 162 163 /// LowerMatrixIntrinsics contains the methods used to lower matrix intrinsics. 164 /// 165 /// Currently, the lowering for each matrix intrinsic is done as follows: 166 /// 1. Propagate the shape information from intrinsics to connected 167 /// instructions. 168 /// 2. Lower instructions with shape information (assuming column-major layout). 169 /// The lowering works similarly using row-major layout. 170 /// 2.1. Get column vectors for each argument. If we already lowered the 171 /// definition of an argument, use the produced column vectors directly. 172 /// If not, split the operand vector containing an embedded matrix into 173 /// a set of column vectors, 174 /// 2.2. Lower the instruction in terms of column major operations, which 175 /// yields a set of column vectors containing result matrix. Note that we 176 /// lower all instructions that have shape information. Besides the 177 /// intrinsics, this includes stores for example. 178 /// 2.3. Update uses of the lowered instruction. If we have shape information 179 /// for a user, there is nothing to do, as we will look up the result 180 /// column matrix when lowering the user. For other uses, we embed the 181 /// result matrix in a flat vector and update the use. 182 /// 2.4. Cache the result column matrix for the instruction we lowered 183 /// 3. After we lowered all instructions in a function, remove the now 184 /// obsolete instructions. 185 /// 186 class LowerMatrixIntrinsics { 187 Function &Func; 188 const DataLayout &DL; 189 const TargetTransformInfo &TTI; 190 AliasAnalysis *AA; 191 DominatorTree *DT; 192 LoopInfo *LI; 193 OptimizationRemarkEmitter *ORE; 194 195 /// Contains estimates of the number of operations (loads, stores, compute) required to lower a matrix operation. 196 struct OpInfoTy { 197 /// Number of stores emitted to generate this matrix. 198 unsigned NumStores = 0; 199 /// Number of loads emitted to generate this matrix. 200 unsigned NumLoads = 0; 201 /// Number of compute operations emitted to generate this matrix. 202 unsigned NumComputeOps = 0; 203 204 OpInfoTy &operator+=(const OpInfoTy &RHS) { 205 NumStores += RHS.NumStores; 206 NumLoads += RHS.NumLoads; 207 NumComputeOps += RHS.NumComputeOps; 208 return *this; 209 } 210 }; 211 212 /// Wrapper class representing a matrix as a set of vectors, either in row or 213 /// column major layout. All vectors must have the same vector type. 214 class MatrixTy { 215 SmallVector<Value *, 16> Vectors; 216 217 OpInfoTy OpInfo; 218 219 bool IsColumnMajor = true; 220 221 public: 222 MatrixTy() 223 : Vectors(), 224 IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {} 225 MatrixTy(ArrayRef<Value *> Vectors) 226 : Vectors(Vectors.begin(), Vectors.end()), 227 IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {} 228 MatrixTy(unsigned NumRows, unsigned NumColumns, Type *EltTy) 229 : IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) { 230 231 unsigned D = isColumnMajor() ? 
NumColumns : NumRows; 232 for (unsigned J = 0; J < D; ++J) 233 addVector(UndefValue::get(FixedVectorType::get( 234 EltTy, isColumnMajor() ? NumRows : NumColumns))); 235 } 236 237 Value *getVector(unsigned i) const { return Vectors[i]; } 238 Value *getColumn(unsigned i) const { 239 assert(isColumnMajor() && "only supported for column-major matrixes"); 240 return Vectors[i]; 241 } 242 Value *getRow(unsigned i) const { 243 assert(!isColumnMajor() && "only supported for row-major matrixes"); 244 return Vectors[i]; 245 } 246 247 void setVector(unsigned i, Value *V) { Vectors[i] = V; } 248 249 Type *getElementType() const { return getVectorTy()->getElementType(); } 250 251 unsigned getNumVectors() const { 252 if (isColumnMajor()) 253 return getNumColumns(); 254 return getNumRows(); 255 } 256 257 unsigned getNumColumns() const { 258 if (isColumnMajor()) 259 return Vectors.size(); 260 else { 261 assert(Vectors.size() > 0 && "Cannot call getNumRows without columns"); 262 return cast<FixedVectorType>(Vectors[0]->getType())->getNumElements(); 263 } 264 } 265 unsigned getNumRows() const { 266 if (isColumnMajor()) { 267 assert(Vectors.size() > 0 && "Cannot call getNumRows without columns"); 268 return cast<FixedVectorType>(Vectors[0]->getType())->getNumElements(); 269 } else 270 return Vectors.size(); 271 } 272 273 void addVector(Value *V) { Vectors.push_back(V); } 274 VectorType *getColumnTy() { 275 assert(isColumnMajor() && "only supported for column-major matrixes"); 276 return getVectorTy(); 277 } 278 279 VectorType *getVectorTy() const { 280 return cast<VectorType>(Vectors[0]->getType()); 281 } 282 283 iterator_range<SmallVector<Value *, 8>::iterator> columns() { 284 assert(isColumnMajor() && 285 "columns() only supported for column-major matrixes"); 286 return make_range(Vectors.begin(), Vectors.end()); 287 } 288 289 iterator_range<SmallVector<Value *, 8>::iterator> vectors() { 290 return make_range(Vectors.begin(), Vectors.end()); 291 } 292 293 /// Embed the vectors of the matrix into a flat vector by concatenating 294 /// them. 295 Value *embedInVector(IRBuilder<> &Builder) const { 296 return Vectors.size() == 1 ? Vectors[0] 297 : concatenateVectors(Builder, Vectors); 298 } 299 300 MatrixTy &addNumLoads(unsigned N) { 301 OpInfo.NumLoads += N; 302 return *this; 303 } 304 305 void setNumLoads(unsigned N) { OpInfo.NumLoads = N; } 306 307 MatrixTy &addNumStores(unsigned N) { 308 OpInfo.NumStores += N; 309 return *this; 310 } 311 312 MatrixTy &addNumComputeOps(unsigned N) { 313 OpInfo.NumComputeOps += N; 314 return *this; 315 } 316 317 unsigned getNumStores() const { return OpInfo.NumStores; } 318 unsigned getNumLoads() const { return OpInfo.NumLoads; } 319 unsigned getNumComputeOps() const { return OpInfo.NumComputeOps; } 320 321 const OpInfoTy &getOpInfo() const { return OpInfo; } 322 323 bool isColumnMajor() const { return IsColumnMajor; } 324 325 unsigned getStride() const { 326 if (isColumnMajor()) 327 return getNumRows(); 328 return getNumColumns(); 329 } 330 331 /// Extract a vector of \p NumElts starting at index (\p I, \p J). If the 332 /// matrix is column-major, the result vector is extracted from a column 333 /// vector, otherwise from a row vector. 334 Value *extractVector(unsigned I, unsigned J, unsigned NumElts, 335 IRBuilder<> &Builder) const { 336 Value *Vec = isColumnMajor() ? getColumn(J) : getRow(I); 337 return Builder.CreateShuffleVector( 338 Vec, createSequentialMask(isColumnMajor() ? 
I : J, NumElts, 0), 339 "block"); 340 } 341 }; 342 343 struct ShapeInfo { 344 unsigned NumRows; 345 unsigned NumColumns; 346 347 bool IsColumnMajor; 348 349 ShapeInfo(unsigned NumRows = 0, unsigned NumColumns = 0) 350 : NumRows(NumRows), NumColumns(NumColumns), 351 IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {} 352 353 ShapeInfo(Value *NumRows, Value *NumColumns) 354 : ShapeInfo(cast<ConstantInt>(NumRows)->getZExtValue(), 355 cast<ConstantInt>(NumColumns)->getZExtValue()) {} 356 357 bool operator==(const ShapeInfo &other) { 358 return NumRows == other.NumRows && NumColumns == other.NumColumns; 359 } 360 bool operator!=(const ShapeInfo &other) { return !(*this == other); } 361 362 /// Returns true if shape-information is defined, meaning both dimensions 363 /// are != 0. 364 operator bool() const { 365 assert(NumRows == 0 || NumColumns != 0); 366 return NumRows != 0; 367 } 368 369 unsigned getStride() const { 370 if (IsColumnMajor) 371 return NumRows; 372 return NumColumns; 373 } 374 375 unsigned getNumVectors() const { 376 if (IsColumnMajor) 377 return NumColumns; 378 return NumRows; 379 } 380 }; 381 382 /// Maps instructions to their shape information. The shape information 383 /// describes the shape to be used while lowering. This matches the shape of 384 /// the result value of the instruction, with the only exceptions being store 385 /// instructions and the matrix_column_major_store intrinsics. For those, the 386 /// shape information indicates that those instructions should be lowered 387 /// using shape information as well. 388 DenseMap<Value *, ShapeInfo> ShapeMap; 389 390 /// List of instructions to remove. While lowering, we are not replacing all 391 /// users of a lowered instruction, if shape information is available and 392 /// those need to be removed after we finished lowering. 393 SmallVector<Instruction *, 16> ToRemove; 394 395 /// Map from instructions to their produced column matrix. 396 MapVector<Value *, MatrixTy> Inst2ColumnMatrix; 397 398 public: 399 LowerMatrixIntrinsics(Function &F, TargetTransformInfo &TTI, 400 AliasAnalysis *AA, DominatorTree *DT, LoopInfo *LI, 401 OptimizationRemarkEmitter *ORE) 402 : Func(F), DL(F.getParent()->getDataLayout()), TTI(TTI), AA(AA), DT(DT), 403 LI(LI), ORE(ORE) {} 404 405 unsigned getNumOps(Type *VT) { 406 assert(isa<VectorType>(VT) && "Expected vector type"); 407 return getNumOps(VT->getScalarType(), 408 cast<FixedVectorType>(VT)->getNumElements()); 409 } 410 411 // 412 /// Return the estimated number of vector ops required for an operation on 413 /// \p VT * N. 414 unsigned getNumOps(Type *ST, unsigned N) { 415 return std::ceil((ST->getPrimitiveSizeInBits() * N).getFixedSize() / 416 double(TTI.getRegisterBitWidth( 417 TargetTransformInfo::RGK_FixedWidthVector) 418 .getFixedSize())); 419 } 420 421 /// Return the set of vectors that a matrix value is lowered to. 422 /// 423 /// If we lowered \p MatrixVal, just return the cache result matrix. Otherwise 424 /// split the flat vector \p MatrixVal containing a matrix with shape \p SI 425 /// into vectors. 426 MatrixTy getMatrix(Value *MatrixVal, const ShapeInfo &SI, 427 IRBuilder<> &Builder) { 428 VectorType *VType = dyn_cast<VectorType>(MatrixVal->getType()); 429 assert(VType && "MatrixVal must be a vector type"); 430 assert(cast<FixedVectorType>(VType)->getNumElements() == 431 SI.NumRows * SI.NumColumns && 432 "The vector size must match the number of matrix elements"); 433 434 // Check if we lowered MatrixVal using shape information. 
In that case, 435 // return the existing matrix, if it matches the requested shape 436 // information. If there is a mis-match, embed the result in a flat 437 // vector and split it later. 438 auto Found = Inst2ColumnMatrix.find(MatrixVal); 439 if (Found != Inst2ColumnMatrix.end()) { 440 MatrixTy &M = Found->second; 441 // Return the found matrix, if its shape matches the requested shape 442 // information 443 if (SI.NumRows == M.getNumRows() && SI.NumColumns == M.getNumColumns()) 444 return M; 445 446 MatrixVal = M.embedInVector(Builder); 447 } 448 449 // Otherwise split MatrixVal. 450 SmallVector<Value *, 16> SplitVecs; 451 for (unsigned MaskStart = 0; 452 MaskStart < cast<FixedVectorType>(VType)->getNumElements(); 453 MaskStart += SI.getStride()) { 454 Value *V = Builder.CreateShuffleVector( 455 MatrixVal, createSequentialMask(MaskStart, SI.getStride(), 0), 456 "split"); 457 SplitVecs.push_back(V); 458 } 459 460 return {SplitVecs}; 461 } 462 463 /// If \p V already has a known shape return false. Otherwise set the shape 464 /// for instructions that support it. 465 bool setShapeInfo(Value *V, ShapeInfo Shape) { 466 assert(Shape && "Shape not set"); 467 if (isa<UndefValue>(V) || !supportsShapeInfo(V)) 468 return false; 469 470 auto SIter = ShapeMap.find(V); 471 if (SIter != ShapeMap.end()) { 472 LLVM_DEBUG(dbgs() << " not overriding existing shape: " 473 << SIter->second.NumRows << " " 474 << SIter->second.NumColumns << " for " << *V << "\n"); 475 return false; 476 } 477 478 ShapeMap.insert({V, Shape}); 479 LLVM_DEBUG(dbgs() << " " << Shape.NumRows << " x " << Shape.NumColumns 480 << " for " << *V << "\n"); 481 return true; 482 } 483 484 bool isUniformShape(Value *V) { 485 Instruction *I = dyn_cast<Instruction>(V); 486 if (!I) 487 return true; 488 489 switch (I->getOpcode()) { 490 case Instruction::FAdd: 491 case Instruction::FSub: 492 case Instruction::FMul: // Scalar multiply. 493 case Instruction::FNeg: 494 case Instruction::Add: 495 case Instruction::Mul: 496 case Instruction::Sub: 497 return true; 498 default: 499 return false; 500 } 501 } 502 503 /// Returns true if shape information can be used for \p V. The supported 504 /// instructions must match the instructions that can be lowered by this pass. 505 bool supportsShapeInfo(Value *V) { 506 Instruction *Inst = dyn_cast<Instruction>(V); 507 if (!Inst) 508 return false; 509 510 IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst); 511 if (II) 512 switch (II->getIntrinsicID()) { 513 case Intrinsic::matrix_multiply: 514 case Intrinsic::matrix_transpose: 515 case Intrinsic::matrix_column_major_load: 516 case Intrinsic::matrix_column_major_store: 517 return true; 518 default: 519 return false; 520 } 521 return isUniformShape(V) || isa<StoreInst>(V) || isa<LoadInst>(V); 522 } 523 524 /// Propagate the shape information of instructions to their users. 525 /// The work list contains instructions for which we can compute the shape, 526 /// either based on the information provided by matrix intrinsics or known 527 /// shapes of operands. 528 SmallVector<Instruction *, 32> 529 propagateShapeForward(SmallVectorImpl<Instruction *> &WorkList) { 530 SmallVector<Instruction *, 32> NewWorkList; 531 // Pop an element for which we guaranteed to have at least one of the 532 // operand shapes. Add the shape for this and then add users to the work 533 // list. 
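    // For example (a sketch): given
    //   %c = call <4 x float> @llvm.matrix.multiply.v4f32.v4f32.v4f32(
    //            <4 x float> %a, <4 x float> %b, i32 2, i32 2, i32 2)
    // the result shape {2, 2} is recorded for %c and users of %c without a
    // known shape are queued for another round of propagation.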
534 LLVM_DEBUG(dbgs() << "Forward-propagate shapes:\n"); 535 while (!WorkList.empty()) { 536 Instruction *Inst = WorkList.pop_back_val(); 537 538 // New entry, set the value and insert operands 539 bool Propagate = false; 540 541 Value *MatrixA; 542 Value *MatrixB; 543 Value *M; 544 Value *N; 545 Value *K; 546 if (match(Inst, m_Intrinsic<Intrinsic::matrix_multiply>( 547 m_Value(MatrixA), m_Value(MatrixB), m_Value(M), 548 m_Value(N), m_Value(K)))) { 549 Propagate = setShapeInfo(Inst, {M, K}); 550 } else if (match(Inst, m_Intrinsic<Intrinsic::matrix_transpose>( 551 m_Value(MatrixA), m_Value(M), m_Value(N)))) { 552 // Flip dimensions. 553 Propagate = setShapeInfo(Inst, {N, M}); 554 } else if (match(Inst, m_Intrinsic<Intrinsic::matrix_column_major_store>( 555 m_Value(MatrixA), m_Value(), m_Value(), 556 m_Value(), m_Value(M), m_Value(N)))) { 557 Propagate = setShapeInfo(Inst, {N, M}); 558 } else if (match(Inst, m_Intrinsic<Intrinsic::matrix_column_major_load>( 559 m_Value(), m_Value(), m_Value(), m_Value(M), 560 m_Value(N)))) { 561 Propagate = setShapeInfo(Inst, {M, N}); 562 } else if (match(Inst, m_Store(m_Value(MatrixA), m_Value()))) { 563 auto OpShape = ShapeMap.find(MatrixA); 564 if (OpShape != ShapeMap.end()) 565 setShapeInfo(Inst, OpShape->second); 566 continue; 567 } else if (isUniformShape(Inst)) { 568 // Find the first operand that has a known shape and use that. 569 for (auto &Op : Inst->operands()) { 570 auto OpShape = ShapeMap.find(Op.get()); 571 if (OpShape != ShapeMap.end()) { 572 Propagate |= setShapeInfo(Inst, OpShape->second); 573 break; 574 } 575 } 576 } 577 578 if (Propagate) { 579 NewWorkList.push_back(Inst); 580 for (auto *User : Inst->users()) 581 if (ShapeMap.count(User) == 0) 582 WorkList.push_back(cast<Instruction>(User)); 583 } 584 } 585 586 return NewWorkList; 587 } 588 589 /// Propagate the shape to operands of instructions with shape information. 590 /// \p Worklist contains the instruction for which we already know the shape. 591 SmallVector<Instruction *, 32> 592 propagateShapeBackward(SmallVectorImpl<Instruction *> &WorkList) { 593 SmallVector<Instruction *, 32> NewWorkList; 594 595 auto pushInstruction = [](Value *V, 596 SmallVectorImpl<Instruction *> &WorkList) { 597 Instruction *I = dyn_cast<Instruction>(V); 598 if (I) 599 WorkList.push_back(I); 600 }; 601 // Pop an element with known shape. Traverse the operands, if their shape 602 // derives from the result shape and is unknown, add it and add them to the 603 // worklist. 604 LLVM_DEBUG(dbgs() << "Backward-propagate shapes:\n"); 605 while (!WorkList.empty()) { 606 Value *V = WorkList.pop_back_val(); 607 608 size_t BeforeProcessingV = WorkList.size(); 609 if (!isa<Instruction>(V)) 610 continue; 611 612 Value *MatrixA; 613 Value *MatrixB; 614 Value *M; 615 Value *N; 616 Value *K; 617 if (match(V, m_Intrinsic<Intrinsic::matrix_multiply>( 618 m_Value(MatrixA), m_Value(MatrixB), m_Value(M), 619 m_Value(N), m_Value(K)))) { 620 if (setShapeInfo(MatrixA, {M, N})) 621 pushInstruction(MatrixA, WorkList); 622 623 if (setShapeInfo(MatrixB, {N, K})) 624 pushInstruction(MatrixB, WorkList); 625 626 } else if (match(V, m_Intrinsic<Intrinsic::matrix_transpose>( 627 m_Value(MatrixA), m_Value(M), m_Value(N)))) { 628 // Flip dimensions. 
629 if (setShapeInfo(MatrixA, {M, N})) 630 pushInstruction(MatrixA, WorkList); 631 } else if (match(V, m_Intrinsic<Intrinsic::matrix_column_major_store>( 632 m_Value(MatrixA), m_Value(), m_Value(), m_Value(), 633 m_Value(M), m_Value(N)))) { 634 if (setShapeInfo(MatrixA, {M, N})) { 635 pushInstruction(MatrixA, WorkList); 636 } 637 } else if (isa<LoadInst>(V) || 638 match(V, m_Intrinsic<Intrinsic::matrix_column_major_load>())) { 639 // Nothing to do, no matrix input. 640 } else if (isa<StoreInst>(V)) { 641 // Nothing to do. We forward-propagated to this so we would just 642 // backward propagate to an instruction with an already known shape. 643 } else if (isUniformShape(V)) { 644 // Propagate to all operands. 645 ShapeInfo Shape = ShapeMap[V]; 646 for (Use &U : cast<Instruction>(V)->operands()) { 647 if (setShapeInfo(U.get(), Shape)) 648 pushInstruction(U.get(), WorkList); 649 } 650 } 651 // After we discovered new shape info for new instructions in the 652 // worklist, we use their users as seeds for the next round of forward 653 // propagation. 654 for (size_t I = BeforeProcessingV; I != WorkList.size(); I++) 655 for (User *U : WorkList[I]->users()) 656 if (isa<Instruction>(U) && V != U) 657 NewWorkList.push_back(cast<Instruction>(U)); 658 } 659 return NewWorkList; 660 } 661 662 bool Visit() { 663 if (EnableShapePropagation) { 664 SmallVector<Instruction *, 32> WorkList; 665 666 // Initially only the shape of matrix intrinsics is known. 667 // Initialize the work list with ops carrying shape information. 668 for (BasicBlock &BB : Func) 669 for (Instruction &Inst : BB) { 670 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&Inst); 671 if (!II) 672 continue; 673 674 switch (II->getIntrinsicID()) { 675 case Intrinsic::matrix_multiply: 676 case Intrinsic::matrix_transpose: 677 case Intrinsic::matrix_column_major_load: 678 case Intrinsic::matrix_column_major_store: 679 WorkList.push_back(&Inst); 680 break; 681 default: 682 break; 683 } 684 } 685 // Propagate shapes until nothing changes any longer. 686 while (!WorkList.empty()) { 687 WorkList = propagateShapeForward(WorkList); 688 WorkList = propagateShapeBackward(WorkList); 689 } 690 } 691 692 bool Changed = false; 693 SmallVector<CallInst *, 16> MaybeFusableInsts; 694 SmallVector<Instruction *, 16> MatrixInsts; 695 696 // First, collect all instructions with shape information and candidates for 697 // fusion (currently only matrix multiplies). 698 ReversePostOrderTraversal<Function *> RPOT(&Func); 699 for (auto *BB : RPOT) 700 for (Instruction &I : *BB) { 701 if (ShapeMap.find(&I) == ShapeMap.end()) 702 continue; 703 if (match(&I, m_Intrinsic<Intrinsic::matrix_multiply>())) 704 MaybeFusableInsts.push_back(cast<CallInst>(&I)); 705 MatrixInsts.push_back(&I); 706 } 707 708 // Second, try to fuse candidates. 709 SmallPtrSet<Instruction *, 16> FusedInsts; 710 for (CallInst *CI : MaybeFusableInsts) 711 LowerMatrixMultiplyFused(CI, FusedInsts); 712 Changed = !FusedInsts.empty(); 713 714 // Third, lower remaining instructions with shape information. 
715 for (Instruction *Inst : MatrixInsts) { 716 if (FusedInsts.count(Inst)) 717 continue; 718 719 IRBuilder<> Builder(Inst); 720 721 if (CallInst *CInst = dyn_cast<CallInst>(Inst)) 722 Changed |= VisitCallInst(CInst); 723 724 Value *Op1; 725 Value *Op2; 726 if (auto *BinOp = dyn_cast<BinaryOperator>(Inst)) 727 Changed |= VisitBinaryOperator(BinOp); 728 if (auto *UnOp = dyn_cast<UnaryOperator>(Inst)) 729 Changed |= VisitUnaryOperator(UnOp); 730 if (match(Inst, m_Load(m_Value(Op1)))) 731 Changed |= VisitLoad(cast<LoadInst>(Inst), Op1, Builder); 732 else if (match(Inst, m_Store(m_Value(Op1), m_Value(Op2)))) 733 Changed |= VisitStore(cast<StoreInst>(Inst), Op1, Op2, Builder); 734 } 735 736 if (ORE) { 737 RemarkGenerator RemarkGen(Inst2ColumnMatrix, *ORE, Func); 738 RemarkGen.emitRemarks(); 739 } 740 741 for (Instruction *Inst : reverse(ToRemove)) 742 Inst->eraseFromParent(); 743 744 return Changed; 745 } 746 747 /// Turns \p BasePtr into an elementwise pointer to \p EltType. 748 Value *createElementPtr(Value *BasePtr, Type *EltType, IRBuilder<> &Builder) { 749 unsigned AS = cast<PointerType>(BasePtr->getType())->getAddressSpace(); 750 Type *EltPtrType = PointerType::get(EltType, AS); 751 return Builder.CreatePointerCast(BasePtr, EltPtrType); 752 } 753 754 /// Replace intrinsic calls 755 bool VisitCallInst(CallInst *Inst) { 756 if (!Inst->getCalledFunction() || !Inst->getCalledFunction()->isIntrinsic()) 757 return false; 758 759 switch (Inst->getCalledFunction()->getIntrinsicID()) { 760 case Intrinsic::matrix_multiply: 761 LowerMultiply(Inst); 762 break; 763 case Intrinsic::matrix_transpose: 764 LowerTranspose(Inst); 765 break; 766 case Intrinsic::matrix_column_major_load: 767 LowerColumnMajorLoad(Inst); 768 break; 769 case Intrinsic::matrix_column_major_store: 770 LowerColumnMajorStore(Inst); 771 break; 772 default: 773 return false; 774 } 775 return true; 776 } 777 778 /// Compute the alignment for a column/row \p Idx with \p Stride between them. 779 /// The address at \p Idx == 0 has alignment \p A. If \p Stride is a 780 /// ConstantInt, reduce the initial alignment based on the byte offset. For 781 /// non-ConstantInt strides, return the common alignment of the initial 782 /// alignment and the element size in bytes. 783 Align getAlignForIndex(unsigned Idx, Value *Stride, Type *ElementTy, 784 MaybeAlign A) const { 785 Align InitialAlign = DL.getValueOrABITypeAlignment(A, ElementTy); 786 if (Idx == 0) 787 return InitialAlign; 788 789 TypeSize ElementSizeInBits = DL.getTypeSizeInBits(ElementTy); 790 if (auto *ConstStride = dyn_cast<ConstantInt>(Stride)) { 791 uint64_t StrideInBytes = 792 ConstStride->getZExtValue() * ElementSizeInBits / 8; 793 return commonAlignment(InitialAlign, Idx * StrideInBytes); 794 } 795 return commonAlignment(InitialAlign, ElementSizeInBits / 8); 796 } 797 798 /// Load a matrix with \p Shape starting at \p Ptr and using \p Stride between 799 /// vectors. 
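  ///
  /// For example (a sketch): a column-major 4 x 2 matrix of double loaded
  /// with Stride == 4 becomes two <4 x double> loads, from element offsets
  /// 0 * Stride and 1 * Stride (computed by computeVectorAddr).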
800 MatrixTy loadMatrix(Type *Ty, Value *Ptr, MaybeAlign MAlign, Value *Stride, 801 bool IsVolatile, ShapeInfo Shape, IRBuilder<> &Builder) { 802 auto *VType = cast<VectorType>(Ty); 803 Type *EltTy = VType->getElementType(); 804 Type *VecTy = FixedVectorType::get(EltTy, Shape.getStride()); 805 Value *EltPtr = createElementPtr(Ptr, EltTy, Builder); 806 MatrixTy Result; 807 for (unsigned I = 0, E = Shape.getNumVectors(); I < E; ++I) { 808 Value *GEP = computeVectorAddr(EltPtr, Builder.getInt64(I), Stride, 809 Shape.getStride(), EltTy, Builder); 810 Value *Vector = Builder.CreateAlignedLoad( 811 VecTy, GEP, getAlignForIndex(I, Stride, EltTy, MAlign), 812 IsVolatile, "col.load"); 813 814 Result.addVector(Vector); 815 } 816 return Result.addNumLoads(getNumOps(Result.getVectorTy()) * 817 Result.getNumVectors()); 818 } 819 820 /// Loads a sub-matrix with shape \p ResultShape from a \p R x \p C matrix, 821 /// starting at \p MatrixPtr[I][J]. 822 MatrixTy loadMatrix(Value *MatrixPtr, MaybeAlign Align, bool IsVolatile, 823 ShapeInfo MatrixShape, Value *I, Value *J, 824 ShapeInfo ResultShape, Type *EltTy, 825 IRBuilder<> &Builder) { 826 827 Value *Offset = Builder.CreateAdd( 828 Builder.CreateMul(J, Builder.getInt64(MatrixShape.getStride())), I); 829 830 unsigned AS = cast<PointerType>(MatrixPtr->getType())->getAddressSpace(); 831 Value *EltPtr = 832 Builder.CreatePointerCast(MatrixPtr, PointerType::get(EltTy, AS)); 833 Value *TileStart = Builder.CreateGEP(EltTy, EltPtr, Offset); 834 auto *TileTy = FixedVectorType::get(EltTy, ResultShape.NumRows * 835 ResultShape.NumColumns); 836 Type *TilePtrTy = PointerType::get(TileTy, AS); 837 Value *TilePtr = 838 Builder.CreatePointerCast(TileStart, TilePtrTy, "col.cast"); 839 840 return loadMatrix(TileTy, TilePtr, Align, 841 Builder.getInt64(MatrixShape.getStride()), IsVolatile, 842 ResultShape, Builder); 843 } 844 845 /// Lower a load instruction with shape information. 846 void LowerLoad(Instruction *Inst, Value *Ptr, MaybeAlign Align, Value *Stride, 847 bool IsVolatile, ShapeInfo Shape) { 848 IRBuilder<> Builder(Inst); 849 finalizeLowering(Inst, 850 loadMatrix(Inst->getType(), Ptr, Align, Stride, IsVolatile, 851 Shape, Builder), 852 Builder); 853 } 854 855 /// Lowers llvm.matrix.column.major.load. 856 /// 857 /// The intrinsic loads a matrix from memory using a stride between columns. 858 void LowerColumnMajorLoad(CallInst *Inst) { 859 assert(MatrixLayout == MatrixLayoutTy::ColumnMajor && 860 "Intrinsic only supports column-major layout!"); 861 Value *Ptr = Inst->getArgOperand(0); 862 Value *Stride = Inst->getArgOperand(1); 863 LowerLoad(Inst, Ptr, Inst->getParamAlign(0), Stride, 864 cast<ConstantInt>(Inst->getArgOperand(2))->isOne(), 865 {Inst->getArgOperand(3), Inst->getArgOperand(4)}); 866 } 867 868 /// Stores a sub-matrix \p StoreVal into the \p R x \p C matrix starting at \p 869 /// MatrixPtr[I][J]. 
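  ///
  /// For a column-major matrix the element offset of MatrixPtr[I][J] is
  /// J * Stride + I; e.g. (illustrative) a tile starting at row 2, column 1 of
  /// an 8 x 8 matrix begins at element 1 * 8 + 2 == 10 from \p MatrixPtr.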
  void storeMatrix(const MatrixTy &StoreVal, Value *MatrixPtr,
                   MaybeAlign MAlign, bool IsVolatile, ShapeInfo MatrixShape,
                   Value *I, Value *J, Type *EltTy, IRBuilder<> &Builder) {
    Value *Offset = Builder.CreateAdd(
        Builder.CreateMul(J, Builder.getInt64(MatrixShape.getStride())), I);

    unsigned AS = cast<PointerType>(MatrixPtr->getType())->getAddressSpace();
    Value *EltPtr =
        Builder.CreatePointerCast(MatrixPtr, PointerType::get(EltTy, AS));
    Value *TileStart = Builder.CreateGEP(EltTy, EltPtr, Offset);
    auto *TileTy = FixedVectorType::get(EltTy, StoreVal.getNumRows() *
                                                   StoreVal.getNumColumns());
    Type *TilePtrTy = PointerType::get(TileTy, AS);
    Value *TilePtr =
        Builder.CreatePointerCast(TileStart, TilePtrTy, "col.cast");

    storeMatrix(TileTy, StoreVal, TilePtr, MAlign,
                Builder.getInt64(MatrixShape.getStride()), IsVolatile, Builder);
  }

  /// Store matrix \p StoreVal starting at \p Ptr and using \p Stride between
  /// vectors.
  MatrixTy storeMatrix(Type *Ty, MatrixTy StoreVal, Value *Ptr,
                       MaybeAlign MAlign, Value *Stride, bool IsVolatile,
                       IRBuilder<> &Builder) {
    auto VType = cast<VectorType>(Ty);
    Value *EltPtr = createElementPtr(Ptr, VType->getElementType(), Builder);
    for (auto Vec : enumerate(StoreVal.vectors())) {
      Value *GEP = computeVectorAddr(EltPtr, Builder.getInt64(Vec.index()),
                                     Stride, StoreVal.getStride(),
                                     VType->getElementType(), Builder);
      Builder.CreateAlignedStore(Vec.value(), GEP,
                                 getAlignForIndex(Vec.index(), Stride,
                                                  VType->getElementType(),
                                                  MAlign),
                                 IsVolatile);
    }
    return MatrixTy().addNumStores(getNumOps(StoreVal.getVectorTy()) *
                                   StoreVal.getNumVectors());
  }

  /// Lower a store instruction with shape information.
  void LowerStore(Instruction *Inst, Value *Matrix, Value *Ptr, MaybeAlign A,
                  Value *Stride, bool IsVolatile, ShapeInfo Shape) {
    IRBuilder<> Builder(Inst);
    auto StoreVal = getMatrix(Matrix, Shape, Builder);
    finalizeLowering(Inst,
                     storeMatrix(Matrix->getType(), StoreVal, Ptr, A, Stride,
                                 IsVolatile, Builder),
                     Builder);
  }

  /// Lowers llvm.matrix.column.major.store.
  ///
  /// The intrinsic stores a matrix back to memory using a stride between
  /// columns.
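  ///
  /// For example (a sketch; the mangled intrinsic name is illustrative):
  ///   call void @llvm.matrix.column.major.store.v8f64.i64(
  ///       <8 x double> %m, double* %dst, i64 %stride, i1 false, i32 4, i32 2)
  /// stores a 4 x 2 matrix as two <4 x double> column vectors placed %stride
  /// elements apart.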
  void LowerColumnMajorStore(CallInst *Inst) {
    assert(MatrixLayout == MatrixLayoutTy::ColumnMajor &&
           "Intrinsic only supports column-major layout!");
    Value *Matrix = Inst->getArgOperand(0);
    Value *Ptr = Inst->getArgOperand(1);
    Value *Stride = Inst->getArgOperand(2);
    LowerStore(Inst, Matrix, Ptr, Inst->getParamAlign(1), Stride,
               cast<ConstantInt>(Inst->getArgOperand(3))->isOne(),
               {Inst->getArgOperand(4), Inst->getArgOperand(5)});
  }

  // Set elements I..I+NumElts-1 to Block
  Value *insertVector(Value *Col, unsigned I, Value *Block,
                      IRBuilder<> &Builder) {

    // First, bring Block to the same size as Col
    unsigned BlockNumElts =
        cast<FixedVectorType>(Block->getType())->getNumElements();
    unsigned NumElts = cast<FixedVectorType>(Col->getType())->getNumElements();
    assert(NumElts >= BlockNumElts && "Too few elements for current block");

    Block = Builder.CreateShuffleVector(
        Block, createSequentialMask(0, BlockNumElts, NumElts - BlockNumElts));

    // If Col is 7 long and I is 2 and BlockNumElts is 2 the mask is: 0, 1, 7,
    // 8, 4, 5, 6
    SmallVector<int, 16> Mask;
    unsigned i;
    for (i = 0; i < I; i++)
      Mask.push_back(i);

    unsigned VecNumElts =
        cast<FixedVectorType>(Col->getType())->getNumElements();
    for (; i < I + BlockNumElts; i++)
      Mask.push_back(i - I + VecNumElts);

    for (; i < VecNumElts; i++)
      Mask.push_back(i);

    return Builder.CreateShuffleVector(Col, Block, Mask);
  }

  Value *createMulAdd(Value *Sum, Value *A, Value *B, bool UseFPOp,
                      IRBuilder<> &Builder, bool AllowContraction,
                      unsigned &NumComputeOps) {
    NumComputeOps += getNumOps(A->getType());
    if (!Sum)
      return UseFPOp ? Builder.CreateFMul(A, B) : Builder.CreateMul(A, B);

    if (UseFPOp) {
      if (AllowContraction) {
        // Use fmuladd for floating point operations and let the backend decide
        // if that's profitable.
        Function *FMulAdd = Intrinsic::getDeclaration(
            Func.getParent(), Intrinsic::fmuladd, A->getType());
        return Builder.CreateCall(FMulAdd, {A, B, Sum});
      }
      NumComputeOps += getNumOps(A->getType());
      Value *Mul = Builder.CreateFMul(A, B);
      return Builder.CreateFAdd(Sum, Mul);
    }

    NumComputeOps += getNumOps(A->getType());
    Value *Mul = Builder.CreateMul(A, B);
    return Builder.CreateAdd(Sum, Mul);
  }

  /// Cache \p Matrix as result of \p Inst and update the uses of \p Inst. For
  /// users with shape information, there's nothing to do: they will use the
  /// cached value when they are lowered. For other users, \p Matrix is
  /// flattened and the uses are updated to use it. Also marks \p Inst for
  /// deletion.
  void finalizeLowering(Instruction *Inst, MatrixTy Matrix,
                        IRBuilder<> &Builder) {
    Inst2ColumnMatrix.insert(std::make_pair(Inst, Matrix));

    ToRemove.push_back(Inst);
    Value *Flattened = nullptr;
    for (Use &U : llvm::make_early_inc_range(Inst->uses())) {
      if (ShapeMap.find(U.getUser()) == ShapeMap.end()) {
        if (!Flattened)
          Flattened = Matrix.embedInVector(Builder);
        U.set(Flattened);
      }
    }
  }

  /// Compute \p Result += \p A * \p B for input matrices with left-associating
  /// addition.
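  ///
  /// A sketch of the column-major code generated below: for each column J of
  /// \p Result and each VF-wide row block starting at I,
  ///   Result[I..I+VF-1, J] += A[I..I+VF-1, K] * splat(B[K, J])  for K = 0..M-1
  /// so the adds can be vectorized without reassociation.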
1014 void emitMatrixMultiply(MatrixTy &Result, const MatrixTy &A, 1015 const MatrixTy &B, bool AllowContraction, 1016 IRBuilder<> &Builder, bool isTiled) { 1017 const unsigned VF = std::max<unsigned>( 1018 TTI.getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) 1019 .getFixedSize() / 1020 Result.getElementType()->getPrimitiveSizeInBits().getFixedSize(), 1021 1U); 1022 unsigned R = Result.getNumRows(); 1023 unsigned C = Result.getNumColumns(); 1024 unsigned M = A.getNumColumns(); 1025 1026 bool IsFP = Result.getElementType()->isFloatingPointTy(); 1027 assert(A.isColumnMajor() == B.isColumnMajor() && 1028 Result.isColumnMajor() == A.isColumnMajor() && 1029 "operands must agree on matrix layout"); 1030 unsigned NumComputeOps = 0; 1031 if (A.isColumnMajor()) { 1032 // Multiply columns from the first operand with scalars from the second 1033 // operand. Then move along the K axes and accumulate the columns. With 1034 // this the adds can be vectorized without reassociation. 1035 for (unsigned J = 0; J < C; ++J) { 1036 unsigned BlockSize = VF; 1037 // If Result is zero, we don't need to accumulate in the K==0 iteration. 1038 bool isSumZero = isa<ConstantAggregateZero>(Result.getColumn(J)); 1039 1040 for (unsigned I = 0; I < R; I += BlockSize) { 1041 // Gradually lower the vectorization factor to cover the remainder. 1042 while (I + BlockSize > R) 1043 BlockSize /= 2; 1044 1045 Value *Sum = isTiled ? Result.extractVector(I, J, BlockSize, Builder) 1046 : nullptr; 1047 for (unsigned K = 0; K < M; ++K) { 1048 Value *L = A.extractVector(I, K, BlockSize, Builder); 1049 Value *RH = Builder.CreateExtractElement(B.getColumn(J), K); 1050 Value *Splat = Builder.CreateVectorSplat(BlockSize, RH, "splat"); 1051 Sum = createMulAdd(isSumZero && K == 0 ? nullptr : Sum, L, Splat, 1052 Result.getElementType()->isFloatingPointTy(), 1053 Builder, AllowContraction, NumComputeOps); 1054 } 1055 Result.setVector(J, 1056 insertVector(Result.getVector(J), I, Sum, Builder)); 1057 } 1058 } 1059 } else { 1060 // Multiply rows from the second operand with scalars from the first 1061 // operand. Then move along the K axes and accumulate the rows. With this 1062 // the adds can be vectorized without reassociation. 1063 for (unsigned I = 0; I < R; ++I) { 1064 unsigned BlockSize = VF; 1065 bool isSumZero = isa<ConstantAggregateZero>(Result.getRow(I)); 1066 for (unsigned J = 0; J < C; J += BlockSize) { 1067 // Gradually lower the vectorization factor to cover the remainder. 1068 while (J + BlockSize > C) 1069 BlockSize /= 2; 1070 1071 Value *Sum = nullptr; 1072 for (unsigned K = 0; K < M; ++K) { 1073 Value *R = B.extractVector(K, J, BlockSize, Builder); 1074 Value *LH = Builder.CreateExtractElement(A.getVector(I), K); 1075 Value *Splat = Builder.CreateVectorSplat(BlockSize, LH, "splat"); 1076 Sum = createMulAdd(isSumZero && K == 0 ? nullptr : Sum, Splat, R, 1077 IsFP, Builder, AllowContraction, NumComputeOps); 1078 } 1079 Result.setVector(I, 1080 insertVector(Result.getVector(I), J, Sum, Builder)); 1081 } 1082 } 1083 } 1084 Result.addNumComputeOps(NumComputeOps); 1085 } 1086 1087 /// Ensure that the memory in \p Load does not alias \p Store by potentially 1088 /// copying it to a new location. This new or otherwise the original location 1089 /// is returned. 
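  ///
  /// A sketch of the code emitted below when the aliasing check cannot be
  /// resolved statically (block names as created below):
  ///   check0:     br (load.begin < store.end), %alias_cont, %no_alias
  ///   alias_cont: br (store.begin < load.end), %copy, %no_alias
  ///   copy:       memcpy the loaded memory into a fresh alloca
  ///   no_alias:   phi [orig ptr, check0], [orig ptr, alias_cont], [alloca, copy]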
  Value *getNonAliasingPointer(LoadInst *Load, StoreInst *Store,
                               CallInst *MatMul) {
    MemoryLocation StoreLoc = MemoryLocation::get(Store);
    MemoryLocation LoadLoc = MemoryLocation::get(Load);

    // If we can statically determine noalias we're good.
    if (AA->isNoAlias(LoadLoc, StoreLoc))
      return Load->getPointerOperand();

    // Create code to check if the memory locations of the Load and Store
    // overlap and if they do, copy Load's operand to a new buffer.

    // First, create new blocks for 2nd part of the check and the copy.
    BasicBlock *Check0 = MatMul->getParent();
    // FIXME: Use lazy DTU and update SplitBlock to accept a DTU instead of a
    // DT. Manually collect dominator tree updates, to avoid unnecessary work,
    // as we adjust Check0 and Check1's branches.
    SmallVector<DominatorTree::UpdateType, 4> DTUpdates;
    for (BasicBlock *Succ : successors(Check0))
      DTUpdates.push_back({DT->Delete, Check0, Succ});

    BasicBlock *Check1 =
        SplitBlock(MatMul->getParent(), MatMul, (DomTreeUpdater *)nullptr, LI,
                   nullptr, "alias_cont");
    BasicBlock *Copy =
        SplitBlock(MatMul->getParent(), MatMul, (DomTreeUpdater *)nullptr, LI,
                   nullptr, "copy");
    BasicBlock *Fusion =
        SplitBlock(MatMul->getParent(), MatMul, (DomTreeUpdater *)nullptr, LI,
                   nullptr, "no_alias");

    // Check if the loaded memory location begins before the end of the store
    // location. If the condition holds, they might overlap, otherwise they are
    // guaranteed to not overlap.
    IRBuilder<> Builder(MatMul);
    Check0->getTerminator()->eraseFromParent();
    Builder.SetInsertPoint(Check0);
    Type *IntPtrTy = Builder.getIntPtrTy(Load->getModule()->getDataLayout());
    Value *StoreBegin = Builder.CreatePtrToInt(
        const_cast<Value *>(StoreLoc.Ptr), IntPtrTy, "store.begin");
    Value *StoreEnd = Builder.CreateAdd(
        StoreBegin, ConstantInt::get(IntPtrTy, StoreLoc.Size.getValue()),
        "store.end", true, true);
    Value *LoadBegin = Builder.CreatePtrToInt(const_cast<Value *>(LoadLoc.Ptr),
                                              IntPtrTy, "load.begin");
    Builder.CreateCondBr(Builder.CreateICmpULT(LoadBegin, StoreEnd), Check1,
                         Fusion);

    // Check if the store begins before the end of the load location. If the
    // condition holds, they alias, otherwise they are guaranteed to not
    // overlap.
    Check1->getTerminator()->eraseFromParent();
    Builder.SetInsertPoint(Check1, Check1->begin());
    Value *LoadEnd = Builder.CreateAdd(
        LoadBegin, ConstantInt::get(IntPtrTy, LoadLoc.Size.getValue()),
        "load.end", true, true);
    Builder.CreateCondBr(Builder.CreateICmpULT(StoreBegin, LoadEnd), Copy,
                         Fusion);

    // Copy load operand to new alloca.
    Builder.SetInsertPoint(Copy, Copy->begin());
    AllocaInst *NewLd =
        Builder.CreateAlloca(Load->getType(), Load->getPointerAddressSpace());
    Builder.CreateMemCpy(NewLd, NewLd->getAlign(),
                         Load->getPointerOperand(), Load->getAlign(),
                         LoadLoc.Size.getValue());
    Builder.SetInsertPoint(Fusion, Fusion->begin());
    PHINode *PHI = Builder.CreatePHI(Load->getPointerOperandType(), 3);
    PHI->addIncoming(Load->getPointerOperand(), Check0);
    PHI->addIncoming(Load->getPointerOperand(), Check1);
    PHI->addIncoming(NewLd, Copy);

    // Adjust DT.
1163 DTUpdates.push_back({DT->Insert, Check0, Check1}); 1164 DTUpdates.push_back({DT->Insert, Check0, Fusion}); 1165 DTUpdates.push_back({DT->Insert, Check1, Copy}); 1166 DTUpdates.push_back({DT->Insert, Check1, Fusion}); 1167 DT->applyUpdates(DTUpdates); 1168 return PHI; 1169 } 1170 1171 bool isFusionProfitable(CallInst *MatMul) { 1172 if (ForceFusion) 1173 return true; 1174 1175 ShapeInfo LShape(MatMul->getArgOperand(2), MatMul->getArgOperand(3)); 1176 ShapeInfo RShape(MatMul->getArgOperand(3), MatMul->getArgOperand(4)); 1177 1178 const unsigned R = LShape.NumRows; 1179 const unsigned C = RShape.NumColumns; 1180 const unsigned M = LShape.NumColumns; 1181 auto *EltType = cast<VectorType>(MatMul->getType())->getElementType(); 1182 1183 const unsigned VF = std::max<unsigned>( 1184 TTI.getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) 1185 .getFixedSize() / 1186 EltType->getPrimitiveSizeInBits().getFixedSize(), 1187 1U); 1188 1189 // Cost model for tiling 1190 // 1191 // For tiling to be beneficial, we need reuse either along the R or 1192 // the C axis. We vectorize along the R axis so that means at least 1193 // 3 elements. 1194 // TODO: Also consider cost of copying if operands alias. 1195 if (R <= VF && C == 1) 1196 return false; 1197 // Then we need enough elements to exceed the number of vector 1198 // registers we have. Note that this is an oversimplification since 1199 // fusing also takes some extra loads which may exceed the number of 1200 // reloads necessary. 1201 unsigned Op0Regs = (R + VF - 1) / VF * M; 1202 unsigned Op1Regs = (M + VF - 1) / VF * C; 1203 return Op0Regs + Op1Regs > TTI.getNumberOfRegisters(true); 1204 } 1205 1206 MatrixTy getZeroMatrix(Type *EltType, unsigned R, unsigned C) { 1207 MatrixTy Res; 1208 auto *ColumType = FixedVectorType::get(EltType, R); 1209 for (unsigned I = 0; I < C; ++I) 1210 Res.addVector(ConstantAggregateZero::get(ColumType)); 1211 return Res; 1212 } 1213 1214 void createTiledLoops(CallInst *MatMul, Value *LPtr, ShapeInfo LShape, 1215 Value *RPtr, ShapeInfo RShape, StoreInst *Store, 1216 bool AllowContract) { 1217 auto *EltType = cast<VectorType>(MatMul->getType())->getElementType(); 1218 1219 // Create the main tiling loop nest. 1220 TileInfo TI(LShape.NumRows, RShape.NumColumns, LShape.NumColumns, TileSize); 1221 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy); 1222 Instruction *InsertI = cast<Instruction>(MatMul); 1223 BasicBlock *Start = InsertI->getParent(); 1224 BasicBlock *End = 1225 SplitBlock(InsertI->getParent(), InsertI, DT, LI, nullptr, "continue"); 1226 IRBuilder<> Builder(MatMul); 1227 BasicBlock *InnerBody = TI.CreateTiledLoops(Start, End, Builder, DTU, *LI); 1228 1229 Type *TileVecTy = 1230 FixedVectorType::get(MatMul->getType()->getScalarType(), TileSize); 1231 MatrixTy TileResult; 1232 // Insert in the inner loop header. 1233 Builder.SetInsertPoint(TI.InnerLoopHeader->getTerminator()); 1234 // Create PHI nodes for the result columns to accumulate across iterations. 1235 SmallVector<PHINode *, 4> ColumnPhis; 1236 for (unsigned I = 0; I < TileSize; I++) { 1237 auto *Phi = Builder.CreatePHI(TileVecTy, 2, "result.vec." 
+ Twine(I)); 1238 Phi->addIncoming(ConstantAggregateZero::get(TileVecTy), 1239 TI.RowLoopHeader->getSingleSuccessor()); 1240 TileResult.addVector(Phi); 1241 ColumnPhis.push_back(Phi); 1242 } 1243 1244 // Insert in the inner loop body, which computes 1245 // Res += Load(CurrentRow, K) * Load(K, CurrentColumn) 1246 Builder.SetInsertPoint(InnerBody->getTerminator()); 1247 // Load tiles of the operands. 1248 MatrixTy A = loadMatrix(LPtr, {}, false, LShape, TI.CurrentRow, TI.CurrentK, 1249 {TileSize, TileSize}, EltType, Builder); 1250 MatrixTy B = loadMatrix(RPtr, {}, false, RShape, TI.CurrentK, TI.CurrentCol, 1251 {TileSize, TileSize}, EltType, Builder); 1252 emitMatrixMultiply(TileResult, A, B, AllowContract, Builder, true); 1253 // Store result after the inner loop is done. 1254 Builder.SetInsertPoint(TI.RowLoopLatch->getTerminator()); 1255 storeMatrix(TileResult, Store->getPointerOperand(), Store->getAlign(), 1256 Store->isVolatile(), {LShape.NumRows, RShape.NumColumns}, 1257 TI.CurrentRow, TI.CurrentCol, EltType, Builder); 1258 1259 for (unsigned I = 0; I < TileResult.getNumVectors(); I++) 1260 ColumnPhis[I]->addIncoming(TileResult.getVector(I), TI.InnerLoopLatch); 1261 1262 // Force unrolling of a few iterations of the inner loop, to make sure there 1263 // is enough work per iteration. 1264 // FIXME: The unroller should make this decision directly instead, but 1265 // currently the cost-model is not up to the task. 1266 unsigned InnerLoopUnrollCount = std::min(10u, LShape.NumColumns / TileSize); 1267 addStringMetadataToLoop(LI->getLoopFor(TI.InnerLoopHeader), 1268 "llvm.loop.unroll.count", InnerLoopUnrollCount); 1269 } 1270 1271 void emitSIMDTiling(CallInst *MatMul, LoadInst *LoadOp0, LoadInst *LoadOp1, 1272 StoreInst *Store, 1273 SmallPtrSetImpl<Instruction *> &FusedInsts) { 1274 assert(MatrixLayout == MatrixLayoutTy::ColumnMajor && 1275 "Tiling only supported for column-major matrixes at the moment!"); 1276 if (!isFusionProfitable(MatMul)) 1277 return; 1278 1279 ShapeInfo LShape(MatMul->getArgOperand(2), MatMul->getArgOperand(3)); 1280 ShapeInfo RShape(MatMul->getArgOperand(3), MatMul->getArgOperand(4)); 1281 1282 const unsigned R = LShape.NumRows; 1283 const unsigned C = RShape.NumColumns; 1284 const unsigned M = LShape.NumColumns; 1285 auto *EltType = cast<VectorType>(MatMul->getType())->getElementType(); 1286 1287 Value *APtr = getNonAliasingPointer(LoadOp0, Store, MatMul); 1288 Value *BPtr = getNonAliasingPointer(LoadOp1, Store, MatMul); 1289 Value *CPtr = Store->getPointerOperand(); 1290 1291 bool AllowContract = AllowContractEnabled || (isa<FPMathOperator>(MatMul) && 1292 MatMul->hasAllowContract()); 1293 if (TileUseLoops && (R % TileSize == 0 && C % TileSize == 0)) 1294 createTiledLoops(MatMul, APtr, LShape, BPtr, RShape, Store, 1295 AllowContract); 1296 else { 1297 IRBuilder<> Builder(Store); 1298 for (unsigned J = 0; J < C; J += TileSize) 1299 for (unsigned I = 0; I < R; I += TileSize) { 1300 const unsigned TileR = std::min(R - I, unsigned(TileSize)); 1301 const unsigned TileC = std::min(C - J, unsigned(TileSize)); 1302 MatrixTy Res = getZeroMatrix(EltType, TileR, TileC); 1303 1304 for (unsigned K = 0; K < M; K += TileSize) { 1305 const unsigned TileM = std::min(M - K, unsigned(TileSize)); 1306 MatrixTy A = 1307 loadMatrix(APtr, LoadOp0->getAlign(), LoadOp0->isVolatile(), 1308 LShape, Builder.getInt64(I), Builder.getInt64(K), 1309 {TileR, TileM}, EltType, Builder); 1310 MatrixTy B = 1311 loadMatrix(BPtr, LoadOp1->getAlign(), LoadOp1->isVolatile(), 1312 RShape, 
Builder.getInt64(K), Builder.getInt64(J), 1313 {TileM, TileC}, EltType, Builder); 1314 emitMatrixMultiply(Res, A, B, AllowContract, Builder, true); 1315 } 1316 storeMatrix(Res, CPtr, Store->getAlign(), Store->isVolatile(), {R, M}, 1317 Builder.getInt64(I), Builder.getInt64(J), EltType, 1318 Builder); 1319 } 1320 } 1321 1322 // Mark eliminated instructions as fused and remove them. 1323 FusedInsts.insert(Store); 1324 FusedInsts.insert(MatMul); 1325 Store->eraseFromParent(); 1326 MatMul->eraseFromParent(); 1327 if (LoadOp0->hasNUses(0)) { 1328 FusedInsts.insert(LoadOp0); 1329 LoadOp0->eraseFromParent(); 1330 } 1331 if (LoadOp1->hasNUses(0)) { 1332 FusedInsts.insert(LoadOp1); 1333 LoadOp1->eraseFromParent(); 1334 } 1335 } 1336 1337 /// Try to lower matrix multiply chains by fusing operations. 1338 /// 1339 /// Currently we only lower {ld, ld} -> matmul -> st chains. 1340 // 1341 /// No need to return a MatrixTy object for the result of the operation, since 1342 /// the single store user will be lowered as part of this. Instructions that 1343 /// are completely eliminated by fusion are added to \p FusedInsts. 1344 void LowerMatrixMultiplyFused(CallInst *MatMul, 1345 SmallPtrSetImpl<Instruction *> &FusedInsts) { 1346 if (!FuseMatrix || !MatMul->hasOneUse() || 1347 MatrixLayout != MatrixLayoutTy::ColumnMajor || !DT) 1348 return; 1349 1350 assert(AA && LI && "Analyses should be available"); 1351 1352 auto *LoadOp0 = dyn_cast<LoadInst>(MatMul->getOperand(0)); 1353 auto *LoadOp1 = dyn_cast<LoadInst>(MatMul->getOperand(1)); 1354 auto *Store = dyn_cast<StoreInst>(*MatMul->user_begin()); 1355 if (LoadOp0 && LoadOp1 && Store) { 1356 // The store address must dominate the MatMul instruction, otherwise 1357 // we create invalid IR. 1358 // FIXME: See if we can hoist the store address computation. 1359 auto *AddrI = dyn_cast<Instruction>(Store->getOperand(1)); 1360 if (AddrI && (!DT->dominates(AddrI, MatMul))) 1361 return; 1362 1363 emitSIMDTiling(MatMul, LoadOp0, LoadOp1, Store, FusedInsts); 1364 return; 1365 } 1366 } 1367 1368 /// Lowers llvm.matrix.multiply. 1369 void LowerMultiply(CallInst *MatMul) { 1370 IRBuilder<> Builder(MatMul); 1371 auto *EltType = cast<VectorType>(MatMul->getType())->getElementType(); 1372 ShapeInfo LShape(MatMul->getArgOperand(2), MatMul->getArgOperand(3)); 1373 ShapeInfo RShape(MatMul->getArgOperand(3), MatMul->getArgOperand(4)); 1374 1375 const MatrixTy &Lhs = getMatrix(MatMul->getArgOperand(0), LShape, Builder); 1376 const MatrixTy &Rhs = getMatrix(MatMul->getArgOperand(1), RShape, Builder); 1377 assert(Lhs.getElementType() == Rhs.getElementType() && 1378 "Matrix multiply argument element types do not match."); 1379 1380 const unsigned R = LShape.NumRows; 1381 const unsigned C = RShape.NumColumns; 1382 assert(LShape.NumColumns == RShape.NumRows); 1383 1384 // Initialize the output 1385 MatrixTy Result(R, C, EltType); 1386 assert(Lhs.getElementType() == Result.getElementType() && 1387 "Matrix multiply result element type does not match arguments."); 1388 1389 bool AllowContract = AllowContractEnabled || (isa<FPMathOperator>(MatMul) && 1390 MatMul->hasAllowContract()); 1391 emitMatrixMultiply(Result, Lhs, Rhs, AllowContract, Builder, false); 1392 finalizeLowering(MatMul, Result, Builder); 1393 } 1394 1395 /// Lowers llvm.matrix.transpose. 
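  ///
  /// For example (a sketch): transposing a column-major 2 x 3 matrix turns
  /// three 2-element column vectors into two 3-element result vectors; result
  /// vector I collects element I of every input vector via
  /// extractelement/insertelement.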
1396 void LowerTranspose(CallInst *Inst) { 1397 MatrixTy Result; 1398 IRBuilder<> Builder(Inst); 1399 Value *InputVal = Inst->getArgOperand(0); 1400 VectorType *VectorTy = cast<VectorType>(InputVal->getType()); 1401 ShapeInfo ArgShape(Inst->getArgOperand(1), Inst->getArgOperand(2)); 1402 MatrixTy InputMatrix = getMatrix(InputVal, ArgShape, Builder); 1403 1404 const unsigned NewNumVecs = 1405 InputMatrix.isColumnMajor() ? ArgShape.NumRows : ArgShape.NumColumns; 1406 const unsigned NewNumElts = 1407 InputMatrix.isColumnMajor() ? ArgShape.NumColumns : ArgShape.NumRows; 1408 1409 for (unsigned I = 0; I < NewNumVecs; ++I) { 1410 // Build a single result vector. First initialize it. 1411 Value *ResultVector = UndefValue::get( 1412 FixedVectorType::get(VectorTy->getElementType(), NewNumElts)); 1413 // Go through the old elements and insert it into the resulting vector. 1414 for (auto J : enumerate(InputMatrix.vectors())) { 1415 Value *Elt = Builder.CreateExtractElement(J.value(), I); 1416 // Row and column indices are transposed. 1417 ResultVector = 1418 Builder.CreateInsertElement(ResultVector, Elt, J.index()); 1419 } 1420 Result.addVector(ResultVector); 1421 } 1422 1423 // TODO: Improve estimate of operations needed for transposes. Currently we 1424 // just count the insertelement/extractelement instructions, but do not 1425 // account for later simplifications/combines. 1426 finalizeLowering( 1427 Inst, 1428 Result.addNumComputeOps(2 * ArgShape.NumRows * ArgShape.NumColumns), 1429 Builder); 1430 } 1431 1432 /// Lower load instructions, if shape information is available. 1433 bool VisitLoad(LoadInst *Inst, Value *Ptr, IRBuilder<> &Builder) { 1434 auto I = ShapeMap.find(Inst); 1435 if (I == ShapeMap.end()) 1436 return false; 1437 1438 LowerLoad(Inst, Ptr, Inst->getAlign(), 1439 Builder.getInt64(I->second.getStride()), Inst->isVolatile(), 1440 I->second); 1441 return true; 1442 } 1443 1444 bool VisitStore(StoreInst *Inst, Value *StoredVal, Value *Ptr, 1445 IRBuilder<> &Builder) { 1446 auto I = ShapeMap.find(StoredVal); 1447 if (I == ShapeMap.end()) 1448 return false; 1449 1450 LowerStore(Inst, StoredVal, Ptr, Inst->getAlign(), 1451 Builder.getInt64(I->second.getStride()), Inst->isVolatile(), 1452 I->second); 1453 return true; 1454 } 1455 1456 /// Lower binary operators, if shape information is available. 1457 bool VisitBinaryOperator(BinaryOperator *Inst) { 1458 auto I = ShapeMap.find(Inst); 1459 if (I == ShapeMap.end()) 1460 return false; 1461 1462 Value *Lhs = Inst->getOperand(0); 1463 Value *Rhs = Inst->getOperand(1); 1464 1465 IRBuilder<> Builder(Inst); 1466 ShapeInfo &Shape = I->second; 1467 1468 MatrixTy Result; 1469 MatrixTy A = getMatrix(Lhs, Shape, Builder); 1470 MatrixTy B = getMatrix(Rhs, Shape, Builder); 1471 assert(A.isColumnMajor() == B.isColumnMajor() && 1472 Result.isColumnMajor() == A.isColumnMajor() && 1473 "operands must agree on matrix layout"); 1474 1475 // Helper to perform binary op on vectors. 
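    // For example (a sketch): an fadd of two 4 x 4 float matrices in
    // column-major layout is lowered by the loop below to four <4 x float>
    // fadds, one per column vector.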
    auto BuildVectorOp = [&Builder, Inst](Value *LHS, Value *RHS) {
      switch (Inst->getOpcode()) {
      case Instruction::Add:
        return Builder.CreateAdd(LHS, RHS);
      case Instruction::Mul:
        return Builder.CreateMul(LHS, RHS);
      case Instruction::Sub:
        return Builder.CreateSub(LHS, RHS);
      case Instruction::FAdd:
        return Builder.CreateFAdd(LHS, RHS);
      case Instruction::FMul:
        return Builder.CreateFMul(LHS, RHS);
      case Instruction::FSub:
        return Builder.CreateFSub(LHS, RHS);
      default:
        llvm_unreachable("Unsupported binary operator for matrix");
      }
    };

    for (unsigned I = 0; I < Shape.getNumVectors(); ++I)
      Result.addVector(BuildVectorOp(A.getVector(I), B.getVector(I)));

    finalizeLowering(Inst,
                     Result.addNumComputeOps(getNumOps(Result.getVectorTy()) *
                                             Result.getNumVectors()),
                     Builder);
    return true;
  }

  /// Lower unary operators, if shape information is available.
  bool VisitUnaryOperator(UnaryOperator *Inst) {
    auto I = ShapeMap.find(Inst);
    if (I == ShapeMap.end())
      return false;

    Value *Op = Inst->getOperand(0);

    IRBuilder<> Builder(Inst);
    ShapeInfo &Shape = I->second;

    MatrixTy Result;
    MatrixTy M = getMatrix(Op, Shape, Builder);

    // Helper to perform the unary operation on a vector.
    auto BuildVectorOp = [&Builder, Inst](Value *Op) {
      switch (Inst->getOpcode()) {
      case Instruction::FNeg:
        return Builder.CreateFNeg(Op);
      default:
        llvm_unreachable("Unsupported unary operator for matrix");
      }
    };

    for (unsigned I = 0; I < Shape.getNumVectors(); ++I)
      Result.addVector(BuildVectorOp(M.getVector(I)));

    finalizeLowering(Inst,
                     Result.addNumComputeOps(getNumOps(Result.getVectorTy()) *
                                             Result.getNumVectors()),
                     Builder);
    return true;
  }

  /// Helper to linearize a matrix expression tree into a string. Currently
  /// matrix expressions are linearized by starting at an expression leaf and
  /// linearizing bottom up.
  struct ExprLinearizer {
    unsigned LengthToBreak = 100;
    std::string Str;
    raw_string_ostream Stream;
    unsigned LineLength = 0;
    const DataLayout &DL;

    /// Mapping from instructions to matrixes. It is used to identify
    /// matrix instructions.
    const MapVector<Value *, MatrixTy> &Inst2Matrix;

    /// Mapping from values to the leaves of all expressions that the value is
    /// part of.
    const DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared;

    /// Set of matrix expressions in the scope of a given DISubprogram.
    const SmallSetVector<Value *, 32> &ExprsInSubprogram;

    /// Leaf node of the expression to linearize.
    Value *Leaf;

    /// Used to keep track of sub-expressions that get reused while linearizing
    /// the expression. Re-used sub-expressions are marked as (reused).
    SmallPtrSet<Value *, 8> ReusedExprs;

    ExprLinearizer(const DataLayout &DL,
                   const MapVector<Value *, MatrixTy> &Inst2Matrix,
                   const DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared,
                   const SmallSetVector<Value *, 32> &ExprsInSubprogram,
                   Value *Leaf)
        : Str(), Stream(Str), DL(DL), Inst2Matrix(Inst2Matrix), Shared(Shared),
          ExprsInSubprogram(ExprsInSubprogram), Leaf(Leaf) {}

    void indent(unsigned N) {
      LineLength += N;
      for (unsigned i = 0; i < N; i++)
        Stream << " ";
    }

    void lineBreak() {
      Stream << "\n";
      LineLength = 0;
    }

    void maybeIndent(unsigned Indent) {
      if (LineLength >= LengthToBreak)
        lineBreak();

      if (LineLength == 0)
        indent(Indent);
    }

    void write(StringRef S) {
      LineLength += S.size();
      Stream << S;
    }

    Value *getUnderlyingObjectThroughLoads(Value *V) {
      if (Value *Ptr = getPointerOperand(V))
        return getUnderlyingObjectThroughLoads(Ptr);
      if (V->getType()->isPointerTy())
        return getUnderlyingObject(V);
      return V;
    }

    /// Returns true if \p V is a matrix value in the given subprogram.
    bool isMatrix(Value *V) const { return ExprsInSubprogram.count(V); }

    /// If \p V is a matrix value, print its shape as NumRows x NumColumns to
    /// \p SS.
    void prettyPrintMatrixType(Value *V, raw_string_ostream &SS) {
      auto M = Inst2Matrix.find(V);
      if (M == Inst2Matrix.end())
        SS << "unknown";
      else {
        SS << M->second.getNumRows();
        SS << "x";
        SS << M->second.getNumColumns();
      }
    }

    /// Write the called function name. Handles calls to llvm.matrix.*
    /// specially: we write the name, followed by the dimensions of the input
    /// matrixes, followed by the scalar type name.
    void writeFnName(CallInst *CI) {
      if (!CI->getCalledFunction())
        write("<no called fn>");
      else {
        StringRef Name = CI->getCalledFunction()->getName();
        if (!Name.startswith("llvm.matrix")) {
          write(Name);
          return;
        }
        auto *II = cast<IntrinsicInst>(CI);
        write(StringRef(Intrinsic::getName(II->getIntrinsicID(), {}))
                  .drop_front(StringRef("llvm.matrix.").size()));
        write(".");
        std::string Tmp;
        raw_string_ostream SS(Tmp);

        switch (II->getIntrinsicID()) {
        case Intrinsic::matrix_multiply:
          prettyPrintMatrixType(II->getOperand(0), SS);
          SS << ".";
          prettyPrintMatrixType(II->getOperand(1), SS);
          SS << "." << *II->getType()->getScalarType();
          break;
        case Intrinsic::matrix_transpose:
          prettyPrintMatrixType(II->getOperand(0), SS);
          SS << "." << *II->getType()->getScalarType();
          break;
        case Intrinsic::matrix_column_major_load:
          prettyPrintMatrixType(II, SS);
          SS << "." << *II->getType()->getScalarType();
          break;
        case Intrinsic::matrix_column_major_store:
          prettyPrintMatrixType(II->getOperand(0), SS);
          SS << "."
             << *II->getOperand(0)->getType()->getScalarType();
          break;
        default:
          llvm_unreachable("Unhandled case");
        }
        SS.flush();
        write(Tmp);
      }
    }

    unsigned getNumShapeArgs(CallInst *CI) const {
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI)) {
        switch (II->getIntrinsicID()) {
        case Intrinsic::matrix_multiply:
          return 3;
        case Intrinsic::matrix_transpose:
          return 2;
        case Intrinsic::matrix_column_major_load:
        case Intrinsic::matrix_column_major_store:
          return 3;
        default:
          return 0;
        }
      }
      return 0;
    }

    /// Special printing for values: for pointers we print whether they refer
    /// to a (function-)external address or a stack address; for other values
    /// we print either the constant or "scalar"/"matrix".
    void write(Value *V) {
      V = getUnderlyingObjectThroughLoads(V);
      if (V->getType()->isPointerTy()) {
        if (isa<AllocaInst>(V)) {
          Stream << "stack addr";
          LineLength += StringRef("stack addr").size();
        } else {
          Stream << "addr";
          LineLength += StringRef("addr").size();
        }
        if (!V->getName().empty()) {
          Stream << " %" << V->getName();
          LineLength += V->getName().size() + 2;
        }
        return;
      }

      std::string Tmp;
      raw_string_ostream TmpStream(Tmp);

      if (auto *CI = dyn_cast<ConstantInt>(V))
        TmpStream << CI->getValue();
      else if (isa<Constant>(V))
        TmpStream << "constant";
      else if (isMatrix(V))
        TmpStream << "matrix";
      else
        TmpStream << "scalar";
      TmpStream.flush();
      Tmp = std::string(StringRef(Tmp).trim());
      LineLength += Tmp.size();
      Stream << Tmp;
    }

    /// Linearize expression \p Expr starting at an indentation of \p Indent.
    /// Expressions that are re-used multiple times are prefixed with (reused)
    /// at the re-used root instruction.
    void linearizeExpr(Value *Expr, unsigned Indent, bool ParentReused,
                       bool ParentShared) {
      auto *I = cast<Instruction>(Expr);
      maybeIndent(Indent);
      SmallVector<Value *, 8> Ops;

      // Is Expr shared with other expression leaves?
      bool ExprShared = false;

      // Deal with shared subtrees. Mark them as shared, if required.
      if (!ParentShared) {
        auto SI = Shared.find(Expr);
        assert(SI != Shared.end() && SI->second.count(Leaf));

        for (Value *S : SI->second) {
          if (S == Leaf)
            continue;
          DebugLoc DL = cast<Instruction>(S)->getDebugLoc();
          write("shared with remark at line " + std::to_string(DL.getLine()) +
                " column " + std::to_string(DL.getCol()) + " (");
        }
        ExprShared = SI->second.size() > 1;
      }

      bool Reused = !ReusedExprs.insert(Expr).second;
      if (Reused && !ParentReused)
        write("(reused) ");

      if (auto *CI = dyn_cast<CallInst>(I)) {
        writeFnName(CI);

        Ops.append(CI->arg_begin(), CI->arg_end() - getNumShapeArgs(CI));
      } else if (isa<BitCastInst>(Expr)) {
        // Special case bitcasts, which are used to materialize matrixes from
        // non-matrix ops.
        write("matrix");
        return;
      } else {
        Ops.append(I->value_op_begin(), I->value_op_end());
        write(std::string(I->getOpcodeName()));
      }

      write(std::string("("));

      unsigned NumOpsToBreak = 1;
      if (match(Expr, m_Intrinsic<Intrinsic::matrix_column_major_load>()))
        NumOpsToBreak = 2;

      for (Value *Op : Ops) {
        if (Ops.size() > NumOpsToBreak)
          lineBreak();

        maybeIndent(Indent + 1);
        if (isMatrix(Op))
          linearizeExpr(Op, Indent + 1, Reused, ExprShared);
        else
          write(Op);
        if (Op != Ops.back())
          write(", ");
      }

      write(")");
    }

    const std::string &getResult() {
      Stream.flush();
      return Str;
    }
  };

  /// Generate remarks for matrix operations in a function. To generate remarks
  /// for matrix expressions, the following approach is used:
  /// 1. Use the inlined-at debug information to group matrix operations to the
  ///    DISubprograms they are contained in.
  /// 2. Collect leaves of matrix expressions (done in
  ///    RemarkGenerator::getExpressionLeaves) for each subprogram-expression
  ///    mapping. Leaves are lowered matrix instructions without other matrix
  ///    users (like stores) in the current subprogram.
  /// 3. For each leaf, create a remark containing a linearized version of the
  ///    matrix expression. The expression is linearized by a recursive
  ///    bottom-up traversal of the matrix operands, starting at a leaf. Note
  ///    that multiple leaves can share sub-expressions. Shared subexpressions
  ///    are explicitly marked as shared().
  struct RemarkGenerator {
    const MapVector<Value *, MatrixTy> &Inst2Matrix;
    OptimizationRemarkEmitter &ORE;
    Function &Func;
    const DataLayout &DL;

    RemarkGenerator(const MapVector<Value *, MatrixTy> &Inst2Matrix,
                    OptimizationRemarkEmitter &ORE, Function &Func)
        : Inst2Matrix(Inst2Matrix), ORE(ORE), Func(Func),
          DL(Func.getParent()->getDataLayout()) {}

    /// Return all leaves of the expressions in \p ExprsInSubprogram. Those are
    /// instructions in Inst2Matrix returning void or without any users in
    /// \p ExprsInSubprogram. Currently that should only include stores.
    SmallVector<Value *, 4>
    getExpressionLeaves(const SmallSetVector<Value *, 32> &ExprsInSubprogram) {
      SmallVector<Value *, 4> Leaves;
      for (auto *Expr : ExprsInSubprogram)
        if (Expr->getType()->isVoidTy() ||
            !any_of(Expr->users(), [&ExprsInSubprogram](User *U) {
              return ExprsInSubprogram.count(U);
            }))
          Leaves.push_back(Expr);
      return Leaves;
    }

    /// Recursively traverse expression \p V starting at \p Leaf and add
    /// \p Leaf to all visited expressions in \p Shared. Limit the matrix
    /// operations to the ones in \p ExprsInSubprogram.
    void collectSharedInfo(Value *Leaf, Value *V,
                           const SmallSetVector<Value *, 32> &ExprsInSubprogram,
                           DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared) {
      if (!ExprsInSubprogram.count(V))
        return;

      auto I = Shared.insert({V, {}});
      I.first->second.insert(Leaf);

      for (Value *Op : cast<Instruction>(V)->operand_values())
        collectSharedInfo(Leaf, Op, ExprsInSubprogram, Shared);
    }

    /// Calculate the exclusive and shared op counts for the expression
    /// starting at \p Root. Expressions used multiple times are counted once.
    /// Limit the matrix operations to the ones in \p ExprsInSubprogram.
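    ///
    /// The first element of the returned pair holds the counts for
    /// sub-expressions reachable from \p Root only; the second element holds
    /// the counts for sub-expressions that are shared with other leaves.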
    std::pair<OpInfoTy, OpInfoTy>
    sumOpInfos(Value *Root, SmallPtrSetImpl<Value *> &ReusedExprs,
               const SmallSetVector<Value *, 32> &ExprsInSubprogram,
               DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared) const {
      if (!ExprsInSubprogram.count(Root))
        return {};

      // Already counted this expression. Stop.
      if (!ReusedExprs.insert(Root).second)
        return {};

      OpInfoTy SharedCount;
      OpInfoTy Count;

      auto I = Shared.find(Root);
      auto CM = Inst2Matrix.find(Root);
      if (I->second.size() == 1)
        Count = CM->second.getOpInfo();
      else
        SharedCount = CM->second.getOpInfo();

      for (Value *Op : cast<Instruction>(Root)->operand_values()) {
        auto C = sumOpInfos(Op, ReusedExprs, ExprsInSubprogram, Shared);
        Count += C.first;
        SharedCount += C.second;
      }
      return {Count, SharedCount};
    }

    void emitRemarks() {
      if (!ORE.allowExtraAnalysis(DEBUG_TYPE))
        return;

      // Map matrix operations to their containing subprograms, by traversing
      // the inlinedAt chain. If the function does not have a DISubprogram, we
      // only map them to the containing function.
      MapVector<DISubprogram *, SmallVector<Value *, 8>> Subprog2Exprs;
      for (auto &KV : Inst2Matrix) {
        if (Func.getSubprogram()) {
          auto *I = cast<Instruction>(KV.first);
          DILocation *Context = I->getDebugLoc();
          while (Context) {
            auto I =
                Subprog2Exprs.insert({getSubprogram(Context->getScope()), {}});
            I.first->second.push_back(KV.first);
            Context = DebugLoc(Context).getInlinedAt();
          }
        } else {
          auto I = Subprog2Exprs.insert({nullptr, {}});
          I.first->second.push_back(KV.first);
        }
      }
      for (auto &KV : Subprog2Exprs) {
        SmallSetVector<Value *, 32> ExprsInSubprogram(KV.second.begin(),
                                                      KV.second.end());
        auto Leaves = getExpressionLeaves(ExprsInSubprogram);

        DenseMap<Value *, SmallPtrSet<Value *, 2>> Shared;
        for (Value *Leaf : Leaves)
          collectSharedInfo(Leaf, Leaf, ExprsInSubprogram, Shared);

        // Generate remarks for each leaf.
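        // Each remark starts with a summary line of the form
        //   Lowered with <N> stores, <N> loads, <N> compute ops
        // (plus a second set of counts if parts of the expression are shared
        // with other leaves), followed by the linearized matrix expression
        // produced by linearize() below.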
        for (auto *L : Leaves) {
          DebugLoc Loc = cast<Instruction>(L)->getDebugLoc();
          DILocation *Context = cast<Instruction>(L)->getDebugLoc();
          while (Context) {
            if (getSubprogram(Context->getScope()) == KV.first) {
              Loc = Context;
              break;
            }
            Context = DebugLoc(Context).getInlinedAt();
          }

          SmallPtrSet<Value *, 8> ReusedExprs;
          OpInfoTy Counts, SharedCounts;
          std::tie(Counts, SharedCounts) =
              sumOpInfos(L, ReusedExprs, ExprsInSubprogram, Shared);

          OptimizationRemark Rem(DEBUG_TYPE, "matrix-lowered", Loc,
                                 cast<Instruction>(L)->getParent());

          Rem << "Lowered with ";
          Rem << ore::NV("NumStores", Counts.NumStores) << " stores, "
              << ore::NV("NumLoads", Counts.NumLoads) << " loads, "
              << ore::NV("NumComputeOps", Counts.NumComputeOps)
              << " compute ops";

          if (SharedCounts.NumStores > 0 || SharedCounts.NumLoads > 0 ||
              SharedCounts.NumComputeOps > 0) {
            Rem << ",\nadditionally "
                << ore::NV("NumStores", SharedCounts.NumStores) << " stores, "
                << ore::NV("NumLoads", SharedCounts.NumLoads) << " loads, "
                << ore::NV("NumFPOps", SharedCounts.NumComputeOps)
                << " compute ops"
                << " are shared with other expressions";
          }

          Rem << ("\n" + linearize(L, Shared, ExprsInSubprogram, DL));
          ORE.emit(Rem);
        }
      }
    }

    std::string
    linearize(Value *L,
              const DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared,
              const SmallSetVector<Value *, 32> &ExprsInSubprogram,
              const DataLayout &DL) {
      ExprLinearizer Lin(DL, Inst2Matrix, Shared, ExprsInSubprogram, L);
      Lin.linearizeExpr(L, 0, false, false);
      return Lin.getResult();
    }
  };
};
} // namespace

PreservedAnalyses LowerMatrixIntrinsicsPass::run(Function &F,
                                                 FunctionAnalysisManager &AM) {
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  OptimizationRemarkEmitter *ORE = nullptr;
  AAResults *AA = nullptr;
  DominatorTree *DT = nullptr;
  LoopInfo *LI = nullptr;

  if (!Minimal) {
    ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
    AA = &AM.getResult<AAManager>(F);
    DT = &AM.getResult<DominatorTreeAnalysis>(F);
    LI = &AM.getResult<LoopAnalysis>(F);
  }

  LowerMatrixIntrinsics LMT(F, TTI, AA, DT, LI, ORE);
  if (LMT.Visit()) {
    PreservedAnalyses PA;
    if (!Minimal) {
      PA.preserve<LoopAnalysis>();
      PA.preserve<DominatorTreeAnalysis>();
    }
    return PA;
  }
  return PreservedAnalyses::all();
}

namespace {

class LowerMatrixIntrinsicsLegacyPass : public FunctionPass {
public:
  static char ID;

  LowerMatrixIntrinsicsLegacyPass() : FunctionPass(ID) {
    initializeLowerMatrixIntrinsicsLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
    auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    LowerMatrixIntrinsics LMT(F, TTI, &AA, &DT, &LI, &ORE);
    return LMT.Visit();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
  }
};
} // namespace

static const char pass_name[] = "Lower the matrix intrinsics";
char LowerMatrixIntrinsicsLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(LowerMatrixIntrinsicsLegacyPass, DEBUG_TYPE, pass_name,
                      false, false)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(LowerMatrixIntrinsicsLegacyPass, DEBUG_TYPE, pass_name,
                    false, false)

Pass *llvm::createLowerMatrixIntrinsicsPass() {
  return new LowerMatrixIntrinsicsLegacyPass();
}

namespace {

/// A lightweight version of the matrix lowering pass that only requires TTI.
/// Advanced features that require DT, AA, or ORE, such as tiling, are
/// disabled. This is used to lower matrix intrinsics if the main lowering pass
/// is not run, for example with -O0.
class LowerMatrixIntrinsicsMinimalLegacyPass : public FunctionPass {
public:
  static char ID;

  LowerMatrixIntrinsicsMinimalLegacyPass() : FunctionPass(ID) {
    initializeLowerMatrixIntrinsicsMinimalLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    LowerMatrixIntrinsics LMT(F, TTI, nullptr, nullptr, nullptr, nullptr);
    return LMT.Visit();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesCFG();
  }
};
} // namespace

static const char pass_name_minimal[] = "Lower the matrix intrinsics (minimal)";
char LowerMatrixIntrinsicsMinimalLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(LowerMatrixIntrinsicsMinimalLegacyPass,
                      "lower-matrix-intrinsics-minimal", pass_name_minimal,
                      false, false)
INITIALIZE_PASS_END(LowerMatrixIntrinsicsMinimalLegacyPass,
                    "lower-matrix-intrinsics-minimal", pass_name_minimal, false,
                    false)

Pass *llvm::createLowerMatrixIntrinsicsMinimalPass() {
  return new LowerMatrixIntrinsicsMinimalLegacyPass();
}