//===--------------------- InterleavedAccessPass.cpp ----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Interleaved Access pass, which identifies
// interleaved memory accesses and transforms them into target specific
// intrinsics.
//
// An interleaved load reads data from memory into several vectors,
// DE-interleaving the data at a given factor. An interleaved store writes
// several vectors to memory, RE-interleaving the data at a given factor.
//
// As interleaved accesses are difficult to identify in CodeGen (mainly
// because the VECTOR_SHUFFLE DAG node is quite different from the
// shufflevector IR instruction), we identify and transform them to intrinsics
// in this pass so the intrinsics can be easily matched into target specific
// instructions later in CodeGen.
//
// E.g. An interleaved load (Factor = 2):
//        %wide.vec = load <8 x i32>, <8 x i32>* %ptr
//        %v0 = shuffle <8 x i32> %wide.vec, <8 x i32> undef, <0, 2, 4, 6>
//        %v1 = shuffle <8 x i32> %wide.vec, <8 x i32> undef, <1, 3, 5, 7>
//
// It could be transformed into a ld2 intrinsic in AArch64 backend or a vld2
// intrinsic in ARM backend.
//
// E.g. An interleaved store (Factor = 3):
//        %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
//                 <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
//        store <12 x i32> %i.vec, <12 x i32>* %ptr
//
// It could be transformed into a st3 intrinsic in AArch64 backend or a vst3
// intrinsic in ARM backend.
//
//===----------------------------------------------------------------------===//
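// For reference, the Factor = 2 load example above would become roughly the
// following on AArch64 (a sketch only; the exact intrinsic name and type
// mangling are decided by the target's TargetLowering hook):
//
//        %ld2 = call { <4 x i32>, <4 x i32> }
//               @llvm.aarch64.neon.ld2.v4i32.p0v4i32(<4 x i32>* %ptr)
//        %v0 = extractvalue { <4 x i32>, <4 x i32> } %ld2, 0
//        %v1 = extractvalue { <4 x i32>, <4 x i32> } %ld2, 1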
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;

#define DEBUG_TYPE "interleaved-access"

static cl::opt<bool> LowerInterleavedAccesses(
    "lower-interleaved-accesses",
    cl::desc("Enable lowering interleaved accesses to intrinsics"),
    cl::init(true), cl::Hidden);

static unsigned MaxFactor; // The maximum supported interleave factor.

namespace llvm {
static void initializeInterleavedAccessPass(PassRegistry &);
}

namespace {

class InterleavedAccess : public FunctionPass {

public:
  static char ID;
  InterleavedAccess(const TargetMachine *TM = nullptr)
      : FunctionPass(ID), TM(TM), TLI(nullptr) {
    initializeInterleavedAccessPass(*PassRegistry::getPassRegistry());
  }

  const char *getPassName() const override {
    return "Interleaved Access Pass";
  }

  bool runOnFunction(Function &F) override;

private:
  const TargetMachine *TM;
  const TargetLowering *TLI;

  /// \brief Transform an interleaved load into target specific intrinsics.
  bool lowerInterleavedLoad(LoadInst *LI,
                            SmallVector<Instruction *, 32> &DeadInsts);

  /// \brief Transform an interleaved store into target specific intrinsics.
  bool lowerInterleavedStore(StoreInst *SI,
                             SmallVector<Instruction *, 32> &DeadInsts);
};
} // end anonymous namespace.

char InterleavedAccess::ID = 0;
INITIALIZE_TM_PASS(
    InterleavedAccess, "interleaved-access",
    "Lower interleaved memory accesses to target specific intrinsics", false,
    false)

FunctionPass *llvm::createInterleavedAccessPass(const TargetMachine *TM) {
  return new InterleavedAccess(TM);
}

/// \brief Check if the mask is a DE-interleave mask of the given factor
/// \p Factor like:
///     <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
static bool isDeInterleaveMaskOfFactor(ArrayRef<int> Mask, unsigned Factor,
                                       unsigned &Index) {
  // Check all potential start indices from 0 to (Factor - 1).
  for (Index = 0; Index < Factor; Index++) {
    unsigned i = 0;

    // Check that the elements ascend with a stride of Factor. Ignore undef
    // elements.
    for (; i < Mask.size(); i++)
      if (Mask[i] >= 0 && static_cast<unsigned>(Mask[i]) != Index + i * Factor)
        break;

    if (i == Mask.size())
      return true;
  }

  return false;
}

/// \brief Check if the mask is a DE-interleave mask for an interleaved load.
///
/// E.g. DE-interleave masks (Factor = 2) could be:
///     <0, 2, 4, 6> (mask of index 0 to extract even elements)
///     <1, 3, 5, 7> (mask of index 1 to extract odd elements)
static bool isDeInterleaveMask(ArrayRef<int> Mask, unsigned &Factor,
                               unsigned &Index) {
  if (Mask.size() < 2)
    return false;

  // Check potential Factors.
  for (Factor = 2; Factor <= MaxFactor; Factor++)
    if (isDeInterleaveMaskOfFactor(Mask, Factor, Index))
      return true;

  return false;
}

/// \brief Check if the mask is a RE-interleave mask for an interleaved store.
///
/// I.e. <0, NumSubElts, ..., NumSubElts*(Factor - 1), 1, NumSubElts + 1, ...>
///
/// E.g. The RE-interleave mask (Factor = 2) could be:
///     <0, 4, 1, 5, 2, 6, 3, 7>
static bool isReInterleaveMask(ArrayRef<int> Mask, unsigned &Factor) {
  unsigned NumElts = Mask.size();
  if (NumElts < 4)
    return false;

  // Check potential Factors.
  for (Factor = 2; Factor <= MaxFactor; Factor++) {
    if (NumElts % Factor)
      continue;

    unsigned NumSubElts = NumElts / Factor;
    if (!isPowerOf2_32(NumSubElts))
      continue;

    // Check whether each element matches the RE-interleave rule. Ignore undef
    // elements.
    unsigned i = 0;
    for (; i < NumElts; i++)
      if (Mask[i] >= 0 &&
          static_cast<unsigned>(Mask[i]) !=
              (i % Factor) * NumSubElts + i / Factor)
        break;

    // Found a RE-interleave mask of the current factor.
    if (i == NumElts)
      return true;
  }

  return false;
}
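// To illustrate the rule checked above: for the Factor = 2 mask
// <0, 4, 1, 5, 2, 6, 3, 7>, NumElts = 8 and NumSubElts = 4, and every
// element satisfies Mask[i] == (i % Factor) * NumSubElts + i / Factor.
// E.g. Mask[3] == (3 % 2) * 4 + 3 / 2 == 5.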
bool InterleavedAccess::lowerInterleavedLoad(
    LoadInst *LI, SmallVector<Instruction *, 32> &DeadInsts) {
  if (!LI->isSimple())
    return false;

  SmallVector<ShuffleVectorInst *, 4> Shuffles;

  // Check if all users of this load are shufflevectors.
  for (auto UI = LI->user_begin(), E = LI->user_end(); UI != E; UI++) {
    ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(*UI);
    if (!SVI || !isa<UndefValue>(SVI->getOperand(1)))
      return false;

    Shuffles.push_back(SVI);
  }

  if (Shuffles.empty())
    return false;

  unsigned Factor, Index;

  // Check if the first shufflevector is a DE-interleave shuffle.
  if (!isDeInterleaveMask(Shuffles[0]->getShuffleMask(), Factor, Index))
    return false;

  // Holds the corresponding index for each DE-interleave shuffle.
  SmallVector<unsigned, 4> Indices;
  Indices.push_back(Index);

  Type *VecTy = Shuffles[0]->getType();

  // Check if the other shufflevectors are also DE-interleave shuffles of the
  // same type and factor as the first shufflevector.
  for (unsigned i = 1; i < Shuffles.size(); i++) {
    if (Shuffles[i]->getType() != VecTy)
      return false;

    if (!isDeInterleaveMaskOfFactor(Shuffles[i]->getShuffleMask(), Factor,
                                    Index))
      return false;

    Indices.push_back(Index);
  }

  DEBUG(dbgs() << "IA: Found an interleaved load: " << *LI << "\n");

  // Try to create target specific intrinsics to replace the load and shuffles.
  if (!TLI->lowerInterleavedLoad(LI, Shuffles, Indices, Factor))
    return false;

  for (auto SVI : Shuffles)
    DeadInsts.push_back(SVI);

  DeadInsts.push_back(LI);
  return true;
}

bool InterleavedAccess::lowerInterleavedStore(
    StoreInst *SI, SmallVector<Instruction *, 32> &DeadInsts) {
  if (!SI->isSimple())
    return false;

  ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(SI->getValueOperand());
  if (!SVI || !SVI->hasOneUse())
    return false;

  // Check if the shufflevector is a RE-interleave shuffle.
  unsigned Factor;
  if (!isReInterleaveMask(SVI->getShuffleMask(), Factor))
    return false;

  DEBUG(dbgs() << "IA: Found an interleaved store: " << *SI << "\n");

  // Try to create target specific intrinsics to replace the store and shuffle.
  if (!TLI->lowerInterleavedStore(SI, SVI, Factor))
    return false;

  // Already have a new target specific interleaved store. Erase the old store
  // and shuffle.
  DeadInsts.push_back(SI);
  DeadInsts.push_back(SVI);
  return true;
}

bool InterleavedAccess::runOnFunction(Function &F) {
  if (!TM || !LowerInterleavedAccesses)
    return false;

  DEBUG(dbgs() << "*** " << getPassName() << ": " << F.getName() << "\n");

  TLI = TM->getSubtargetImpl(F)->getTargetLowering();
  MaxFactor = TLI->getMaxSupportedInterleaveFactor();

  // Holds dead instructions that will be erased later.
  SmallVector<Instruction *, 32> DeadInsts;
  bool Changed = false;

  for (auto &I : instructions(F)) {
    if (LoadInst *LI = dyn_cast<LoadInst>(&I))
      Changed |= lowerInterleavedLoad(LI, DeadInsts);

    if (StoreInst *SI = dyn_cast<StoreInst>(&I))
      Changed |= lowerInterleavedStore(SI, DeadInsts);
  }

  for (auto I : DeadInsts)
    I->eraseFromParent();

  return Changed;
}
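// Note on use: this pass is a no-op unless it was created with a
// TargetMachine, so a backend opts in from its TargetPassConfig. A minimal
// sketch (assuming the usual TargetPassConfig hooks; the AArch64 and ARM
// backends do something along these lines in addIRPasses()):
//
//   if (TM->getOptLevel() != CodeGenOpt::None)
//     addPass(createInterleavedAccessPass(TM));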