//===- X86AvoidStoreForwardingBlocks.cpp - Avoid HW Store Forward Block --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// If a load follows a store and reloads data that the store has written to
// memory, Intel microarchitectures can in many cases forward the data directly
// from the store to the load. This "store forwarding" saves cycles by enabling
// the load to obtain the data directly instead of accessing it from cache or
// memory.
// A "store forward block" occurs when a store cannot be forwarded to the load.
// The most typical case on Intel Core microarchitectures is a small store that
// cannot be forwarded to a large load. The estimated penalty for a store
// forward block is ~13 cycles.
//
// This pass tries to recognize and handle cases where a "store forward block"
// is created by the compiler when lowering memcpy calls to a sequence
// of a load and a store.
//
// The pass currently only handles cases where the memcpy is lowered to
// XMM/YMM registers; it tries to break the memcpy into smaller copies.
// Breaking the memcpy should be possible since there is no atomicity
// guarantee for loads and stores to XMM/YMM.
//
// It could be better for performance to solve the problem by loading
// to XMM/YMM, inserting the partial store into the register, and then storing
// it back to memory, but that would be a more conservative optimization since
// it requires proving that all memory accesses between the blocking store and
// the load either must or must not alias before we can move the store, whereas
// the transformation done here is correct regardless of other memory accesses.
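//
// For illustration, consider a hypothetical sequence (not taken from any
// particular input) where a small store is followed by a wide reload of the
// same buffer that was lowered from a memcpy:
//
//   mov     DWORD PTR [rsp + 4], eax       ; 4-byte store into the buffer
//   vmovups xmm0, XMMWORD PTR [rsp]        ; 16-byte load overlapping it
//   vmovups XMMWORD PTR [rdi], xmm0        ; the memcpy's store
//
// The wide load cannot be forwarded the 4-byte store's data and stalls. This
// pass would rewrite the XMM load/store pair as three scalar copies of 4, 4
// and 8 bytes at offsets 0, 4 and 8, so that each new load either exactly
// matches the blocking store or does not overlap it at all.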
//===----------------------------------------------------------------------===//

#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCInstrDesc.h"

using namespace llvm;

#define DEBUG_TYPE "x86-avoid-SFB"

static cl::opt<bool> DisableX86AvoidStoreForwardBlocks(
    "x86-disable-avoid-SFB", cl::Hidden,
    cl::desc("X86: Disable Store Forwarding Blocks fixup."), cl::init(false));

static cl::opt<unsigned> X86AvoidSFBInspectionLimit(
    "x86-sfb-inspection-limit",
    cl::desc("X86: Number of instructions backward to "
             "inspect for store forwarding blocks."),
    cl::init(20), cl::Hidden);

namespace {

using DisplacementSizeMap = std::map<int64_t, unsigned>;

class X86AvoidSFBPass : public MachineFunctionPass {
public:
  static char ID;
  X86AvoidSFBPass() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override {
    return "X86 Avoid Store Forwarding Blocks";
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    MachineFunctionPass::getAnalysisUsage(AU);
    AU.addRequired<AAResultsWrapperPass>();
  }

private:
  MachineRegisterInfo *MRI;
  const X86InstrInfo *TII;
  const X86RegisterInfo *TRI;
  SmallVector<std::pair<MachineInstr *, MachineInstr *>, 2>
      BlockedLoadsStoresPairs;
  SmallVector<MachineInstr *, 2> ForRemoval;
  AliasAnalysis *AA;

  /// Find couples of a load followed by a store to memory which look
  /// like a memcpy, and record them in BlockedLoadsStoresPairs.
  void findPotentiallylBlockedCopies(MachineFunction &MF);
  /// Break the memcpy's load and store into smaller copies
  /// such that each memory load that was blocked by a smaller store
  /// would now be copied separately.
  void breakBlockedCopies(MachineInstr *LoadInst, MachineInstr *StoreInst,
                          const DisplacementSizeMap &BlockingStoresDispSizeMap);
  /// Break a copy of size Size to smaller copies.
  void buildCopies(int Size, MachineInstr *LoadInst, int64_t LdDispImm,
                   MachineInstr *StoreInst, int64_t StDispImm,
                   int64_t LMMOffset, int64_t SMMOffset);

  void buildCopy(MachineInstr *LoadInst, unsigned NLoadOpcode,
                 int64_t LoadDisp, MachineInstr *StoreInst,
                 unsigned NStoreOpcode, int64_t StoreDisp, unsigned Size,
                 int64_t LMMOffset, int64_t SMMOffset);

  bool alias(const MachineMemOperand &Op1, const MachineMemOperand &Op2) const;

  unsigned getRegSizeInBytes(MachineInstr *Inst);
};

} // end anonymous namespace

char X86AvoidSFBPass::ID = 0;

INITIALIZE_PASS_BEGIN(X86AvoidSFBPass, DEBUG_TYPE,
                      "X86 Avoid Store Forwarding Blocks", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(X86AvoidSFBPass, DEBUG_TYPE,
                    "X86 Avoid Store Forwarding Blocks", false, false)

FunctionPass *llvm::createX86AvoidStoreForwardingBlocks() {
  return new X86AvoidSFBPass();
}

static bool isXMMLoadOpcode(unsigned Opcode) {
  return Opcode == X86::MOVUPSrm || Opcode == X86::MOVAPSrm ||
         Opcode == X86::VMOVUPSrm || Opcode == X86::VMOVAPSrm ||
         Opcode == X86::VMOVUPDrm || Opcode == X86::VMOVAPDrm ||
         Opcode == X86::VMOVDQUrm || Opcode == X86::VMOVDQArm ||
         Opcode == X86::VMOVUPSZ128rm || Opcode == X86::VMOVAPSZ128rm ||
         Opcode == X86::VMOVUPDZ128rm || Opcode == X86::VMOVAPDZ128rm ||
         Opcode == X86::VMOVDQU64Z128rm || Opcode == X86::VMOVDQA64Z128rm ||
         Opcode == X86::VMOVDQU32Z128rm || Opcode == X86::VMOVDQA32Z128rm;
}

static bool isYMMLoadOpcode(unsigned Opcode) {
  return Opcode == X86::VMOVUPSYrm || Opcode == X86::VMOVAPSYrm ||
         Opcode == X86::VMOVUPDYrm || Opcode == X86::VMOVAPDYrm ||
         Opcode == X86::VMOVDQUYrm || Opcode == X86::VMOVDQAYrm ||
         Opcode == X86::VMOVUPSZ256rm || Opcode == X86::VMOVAPSZ256rm ||
         Opcode == X86::VMOVUPDZ256rm || Opcode == X86::VMOVAPDZ256rm ||
         Opcode == X86::VMOVDQU64Z256rm || Opcode == X86::VMOVDQA64Z256rm ||
         Opcode == X86::VMOVDQU32Z256rm || Opcode == X86::VMOVDQA32Z256rm;
}

static bool isPotentialBlockedMemCpyLd(unsigned Opcode) {
  return isXMMLoadOpcode(Opcode) || isYMMLoadOpcode(Opcode);
}

static bool isPotentialBlockedMemCpyPair(int LdOpcode, int StOpcode) {
  switch (LdOpcode) {
  case X86::MOVUPSrm:
  case X86::MOVAPSrm:
    return StOpcode == X86::MOVUPSmr || StOpcode == X86::MOVAPSmr;
  case X86::VMOVUPSrm:
  case X86::VMOVAPSrm:
    return StOpcode == X86::VMOVUPSmr || StOpcode == X86::VMOVAPSmr;
  case X86::VMOVUPDrm:
  case X86::VMOVAPDrm:
    return StOpcode == X86::VMOVUPDmr || StOpcode == X86::VMOVAPDmr;
  case X86::VMOVDQUrm:
  case X86::VMOVDQArm:
    return StOpcode == X86::VMOVDQUmr || StOpcode == X86::VMOVDQAmr;
  case X86::VMOVUPSZ128rm:
  case X86::VMOVAPSZ128rm:
    return StOpcode == X86::VMOVUPSZ128mr || StOpcode == X86::VMOVAPSZ128mr;
  case X86::VMOVUPDZ128rm:
  case X86::VMOVAPDZ128rm:
    return StOpcode == X86::VMOVUPDZ128mr || StOpcode == X86::VMOVAPDZ128mr;
  case X86::VMOVUPSYrm:
  case X86::VMOVAPSYrm:
    return StOpcode == X86::VMOVUPSYmr || StOpcode == X86::VMOVAPSYmr;
  case X86::VMOVUPDYrm:
  case X86::VMOVAPDYrm:
    return StOpcode == X86::VMOVUPDYmr || StOpcode == X86::VMOVAPDYmr;
  case X86::VMOVDQUYrm:
  case X86::VMOVDQAYrm:
    return StOpcode == X86::VMOVDQUYmr || StOpcode == X86::VMOVDQAYmr;
  case X86::VMOVUPSZ256rm:
  case X86::VMOVAPSZ256rm:
    return StOpcode == X86::VMOVUPSZ256mr || StOpcode == X86::VMOVAPSZ256mr;
  case X86::VMOVUPDZ256rm:
  case X86::VMOVAPDZ256rm:
    return StOpcode == X86::VMOVUPDZ256mr || StOpcode == X86::VMOVAPDZ256mr;
  case X86::VMOVDQU64Z128rm:
  case X86::VMOVDQA64Z128rm:
    return StOpcode == X86::VMOVDQU64Z128mr ||
           StOpcode == X86::VMOVDQA64Z128mr;
  case X86::VMOVDQU32Z128rm:
  case X86::VMOVDQA32Z128rm:
    return StOpcode == X86::VMOVDQU32Z128mr ||
           StOpcode == X86::VMOVDQA32Z128mr;
  case X86::VMOVDQU64Z256rm:
  case X86::VMOVDQA64Z256rm:
    return StOpcode == X86::VMOVDQU64Z256mr ||
           StOpcode == X86::VMOVDQA64Z256mr;
  case X86::VMOVDQU32Z256rm:
  case X86::VMOVDQA32Z256rm:
    return StOpcode == X86::VMOVDQU32Z256mr ||
           StOpcode == X86::VMOVDQA32Z256mr;
  default:
    return false;
  }
}
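
// A preceding store can block the wide load only if it is narrower than the
// load: any 1/2/4/8-byte integer store qualifies, and for 32-byte YMM loads
// a 16-byte vector store does as well.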
static bool isPotentialBlockingStoreInst(int Opcode, int LoadOpcode) {
  bool PBlock = false;
  PBlock |= Opcode == X86::MOV64mr || Opcode == X86::MOV64mi32 ||
            Opcode == X86::MOV32mr || Opcode == X86::MOV32mi ||
            Opcode == X86::MOV16mr || Opcode == X86::MOV16mi ||
            Opcode == X86::MOV8mr || Opcode == X86::MOV8mi;
  if (isYMMLoadOpcode(LoadOpcode))
    PBlock |= Opcode == X86::VMOVUPSmr || Opcode == X86::VMOVAPSmr ||
              Opcode == X86::VMOVUPDmr || Opcode == X86::VMOVAPDmr ||
              Opcode == X86::VMOVDQUmr || Opcode == X86::VMOVDQAmr ||
              Opcode == X86::VMOVUPSZ128mr || Opcode == X86::VMOVAPSZ128mr ||
              Opcode == X86::VMOVUPDZ128mr || Opcode == X86::VMOVAPDZ128mr ||
              Opcode == X86::VMOVDQU64Z128mr ||
              Opcode == X86::VMOVDQA64Z128mr ||
              Opcode == X86::VMOVDQU32Z128mr || Opcode == X86::VMOVDQA32Z128mr;
  return PBlock;
}

static const int MOV128SZ = 16;
static const int MOV64SZ = 8;
static const int MOV32SZ = 4;
static const int MOV16SZ = 2;
static const int MOV8SZ = 1;

static unsigned getYMMtoXMMLoadOpcode(unsigned LoadOpcode) {
  switch (LoadOpcode) {
  case X86::VMOVUPSYrm:
  case X86::VMOVAPSYrm:
    return X86::VMOVUPSrm;
  case X86::VMOVUPDYrm:
  case X86::VMOVAPDYrm:
    return X86::VMOVUPDrm;
  case X86::VMOVDQUYrm:
  case X86::VMOVDQAYrm:
    return X86::VMOVDQUrm;
  case X86::VMOVUPSZ256rm:
  case X86::VMOVAPSZ256rm:
    return X86::VMOVUPSZ128rm;
  case X86::VMOVUPDZ256rm:
  case X86::VMOVAPDZ256rm:
    return X86::VMOVUPDZ128rm;
  case X86::VMOVDQU64Z256rm:
  case X86::VMOVDQA64Z256rm:
    return X86::VMOVDQU64Z128rm;
  case X86::VMOVDQU32Z256rm:
  case X86::VMOVDQA32Z256rm:
    return X86::VMOVDQU32Z128rm;
  default:
    llvm_unreachable("Unexpected Load Instruction Opcode");
  }
  return 0;
}

static unsigned getYMMtoXMMStoreOpcode(unsigned StoreOpcode) {
  switch (StoreOpcode) {
  case X86::VMOVUPSYmr:
  case X86::VMOVAPSYmr:
    return X86::VMOVUPSmr;
  case X86::VMOVUPDYmr:
  case X86::VMOVAPDYmr:
    return X86::VMOVUPDmr;
  case X86::VMOVDQUYmr:
  case X86::VMOVDQAYmr:
    return X86::VMOVDQUmr;
  case X86::VMOVUPSZ256mr:
  case X86::VMOVAPSZ256mr:
    return X86::VMOVUPSZ128mr;
  case X86::VMOVUPDZ256mr:
  case X86::VMOVAPDZ256mr:
    return X86::VMOVUPDZ128mr;
  case X86::VMOVDQU64Z256mr:
  case X86::VMOVDQA64Z256mr:
    return X86::VMOVDQU64Z128mr;
  case X86::VMOVDQU32Z256mr:
  case X86::VMOVDQA32Z256mr:
    return X86::VMOVDQU32Z128mr;
  default:
    llvm_unreachable("Unexpected Store Instruction Opcode");
  }
  return 0;
}
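
// X86 memory references are described by five consecutive machine operands:
// base register, scale amount, index register, displacement and segment
// register (X86::AddrBaseReg .. X86::AddrSegmentReg). The helpers below
// locate that operand group within an instruction and pick out the pieces
// this pass cares about.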
static int getAddrOffset(MachineInstr *MI) {
  const MCInstrDesc &Descl = MI->getDesc();
  int AddrOffset = X86II::getMemoryOperandNo(Descl.TSFlags);
  assert(AddrOffset != -1 && "Expected Memory Operand");
  AddrOffset += X86II::getOperandBias(Descl);
  return AddrOffset;
}

static MachineOperand &getBaseOperand(MachineInstr *MI) {
  int AddrOffset = getAddrOffset(MI);
  return MI->getOperand(AddrOffset + X86::AddrBaseReg);
}

static MachineOperand &getDispOperand(MachineInstr *MI) {
  int AddrOffset = getAddrOffset(MI);
  return MI->getOperand(AddrOffset + X86::AddrDisp);
}

// Relevant addressing modes contain only a base register and an immediate
// displacement, or a frame index and an immediate displacement.
// TODO: Consider expanding to other addressing modes in the future.
static bool isRelevantAddressingMode(MachineInstr *MI) {
  int AddrOffset = getAddrOffset(MI);
  MachineOperand &Base = getBaseOperand(MI);
  MachineOperand &Disp = getDispOperand(MI);
  MachineOperand &Scale = MI->getOperand(AddrOffset + X86::AddrScaleAmt);
  MachineOperand &Index = MI->getOperand(AddrOffset + X86::AddrIndexReg);
  MachineOperand &Segment = MI->getOperand(AddrOffset + X86::AddrSegmentReg);

  if (!((Base.isReg() && Base.getReg() != X86::NoRegister) || Base.isFI()))
    return false;
  if (!Disp.isImm())
    return false;
  if (Scale.getImm() != 1)
    return false;
  if (!(Index.isReg() && Index.getReg() == X86::NoRegister))
    return false;
  if (!(Segment.isReg() && Segment.getReg() == X86::NoRegister))
    return false;
  return true;
}

// Collect potentially blocking stores.
// Limit the number of instructions we inspect backwards, since the effect of
// a store block won't be visible if the store and load instructions have
// enough instructions in between to keep the core busy.
static SmallVector<MachineInstr *, 2>
findPotentialBlockers(MachineInstr *LoadInst) {
  SmallVector<MachineInstr *, 2> PotentialBlockers;
  unsigned BlockCount = 0;
  const unsigned InspectionLimit = X86AvoidSFBInspectionLimit;
  for (auto PBInst = std::next(MachineBasicBlock::reverse_iterator(LoadInst)),
            E = LoadInst->getParent()->rend();
       PBInst != E; ++PBInst) {
    BlockCount++;
    if (BlockCount >= InspectionLimit)
      break;
    MachineInstr &MI = *PBInst;
    if (MI.getDesc().isCall())
      return PotentialBlockers;
    PotentialBlockers.push_back(&MI);
  }
  // If we didn't reach the instruction limit, try the predecessor blocks.
  // Ideally we should traverse the predecessor blocks in depth with some
  // coloring algorithm, but for now let's just look at the first order
  // predecessors.
  if (BlockCount < InspectionLimit) {
    MachineBasicBlock *MBB = LoadInst->getParent();
    int LimitLeft = InspectionLimit - BlockCount;
    for (MachineBasicBlock::pred_iterator PB = MBB->pred_begin(),
                                          PE = MBB->pred_end();
         PB != PE; ++PB) {
      MachineBasicBlock *PMBB = *PB;
      int PredCount = 0;
      for (MachineBasicBlock::reverse_iterator PBInst = PMBB->rbegin(),
                                               PME = PMBB->rend();
           PBInst != PME; ++PBInst) {
        PredCount++;
        if (PredCount >= LimitLeft)
          break;
        if (PBInst->getDesc().isCall())
          break;
        PotentialBlockers.push_back(&*PBInst);
      }
    }
  }
  return PotentialBlockers;
}
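
// Emit one Size-byte piece of the copy: a NLoadOpcode load from the original
// load's base operand plus LoadDisp into a fresh virtual register, and a
// matching NStoreOpcode store to the original store's base operand plus
// StoreDisp. The new instructions take their memory operands from the
// originals, narrowed to Size bytes at offsets LMMOffset/SMMOffset.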
void X86AvoidSFBPass::buildCopy(MachineInstr *LoadInst, unsigned NLoadOpcode,
                                int64_t LoadDisp, MachineInstr *StoreInst,
                                unsigned NStoreOpcode, int64_t StoreDisp,
                                unsigned Size, int64_t LMMOffset,
                                int64_t SMMOffset) {
  MachineOperand &LoadBase = getBaseOperand(LoadInst);
  MachineOperand &StoreBase = getBaseOperand(StoreInst);
  MachineBasicBlock *MBB = LoadInst->getParent();
  MachineMemOperand *LMMO = *LoadInst->memoperands_begin();
  MachineMemOperand *SMMO = *StoreInst->memoperands_begin();

  unsigned Reg1 = MRI->createVirtualRegister(
      TII->getRegClass(TII->get(NLoadOpcode), 0, TRI, *(MBB->getParent())));
  MachineInstr *NewLoad =
      BuildMI(*MBB, LoadInst, LoadInst->getDebugLoc(), TII->get(NLoadOpcode),
              Reg1)
          .add(LoadBase)
          .addImm(1)
          .addReg(X86::NoRegister)
          .addImm(LoadDisp)
          .addReg(X86::NoRegister)
          .addMemOperand(
              MBB->getParent()->getMachineMemOperand(LMMO, LMMOffset, Size));
  if (LoadBase.isReg())
    getBaseOperand(NewLoad).setIsKill(false);
  LLVM_DEBUG(NewLoad->dump());
  // If the load and store are consecutive, use the LoadInst location to
  // reduce register pressure.
  MachineInstr *StInst = StoreInst;
  auto PrevInstrIt = skipDebugInstructionsBackward(
      std::prev(MachineBasicBlock::instr_iterator(StoreInst)),
      MBB->instr_begin());
  if (PrevInstrIt.getNodePtr() == LoadInst)
    StInst = LoadInst;
  MachineInstr *NewStore =
      BuildMI(*MBB, StInst, StInst->getDebugLoc(), TII->get(NStoreOpcode))
          .add(StoreBase)
          .addImm(1)
          .addReg(X86::NoRegister)
          .addImm(StoreDisp)
          .addReg(X86::NoRegister)
          .addReg(Reg1)
          .addMemOperand(
              MBB->getParent()->getMachineMemOperand(SMMO, SMMOffset, Size));
  if (StoreBase.isReg())
    getBaseOperand(NewStore).setIsKill(false);
  MachineOperand &StoreSrcVReg = StoreInst->getOperand(X86::AddrNumOperands);
  assert(StoreSrcVReg.isReg() && "Expected virtual register");
  NewStore->getOperand(X86::AddrNumOperands).setIsKill(StoreSrcVReg.isKill());
  LLVM_DEBUG(NewStore->dump());
}
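
// The copy is decomposed greedily, always peeling off the largest piece that
// still fits: 16 bytes (only when the original load is a YMM load), then 8,
// 4, 2 and finally 1 byte. For example, a 10-byte region becomes one 8-byte
// and one 2-byte copy, and a 32-byte YMM copy becomes two 16-byte XMM copies.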
void X86AvoidSFBPass::buildCopies(int Size, MachineInstr *LoadInst,
                                  int64_t LdDispImm, MachineInstr *StoreInst,
                                  int64_t StDispImm, int64_t LMMOffset,
                                  int64_t SMMOffset) {
  int LdDisp = LdDispImm;
  int StDisp = StDispImm;
  while (Size > 0) {
    if ((Size - MOV128SZ >= 0) && isYMMLoadOpcode(LoadInst->getOpcode())) {
      Size = Size - MOV128SZ;
      buildCopy(LoadInst, getYMMtoXMMLoadOpcode(LoadInst->getOpcode()), LdDisp,
                StoreInst, getYMMtoXMMStoreOpcode(StoreInst->getOpcode()),
                StDisp, MOV128SZ, LMMOffset, SMMOffset);
      LdDisp += MOV128SZ;
      StDisp += MOV128SZ;
      LMMOffset += MOV128SZ;
      SMMOffset += MOV128SZ;
      continue;
    }
    if (Size - MOV64SZ >= 0) {
      Size = Size - MOV64SZ;
      buildCopy(LoadInst, X86::MOV64rm, LdDisp, StoreInst, X86::MOV64mr,
                StDisp, MOV64SZ, LMMOffset, SMMOffset);
      LdDisp += MOV64SZ;
      StDisp += MOV64SZ;
      LMMOffset += MOV64SZ;
      SMMOffset += MOV64SZ;
      continue;
    }
    if (Size - MOV32SZ >= 0) {
      Size = Size - MOV32SZ;
      buildCopy(LoadInst, X86::MOV32rm, LdDisp, StoreInst, X86::MOV32mr,
                StDisp, MOV32SZ, LMMOffset, SMMOffset);
      LdDisp += MOV32SZ;
      StDisp += MOV32SZ;
      LMMOffset += MOV32SZ;
      SMMOffset += MOV32SZ;
      continue;
    }
    if (Size - MOV16SZ >= 0) {
      Size = Size - MOV16SZ;
      buildCopy(LoadInst, X86::MOV16rm, LdDisp, StoreInst, X86::MOV16mr,
                StDisp, MOV16SZ, LMMOffset, SMMOffset);
      LdDisp += MOV16SZ;
      StDisp += MOV16SZ;
      LMMOffset += MOV16SZ;
      SMMOffset += MOV16SZ;
      continue;
    }
    if (Size - MOV8SZ >= 0) {
      Size = Size - MOV8SZ;
      buildCopy(LoadInst, X86::MOV8rm, LdDisp, StoreInst, X86::MOV8mr, StDisp,
                MOV8SZ, LMMOffset, SMMOffset);
      LdDisp += MOV8SZ;
      StDisp += MOV8SZ;
      LMMOffset += MOV8SZ;
      SMMOffset += MOV8SZ;
      continue;
    }
  }
  assert(Size == 0 && "Wrong size division");
}
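
// After the smaller copies have been emitted, transfer the kill flags of the
// original load's and store's base registers onto the last newly created load
// and store, since the original wide instructions are about to be erased.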
static void updateKillStatus(MachineInstr *LoadInst, MachineInstr *StoreInst) {
  MachineOperand &LoadBase = getBaseOperand(LoadInst);
  MachineOperand &StoreBase = getBaseOperand(StoreInst);
  auto StorePrevNonDbgInstr =
      skipDebugInstructionsBackward(
          std::prev(MachineBasicBlock::instr_iterator(StoreInst)),
          LoadInst->getParent()->instr_begin())
          .getNodePtr();
  if (LoadBase.isReg()) {
    MachineInstr *LastLoad = LoadInst->getPrevNode();
    // If the original load and store to xmm/ymm were consecutive
    // then the partial copies were also created in
    // a consecutive order to reduce register pressure,
    // and the location of the last load is before the last store.
    if (StorePrevNonDbgInstr == LoadInst)
      LastLoad = LoadInst->getPrevNode()->getPrevNode();
    getBaseOperand(LastLoad).setIsKill(LoadBase.isKill());
  }
  if (StoreBase.isReg()) {
    MachineInstr *StInst = StoreInst;
    if (StorePrevNonDbgInstr == LoadInst)
      StInst = LoadInst;
    getBaseOperand(StInst->getPrevNode()).setIsKill(StoreBase.isKill());
  }
}

bool X86AvoidSFBPass::alias(const MachineMemOperand &Op1,
                            const MachineMemOperand &Op2) const {
  if (!Op1.getValue() || !Op2.getValue())
    return true;

  int64_t MinOffset = std::min(Op1.getOffset(), Op2.getOffset());
  int64_t Overlapa = Op1.getSize() + Op1.getOffset() - MinOffset;
  int64_t Overlapb = Op2.getSize() + Op2.getOffset() - MinOffset;

  AliasResult AAResult =
      AA->alias(MemoryLocation(Op1.getValue(), Overlapa, Op1.getAAInfo()),
                MemoryLocation(Op2.getValue(), Overlapb, Op2.getAAInfo()));
  return AAResult != NoAlias;
}

void X86AvoidSFBPass::findPotentiallylBlockedCopies(MachineFunction &MF) {
  for (auto &MBB : MF)
    for (auto &MI : MBB) {
      if (!isPotentialBlockedMemCpyLd(MI.getOpcode()))
        continue;
      int DefVR = MI.getOperand(0).getReg();
      if (!MRI->hasOneNonDBGUse(DefVR))
        continue;
      for (auto UI = MRI->use_nodbg_begin(DefVR), UE = MRI->use_nodbg_end();
           UI != UE;) {
        MachineOperand &StoreMO = *UI++;
        MachineInstr &StoreMI = *StoreMO.getParent();
        // Skip cases where the memcpy may overlap.
        if (StoreMI.getParent() == MI.getParent() &&
            isPotentialBlockedMemCpyPair(MI.getOpcode(),
                                         StoreMI.getOpcode()) &&
            isRelevantAddressingMode(&MI) &&
            isRelevantAddressingMode(&StoreMI)) {
          assert(MI.hasOneMemOperand() &&
                 "Expected one memory operand for load instruction");
          assert(StoreMI.hasOneMemOperand() &&
                 "Expected one memory operand for store instruction");
          if (!alias(**MI.memoperands_begin(), **StoreMI.memoperands_begin()))
            BlockedLoadsStoresPairs.push_back(std::make_pair(&MI, &StoreMI));
        }
      }
    }
}

unsigned X86AvoidSFBPass::getRegSizeInBytes(MachineInstr *LoadInst) {
  auto TRC = TII->getRegClass(TII->get(LoadInst->getOpcode()), 0, TRI,
                              *LoadInst->getParent()->getParent());
  return TRI->getRegSizeInBits(*TRC) / 8;
}
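
// The copy is emitted in segments around each blocking store. For example, a
// 32-byte load at displacement 0 blocked by a 4-byte store at displacement 16
// is rewritten as copies of [0, 16), [16, 20) and [20, 32), so the piece that
// reloads the blocking store has exactly the store's size.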
void X86AvoidSFBPass::breakBlockedCopies(
    MachineInstr *LoadInst, MachineInstr *StoreInst,
    const DisplacementSizeMap &BlockingStoresDispSizeMap) {
  int64_t LdDispImm = getDispOperand(LoadInst).getImm();
  int64_t StDispImm = getDispOperand(StoreInst).getImm();
  int64_t LMMOffset = 0;
  int64_t SMMOffset = 0;

  int64_t LdDisp1 = LdDispImm;
  int64_t LdDisp2 = 0;
  int64_t StDisp1 = StDispImm;
  int64_t StDisp2 = 0;
  unsigned Size1 = 0;
  unsigned Size2 = 0;
  int64_t LdStDelta = StDispImm - LdDispImm;

  for (auto DispSizePair : BlockingStoresDispSizeMap) {
    LdDisp2 = DispSizePair.first;
    StDisp2 = DispSizePair.first + LdStDelta;
    Size2 = DispSizePair.second;
    // Avoid copying overlapping areas.
    if (LdDisp2 < LdDisp1) {
      int OverlapDelta = LdDisp1 - LdDisp2;
      LdDisp2 += OverlapDelta;
      StDisp2 += OverlapDelta;
      Size2 -= OverlapDelta;
    }
    Size1 = LdDisp2 - LdDisp1;

    // Build a copy for the region up to the current blocking store's
    // displacement.
    buildCopies(Size1, LoadInst, LdDisp1, StoreInst, StDisp1, LMMOffset,
                SMMOffset);
    // Build a copy for the current blocking store.
    buildCopies(Size2, LoadInst, LdDisp2, StoreInst, StDisp2, LMMOffset + Size1,
                SMMOffset + Size1);
    LdDisp1 = LdDisp2 + Size2;
    StDisp1 = StDisp2 + Size2;
    LMMOffset += Size1 + Size2;
    SMMOffset += Size1 + Size2;
  }
  unsigned Size3 = (LdDispImm + getRegSizeInBytes(LoadInst)) - LdDisp1;
  buildCopies(Size3, LoadInst, LdDisp1, StoreInst, StDisp1, LMMOffset,
              LMMOffset);
}

static bool hasSameBaseOpValue(MachineInstr *LoadInst,
                               MachineInstr *StoreInst) {
  MachineOperand &LoadBase = getBaseOperand(LoadInst);
  MachineOperand &StoreBase = getBaseOperand(StoreInst);
  if (LoadBase.isReg() != StoreBase.isReg())
    return false;
  if (LoadBase.isReg())
    return LoadBase.getReg() == StoreBase.getReg();
  return LoadBase.getIndex() == StoreBase.getIndex();
}

static bool isBlockingStore(int64_t LoadDispImm, unsigned LoadSize,
                            int64_t StoreDispImm, unsigned StoreSize) {
  return ((StoreDispImm >= LoadDispImm) &&
          (StoreDispImm <= LoadDispImm + (LoadSize - StoreSize)));
}

// Keep track of all stores blocking a load.
static void
updateBlockingStoresDispSizeMap(DisplacementSizeMap &BlockingStoresDispSizeMap,
                                int64_t DispImm, unsigned Size) {
  if (BlockingStoresDispSizeMap.count(DispImm)) {
    // Choose the smallest blocking store starting at this displacement.
    if (BlockingStoresDispSizeMap[DispImm] > Size)
      BlockingStoresDispSizeMap[DispImm] = Size;

  } else
    BlockingStoresDispSizeMap[DispImm] = Size;
}

// Remove blocking stores contained in each other.
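// For example, given blockers (Disp 0, Size 16) and (Disp 4, Size 4), the
// 16-byte entry fully contains the 4-byte one and is dropped, leaving only
// the smaller blocker to split around.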
static void
removeRedundantBlockingStores(DisplacementSizeMap &BlockingStoresDispSizeMap) {
  if (BlockingStoresDispSizeMap.size() <= 1)
    return;

  SmallVector<std::pair<int64_t, unsigned>, 0> DispSizeStack;
  for (auto DispSizePair : BlockingStoresDispSizeMap) {
    int64_t CurrDisp = DispSizePair.first;
    unsigned CurrSize = DispSizePair.second;
    while (DispSizeStack.size()) {
      int64_t PrevDisp = DispSizeStack.back().first;
      unsigned PrevSize = DispSizeStack.back().second;
      if (CurrDisp + CurrSize > PrevDisp + PrevSize)
        break;
      DispSizeStack.pop_back();
    }
    DispSizeStack.push_back(DispSizePair);
  }
  BlockingStoresDispSizeMap.clear();
  for (auto Disp : DispSizeStack)
    BlockingStoresDispSizeMap.insert(Disp);
}

bool X86AvoidSFBPass::runOnMachineFunction(MachineFunction &MF) {
  bool Changed = false;

  if (DisableX86AvoidStoreForwardBlocks || skipFunction(MF.getFunction()) ||
      !MF.getSubtarget<X86Subtarget>().is64Bit())
    return false;

  MRI = &MF.getRegInfo();
  assert(MRI->isSSA() && "Expected MIR to be in SSA form");
  TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
  TRI = MF.getSubtarget<X86Subtarget>().getRegisterInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  LLVM_DEBUG(dbgs() << "Start X86AvoidStoreForwardBlocks\n";);
  // Look for a load followed by a store to XMM/YMM which look like a memcpy.
  findPotentiallylBlockedCopies(MF);

  for (auto LoadStoreInstPair : BlockedLoadsStoresPairs) {
    MachineInstr *LoadInst = LoadStoreInstPair.first;
    int64_t LdDispImm = getDispOperand(LoadInst).getImm();
    DisplacementSizeMap BlockingStoresDispSizeMap;

    SmallVector<MachineInstr *, 2> PotentialBlockers =
        findPotentialBlockers(LoadInst);
    for (auto PBInst : PotentialBlockers) {
      if (!isPotentialBlockingStoreInst(PBInst->getOpcode(),
                                        LoadInst->getOpcode()) ||
          !isRelevantAddressingMode(PBInst))
        continue;
      int64_t PBstDispImm = getDispOperand(PBInst).getImm();
      assert(PBInst->hasOneMemOperand() && "Expected One Memory Operand");
      unsigned PBstSize = (*PBInst->memoperands_begin())->getSize();
      // This check doesn't cover all cases, but it will suffice for now.
      // TODO: take branch probability into consideration; if the blocking
      // store is in a block that is rarely (or never) executed, breaking the
      // memcpy could lose performance.
      if (hasSameBaseOpValue(LoadInst, PBInst) &&
          isBlockingStore(LdDispImm, getRegSizeInBytes(LoadInst), PBstDispImm,
                          PBstSize))
        updateBlockingStoresDispSizeMap(BlockingStoresDispSizeMap, PBstDispImm,
                                        PBstSize);
    }

    if (BlockingStoresDispSizeMap.empty())
      continue;

    // We found a store forward block; break the memcpy's load and store
    // into smaller copies such that each smaller store that was causing
    // the block is now copied separately.
    MachineInstr *StoreInst = LoadStoreInstPair.second;
    LLVM_DEBUG(dbgs() << "Blocked load and store instructions: \n");
    LLVM_DEBUG(LoadInst->dump());
    LLVM_DEBUG(StoreInst->dump());
    LLVM_DEBUG(dbgs() << "Replaced with:\n");
    removeRedundantBlockingStores(BlockingStoresDispSizeMap);
    breakBlockedCopies(LoadInst, StoreInst, BlockingStoresDispSizeMap);
    updateKillStatus(LoadInst, StoreInst);
    ForRemoval.push_back(LoadInst);
    ForRemoval.push_back(StoreInst);
    Changed = true;
  }
  for (auto RemovedInst : ForRemoval) {
    RemovedInst->eraseFromParent();
  }
  ForRemoval.clear();
  BlockedLoadsStoresPairs.clear();
  LLVM_DEBUG(dbgs() << "End X86AvoidStoreForwardBlocks\n";);

  return Changed;
}