1 //===----------- PPCVSXSwapRemoval.cpp - Remove VSX LE Swaps -------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===---------------------------------------------------------------------===// 9 // 10 // This pass analyzes vector computations and removes unnecessary 11 // doubleword swaps (xxswapd instructions). This pass is performed 12 // only for little-endian VSX code generation. 13 // 14 // For this specific case, loads and stores of v4i32, v4f32, v2i64, 15 // and v2f64 vectors are inefficient. These are implemented using 16 // the lxvd2x and stxvd2x instructions, which invert the order of 17 // doublewords in a vector register. Thus code generation inserts 18 // an xxswapd after each such load, and prior to each such store. 19 // 20 // The extra xxswapd instructions reduce performance. The purpose 21 // of this pass is to reduce the number of xxswapd instructions 22 // required for correctness. 23 // 24 // The primary insight is that much code that operates on vectors 25 // does not care about the relative order of elements in a register, 26 // so long as the correct memory order is preserved. If we have a 27 // computation where all input values are provided by lxvd2x/xxswapd, 28 // all outputs are stored using xxswapd/lxvd2x, and all intermediate 29 // computations are lane-insensitive (independent of element order), 30 // then all the xxswapd instructions associated with the loads and 31 // stores may be removed without changing observable semantics. 32 // 33 // This pass uses standard equivalence class infrastructure to create 34 // maximal webs of computations fitting the above description. Each 35 // such web is then optimized by removing its unnecessary xxswapd 36 // instructions. 
//
// There are some lane-sensitive operations for which we can still
// permit the optimization, provided we modify those operations
// accordingly. Such operations are identified as using "special
// handling" within this module.
//
//===---------------------------------------------------------------------===//

#include "PPC.h"
#include "PPCInstrBuilder.h"
#include "PPCInstrInfo.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "ppc-vsx-swaps"

namespace llvm {
void initializePPCVSXSwapRemovalPass(PassRegistry&);
}

namespace {

// A PPCVSXSwapEntry is created for each machine instruction that
// is relevant to a vector computation.
struct PPCVSXSwapEntry {
  // Pointer to the instruction.
  MachineInstr *VSEMI;

  // Unique ID (position in the swap vector).
  int VSEId;

  // Attributes of this node.
  unsigned int IsLoad : 1;            // Instruction is a vector load.
  unsigned int IsStore : 1;           // Instruction is a vector store.
  unsigned int IsSwap : 1;            // Instruction is a doubleword swap.
  unsigned int MentionsPhysVR : 1;    // Mentions a physical vector register.
  unsigned int IsSwappable : 1;       // Safe within a lane-permuted web.
  unsigned int MentionsPartialVR : 1; // Mentions a partial (scalar FP) reg.
  unsigned int SpecialHandling : 3;   // Holds one of the SHValues codes.
  unsigned int WebRejected : 1;       // Set on web leader: web not optimized.
  unsigned int WillRemove : 1;        // Swap has been selected for removal.
};

// Codes recorded in PPCVSXSwapEntry::SpecialHandling, naming the kind of
// adjustment required for a lane-sensitive instruction that can still be
// permitted inside an optimized web.
enum SHValues {
  SH_NONE = 0,
  SH_EXTRACT,
  SH_INSERT,
  SH_NOSWAP_LD,
  SH_NOSWAP_ST,
  SH_SPLAT,
  SH_XXPERMDI,
  SH_COPYWIDEN
};

struct PPCVSXSwapRemoval : public MachineFunctionPass {

  static char ID;
  const PPCInstrInfo *TII;
  MachineFunction *MF;
  MachineRegisterInfo *MRI;

  // Swap entries are allocated in a vector for better performance.
  std::vector<PPCVSXSwapEntry> SwapVector;

  // A mapping is maintained between machine instructions and
  // their swap entries. The key is the address of the MI.
  DenseMap<MachineInstr*, int> SwapMap;

  // Equivalence classes are used to gather webs of related computation.
  // Swap entries are represented by their VSEId fields.
  EquivalenceClasses<int> *EC;

  PPCVSXSwapRemoval() : MachineFunctionPass(ID) {
    initializePPCVSXSwapRemovalPass(*PassRegistry::getPassRegistry());
  }

private:
  // Initialize data structures.
  void initialize(MachineFunction &MFParm);

  // Walk the machine instructions to gather vector usage information.
  // Return true iff vector mentions are present.
  bool gatherVectorInstructions();

  // Add an entry to the swap vector and swap map.
  int addSwapEntry(MachineInstr *MI, PPCVSXSwapEntry &SwapEntry);

  // Hunt backwards through COPY and SUBREG_TO_REG chains for a
  // source register. VecIdx indicates the swap vector entry to
  // mark as mentioning a physical register if the search leads
  // to one.
  unsigned lookThruCopyLike(unsigned SrcReg, unsigned VecIdx);

  // Generate equivalence classes for related computations (webs).
  void formWebs();

  // Analyze webs and determine those that cannot be optimized.
  void recordUnoptimizableWebs();

  // Record which swap instructions can be safely removed.
  void markSwapsForRemoval();

  // Remove swaps and update other instructions requiring special
  // handling. Return true iff any changes are made.
  bool removeSwaps();

  // Insert a swap instruction from SrcReg to DstReg at the given
  // InsertPoint.
  void insertSwap(MachineInstr *MI, MachineBasicBlock::iterator InsertPoint,
                  unsigned DstReg, unsigned SrcReg);

  // Update instructions requiring special handling.
158 void handleSpecialSwappables(int EntryIdx); 159 160 // Dump a description of the entries in the swap vector. 161 void dumpSwapVector(); 162 163 // Return true iff the given register is in the given class. 164 bool isRegInClass(unsigned Reg, const TargetRegisterClass *RC) { 165 if (TargetRegisterInfo::isVirtualRegister(Reg)) 166 return RC->hasSubClassEq(MRI->getRegClass(Reg)); 167 return RC->contains(Reg); 168 } 169 170 // Return true iff the given register is a full vector register. 171 bool isVecReg(unsigned Reg) { 172 return (isRegInClass(Reg, &PPC::VSRCRegClass) || 173 isRegInClass(Reg, &PPC::VRRCRegClass)); 174 } 175 176 // Return true iff the given register is a partial vector register. 177 bool isScalarVecReg(unsigned Reg) { 178 return (isRegInClass(Reg, &PPC::VSFRCRegClass) || 179 isRegInClass(Reg, &PPC::VSSRCRegClass)); 180 } 181 182 // Return true iff the given register mentions all or part of a 183 // vector register. Also sets Partial to true if the mention 184 // is for just the floating-point register overlap of the register. 185 bool isAnyVecReg(unsigned Reg, bool &Partial) { 186 if (isScalarVecReg(Reg)) 187 Partial = true; 188 return isScalarVecReg(Reg) || isVecReg(Reg); 189 } 190 191 public: 192 // Main entry point for this pass. 193 bool runOnMachineFunction(MachineFunction &MF) override { 194 if (skipFunction(MF.getFunction())) 195 return false; 196 197 // If we don't have VSX on the subtarget, don't do anything. 198 // Also, on Power 9 the load and store ops preserve element order and so 199 // the swaps are not required. 200 const PPCSubtarget &STI = MF.getSubtarget<PPCSubtarget>(); 201 if (!STI.hasVSX() || !STI.needsSwapsForVSXMemOps()) 202 return false; 203 204 bool Changed = false; 205 initialize(MF); 206 207 if (gatherVectorInstructions()) { 208 formWebs(); 209 recordUnoptimizableWebs(); 210 markSwapsForRemoval(); 211 Changed = removeSwaps(); 212 } 213 214 // FIXME: See the allocation of EC in initialize(). 
    delete EC;
    return Changed;
  }
};

// Initialize data structures for this pass. In particular, clear the
// swap vector and allocate the equivalence class mapping before
// processing each function.
void PPCVSXSwapRemoval::initialize(MachineFunction &MFParm) {
  MF = &MFParm;
  MRI = &MF->getRegInfo();
  TII = MF->getSubtarget<PPCSubtarget>().getInstrInfo();

  // An initial vector size of 256 appears to work well in practice.
  // Small/medium functions with vector content tend not to incur a
  // reallocation at this size. Three of the vector tests in
  // projects/test-suite reallocate, which seems like a reasonable rate.
  const int InitialVectorSize(256);
  SwapVector.clear();
  SwapVector.reserve(InitialVectorSize);

  // FIXME: Currently we allocate EC each time because we don't have
  // access to the set representation on which to call clear(). Should
  // consider adding a clear() method to the EquivalenceClasses class.
  EC = new EquivalenceClasses<int>;
}

// Create an entry in the swap vector for each instruction that mentions
// a full vector register, recording various characteristics of the
// instructions there.
bool PPCVSXSwapRemoval::gatherVectorInstructions() {
  bool RelevantFunction = false;

  for (MachineBasicBlock &MBB : *MF) {
    for (MachineInstr &MI : MBB) {

      // Debug values never affect the optimization.
      if (MI.isDebugValue())
        continue;

      bool RelevantInstr = false;
      bool Partial = false;

      // An instruction is relevant if any operand mentions a (full or
      // partial) vector register.
      for (const MachineOperand &MO : MI.operands()) {
        if (!MO.isReg())
          continue;
        unsigned Reg = MO.getReg();
        if (isAnyVecReg(Reg, Partial)) {
          RelevantInstr = true;
          break;
        }
      }

      if (!RelevantInstr)
        continue;

      RelevantFunction = true;

      // Create a SwapEntry initialized to zeros, then fill in the
      // instruction and ID fields before pushing it to the back
      // of the swap vector.
      PPCVSXSwapEntry SwapEntry{};
      int VecIdx = addSwapEntry(&MI, SwapEntry);

      switch(MI.getOpcode()) {
      default:
        // Unless noted otherwise, an instruction is considered
        // safe for the optimization. There are a large number of
        // such true-SIMD instructions (all vector math, logical,
        // select, compare, etc.). However, if the instruction
        // mentions a partial vector register and does not have
        // special handling defined, it is not swappable.
        if (Partial)
          SwapVector[VecIdx].MentionsPartialVR = 1;
        else
          SwapVector[VecIdx].IsSwappable = 1;
        break;
      case PPC::XXPERMDI: {
        // This is a swap if it is of the form XXPERMDI t, s, s, 2.
        // Unfortunately, MachineCSE ignores COPY and SUBREG_TO_REG, so we
        // can also see XXPERMDI t, SUBREG_TO_REG(s), SUBREG_TO_REG(s), 2,
        // for example. We have to look through chains of COPY and
        // SUBREG_TO_REG to find the real source value for comparison.
        // If the real source value is a physical register, then mark the
        // XXPERMDI as mentioning a physical register.
        int immed = MI.getOperand(3).getImm();
        if (immed == 2) {
          unsigned trueReg1 = lookThruCopyLike(MI.getOperand(1).getReg(),
                                               VecIdx);
          unsigned trueReg2 = lookThruCopyLike(MI.getOperand(2).getReg(),
                                               VecIdx);
          // Same true source on both inputs => a genuine xxswapd.
          if (trueReg1 == trueReg2)
            SwapVector[VecIdx].IsSwap = 1;
          else {
            // We can still handle these if the two registers are not
            // identical, by adjusting the form of the XXPERMDI.
            SwapVector[VecIdx].IsSwappable = 1;
            SwapVector[VecIdx].SpecialHandling = SHValues::SH_XXPERMDI;
          }
        // This is a doubleword splat if it is of the form
        // XXPERMDI t, s, s, 0 or XXPERMDI t, s, s, 3. As above we
        // must look through chains of copy-likes to find the source
        // register. We turn off the marking for mention of a physical
        // register, because splatting it is safe; the optimization
        // will not swap the value in the physical register.
        // Whether
        // or not the two input registers are identical, we can handle
        // these by adjusting the form of the XXPERMDI.
        } else if (immed == 0 || immed == 3) {

          SwapVector[VecIdx].IsSwappable = 1;
          SwapVector[VecIdx].SpecialHandling = SHValues::SH_XXPERMDI;

          unsigned trueReg1 = lookThruCopyLike(MI.getOperand(1).getReg(),
                                               VecIdx);
          unsigned trueReg2 = lookThruCopyLike(MI.getOperand(2).getReg(),
                                               VecIdx);
          // Doubleword splat of a single source: clear any physreg
          // mention recorded by lookThruCopyLike, since splatting a
          // physical register is safe.
          if (trueReg1 == trueReg2)
            SwapVector[VecIdx].MentionsPhysVR = 0;

        } else {
          // We can still handle these by adjusting the form of the XXPERMDI.
          SwapVector[VecIdx].IsSwappable = 1;
          SwapVector[VecIdx].SpecialHandling = SHValues::SH_XXPERMDI;
        }
        break;
      }
      case PPC::LVX:
        // Non-permuting loads are currently unsafe. We can use special
        // handling for this in the future. By not marking these as
        // IsSwap, we ensure computations containing them will be rejected
        // for now.
        SwapVector[VecIdx].IsLoad = 1;
        break;
      case PPC::LXVD2X:
      case PPC::LXVW4X:
        // Permuting loads are marked as both load and swap, and are
        // safe for optimization.
        SwapVector[VecIdx].IsLoad = 1;
        SwapVector[VecIdx].IsSwap = 1;
        break;
      case PPC::LXSDX:
      case PPC::LXSSPX:
      case PPC::XFLOADf64:
      case PPC::XFLOADf32:
        // A load of a floating-point value into the high-order half of
        // a vector register is safe, provided that we introduce a swap
        // following the load, which will be done by the SUBREG_TO_REG
        // support. So just mark these as safe.
        SwapVector[VecIdx].IsLoad = 1;
        SwapVector[VecIdx].IsSwappable = 1;
        break;
      case PPC::STVX:
        // Non-permuting stores are currently unsafe. We can use special
        // handling for this in the future. By not marking these as
        // IsSwap, we ensure computations containing them will be rejected
        // for now.
        SwapVector[VecIdx].IsStore = 1;
        break;
      case PPC::STXVD2X:
      case PPC::STXVW4X:
        // Permuting stores are marked as both store and swap, and are
        // safe for optimization.
        SwapVector[VecIdx].IsStore = 1;
        SwapVector[VecIdx].IsSwap = 1;
        break;
      case PPC::COPY:
        // These are fine provided they are moving between full vector
        // register classes.
        if (isVecReg(MI.getOperand(0).getReg()) &&
            isVecReg(MI.getOperand(1).getReg()))
          SwapVector[VecIdx].IsSwappable = 1;
        // If we have a copy from one scalar floating-point register
        // to another, we can accept this even if it is a physical
        // register. The only way this gets involved is if it feeds
        // a SUBREG_TO_REG, which is handled by introducing a swap.
        else if (isScalarVecReg(MI.getOperand(0).getReg()) &&
                 isScalarVecReg(MI.getOperand(1).getReg()))
          SwapVector[VecIdx].IsSwappable = 1;
        break;
      case PPC::SUBREG_TO_REG: {
        // These are fine provided they are moving between full vector
        // register classes. If they are moving from a scalar
        // floating-point class to a vector class, we can handle those
        // as well, provided we introduce a swap. It is generally the
        // case that we will introduce fewer swaps than we remove, but
        // (FIXME) a cost model could be used. However, introduced
        // swaps could potentially be CSEd, so this is not trivial.
        if (isVecReg(MI.getOperand(0).getReg()) &&
            isVecReg(MI.getOperand(2).getReg()))
          SwapVector[VecIdx].IsSwappable = 1;
        else if (isVecReg(MI.getOperand(0).getReg()) &&
                 isScalarVecReg(MI.getOperand(2).getReg())) {
          SwapVector[VecIdx].IsSwappable = 1;
          SwapVector[VecIdx].SpecialHandling = SHValues::SH_COPYWIDEN;
        }
        break;
      }
      case PPC::VSPLTB:
      case PPC::VSPLTH:
      case PPC::VSPLTW:
      case PPC::XXSPLTW:
        // Splats are lane-sensitive, but we can use special handling
        // to adjust the source lane for the splat.
        SwapVector[VecIdx].IsSwappable = 1;
        SwapVector[VecIdx].SpecialHandling = SHValues::SH_SPLAT;
        break;
      // The presence of the following lane-sensitive operations in a
      // web will kill the optimization, at least for now. For these
      // we do nothing, causing the optimization to fail.
      // FIXME: Some of these could be permitted with special handling,
      // and will be phased in as time permits.
      // FIXME: There is no simple and maintainable way to express a set
      // of opcodes having a common attribute in TableGen. Should this
      // change, this is a prime candidate to use such a mechanism.
      case PPC::INLINEASM:
      case PPC::EXTRACT_SUBREG:
      case PPC::INSERT_SUBREG:
      case PPC::COPY_TO_REGCLASS:
      case PPC::LVEBX:
      case PPC::LVEHX:
      case PPC::LVEWX:
      case PPC::LVSL:
      case PPC::LVSR:
      case PPC::LVXL:
      case PPC::STVEBX:
      case PPC::STVEHX:
      case PPC::STVEWX:
      case PPC::STVXL:
        // We can handle STXSDX and STXSSPX similarly to LXSDX and LXSSPX,
        // by adding special handling for narrowing copies as well as
        // widening ones. However, I've experimented with this, and in
        // practice we currently do not appear to use STXSDX fed by
        // a narrowing copy from a full vector register. Since I can't
        // generate any useful test cases, I've left this alone for now.
      case PPC::STXSDX:
      case PPC::STXSSPX:
      case PPC::VCIPHER:
      case PPC::VCIPHERLAST:
      case PPC::VMRGHB:
      case PPC::VMRGHH:
      case PPC::VMRGHW:
      case PPC::VMRGLB:
      case PPC::VMRGLH:
      case PPC::VMRGLW:
      case PPC::VMULESB:
      case PPC::VMULESH:
      case PPC::VMULESW:
      case PPC::VMULEUB:
      case PPC::VMULEUH:
      case PPC::VMULEUW:
      case PPC::VMULOSB:
      case PPC::VMULOSH:
      case PPC::VMULOSW:
      case PPC::VMULOUB:
      case PPC::VMULOUH:
      case PPC::VMULOUW:
      case PPC::VNCIPHER:
      case PPC::VNCIPHERLAST:
      case PPC::VPERM:
      case PPC::VPERMXOR:
      case PPC::VPKPX:
      case PPC::VPKSHSS:
      case PPC::VPKSHUS:
      case PPC::VPKSDSS:
      case PPC::VPKSDUS:
      case PPC::VPKSWSS:
      case PPC::VPKSWUS:
      case PPC::VPKUDUM:
      case PPC::VPKUDUS:
      case PPC::VPKUHUM:
      case PPC::VPKUHUS:
      case PPC::VPKUWUM:
      case PPC::VPKUWUS:
      case PPC::VPMSUMB:
      case PPC::VPMSUMD:
      case PPC::VPMSUMH:
      case PPC::VPMSUMW:
      case PPC::VRLB:
      case PPC::VRLD:
      case PPC::VRLH:
      case PPC::VRLW:
      case PPC::VSBOX:
      case PPC::VSHASIGMAD:
      case PPC::VSHASIGMAW:
      case PPC::VSL:
      case PPC::VSLDOI:
      case PPC::VSLO:
      case PPC::VSR:
      case PPC::VSRO:
      case PPC::VSUM2SWS:
      case PPC::VSUM4SBS:
      case PPC::VSUM4SHS:
      case PPC::VSUM4UBS:
      case PPC::VSUMSWS:
      case PPC::VUPKHPX:
      case PPC::VUPKHSB:
      case PPC::VUPKHSH:
      case PPC::VUPKHSW:
      case PPC::VUPKLPX:
      case PPC::VUPKLSB:
      case PPC::VUPKLSH:
      case PPC::VUPKLSW:
      case PPC::XXMRGHW:
      case PPC::XXMRGLW:
        // XXSLDWI could be replaced by a general permute with one of three
        // permute control vectors (for shift values 1, 2, 3). However,
        // VPERM has a more restrictive register class.
521 case PPC::XXSLDWI: 522 break; 523 } 524 } 525 } 526 527 if (RelevantFunction) { 528 DEBUG(dbgs() << "Swap vector when first built\n\n"); 529 DEBUG(dumpSwapVector()); 530 } 531 532 return RelevantFunction; 533 } 534 535 // Add an entry to the swap vector and swap map, and make a 536 // singleton equivalence class for the entry. 537 int PPCVSXSwapRemoval::addSwapEntry(MachineInstr *MI, 538 PPCVSXSwapEntry& SwapEntry) { 539 SwapEntry.VSEMI = MI; 540 SwapEntry.VSEId = SwapVector.size(); 541 SwapVector.push_back(SwapEntry); 542 EC->insert(SwapEntry.VSEId); 543 SwapMap[MI] = SwapEntry.VSEId; 544 return SwapEntry.VSEId; 545 } 546 547 // This is used to find the "true" source register for an 548 // XXPERMDI instruction, since MachineCSE does not handle the 549 // "copy-like" operations (Copy and SubregToReg). Returns 550 // the original SrcReg unless it is the target of a copy-like 551 // operation, in which case we chain backwards through all 552 // such operations to the ultimate source register. If a 553 // physical register is encountered, we stop the search and 554 // flag the swap entry indicated by VecIdx (the original 555 // XXPERMDI) as mentioning a physical register. 556 unsigned PPCVSXSwapRemoval::lookThruCopyLike(unsigned SrcReg, 557 unsigned VecIdx) { 558 MachineInstr *MI = MRI->getVRegDef(SrcReg); 559 if (!MI->isCopyLike()) 560 return SrcReg; 561 562 unsigned CopySrcReg; 563 if (MI->isCopy()) 564 CopySrcReg = MI->getOperand(1).getReg(); 565 else { 566 assert(MI->isSubregToReg() && "bad opcode for lookThruCopyLike"); 567 CopySrcReg = MI->getOperand(2).getReg(); 568 } 569 570 if (!TargetRegisterInfo::isVirtualRegister(CopySrcReg)) { 571 if (!isScalarVecReg(CopySrcReg)) 572 SwapVector[VecIdx].MentionsPhysVR = 1; 573 return CopySrcReg; 574 } 575 576 return lookThruCopyLike(CopySrcReg, VecIdx); 577 } 578 579 // Generate equivalence classes for related computations (webs) by 580 // def-use relationships of virtual registers. 
// Mention of a physical
// register terminates the generation of equivalence classes as this
// indicates a use of a parameter, definition of a return value, use
// of a value returned from a call, or definition of a parameter to a
// call. Computations with physical register mentions are flagged
// as such so their containing webs will not be optimized.
void PPCVSXSwapRemoval::formWebs() {

  DEBUG(dbgs() << "\n*** Forming webs for swap removal ***\n\n");

  for (unsigned EntryIdx = 0; EntryIdx < SwapVector.size(); ++EntryIdx) {

    MachineInstr *MI = SwapVector[EntryIdx].VSEMI;

    DEBUG(dbgs() << "\n" << SwapVector[EntryIdx].VSEId << " ");
    DEBUG(MI->dump());

    // It's sufficient to walk vector uses and join them to their unique
    // definitions. In addition, check full vector register operands
    // for physical regs. We exclude partial-vector register operands
    // because we can handle them if copied to a full vector.
    for (const MachineOperand &MO : MI->operands()) {
      if (!MO.isReg())
        continue;

      unsigned Reg = MO.getReg();
      if (!isVecReg(Reg) && !isScalarVecReg(Reg))
        continue;

      if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
        // A scalar-reg COPY is tolerated (handled by SUBREG_TO_REG
        // support); any other physical mention poisons the web.
        if (!(MI->isCopy() && isScalarVecReg(Reg)))
          SwapVector[EntryIdx].MentionsPhysVR = 1;
        continue;
      }

      if (!MO.isUse())
        continue;

      // Union this use with its unique (SSA) definition.
      MachineInstr* DefMI = MRI->getVRegDef(Reg);
      assert(SwapMap.find(DefMI) != SwapMap.end() &&
             "Inconsistency: def of vector reg not found in swap map!");
      int DefIdx = SwapMap[DefMI];
      (void)EC->unionSets(SwapVector[DefIdx].VSEId,
                          SwapVector[EntryIdx].VSEId);

      DEBUG(dbgs() << format("Unioning %d with %d\n", SwapVector[DefIdx].VSEId,
                             SwapVector[EntryIdx].VSEId));
      DEBUG(dbgs() << "  Def: ");
      DEBUG(DefMI->dump());
    }
  }
}

// Walk the swap vector entries looking for conditions that prevent their
// containing computations from being optimized.
// When such conditions are
// found, mark the representative of the computation's equivalence class
// as rejected.
void PPCVSXSwapRemoval::recordUnoptimizableWebs() {

  DEBUG(dbgs() << "\n*** Rejecting webs for swap removal ***\n\n");

  for (unsigned EntryIdx = 0; EntryIdx < SwapVector.size(); ++EntryIdx) {
    int Repr = EC->getLeaderValue(SwapVector[EntryIdx].VSEId);

    // If representative is already rejected, don't waste further time.
    if (SwapVector[Repr].WebRejected)
      continue;

    // Reject webs containing mentions of physical or partial registers, or
    // containing operations that we don't know how to handle in a lane-
    // permuted region.
    if (SwapVector[EntryIdx].MentionsPhysVR ||
        SwapVector[EntryIdx].MentionsPartialVR ||
        !(SwapVector[EntryIdx].IsSwappable || SwapVector[EntryIdx].IsSwap)) {

      SwapVector[Repr].WebRejected = 1;

      DEBUG(dbgs() <<
            format("Web %d rejected for physreg, partial reg, or not "
                   "swap[pable]\n", Repr));
      DEBUG(dbgs() << "  in " << EntryIdx << ": ");
      DEBUG(SwapVector[EntryIdx].VSEMI->dump());
      DEBUG(dbgs() << "\n");
    }

    // Reject webs that contain swapping loads that feed something other
    // than a swap instruction.
    else if (SwapVector[EntryIdx].IsLoad && SwapVector[EntryIdx].IsSwap) {
      MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
      unsigned DefReg = MI->getOperand(0).getReg();

      // We skip debug instructions in the analysis. (Note that debug
      // location information is still maintained by this optimization
      // because it remains on the LXVD2X and STXVD2X instructions after
      // the XXPERMDIs are removed.)
      for (MachineInstr &UseMI : MRI->use_nodbg_instructions(DefReg)) {
        int UseIdx = SwapMap[&UseMI];

        if (!SwapVector[UseIdx].IsSwap || SwapVector[UseIdx].IsLoad ||
            SwapVector[UseIdx].IsStore) {

          SwapVector[Repr].WebRejected = 1;

          DEBUG(dbgs() <<
                format("Web %d rejected for load not feeding swap\n", Repr));
          DEBUG(dbgs() << "  def " << EntryIdx << ": ");
          DEBUG(MI->dump());
          DEBUG(dbgs() << "  use " << UseIdx << ": ");
          DEBUG(UseMI.dump());
          DEBUG(dbgs() << "\n");
        }
      }

    // Reject webs that contain swapping stores that are fed by something
    // other than a swap instruction.
    } else if (SwapVector[EntryIdx].IsStore && SwapVector[EntryIdx].IsSwap) {
      MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
      // Operand 0 of a store is the value being stored; chase its def.
      unsigned UseReg = MI->getOperand(0).getReg();
      MachineInstr *DefMI = MRI->getVRegDef(UseReg);
      unsigned DefReg = DefMI->getOperand(0).getReg();
      int DefIdx = SwapMap[DefMI];

      if (!SwapVector[DefIdx].IsSwap || SwapVector[DefIdx].IsLoad ||
          SwapVector[DefIdx].IsStore) {

        SwapVector[Repr].WebRejected = 1;

        DEBUG(dbgs() <<
              format("Web %d rejected for store not fed by swap\n", Repr));
        DEBUG(dbgs() << "  def " << DefIdx << ": ");
        DEBUG(DefMI->dump());
        DEBUG(dbgs() << "  use " << EntryIdx << ": ");
        DEBUG(MI->dump());
        DEBUG(dbgs() << "\n");
      }

      // Ensure all uses of the register defined by DefMI feed store
      // instructions
      for (MachineInstr &UseMI : MRI->use_nodbg_instructions(DefReg)) {
        int UseIdx = SwapMap[&UseMI];

        if (SwapVector[UseIdx].VSEMI->getOpcode() != MI->getOpcode()) {
          SwapVector[Repr].WebRejected = 1;

          DEBUG(dbgs() <<
                format("Web %d rejected for swap not feeding only stores\n",
                       Repr));
          // FIXME: DefIdx is not printed here, so the debug output shows
          // an empty index for the def.
          DEBUG(dbgs() << "  def " << " : ");
          DEBUG(DefMI->dump());
          DEBUG(dbgs() << "  use " << UseIdx << ": ");
          DEBUG(SwapVector[UseIdx].VSEMI->dump());
          DEBUG(dbgs() << "\n");
        }
      }
    }
  }

  DEBUG(dbgs() << "Swap vector after web analysis:\n\n");
  DEBUG(dumpSwapVector());
}

// Walk the swap vector entries looking for swaps fed by permuting loads
// and swaps that feed permuting stores. If the containing computation
// has not been marked rejected, mark each such swap for removal.
// (Removal is delayed in case optimization has disturbed the pattern,
// such that multiple loads feed the same swap, etc.)
void PPCVSXSwapRemoval::markSwapsForRemoval() {

  DEBUG(dbgs() << "\n*** Marking swaps for removal ***\n\n");

  for (unsigned EntryIdx = 0; EntryIdx < SwapVector.size(); ++EntryIdx) {

    // A permuting load: mark every (non-debug) user of its result,
    // each of which analysis has guaranteed is a swap.
    if (SwapVector[EntryIdx].IsLoad && SwapVector[EntryIdx].IsSwap) {
      int Repr = EC->getLeaderValue(SwapVector[EntryIdx].VSEId);

      if (!SwapVector[Repr].WebRejected) {
        MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
        unsigned DefReg = MI->getOperand(0).getReg();

        for (MachineInstr &UseMI : MRI->use_nodbg_instructions(DefReg)) {
          int UseIdx = SwapMap[&UseMI];
          SwapVector[UseIdx].WillRemove = 1;

          DEBUG(dbgs() << "Marking swap fed by load for removal: ");
          DEBUG(UseMI.dump());
        }
      }

    // A permuting store: mark the swap that defines the stored value.
    } else if (SwapVector[EntryIdx].IsStore && SwapVector[EntryIdx].IsSwap) {
      int Repr = EC->getLeaderValue(SwapVector[EntryIdx].VSEId);

      if (!SwapVector[Repr].WebRejected) {
        MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
        unsigned UseReg = MI->getOperand(0).getReg();
        MachineInstr *DefMI = MRI->getVRegDef(UseReg);
        int DefIdx = SwapMap[DefMI];
        SwapVector[DefIdx].WillRemove = 1;

        DEBUG(dbgs() << "Marking swap feeding store for removal: ");
        DEBUG(DefMI->dump());
      }

    // An instruction needing special handling in an accepted web is
    // rewritten now.
    } else if (SwapVector[EntryIdx].IsSwappable &&
               SwapVector[EntryIdx].SpecialHandling != 0) {
      int Repr = EC->getLeaderValue(SwapVector[EntryIdx].VSEId);

      if (!SwapVector[Repr].WebRejected)
        handleSpecialSwappables(EntryIdx);
    }
  }
}

// Create an xxswapd
// instruction and insert it prior to the given point.
// MI is used to determine basic block and debug loc information.
// FIXME: When inserting a swap, we should check whether SrcReg is
// defined by another swap: SrcReg = XXPERMDI Reg, Reg, 2; If so,
// then instead we should generate a copy from Reg to DstReg.
void PPCVSXSwapRemoval::insertSwap(MachineInstr *MI,
                                   MachineBasicBlock::iterator InsertPoint,
                                   unsigned DstReg, unsigned SrcReg) {
  // XXPERMDI t, s, s, 2 is the canonical doubleword-swap (xxswapd) form.
  BuildMI(*MI->getParent(), InsertPoint, MI->getDebugLoc(),
          TII->get(PPC::XXPERMDI), DstReg)
    .addReg(SrcReg)
    .addReg(SrcReg)
    .addImm(2);
}

// The identified swap entry requires special handling to allow its
// containing computation to be optimized. Perform that handling
// here.
// FIXME: Additional opportunities will be phased in with subsequent
// patches.
void PPCVSXSwapRemoval::handleSpecialSwappables(int EntryIdx) {
  switch (SwapVector[EntryIdx].SpecialHandling) {

  default:
    llvm_unreachable("Unexpected special handling type");

  // For splats based on an index into a vector, add N/2 modulo N
  // to the index, where N is the number of vector elements.
  case SHValues::SH_SPLAT: {
    MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
    unsigned NElts;

    DEBUG(dbgs() << "Changing splat: ");
    DEBUG(MI->dump());

    switch (MI->getOpcode()) {
    default:
      llvm_unreachable("Unexpected splat opcode");
    case PPC::VSPLTB: NElts = 16; break;
    case PPC::VSPLTH: NElts = 8;  break;
    case PPC::VSPLTW:
    case PPC::XXSPLTW: NElts = 4;  break;
    }

    // The element-number immediate is operand 2 for XXSPLTW and
    // operand 1 for the VSPLT* forms.
    unsigned EltNo;
    if (MI->getOpcode() == PPC::XXSPLTW)
      EltNo = MI->getOperand(2).getImm();
    else
      EltNo = MI->getOperand(1).getImm();

    // Adjust the source lane for the swapped register layout.
    EltNo = (EltNo + NElts / 2) % NElts;
    if (MI->getOpcode() == PPC::XXSPLTW)
      MI->getOperand(2).setImm(EltNo);
    else
      MI->getOperand(1).setImm(EltNo);

    DEBUG(dbgs() << "  Into: ");
    DEBUG(MI->dump());
    break;
  }

  // For an XXPERMDI that isn't handled otherwise, we need to
  // reverse the order of the operands. If the selector operand
  // has a value of 0 or 3, we need to change it to 3 or 0,
  // respectively. Otherwise we should leave it alone. (This
  // is equivalent to reversing the two bits of the selector
  // operand and complementing the result.)
  case SHValues::SH_XXPERMDI: {
    MachineInstr *MI = SwapVector[EntryIdx].VSEMI;

    DEBUG(dbgs() << "Changing XXPERMDI: ");
    DEBUG(MI->dump());

    unsigned Selector = MI->getOperand(3).getImm();
    if (Selector == 0 || Selector == 3)
      Selector = 3 - Selector;
    MI->getOperand(3).setImm(Selector);

    // Swap the two register inputs.
    unsigned Reg1 = MI->getOperand(1).getReg();
    unsigned Reg2 = MI->getOperand(2).getReg();
    MI->getOperand(1).setReg(Reg2);
    MI->getOperand(2).setReg(Reg1);

    DEBUG(dbgs() << "  Into: ");
    DEBUG(MI->dump());
    break;
  }

  // For a copy from a scalar floating-point register to a vector
  // register, removing swaps will leave the copied value in the
  // wrong lane. Insert a swap following the copy to fix this.
  case SHValues::SH_COPYWIDEN: {
    MachineInstr *MI = SwapVector[EntryIdx].VSEMI;

    DEBUG(dbgs() << "Changing SUBREG_TO_REG: ");
    DEBUG(MI->dump());

    // Redirect the SUBREG_TO_REG into a fresh register, then swap that
    // register into the original destination after the instruction.
    unsigned DstReg = MI->getOperand(0).getReg();
    const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
    unsigned NewVReg = MRI->createVirtualRegister(DstRC);

    MI->getOperand(0).setReg(NewVReg);
    DEBUG(dbgs() << "  Into: ");
    DEBUG(MI->dump());

    auto InsertPoint = ++MachineBasicBlock::iterator(MI);

    // Note that an XXPERMDI requires a VSRC, so if the SUBREG_TO_REG
    // is copying to a VRRC, we need to be careful to avoid a register
    // assignment problem. In this case we must copy from VRRC to VSRC
    // prior to the swap, and from VSRC to VRRC following the swap.
    // Coalescing will usually remove all this mess.
    if (DstRC == &PPC::VRRCRegClass) {
      unsigned VSRCTmp1 = MRI->createVirtualRegister(&PPC::VSRCRegClass);
      unsigned VSRCTmp2 = MRI->createVirtualRegister(&PPC::VSRCRegClass);

      BuildMI(*MI->getParent(), InsertPoint, MI->getDebugLoc(),
              TII->get(PPC::COPY), VSRCTmp1)
        .addReg(NewVReg);
      DEBUG(std::prev(InsertPoint)->dump());

      insertSwap(MI, InsertPoint, VSRCTmp2, VSRCTmp1);
      DEBUG(std::prev(InsertPoint)->dump());

      BuildMI(*MI->getParent(), InsertPoint, MI->getDebugLoc(),
              TII->get(PPC::COPY), DstReg)
        .addReg(VSRCTmp2);
      DEBUG(std::prev(InsertPoint)->dump());

    } else {
      insertSwap(MI, InsertPoint, DstReg, NewVReg);
      DEBUG(std::prev(InsertPoint)->dump());
    }
    break;
  }
  }
}

// Walk the swap vector and replace each entry marked for removal with
// a copy operation.
932 bool PPCVSXSwapRemoval::removeSwaps() { 933 934 DEBUG(dbgs() << "\n*** Removing swaps ***\n\n"); 935 936 bool Changed = false; 937 938 for (unsigned EntryIdx = 0; EntryIdx < SwapVector.size(); ++EntryIdx) { 939 if (SwapVector[EntryIdx].WillRemove) { 940 Changed = true; 941 MachineInstr *MI = SwapVector[EntryIdx].VSEMI; 942 MachineBasicBlock *MBB = MI->getParent(); 943 BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(TargetOpcode::COPY), 944 MI->getOperand(0).getReg()) 945 .add(MI->getOperand(1)); 946 947 DEBUG(dbgs() << format("Replaced %d with copy: ", 948 SwapVector[EntryIdx].VSEId)); 949 DEBUG(MI->dump()); 950 951 MI->eraseFromParent(); 952 } 953 } 954 955 return Changed; 956 } 957 958 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 959 // For debug purposes, dump the contents of the swap vector. 960 LLVM_DUMP_METHOD void PPCVSXSwapRemoval::dumpSwapVector() { 961 962 for (unsigned EntryIdx = 0; EntryIdx < SwapVector.size(); ++EntryIdx) { 963 964 MachineInstr *MI = SwapVector[EntryIdx].VSEMI; 965 int ID = SwapVector[EntryIdx].VSEId; 966 967 dbgs() << format("%6d", ID); 968 dbgs() << format("%6d", EC->getLeaderValue(ID)); 969 dbgs() << format(" %bb.%3d", MI->getParent()->getNumber()); 970 dbgs() << format(" %14s ", TII->getName(MI->getOpcode()).str().c_str()); 971 972 if (SwapVector[EntryIdx].IsLoad) 973 dbgs() << "load "; 974 if (SwapVector[EntryIdx].IsStore) 975 dbgs() << "store "; 976 if (SwapVector[EntryIdx].IsSwap) 977 dbgs() << "swap "; 978 if (SwapVector[EntryIdx].MentionsPhysVR) 979 dbgs() << "physreg "; 980 if (SwapVector[EntryIdx].MentionsPartialVR) 981 dbgs() << "partialreg "; 982 983 if (SwapVector[EntryIdx].IsSwappable) { 984 dbgs() << "swappable "; 985 switch(SwapVector[EntryIdx].SpecialHandling) { 986 default: 987 dbgs() << "special:**unknown**"; 988 break; 989 case SH_NONE: 990 break; 991 case SH_EXTRACT: 992 dbgs() << "special:extract "; 993 break; 994 case SH_INSERT: 995 dbgs() << "special:insert "; 996 break; 997 case SH_NOSWAP_LD: 998 dbgs() 
<< "special:load "; 999 break; 1000 case SH_NOSWAP_ST: 1001 dbgs() << "special:store "; 1002 break; 1003 case SH_SPLAT: 1004 dbgs() << "special:splat "; 1005 break; 1006 case SH_XXPERMDI: 1007 dbgs() << "special:xxpermdi "; 1008 break; 1009 case SH_COPYWIDEN: 1010 dbgs() << "special:copywiden "; 1011 break; 1012 } 1013 } 1014 1015 if (SwapVector[EntryIdx].WebRejected) 1016 dbgs() << "rejected "; 1017 if (SwapVector[EntryIdx].WillRemove) 1018 dbgs() << "remove "; 1019 1020 dbgs() << "\n"; 1021 1022 // For no-asserts builds. 1023 (void)MI; 1024 (void)ID; 1025 } 1026 1027 dbgs() << "\n"; 1028 } 1029 #endif 1030 1031 } // end default namespace 1032 1033 INITIALIZE_PASS_BEGIN(PPCVSXSwapRemoval, DEBUG_TYPE, 1034 "PowerPC VSX Swap Removal", false, false) 1035 INITIALIZE_PASS_END(PPCVSXSwapRemoval, DEBUG_TYPE, 1036 "PowerPC VSX Swap Removal", false, false) 1037 1038 char PPCVSXSwapRemoval::ID = 0; 1039 FunctionPass* 1040 llvm::createPPCVSXSwapRemovalPass() { return new PPCVSXSwapRemoval(); } 1041