1 //===--- HexagonBitSimplify.cpp -------------------------------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 10 #define DEBUG_TYPE "hexbit" 11 12 #include "HexagonBitTracker.h" 13 #include "HexagonTargetMachine.h" 14 #include "llvm/ADT/BitVector.h" 15 #include "llvm/ADT/DenseMap.h" 16 #include "llvm/ADT/SmallVector.h" 17 #include "llvm/ADT/STLExtras.h" 18 #include "llvm/ADT/StringRef.h" 19 #include "llvm/CodeGen/MachineBasicBlock.h" 20 #include "llvm/CodeGen/MachineDominators.h" 21 #include "llvm/CodeGen/MachineFunction.h" 22 #include "llvm/CodeGen/MachineFunctionPass.h" 23 #include "llvm/CodeGen/MachineInstr.h" 24 #include "llvm/CodeGen/MachineInstrBuilder.h" 25 #include "llvm/CodeGen/MachineOperand.h" 26 #include "llvm/CodeGen/MachineRegisterInfo.h" 27 #include "llvm/CodeGen/Passes.h" 28 #include "llvm/IR/DebugLoc.h" 29 #include "llvm/MC/MCInstrDesc.h" 30 #include "llvm/Pass.h" 31 #include "llvm/Support/CommandLine.h" 32 #include "llvm/Support/Compiler.h" 33 #include "llvm/Support/Debug.h" 34 #include "llvm/Support/MathExtras.h" 35 #include "llvm/Support/raw_ostream.h" 36 #include "llvm/Target/TargetRegisterInfo.h" 37 #include <algorithm> 38 #include <cassert> 39 #include <cstdint> 40 #include <iterator> 41 #include <limits> 42 #include <utility> 43 #include <vector> 44 45 using namespace llvm; 46 47 static cl::opt<bool> PreserveTiedOps("hexbit-keep-tied", cl::Hidden, 48 cl::init(true), cl::desc("Preserve subregisters in tied operands")); 49 50 namespace llvm { 51 52 void initializeHexagonBitSimplifyPass(PassRegistry& Registry); 53 FunctionPass *createHexagonBitSimplify(); 54 55 } // end namespace llvm 56 57 namespace { 58 59 // Set of virtual registers, based on BitVector. 
// A set of virtual registers, implemented on top of BitVector. Virtual
// register numbers are mapped to/from bit indices via virtReg2Index /
// index2VirtReg, so the underlying bit vector stays dense.
struct RegisterSet : private BitVector {
  RegisterSet() = default;
  explicit RegisterSet(unsigned s, bool t = false) : BitVector(s, t) {}
  RegisterSet(const RegisterSet &RS) = default;

  using BitVector::clear;
  using BitVector::count;

  // Return the first register in the set, or 0 (not a valid virtual
  // register) if the set is empty.
  unsigned find_first() const {
    int First = BitVector::find_first();
    if (First < 0)
      return 0;
    return x2v(First);
  }

  // Return the register following Prev in the set, or 0 if Prev was
  // the last one.
  unsigned find_next(unsigned Prev) const {
    int Next = BitVector::find_next(v2x(Prev));
    if (Next < 0)
      return 0;
    return x2v(Next);
  }

  RegisterSet &insert(unsigned R) {
    unsigned Idx = v2x(R);
    ensure(Idx);
    return static_cast<RegisterSet&>(BitVector::set(Idx));
  }
  RegisterSet &remove(unsigned R) {
    unsigned Idx = v2x(R);
    if (Idx >= size())
      return *this;
    return static_cast<RegisterSet&>(BitVector::reset(Idx));
  }

  // Set union: add all registers from Rs.
  RegisterSet &insert(const RegisterSet &Rs) {
    return static_cast<RegisterSet&>(BitVector::operator|=(Rs));
  }
  // Set difference: remove all registers present in Rs.
  RegisterSet &remove(const RegisterSet &Rs) {
    return static_cast<RegisterSet&>(BitVector::reset(Rs));
  }

  reference operator[](unsigned R) {
    unsigned Idx = v2x(R);
    ensure(Idx);
    return BitVector::operator[](Idx);
  }
  bool operator[](unsigned R) const {
    unsigned Idx = v2x(R);
    assert(Idx < size());
    return BitVector::operator[](Idx);
  }
  bool has(unsigned R) const {
    unsigned Idx = v2x(R);
    if (Idx >= size())
      return false;
    return BitVector::test(Idx);
  }

  bool empty() const {
    return !BitVector::any();
  }
  // True if this set is a superset of Rs.
  bool includes(const RegisterSet &Rs) const {
    // A.BitVector::test(B) <=> A-B != {}
    return !Rs.BitVector::test(*this);
  }
  bool intersects(const RegisterSet &Rs) const {
    return BitVector::anyCommon(Rs);
  }

private:
  // Grow the bit vector so that Idx is a valid position (minimum 32 bits
  // to avoid repeated small reallocations).
  void ensure(unsigned Idx) {
    if (size() <= Idx)
      resize(std::max(Idx+1, 32U));
  }

  // Virtual register number -> bit index.
  static inline unsigned v2x(unsigned v) {
    return TargetRegisterInfo::virtReg2Index(v);
  }

  // Bit index -> virtual register number.
  static inline unsigned x2v(unsigned x) {
    return TargetRegisterInfo::index2VirtReg(x);
  }
};

// Helper to print a RegisterSet with target-specific register names.
struct PrintRegSet {
  PrintRegSet(const RegisterSet &S, const TargetRegisterInfo *RI)
    : RS(S), TRI(RI) {}

  friend raw_ostream &operator<< (raw_ostream &OS,
        const PrintRegSet &P);

private:
  const RegisterSet &RS;
  const TargetRegisterInfo *TRI;
};

raw_ostream &operator<< (raw_ostream &OS, const PrintRegSet &P)
    LLVM_ATTRIBUTE_UNUSED;
raw_ostream &operator<< (raw_ostream &OS, const PrintRegSet &P) {
  OS << '{';
  // 0 is the "not found" sentinel returned by find_first/find_next.
  for (unsigned R = P.RS.find_first(); R; R = P.RS.find_next(R))
    OS << ' ' << PrintReg(R, P.TRI);
  OS << " }";
  return OS;
}

class Transformation;

class HexagonBitSimplify : public MachineFunctionPass {
public:
  static char ID;

  HexagonBitSimplify() : MachineFunctionPass(ID), MDT(nullptr) {
    initializeHexagonBitSimplifyPass(*PassRegistry::getPassRegistry());
  }

  StringRef getPassName() const override {
    return "Hexagon bit simplification";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  // Static helpers shared by the individual transformations below.
  static void getInstrDefs(const MachineInstr &MI, RegisterSet &Defs);
  static void getInstrUses(const MachineInstr &MI, RegisterSet &Uses);
  static bool isEqual(const BitTracker::RegisterCell &RC1, uint16_t B1,
      const BitTracker::RegisterCell &RC2, uint16_t B2, uint16_t W);
  static bool isZero(const BitTracker::RegisterCell &RC, uint16_t B,
      uint16_t W);
  static bool getConst(const BitTracker::RegisterCell &RC, uint16_t B,
      uint16_t W, uint64_t &U);
  static bool replaceReg(unsigned OldR, unsigned NewR,
      MachineRegisterInfo &MRI);
  static bool getSubregMask(const BitTracker::RegisterRef &RR,
      unsigned &Begin, unsigned &Width, MachineRegisterInfo &MRI);
  static bool replaceRegWithSub(unsigned OldR, unsigned NewR,
      unsigned NewSR, MachineRegisterInfo &MRI);
  static bool replaceSubWithSub(unsigned OldR, unsigned OldSR,
      unsigned NewR, unsigned NewSR, MachineRegisterInfo &MRI);
  static bool parseRegSequence(const MachineInstr &I,
      BitTracker::RegisterRef &SL, BitTracker::RegisterRef &SH,
      const MachineRegisterInfo &MRI);

  static bool getUsedBitsInStore(unsigned Opc, BitVector &Bits,
      uint16_t Begin);
  static bool getUsedBits(unsigned Opc, unsigned OpN, BitVector &Bits,
      uint16_t Begin, const HexagonInstrInfo &HII);

  static const TargetRegisterClass *getFinalVRegClass(
      const BitTracker::RegisterRef &RR, MachineRegisterInfo &MRI);
  static bool isTransparentCopy(const BitTracker::RegisterRef &RD,
      const BitTracker::RegisterRef &RS, MachineRegisterInfo &MRI);

private:
  MachineDominatorTree *MDT;

  bool visitBlock(MachineBasicBlock &B, Transformation &T, RegisterSet &AVs);
  static bool hasTiedUse(unsigned Reg, MachineRegisterInfo &MRI,
      unsigned NewSub = Hexagon::NoSubRegister);
};

char HexagonBitSimplify::ID = 0;
typedef HexagonBitSimplify HBS;

// The purpose of this class is to provide a common facility to traverse
// the function top-down or bottom-up via the dominator tree, and keep
// track of the available registers.
// Abstract base for the individual transformations run by this pass.
// TopDown selects whether a block is processed before (top-down) or after
// (bottom-up) its dominator-tree children in visitBlock.
class Transformation {
public:
  bool TopDown;

  Transformation(bool TD) : TopDown(TD) {}
  virtual ~Transformation() = default;

  // Process one basic block; AVs is the set of registers whose definitions
  // dominate B (i.e. are available on entry). Returns true if B changed.
  virtual bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) = 0;
};

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(HexagonBitSimplify, "hexbit",
      "Hexagon bit simplification", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(HexagonBitSimplify, "hexbit",
      "Hexagon bit simplification", false, false)

// Recursively visit B and everything it dominates, applying transformation
// T. AVs is the set of registers available (defined in dominators) on entry
// to B. Children are visited with AVs extended by B's own definitions.
bool HexagonBitSimplify::visitBlock(MachineBasicBlock &B, Transformation &T,
      RegisterSet &AVs) {
  MachineDomTreeNode *N = MDT->getNode(&B);
  typedef GraphTraits<MachineDomTreeNode*> GTN;
  bool Changed = false;

  // Top-down: process B before its dominated blocks.
  if (T.TopDown)
    Changed = T.processBlock(B, AVs);

  RegisterSet Defs;
  for (auto &I : B)
    getInstrDefs(I, Defs);
  RegisterSet NewAVs = AVs;
  NewAVs.insert(Defs);

  for (auto I = GTN::child_begin(N), E = GTN::child_end(N); I != E; ++I) {
    MachineBasicBlock *SB = (*I)->getBlock();
    Changed |= visitBlock(*SB, T, NewAVs);
  }
  // Bottom-up: process B after its dominated blocks, still with the
  // entry-availability set (B's own defs are not "available" to itself).
  if (!T.TopDown)
    Changed |= T.processBlock(B, AVs);

  return Changed;
}

//
// Utility functions:
//

// Collect into Defs all virtual registers defined by MI.
void HexagonBitSimplify::getInstrDefs(const MachineInstr &MI,
      RegisterSet &Defs) {
  for (auto &Op : MI.operands()) {
    if (!Op.isReg() || !Op.isDef())
      continue;
    unsigned R = Op.getReg();
    // Physical registers are not tracked.
    if (!TargetRegisterInfo::isVirtualRegister(R))
      continue;
    Defs.insert(R);
  }
}

// Collect into Uses all virtual registers used by MI.
void HexagonBitSimplify::getInstrUses(const MachineInstr &MI,
      RegisterSet &Uses) {
  for (auto &Op : MI.operands()) {
    if (!Op.isReg() || !Op.isUse())
      continue;
    unsigned R = Op.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(R))
      continue;
    Uses.insert(R);
  }
}

// Check if all the bits in range [B, E) in both cells are equal.
303 bool HexagonBitSimplify::isEqual(const BitTracker::RegisterCell &RC1, 304 uint16_t B1, const BitTracker::RegisterCell &RC2, uint16_t B2, 305 uint16_t W) { 306 for (uint16_t i = 0; i < W; ++i) { 307 // If RC1[i] is "bottom", it cannot be proven equal to RC2[i]. 308 if (RC1[B1+i].Type == BitTracker::BitValue::Ref && RC1[B1+i].RefI.Reg == 0) 309 return false; 310 // Same for RC2[i]. 311 if (RC2[B2+i].Type == BitTracker::BitValue::Ref && RC2[B2+i].RefI.Reg == 0) 312 return false; 313 if (RC1[B1+i] != RC2[B2+i]) 314 return false; 315 } 316 return true; 317 } 318 319 bool HexagonBitSimplify::isZero(const BitTracker::RegisterCell &RC, 320 uint16_t B, uint16_t W) { 321 assert(B < RC.width() && B+W <= RC.width()); 322 for (uint16_t i = B; i < B+W; ++i) 323 if (!RC[i].is(0)) 324 return false; 325 return true; 326 } 327 328 bool HexagonBitSimplify::getConst(const BitTracker::RegisterCell &RC, 329 uint16_t B, uint16_t W, uint64_t &U) { 330 assert(B < RC.width() && B+W <= RC.width()); 331 int64_t T = 0; 332 for (uint16_t i = B+W; i > B; --i) { 333 const BitTracker::BitValue &BV = RC[i-1]; 334 T <<= 1; 335 if (BV.is(1)) 336 T |= 1; 337 else if (!BV.is(0)) 338 return false; 339 } 340 U = T; 341 return true; 342 } 343 344 bool HexagonBitSimplify::replaceReg(unsigned OldR, unsigned NewR, 345 MachineRegisterInfo &MRI) { 346 if (!TargetRegisterInfo::isVirtualRegister(OldR) || 347 !TargetRegisterInfo::isVirtualRegister(NewR)) 348 return false; 349 auto Begin = MRI.use_begin(OldR), End = MRI.use_end(); 350 decltype(End) NextI; 351 for (auto I = Begin; I != End; I = NextI) { 352 NextI = std::next(I); 353 I->setReg(NewR); 354 } 355 return Begin != End; 356 } 357 358 bool HexagonBitSimplify::replaceRegWithSub(unsigned OldR, unsigned NewR, 359 unsigned NewSR, MachineRegisterInfo &MRI) { 360 if (!TargetRegisterInfo::isVirtualRegister(OldR) || 361 !TargetRegisterInfo::isVirtualRegister(NewR)) 362 return false; 363 if (hasTiedUse(OldR, MRI, NewSR)) 364 return false; 365 auto Begin = 
MRI.use_begin(OldR), End = MRI.use_end(); 366 decltype(End) NextI; 367 for (auto I = Begin; I != End; I = NextI) { 368 NextI = std::next(I); 369 I->setReg(NewR); 370 I->setSubReg(NewSR); 371 } 372 return Begin != End; 373 } 374 375 bool HexagonBitSimplify::replaceSubWithSub(unsigned OldR, unsigned OldSR, 376 unsigned NewR, unsigned NewSR, MachineRegisterInfo &MRI) { 377 if (!TargetRegisterInfo::isVirtualRegister(OldR) || 378 !TargetRegisterInfo::isVirtualRegister(NewR)) 379 return false; 380 if (OldSR != NewSR && hasTiedUse(OldR, MRI, NewSR)) 381 return false; 382 auto Begin = MRI.use_begin(OldR), End = MRI.use_end(); 383 decltype(End) NextI; 384 for (auto I = Begin; I != End; I = NextI) { 385 NextI = std::next(I); 386 if (I->getSubReg() != OldSR) 387 continue; 388 I->setReg(NewR); 389 I->setSubReg(NewSR); 390 } 391 return Begin != End; 392 } 393 394 // For a register ref (pair Reg:Sub), set Begin to the position of the LSB 395 // of Sub in Reg, and set Width to the size of Sub in bits. Return true, 396 // if this succeeded, otherwise return false. 397 bool HexagonBitSimplify::getSubregMask(const BitTracker::RegisterRef &RR, 398 unsigned &Begin, unsigned &Width, MachineRegisterInfo &MRI) { 399 const TargetRegisterClass *RC = MRI.getRegClass(RR.Reg); 400 if (RR.Sub == 0) { 401 Begin = 0; 402 Width = RC->getSize()*8; 403 return true; 404 } 405 406 Begin = 0; 407 408 switch (RC->getID()) { 409 case Hexagon::DoubleRegsRegClassID: 410 case Hexagon::VecDblRegsRegClassID: 411 case Hexagon::VecDblRegs128BRegClassID: 412 Width = RC->getSize()*8 / 2; 413 if (RR.Sub == Hexagon::isub_hi || RR.Sub == Hexagon::vsub_hi) 414 Begin = Width; 415 break; 416 default: 417 return false; 418 } 419 return true; 420 } 421 422 423 // For a REG_SEQUENCE, set SL to the low subregister and SH to the high 424 // subregister. 
425 bool HexagonBitSimplify::parseRegSequence(const MachineInstr &I, 426 BitTracker::RegisterRef &SL, BitTracker::RegisterRef &SH, 427 const MachineRegisterInfo &MRI) { 428 assert(I.getOpcode() == TargetOpcode::REG_SEQUENCE); 429 unsigned Sub1 = I.getOperand(2).getImm(), Sub2 = I.getOperand(4).getImm(); 430 auto *DstRC = MRI.getRegClass(I.getOperand(0).getReg()); 431 auto &HRI = static_cast<const HexagonRegisterInfo&>( 432 *MRI.getTargetRegisterInfo()); 433 unsigned SubLo = HRI.getHexagonSubRegIndex(DstRC, Hexagon::ps_sub_lo); 434 unsigned SubHi = HRI.getHexagonSubRegIndex(DstRC, Hexagon::ps_sub_hi); 435 assert((Sub1 == SubLo && Sub2 == SubHi) || (Sub1 == SubHi && Sub2 == SubLo)); 436 if (Sub1 == SubLo && Sub2 == SubHi) { 437 SL = I.getOperand(1); 438 SH = I.getOperand(3); 439 return true; 440 } 441 if (Sub1 == SubHi && Sub2 == SubLo) { 442 SH = I.getOperand(1); 443 SL = I.getOperand(3); 444 return true; 445 } 446 return false; 447 } 448 449 // All stores (except 64-bit stores) take a 32-bit register as the source 450 // of the value to be stored. If the instruction stores into a location 451 // that is shorter than 32 bits, some bits of the source register are not 452 // used. For each store instruction, calculate the set of used bits in 453 // the source register, and set appropriate bits in Bits. Return true if 454 // the bits are calculated, false otherwise. 
bool HexagonBitSimplify::getUsedBitsInStore(unsigned Opc, BitVector &Bits,
      uint16_t Begin) {
  using namespace Hexagon;

  switch (Opc) {
    // Store byte
    case S2_storerb_io:           // memb(Rs32+#s11:0)=Rt32
    case S2_storerbnew_io:        // memb(Rs32+#s11:0)=Nt8.new
    case S2_pstorerbt_io:         // if (Pv4) memb(Rs32+#u6:0)=Rt32
    case S2_pstorerbf_io:         // if (!Pv4) memb(Rs32+#u6:0)=Rt32
    case S4_pstorerbtnew_io:      // if (Pv4.new) memb(Rs32+#u6:0)=Rt32
    case S4_pstorerbfnew_io:      // if (!Pv4.new) memb(Rs32+#u6:0)=Rt32
    case S2_pstorerbnewt_io:      // if (Pv4) memb(Rs32+#u6:0)=Nt8.new
    case S2_pstorerbnewf_io:      // if (!Pv4) memb(Rs32+#u6:0)=Nt8.new
    case S4_pstorerbnewtnew_io:   // if (Pv4.new) memb(Rs32+#u6:0)=Nt8.new
    case S4_pstorerbnewfnew_io:   // if (!Pv4.new) memb(Rs32+#u6:0)=Nt8.new
    case S2_storerb_pi:           // memb(Rx32++#s4:0)=Rt32
    case S2_storerbnew_pi:        // memb(Rx32++#s4:0)=Nt8.new
    case S2_pstorerbt_pi:         // if (Pv4) memb(Rx32++#s4:0)=Rt32
    case S2_pstorerbf_pi:         // if (!Pv4) memb(Rx32++#s4:0)=Rt32
    case S2_pstorerbtnew_pi:      // if (Pv4.new) memb(Rx32++#s4:0)=Rt32
    case S2_pstorerbfnew_pi:      // if (!Pv4.new) memb(Rx32++#s4:0)=Rt32
    case S2_pstorerbnewt_pi:      // if (Pv4) memb(Rx32++#s4:0)=Nt8.new
    case S2_pstorerbnewf_pi:      // if (!Pv4) memb(Rx32++#s4:0)=Nt8.new
    case S2_pstorerbnewtnew_pi:   // if (Pv4.new) memb(Rx32++#s4:0)=Nt8.new
    case S2_pstorerbnewfnew_pi:   // if (!Pv4.new) memb(Rx32++#s4:0)=Nt8.new
    case S4_storerb_ap:           // memb(Re32=#U6)=Rt32
    case S4_storerbnew_ap:        // memb(Re32=#U6)=Nt8.new
    case S2_storerb_pr:           // memb(Rx32++Mu2)=Rt32
    case S2_storerbnew_pr:        // memb(Rx32++Mu2)=Nt8.new
    case S4_storerb_ur:           // memb(Ru32<<#u2+#U6)=Rt32
    case S4_storerbnew_ur:        // memb(Ru32<<#u2+#U6)=Nt8.new
    case S2_storerb_pbr:          // memb(Rx32++Mu2:brev)=Rt32
    case S2_storerbnew_pbr:       // memb(Rx32++Mu2:brev)=Nt8.new
    case S2_storerb_pci:          // memb(Rx32++#s4:0:circ(Mu2))=Rt32
    case S2_storerbnew_pci:       // memb(Rx32++#s4:0:circ(Mu2))=Nt8.new
    case S2_storerb_pcr:          // memb(Rx32++I:circ(Mu2))=Rt32
    case S2_storerbnew_pcr:       // memb(Rx32++I:circ(Mu2))=Nt8.new
    case S4_storerb_rr:           // memb(Rs32+Ru32<<#u2)=Rt32
    case S4_storerbnew_rr:        // memb(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerbt_rr:         // if (Pv4) memb(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerbf_rr:         // if (!Pv4) memb(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerbtnew_rr:      // if (Pv4.new) memb(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerbfnew_rr:      // if (!Pv4.new) memb(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerbnewt_rr:      // if (Pv4) memb(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerbnewf_rr:      // if (!Pv4) memb(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerbnewtnew_rr:   // if (Pv4.new) memb(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerbnewfnew_rr:   // if (!Pv4.new) memb(Rs32+Ru32<<#u2)=Nt8.new
    case S2_storerbgp:            // memb(gp+#u16:0)=Rt32
    case S2_storerbnewgp:         // memb(gp+#u16:0)=Nt8.new
    case S4_pstorerbt_abs:        // if (Pv4) memb(#u6)=Rt32
    case S4_pstorerbf_abs:        // if (!Pv4) memb(#u6)=Rt32
    case S4_pstorerbtnew_abs:     // if (Pv4.new) memb(#u6)=Rt32
    case S4_pstorerbfnew_abs:     // if (!Pv4.new) memb(#u6)=Rt32
    case S4_pstorerbnewt_abs:     // if (Pv4) memb(#u6)=Nt8.new
    case S4_pstorerbnewf_abs:     // if (!Pv4) memb(#u6)=Nt8.new
    case S4_pstorerbnewtnew_abs:  // if (Pv4.new) memb(#u6)=Nt8.new
    case S4_pstorerbnewfnew_abs:  // if (!Pv4.new) memb(#u6)=Nt8.new
      // Byte stores use only the low 8 bits of the source.
      Bits.set(Begin, Begin+8);
      return true;

    // Store low half
    case S2_storerh_io:           // memh(Rs32+#s11:1)=Rt32
    case S2_storerhnew_io:        // memh(Rs32+#s11:1)=Nt8.new
    case S2_pstorerht_io:         // if (Pv4) memh(Rs32+#u6:1)=Rt32
    case S2_pstorerhf_io:         // if (!Pv4) memh(Rs32+#u6:1)=Rt32
    case S4_pstorerhtnew_io:      // if (Pv4.new) memh(Rs32+#u6:1)=Rt32
    case S4_pstorerhfnew_io:      // if (!Pv4.new) memh(Rs32+#u6:1)=Rt32
    case S2_pstorerhnewt_io:      // if (Pv4) memh(Rs32+#u6:1)=Nt8.new
    case S2_pstorerhnewf_io:      // if (!Pv4) memh(Rs32+#u6:1)=Nt8.new
    case S4_pstorerhnewtnew_io:   // if (Pv4.new) memh(Rs32+#u6:1)=Nt8.new
    case S4_pstorerhnewfnew_io:   // if (!Pv4.new) memh(Rs32+#u6:1)=Nt8.new
    case S2_storerh_pi:           // memh(Rx32++#s4:1)=Rt32
    case S2_storerhnew_pi:        // memh(Rx32++#s4:1)=Nt8.new
    case S2_pstorerht_pi:         // if (Pv4) memh(Rx32++#s4:1)=Rt32
    case S2_pstorerhf_pi:         // if (!Pv4) memh(Rx32++#s4:1)=Rt32
    case S2_pstorerhtnew_pi:      // if (Pv4.new) memh(Rx32++#s4:1)=Rt32
    case S2_pstorerhfnew_pi:      // if (!Pv4.new) memh(Rx32++#s4:1)=Rt32
    case S2_pstorerhnewt_pi:      // if (Pv4) memh(Rx32++#s4:1)=Nt8.new
    case S2_pstorerhnewf_pi:      // if (!Pv4) memh(Rx32++#s4:1)=Nt8.new
    case S2_pstorerhnewtnew_pi:   // if (Pv4.new) memh(Rx32++#s4:1)=Nt8.new
    case S2_pstorerhnewfnew_pi:   // if (!Pv4.new) memh(Rx32++#s4:1)=Nt8.new
    case S4_storerh_ap:           // memh(Re32=#U6)=Rt32
    case S4_storerhnew_ap:        // memh(Re32=#U6)=Nt8.new
    case S2_storerh_pr:           // memh(Rx32++Mu2)=Rt32
    case S2_storerhnew_pr:        // memh(Rx32++Mu2)=Nt8.new
    case S4_storerh_ur:           // memh(Ru32<<#u2+#U6)=Rt32
    case S4_storerhnew_ur:        // memh(Ru32<<#u2+#U6)=Nt8.new
    case S2_storerh_pbr:          // memh(Rx32++Mu2:brev)=Rt32
    case S2_storerhnew_pbr:       // memh(Rx32++Mu2:brev)=Nt8.new
    case S2_storerh_pci:          // memh(Rx32++#s4:1:circ(Mu2))=Rt32
    case S2_storerhnew_pci:       // memh(Rx32++#s4:1:circ(Mu2))=Nt8.new
    case S2_storerh_pcr:          // memh(Rx32++I:circ(Mu2))=Rt32
    case S2_storerhnew_pcr:       // memh(Rx32++I:circ(Mu2))=Nt8.new
    case S4_storerh_rr:           // memh(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerht_rr:         // if (Pv4) memh(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerhf_rr:         // if (!Pv4) memh(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerhtnew_rr:      // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerhfnew_rr:      // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Rt32
    case S4_storerhnew_rr:        // memh(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerhnewt_rr:      // if (Pv4) memh(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerhnewf_rr:      // if (!Pv4) memh(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerhnewtnew_rr:   // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerhnewfnew_rr:   // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Nt8.new
    case S2_storerhgp:            // memh(gp+#u16:1)=Rt32
    case S2_storerhnewgp:         // memh(gp+#u16:1)=Nt8.new
    case S4_pstorerht_abs:        // if (Pv4) memh(#u6)=Rt32
    case S4_pstorerhf_abs:        // if (!Pv4) memh(#u6)=Rt32
    case S4_pstorerhtnew_abs:     // if (Pv4.new) memh(#u6)=Rt32
    case S4_pstorerhfnew_abs:     // if (!Pv4.new) memh(#u6)=Rt32
    case S4_pstorerhnewt_abs:     // if (Pv4) memh(#u6)=Nt8.new
    case S4_pstorerhnewf_abs:     // if (!Pv4) memh(#u6)=Nt8.new
    case S4_pstorerhnewtnew_abs:  // if (Pv4.new) memh(#u6)=Nt8.new
    case S4_pstorerhnewfnew_abs:  // if (!Pv4.new) memh(#u6)=Nt8.new
      // Halfword stores use only the low 16 bits of the source.
      Bits.set(Begin, Begin+16);
      return true;

    // Store high half
    case S2_storerf_io:           // memh(Rs32+#s11:1)=Rt.H32
    case S2_pstorerft_io:         // if (Pv4) memh(Rs32+#u6:1)=Rt.H32
    case S2_pstorerff_io:         // if (!Pv4) memh(Rs32+#u6:1)=Rt.H32
    case S4_pstorerftnew_io:      // if (Pv4.new) memh(Rs32+#u6:1)=Rt.H32
    case S4_pstorerffnew_io:      // if (!Pv4.new) memh(Rs32+#u6:1)=Rt.H32
    case S2_storerf_pi:           // memh(Rx32++#s4:1)=Rt.H32
    case S2_pstorerft_pi:         // if (Pv4) memh(Rx32++#s4:1)=Rt.H32
    case S2_pstorerff_pi:         // if (!Pv4) memh(Rx32++#s4:1)=Rt.H32
    case S2_pstorerftnew_pi:      // if (Pv4.new) memh(Rx32++#s4:1)=Rt.H32
    case S2_pstorerffnew_pi:      // if (!Pv4.new) memh(Rx32++#s4:1)=Rt.H32
    case S4_storerf_ap:           // memh(Re32=#U6)=Rt.H32
    case S2_storerf_pr:           // memh(Rx32++Mu2)=Rt.H32
    case S4_storerf_ur:           // memh(Ru32<<#u2+#U6)=Rt.H32
    case S2_storerf_pbr:          // memh(Rx32++Mu2:brev)=Rt.H32
    case S2_storerf_pci:          // memh(Rx32++#s4:1:circ(Mu2))=Rt.H32
    case S2_storerf_pcr:          // memh(Rx32++I:circ(Mu2))=Rt.H32
    case S4_storerf_rr:           // memh(Rs32+Ru32<<#u2)=Rt.H32
    case S4_pstorerft_rr:         // if (Pv4) memh(Rs32+Ru32<<#u2)=Rt.H32
    case S4_pstorerff_rr:         // if (!Pv4) memh(Rs32+Ru32<<#u2)=Rt.H32
    case S4_pstorerftnew_rr:      // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Rt.H32
    case S4_pstorerffnew_rr:      // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Rt.H32
    case S2_storerfgp:            // memh(gp+#u16:1)=Rt.H32
    case S4_pstorerft_abs:        // if (Pv4) memh(#u6)=Rt.H32
    case S4_pstorerff_abs:        // if (!Pv4) memh(#u6)=Rt.H32
    case S4_pstorerftnew_abs:     // if (Pv4.new) memh(#u6)=Rt.H32
    case S4_pstorerffnew_abs:     // if (!Pv4.new) memh(#u6)=Rt.H32
      // "Rt.H" stores use only the high 16 bits of the source.
      Bits.set(Begin+16, Begin+32);
      return true;
  }

  return false;
}

// For an instruction with opcode Opc, calculate the set of bits that it
// uses in a register in operand OpN. This only calculates the set of used
// bits for cases where it does not depend on any operands (as is the case
// in shifts, for example). For concrete instructions from a program, the
// operand may be a subregister of a larger register, while Bits would
// correspond to the larger register in its entirety. Because of that,
// the parameter Begin can be used to indicate which bit of Bits should be
// considered the LSB of the operand.
bool HexagonBitSimplify::getUsedBits(unsigned Opc, unsigned OpN,
      BitVector &Bits, uint16_t Begin, const HexagonInstrInfo &HII) {
  using namespace Hexagon;

  const MCInstrDesc &D = HII.get(Opc);
  // For stores, only the stored-value operand (the last one) has a
  // precomputed used-bits mask.
  if (D.mayStore()) {
    if (OpN == D.getNumOperands()-1)
      return getUsedBitsInStore(Opc, Bits, Begin);
    return false;
  }

  switch (Opc) {
    // One register source. Used bits: R1[0-7].
    case A2_sxtb:
    case A2_zxtb:
    case A4_cmpbeqi:
    case A4_cmpbgti:
    case A4_cmpbgtui:
      if (OpN == 1) {
        Bits.set(Begin, Begin+8);
        return true;
      }
      break;

    // One register source. Used bits: R1[0-15].
    case A2_aslh:
    case A2_sxth:
    case A2_zxth:
    case A4_cmpheqi:
    case A4_cmphgti:
    case A4_cmphgtui:
      if (OpN == 1) {
        Bits.set(Begin, Begin+16);
        return true;
      }
      break;

    // One register source. Used bits: R1[16-31].
    case A2_asrh:
      if (OpN == 1) {
        Bits.set(Begin+16, Begin+32);
        return true;
      }
      break;

    // Two register sources. Used bits: R1[0-7], R2[0-7].
    case A4_cmpbeq:
    case A4_cmpbgt:
    case A4_cmpbgtu:
      if (OpN == 1) {
        Bits.set(Begin, Begin+8);
        return true;
      }
      break;

    // Two register sources. Used bits: R1[0-15], R2[0-15].
    case A4_cmpheq:
    case A4_cmphgt:
    case A4_cmphgtu:
    case A2_addh_h16_ll:
    case A2_addh_h16_sat_ll:
    case A2_addh_l16_ll:
    case A2_addh_l16_sat_ll:
    case A2_combine_ll:
    case A2_subh_h16_ll:
    case A2_subh_h16_sat_ll:
    case A2_subh_l16_ll:
    case A2_subh_l16_sat_ll:
    case M2_mpy_acc_ll_s0:
    case M2_mpy_acc_ll_s1:
    case M2_mpy_acc_sat_ll_s0:
    case M2_mpy_acc_sat_ll_s1:
    case M2_mpy_ll_s0:
    case M2_mpy_ll_s1:
    case M2_mpy_nac_ll_s0:
    case M2_mpy_nac_ll_s1:
    case M2_mpy_nac_sat_ll_s0:
    case M2_mpy_nac_sat_ll_s1:
    case M2_mpy_rnd_ll_s0:
    case M2_mpy_rnd_ll_s1:
    case M2_mpy_sat_ll_s0:
    case M2_mpy_sat_ll_s1:
    case M2_mpy_sat_rnd_ll_s0:
    case M2_mpy_sat_rnd_ll_s1:
    case M2_mpyd_acc_ll_s0:
    case M2_mpyd_acc_ll_s1:
    case M2_mpyd_ll_s0:
    case M2_mpyd_ll_s1:
    case M2_mpyd_nac_ll_s0:
    case M2_mpyd_nac_ll_s1:
    case M2_mpyd_rnd_ll_s0:
    case M2_mpyd_rnd_ll_s1:
    case M2_mpyu_acc_ll_s0:
    case M2_mpyu_acc_ll_s1:
    case M2_mpyu_ll_s0:
    case M2_mpyu_ll_s1:
    case M2_mpyu_nac_ll_s0:
    case M2_mpyu_nac_ll_s1:
    case M2_mpyud_acc_ll_s0:
    case M2_mpyud_acc_ll_s1:
    case M2_mpyud_ll_s0:
    case M2_mpyud_ll_s1:
    case M2_mpyud_nac_ll_s0:
    case M2_mpyud_nac_ll_s1:
      if (OpN == 1 || OpN == 2) {
        Bits.set(Begin, Begin+16);
        return true;
      }
      break;

    // Two register sources. Used bits: R1[0-15], R2[16-31].
    case A2_addh_h16_lh:
    case A2_addh_h16_sat_lh:
    case A2_combine_lh:
    case A2_subh_h16_lh:
    case A2_subh_h16_sat_lh:
    case M2_mpy_acc_lh_s0:
    case M2_mpy_acc_lh_s1:
    case M2_mpy_acc_sat_lh_s0:
    case M2_mpy_acc_sat_lh_s1:
    case M2_mpy_lh_s0:
    case M2_mpy_lh_s1:
    case M2_mpy_nac_lh_s0:
    case M2_mpy_nac_lh_s1:
    case M2_mpy_nac_sat_lh_s0:
    case M2_mpy_nac_sat_lh_s1:
    case M2_mpy_rnd_lh_s0:
    case M2_mpy_rnd_lh_s1:
    case M2_mpy_sat_lh_s0:
    case M2_mpy_sat_lh_s1:
    case M2_mpy_sat_rnd_lh_s0:
    case M2_mpy_sat_rnd_lh_s1:
    case M2_mpyd_acc_lh_s0:
    case M2_mpyd_acc_lh_s1:
    case M2_mpyd_lh_s0:
    case M2_mpyd_lh_s1:
    case M2_mpyd_nac_lh_s0:
    case M2_mpyd_nac_lh_s1:
    case M2_mpyd_rnd_lh_s0:
    case M2_mpyd_rnd_lh_s1:
    case M2_mpyu_acc_lh_s0:
    case M2_mpyu_acc_lh_s1:
    case M2_mpyu_lh_s0:
    case M2_mpyu_lh_s1:
    case M2_mpyu_nac_lh_s0:
    case M2_mpyu_nac_lh_s1:
    case M2_mpyud_acc_lh_s0:
    case M2_mpyud_acc_lh_s1:
    case M2_mpyud_lh_s0:
    case M2_mpyud_lh_s1:
    case M2_mpyud_nac_lh_s0:
    case M2_mpyud_nac_lh_s1:
    // These four are actually LH.
    case A2_addh_l16_hl:
    case A2_addh_l16_sat_hl:
    case A2_subh_l16_hl:
    case A2_subh_l16_sat_hl:
      if (OpN == 1) {
        Bits.set(Begin, Begin+16);
        return true;
      }
      if (OpN == 2) {
        Bits.set(Begin+16, Begin+32);
        return true;
      }
      break;

    // Two register sources, used bits: R1[16-31], R2[0-15].
    case A2_addh_h16_hl:
    case A2_addh_h16_sat_hl:
    case A2_combine_hl:
    case A2_subh_h16_hl:
    case A2_subh_h16_sat_hl:
    case M2_mpy_acc_hl_s0:
    case M2_mpy_acc_hl_s1:
    case M2_mpy_acc_sat_hl_s0:
    case M2_mpy_acc_sat_hl_s1:
    case M2_mpy_hl_s0:
    case M2_mpy_hl_s1:
    case M2_mpy_nac_hl_s0:
    case M2_mpy_nac_hl_s1:
    case M2_mpy_nac_sat_hl_s0:
    case M2_mpy_nac_sat_hl_s1:
    case M2_mpy_rnd_hl_s0:
    case M2_mpy_rnd_hl_s1:
    case M2_mpy_sat_hl_s0:
    case M2_mpy_sat_hl_s1:
    case M2_mpy_sat_rnd_hl_s0:
    case M2_mpy_sat_rnd_hl_s1:
    case M2_mpyd_acc_hl_s0:
    case M2_mpyd_acc_hl_s1:
    case M2_mpyd_hl_s0:
    case M2_mpyd_hl_s1:
    case M2_mpyd_nac_hl_s0:
    case M2_mpyd_nac_hl_s1:
    case M2_mpyd_rnd_hl_s0:
    case M2_mpyd_rnd_hl_s1:
    case M2_mpyu_acc_hl_s0:
    case M2_mpyu_acc_hl_s1:
    case M2_mpyu_hl_s0:
    case M2_mpyu_hl_s1:
    case M2_mpyu_nac_hl_s0:
    case M2_mpyu_nac_hl_s1:
    case M2_mpyud_acc_hl_s0:
    case M2_mpyud_acc_hl_s1:
    case M2_mpyud_hl_s0:
    case M2_mpyud_hl_s1:
    case M2_mpyud_nac_hl_s0:
    case M2_mpyud_nac_hl_s1:
      if (OpN == 1) {
        Bits.set(Begin+16, Begin+32);
        return true;
      }
      if (OpN == 2) {
        Bits.set(Begin, Begin+16);
        return true;
      }
      break;

    // Two register sources, used bits: R1[16-31], R2[16-31].
    case A2_addh_h16_hh:
    case A2_addh_h16_sat_hh:
    case A2_combine_hh:
    case A2_subh_h16_hh:
    case A2_subh_h16_sat_hh:
    case M2_mpy_acc_hh_s0:
    case M2_mpy_acc_hh_s1:
    case M2_mpy_acc_sat_hh_s0:
    case M2_mpy_acc_sat_hh_s1:
    case M2_mpy_hh_s0:
    case M2_mpy_hh_s1:
    case M2_mpy_nac_hh_s0:
    case M2_mpy_nac_hh_s1:
    case M2_mpy_nac_sat_hh_s0:
    case M2_mpy_nac_sat_hh_s1:
    case M2_mpy_rnd_hh_s0:
    case M2_mpy_rnd_hh_s1:
    case M2_mpy_sat_hh_s0:
    case M2_mpy_sat_hh_s1:
    case M2_mpy_sat_rnd_hh_s0:
    case M2_mpy_sat_rnd_hh_s1:
    case M2_mpyd_acc_hh_s0:
    case M2_mpyd_acc_hh_s1:
    case M2_mpyd_hh_s0:
    case M2_mpyd_hh_s1:
    case M2_mpyd_nac_hh_s0:
    case M2_mpyd_nac_hh_s1:
    case M2_mpyd_rnd_hh_s0:
    case M2_mpyd_rnd_hh_s1:
    case M2_mpyu_acc_hh_s0:
    case M2_mpyu_acc_hh_s1:
    case M2_mpyu_hh_s0:
    case M2_mpyu_hh_s1:
    case M2_mpyu_nac_hh_s0:
    case M2_mpyu_nac_hh_s1:
    case M2_mpyud_acc_hh_s0:
    case M2_mpyud_acc_hh_s1:
    case M2_mpyud_hh_s0:
    case M2_mpyud_hh_s1:
    case M2_mpyud_nac_hh_s0:
    case M2_mpyud_nac_hh_s1:
      if (OpN == 1 || OpN == 2) {
        Bits.set(Begin+16, Begin+32);
        return true;
      }
      break;
  }

  return false;
}

// Calculate the register class that matches Reg:Sub. For example, if
// vreg1 is a double register, then vreg1:isub_hi would match the "int"
// register class.
const TargetRegisterClass *HexagonBitSimplify::getFinalVRegClass(
      const BitTracker::RegisterRef &RR, MachineRegisterInfo &MRI) {
  // Physical registers have no class recorded in MRI.
  if (!TargetRegisterInfo::isVirtualRegister(RR.Reg))
    return nullptr;
  auto *RC = MRI.getRegClass(RR.Reg);
  // Without a subregister, the register's own class is the final class.
  if (RR.Sub == 0)
    return RC;
  auto &HRI = static_cast<const HexagonRegisterInfo&>(
                  *MRI.getTargetRegisterInfo());

  // The only subregisters expected here are the low/high halves of a
  // double register.
  auto VerifySR = [&HRI] (const TargetRegisterClass *RC, unsigned Sub) -> void {
    assert(Sub == HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_lo) ||
           Sub == HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_hi));
  };

  // Map each "double" register class to the class of its halves.
  switch (RC->getID()) {
    case Hexagon::DoubleRegsRegClassID:
      VerifySR(RC, RR.Sub);
      return &Hexagon::IntRegsRegClass;
    case Hexagon::VecDblRegsRegClassID:
      VerifySR(RC, RR.Sub);
      return &Hexagon::VectorRegsRegClass;
    case Hexagon::VecDblRegs128BRegClassID:
      VerifySR(RC, RR.Sub);
      return &Hexagon::VectorRegs128BRegClass;
  }
  // Unhandled class/subregister combination.
  return nullptr;
}

// Check if RD could be replaced with RS at any possible use of RD.
// For example a predicate register cannot be replaced with a integer
// register, but a 64-bit register with a subregister can be replaced
// with a 32-bit register.
bool HexagonBitSimplify::isTransparentCopy(const BitTracker::RegisterRef &RD,
      const BitTracker::RegisterRef &RS, MachineRegisterInfo &MRI) {
  if (!TargetRegisterInfo::isVirtualRegister(RD.Reg) ||
      !TargetRegisterInfo::isVirtualRegister(RS.Reg))
    return false;
  // Return false if one (or both) classes are nullptr.
  auto *DRC = getFinalVRegClass(RD, MRI);
  if (!DRC)
    return false;
  // The copy is transparent exactly when both references resolve to the
  // same final class. (A null result for RS compares unequal to the
  // non-null DRC, so that case is rejected here as well.)
  return DRC == getFinalVRegClass(RS, MRI);
}

// Check if any use of Reg is a tied operand with a subregister other than
// NewSub. Rewriting such an operand could break the tie constraint, so
// callers back off when -hexbit-keep-tied is in effect (the default).
bool HexagonBitSimplify::hasTiedUse(unsigned Reg, MachineRegisterInfo &MRI,
      unsigned NewSub) {
  if (!PreserveTiedOps)
    return false;
  return llvm::any_of(MRI.use_operands(Reg),
                      [NewSub] (const MachineOperand &Op) -> bool {
                        return Op.getSubReg() != NewSub && Op.isTied();
                      });
}

namespace {

// Simple dead-code elimination that understands target specifics (it
// skips lifetime markers, which is why the target-independent DCE cannot
// be used). Blocks are processed bottom-up over the dominator tree, so
// instructions that become dead once their users are deleted can be
// removed in the same run.
class DeadCodeElimination {
public:
  DeadCodeElimination(MachineFunction &mf, MachineDominatorTree &mdt)
    : MF(mf), HII(*MF.getSubtarget<HexagonSubtarget>().getInstrInfo()),
      MDT(mdt), MRI(mf.getRegInfo()) {}

  bool run() {
    return runOnNode(MDT.getRootNode());
  }

private:
  bool isDead(unsigned R) const;
  bool runOnNode(MachineDomTreeNode *N);

  MachineFunction &MF;
  const HexagonInstrInfo &HII;
  MachineDominatorTree &MDT;
  MachineRegisterInfo &MRI;
};

} // end anonymous namespace

// A register is dead if its only uses are debug values, or PHIs whose own
// result is R itself (a self-referential cycle with no real users).
bool DeadCodeElimination::isDead(unsigned R) const {
  for (auto I = MRI.use_begin(R), E = MRI.use_end(); I != E; ++I) {
    MachineInstr *UseI = I->getParent();
    if (UseI->isDebugValue())
      continue;
    if (UseI->isPHI()) {
      assert(!UseI->getOperand(0).getSubReg());
      unsigned DR = UseI->getOperand(0).getReg();
      // A PHI that defines R does not keep R alive.
      if (DR == R)
        continue;
    }
    return false;
  }
  return true;
}

// Visit dominated blocks first, then scan this block backwards, erasing
// instructions all of whose defs are dead. Returns true if anything was
// erased.
bool DeadCodeElimination::runOnNode(MachineDomTreeNode *N) {
  bool Changed = false;
  typedef GraphTraits<MachineDomTreeNode*> GTN;
  for (auto I = GTN::child_begin(N), E = GTN::child_end(N); I != E; ++I)
    Changed |= runOnNode(*I);

  // Collect instructions in reverse order first: erasing while doing a
  // direct reverse walk would invalidate the iterator.
  MachineBasicBlock *B = N->getBlock();
  std::vector<MachineInstr*> Instrs;
  for (auto I = B->rbegin(), E = B->rend(); I != E; ++I)
    Instrs.push_back(&*I);

  for (auto MI : Instrs) {
    unsigned Opc = MI->getOpcode();
    // Do not touch lifetime markers. This is why the target-independent DCE
    // cannot be used.
    if (Opc == TargetOpcode::LIFETIME_START ||
        Opc == TargetOpcode::LIFETIME_END)
      continue;
    bool Store = false;
    if (MI->isInlineAsm())
      continue;
    // Delete PHIs if possible.
    if (!MI->isPHI() && !MI->isSafeToMove(nullptr, Store))
      continue;

    // Only erase the instruction if every register it defines is dead.
    bool AllDead = true;
    SmallVector<unsigned,2> Regs;
    for (auto &Op : MI->operands()) {
      if (!Op.isReg() || !Op.isDef())
        continue;
      unsigned R = Op.getReg();
      if (!TargetRegisterInfo::isVirtualRegister(R) || !isDead(R)) {
        AllDead = false;
        break;
      }
      Regs.push_back(R);
    }
    if (!AllDead)
      continue;

    B->erase(MI);
    // Any DBG_VALUEs still referring to the erased defs become undef.
    for (unsigned i = 0, n = Regs.size(); i != n; ++i)
      MRI.markUsesInDebugValueAsUndef(Regs[i]);
    Changed = true;
  }

  return Changed;
}

namespace {

// Eliminate redundant instructions
//
// This transformation will identify instructions where the output register
// is the same as one of its input registers. This only works on instructions
// that define a single register (unlike post-increment loads, for example).
// The equality check is actually more detailed: the code calculates which
// bits of the output are used, and only compares these bits with the input
// registers.
// If the output matches an input, the instruction is replaced with COPY.
// The copies will be removed by another transformation.
class RedundantInstrElimination : public Transformation {
public:
  RedundantInstrElimination(BitTracker &bt, const HexagonInstrInfo &hii,
        MachineRegisterInfo &mri)
    : Transformation(true), HII(hii), MRI(mri), BT(bt) {}

  bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;

private:
  // Recognize shifts that discard input bits; [LostB, LostE) is the range
  // of bit positions of the shifted input that do not affect the result.
  bool isLossyShiftLeft(const MachineInstr &MI, unsigned OpN,
        unsigned &LostB, unsigned &LostE);
  bool isLossyShiftRight(const MachineInstr &MI, unsigned OpN,
        unsigned &LostB, unsigned &LostE);
  // Compute which bits of Reg (or of operand OpN of MI) are actually used.
  bool computeUsedBits(unsigned Reg, BitVector &Bits);
  bool computeUsedBits(const MachineInstr &MI, unsigned OpN, BitVector &Bits,
        uint16_t Begin);
  // Check whether the bits of RD that are used anywhere hold the same
  // values in RS.
  bool usedBitsEqual(BitTracker::RegisterRef RD, BitTracker::RegisterRef RS);

  const HexagonInstrInfo &HII;
  MachineRegisterInfo &MRI;
  BitTracker &BT;
};

} // end anonymous namespace

// Check if the instruction is a lossy shift left, where the input being
// shifted is the operand OpN of MI. If true, [LostB, LostE) is the range
// of bit indices that are lost.
1073 bool RedundantInstrElimination::isLossyShiftLeft(const MachineInstr &MI, 1074 unsigned OpN, unsigned &LostB, unsigned &LostE) { 1075 using namespace Hexagon; 1076 1077 unsigned Opc = MI.getOpcode(); 1078 unsigned ImN, RegN, Width; 1079 switch (Opc) { 1080 case S2_asl_i_p: 1081 ImN = 2; 1082 RegN = 1; 1083 Width = 64; 1084 break; 1085 case S2_asl_i_p_acc: 1086 case S2_asl_i_p_and: 1087 case S2_asl_i_p_nac: 1088 case S2_asl_i_p_or: 1089 case S2_asl_i_p_xacc: 1090 ImN = 3; 1091 RegN = 2; 1092 Width = 64; 1093 break; 1094 case S2_asl_i_r: 1095 ImN = 2; 1096 RegN = 1; 1097 Width = 32; 1098 break; 1099 case S2_addasl_rrri: 1100 case S4_andi_asl_ri: 1101 case S4_ori_asl_ri: 1102 case S4_addi_asl_ri: 1103 case S4_subi_asl_ri: 1104 case S2_asl_i_r_acc: 1105 case S2_asl_i_r_and: 1106 case S2_asl_i_r_nac: 1107 case S2_asl_i_r_or: 1108 case S2_asl_i_r_sat: 1109 case S2_asl_i_r_xacc: 1110 ImN = 3; 1111 RegN = 2; 1112 Width = 32; 1113 break; 1114 default: 1115 return false; 1116 } 1117 1118 if (RegN != OpN) 1119 return false; 1120 1121 assert(MI.getOperand(ImN).isImm()); 1122 unsigned S = MI.getOperand(ImN).getImm(); 1123 if (S == 0) 1124 return false; 1125 LostB = Width-S; 1126 LostE = Width; 1127 return true; 1128 } 1129 1130 // Check if the instruction is a lossy shift right, where the input being 1131 // shifted is the operand OpN of MI. If true, [LostB, LostE) is the range 1132 // of bit indices that are lost. 
bool RedundantInstrElimination::isLossyShiftRight(const MachineInstr &MI,
      unsigned OpN, unsigned &LostB, unsigned &LostE) {
  using namespace Hexagon;

  unsigned Opc = MI.getOpcode();
  // ImN: operand index of the immediate shift amount.
  // RegN: operand index of the register being shifted.
  unsigned ImN, RegN;
  switch (Opc) {
    // 64-bit shifts of the plain form "rdd = asr/lsr(rss, #u6)".
    case S2_asr_i_p:
    case S2_lsr_i_p:
      ImN = 2;
      RegN = 1;
      break;
    // 64-bit compound shifts: operands shifted by one position.
    case S2_asr_i_p_acc:
    case S2_asr_i_p_and:
    case S2_asr_i_p_nac:
    case S2_asr_i_p_or:
    case S2_lsr_i_p_acc:
    case S2_lsr_i_p_and:
    case S2_lsr_i_p_nac:
    case S2_lsr_i_p_or:
    case S2_lsr_i_p_xacc:
      ImN = 3;
      RegN = 2;
      break;
    // 32-bit shifts of the plain form.
    case S2_asr_i_r:
    case S2_lsr_i_r:
      ImN = 2;
      RegN = 1;
      break;
    // 32-bit compound shifts.
    case S4_andi_lsr_ri:
    case S4_ori_lsr_ri:
    case S4_addi_lsr_ri:
    case S4_subi_lsr_ri:
    case S2_asr_i_r_acc:
    case S2_asr_i_r_and:
    case S2_asr_i_r_nac:
    case S2_asr_i_r_or:
    case S2_lsr_i_r_acc:
    case S2_lsr_i_r_and:
    case S2_lsr_i_r_nac:
    case S2_lsr_i_r_or:
    case S2_lsr_i_r_xacc:
      ImN = 3;
      RegN = 2;
      break;

    default:
      return false;
  }

  // The queried operand must be the one that is being shifted.
  if (RegN != OpN)
    return false;

  assert(MI.getOperand(ImN).isImm());
  unsigned S = MI.getOperand(ImN).getImm();
  // A right shift by S discards the S least-significant bits.
  // NOTE(review): unlike isLossyShiftLeft, S == 0 is not rejected here;
  // it yields the empty lost range [0, 0) with a "true" result, which the
  // caller handles conservatively — confirm this asymmetry is intended.
  LostB = 0;
  LostE = S;
  return true;
}

// Calculate the bit vector that corresponds to the used bits of register Reg.
// The vector Bits has the same size, as the size of Reg in bits. If the cal-
// culation fails (i.e. the used bits are unknown), it returns false. Other-
// wise, it returns true and sets the corresponding bits in Bits.
bool RedundantInstrElimination::computeUsedBits(unsigned Reg, BitVector &Bits) {
  BitVector Used(Bits.size());
  RegisterSet Visited;
  std::vector<unsigned> Pending;
  Pending.push_back(Reg);

  // Worklist over Reg and every register Reg is copied into: uses through
  // PHIs and COPYs are transparent, so the real uses are the uses of the
  // destination registers of those instructions.
  for (unsigned i = 0; i < Pending.size(); ++i) {
    unsigned R = Pending[i];
    if (Visited.has(R))
      continue;
    Visited.insert(R);
    for (auto I = MRI.use_begin(R), E = MRI.use_end(); I != E; ++I) {
      BitTracker::RegisterRef UR = *I;
      // B: bit offset of the used subregister within the full register,
      // W: width of the used part.
      unsigned B, W;
      if (!HBS::getSubregMask(UR, B, W, MRI))
        return false;
      MachineInstr &UseI = *I->getParent();
      if (UseI.isPHI() || UseI.isCopy()) {
        // Transparent use: follow into the destination register.
        unsigned DefR = UseI.getOperand(0).getReg();
        if (!TargetRegisterInfo::isVirtualRegister(DefR))
          return false;
        Pending.push_back(DefR);
      } else {
        // Real use: ask the instruction which bits of this operand it
        // consumes. Any use we cannot analyze aborts the computation.
        if (!computeUsedBits(UseI, I.getOperandNo(), Used, B))
          return false;
      }
    }
  }
  // Publish the result only after every use was successfully analyzed.
  Bits |= Used;
  return true;
}

// Calculate the bits used by instruction MI in a register in operand OpN.
// Return true/false if the calculation succeeds/fails. If is succeeds, set
// used bits in Bits. This function does not reset any bits in Bits, so
// subsequent calls over different instructions will result in the union
// of the used bits in all these instructions.
// The register in question may be used with a sub-register, whereas Bits
// holds the bits for the entire register. To keep track of that, the
// argument Begin indicates where in Bits is the lowest-significant bit
// of the register used in operand OpN. For example, in instruction:
//   vreg1 = S2_lsr_i_r vreg2:isub_hi, 10
// the operand 1 is a 32-bit register, which happens to be a subregister
// of the 64-bit register vreg2, and that subregister starts at position 32.
// In this case Begin=32, since Bits[32] would be the lowest-significant bit
// of vreg2:isub_hi.
bool RedundantInstrElimination::computeUsedBits(const MachineInstr &MI,
      unsigned OpN, BitVector &Bits, uint16_t Begin) {
  unsigned Opc = MI.getOpcode();
  BitVector T(Bits.size());
  bool GotBits = HBS::getUsedBits(Opc, OpN, T, Begin, HII);
  // Even if we don't have bits yet, we could still provide some information
  // if the instruction is a lossy shift: the lost bits will be marked as
  // not used.
  unsigned LB, LE;
  if (isLossyShiftLeft(MI, OpN, LB, LE) || isLossyShiftRight(MI, OpN, LB, LE)) {
    assert(MI.getOperand(OpN).isReg());
    BitTracker::RegisterRef RR = MI.getOperand(OpN);
    const TargetRegisterClass *RC = HBS::getFinalVRegClass(RR, MRI);
    uint16_t Width = RC->getSize()*8;

    // With no information from getUsedBits, start from the conservative
    // "all bits used", then clear the bits the shift throws away.
    if (!GotBits)
      T.set(Begin, Begin+Width);
    assert(LB <= LE && LB < Width && LE <= Width);
    T.reset(Begin+LB, Begin+LE);
    GotBits = true;
  }
  // Accumulate into Bits (union semantics across multiple calls).
  if (GotBits)
    Bits |= T;
  return GotBits;
}

// Calculates the used bits in RD ("defined register"), and checks if these
// bits in RS ("used register") and RD are identical.
1271 bool RedundantInstrElimination::usedBitsEqual(BitTracker::RegisterRef RD, 1272 BitTracker::RegisterRef RS) { 1273 const BitTracker::RegisterCell &DC = BT.lookup(RD.Reg); 1274 const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg); 1275 1276 unsigned DB, DW; 1277 if (!HBS::getSubregMask(RD, DB, DW, MRI)) 1278 return false; 1279 unsigned SB, SW; 1280 if (!HBS::getSubregMask(RS, SB, SW, MRI)) 1281 return false; 1282 if (SW != DW) 1283 return false; 1284 1285 BitVector Used(DC.width()); 1286 if (!computeUsedBits(RD.Reg, Used)) 1287 return false; 1288 1289 for (unsigned i = 0; i != DW; ++i) 1290 if (Used[i+DB] && DC[DB+i] != SC[SB+i]) 1291 return false; 1292 return true; 1293 } 1294 1295 bool RedundantInstrElimination::processBlock(MachineBasicBlock &B, 1296 const RegisterSet&) { 1297 if (!BT.reached(&B)) 1298 return false; 1299 bool Changed = false; 1300 1301 for (auto I = B.begin(), E = B.end(), NextI = I; I != E; ++I) { 1302 NextI = std::next(I); 1303 MachineInstr *MI = &*I; 1304 1305 if (MI->getOpcode() == TargetOpcode::COPY) 1306 continue; 1307 if (MI->hasUnmodeledSideEffects() || MI->isInlineAsm()) 1308 continue; 1309 unsigned NumD = MI->getDesc().getNumDefs(); 1310 if (NumD != 1) 1311 continue; 1312 1313 BitTracker::RegisterRef RD = MI->getOperand(0); 1314 if (!BT.has(RD.Reg)) 1315 continue; 1316 const BitTracker::RegisterCell &DC = BT.lookup(RD.Reg); 1317 auto At = MI->isPHI() ? B.getFirstNonPHI() 1318 : MachineBasicBlock::iterator(MI); 1319 1320 // Find a source operand that is equal to the result. 
1321 for (auto &Op : MI->uses()) { 1322 if (!Op.isReg()) 1323 continue; 1324 BitTracker::RegisterRef RS = Op; 1325 if (!BT.has(RS.Reg)) 1326 continue; 1327 if (!HBS::isTransparentCopy(RD, RS, MRI)) 1328 continue; 1329 1330 unsigned BN, BW; 1331 if (!HBS::getSubregMask(RS, BN, BW, MRI)) 1332 continue; 1333 1334 const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg); 1335 if (!usedBitsEqual(RD, RS) && !HBS::isEqual(DC, 0, SC, BN, BW)) 1336 continue; 1337 1338 // If found, replace the instruction with a COPY. 1339 const DebugLoc &DL = MI->getDebugLoc(); 1340 const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI); 1341 unsigned NewR = MRI.createVirtualRegister(FRC); 1342 MachineInstr *CopyI = 1343 BuildMI(B, At, DL, HII.get(TargetOpcode::COPY), NewR) 1344 .addReg(RS.Reg, 0, RS.Sub); 1345 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI); 1346 // This pass can create copies between registers that don't have the 1347 // exact same values. Updating the tracker has to involve updating 1348 // all dependent cells. Example: 1349 // vreg1 = inst vreg2 ; vreg1 != vreg2, but used bits are equal 1350 // 1351 // vreg3 = copy vreg2 ; <- inserted 1352 // ... = vreg3 ; <- replaced from vreg2 1353 // Indirectly, we can create a "copy" between vreg1 and vreg2 even 1354 // though their exact values do not match. 1355 BT.visit(*CopyI); 1356 Changed = true; 1357 break; 1358 } 1359 } 1360 1361 return Changed; 1362 } 1363 1364 namespace { 1365 1366 // Recognize instructions that produce constant values known at compile-time. 1367 // Replace them with register definitions that load these constants directly. 
class ConstGeneration : public Transformation {
public:
  ConstGeneration(BitTracker &bt, const HexagonInstrInfo &hii,
        MachineRegisterInfo &mri)
    : Transformation(true), HII(hii), MRI(mri), BT(bt) {}

  bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
  // Check if MI is already one of the transfer-immediate instructions
  // that this transformation generates (no point re-processing those).
  static bool isTfrConst(const MachineInstr &MI);

private:
  // Emit a transfer-immediate of value C before At; returns the newly
  // created register, or 0 if no suitable opcode exists for RC and C.
  unsigned genTfrConst(const TargetRegisterClass *RC, int64_t C,
        MachineBasicBlock &B, MachineBasicBlock::iterator At, DebugLoc &DL);

  const HexagonInstrInfo &HII;
  MachineRegisterInfo &MRI;
  BitTracker &BT;
};

} // end anonymous namespace

bool ConstGeneration::isTfrConst(const MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  switch (Opc) {
    case Hexagon::A2_combineii:
    case Hexagon::A4_combineii:
    case Hexagon::A2_tfrsi:
    case Hexagon::A2_tfrpi:
    case Hexagon::PS_true:
    case Hexagon::PS_false:
    case Hexagon::CONST32:
    case Hexagon::CONST64:
      return true;
  }
  return false;
}

// Generate a transfer-immediate instruction that is appropriate for the
// register class and the actual value being transferred.
unsigned ConstGeneration::genTfrConst(const TargetRegisterClass *RC, int64_t C,
      MachineBasicBlock &B, MachineBasicBlock::iterator At, DebugLoc &DL) {
  unsigned Reg = MRI.createVirtualRegister(RC);
  // 32-bit integer register: a single tfrsi covers any value.
  if (RC == &Hexagon::IntRegsRegClass) {
    BuildMI(B, At, DL, HII.get(Hexagon::A2_tfrsi), Reg)
      .addImm(int32_t(C));
    return Reg;
  }

  if (RC == &Hexagon::DoubleRegsRegClass) {
    // Small values fit in a single tfrpi.
    if (isInt<8>(C)) {
      BuildMI(B, At, DL, HII.get(Hexagon::A2_tfrpi), Reg)
        .addImm(C);
      return Reg;
    }

    // If at least one 32-bit half is small, build the value from its
    // halves with a combine, avoiding a constant-extended CONST64.
    unsigned Lo = Lo_32(C), Hi = Hi_32(C);
    if (isInt<8>(Lo) || isInt<8>(Hi)) {
      unsigned Opc = isInt<8>(Lo) ? Hexagon::A2_combineii
                                  : Hexagon::A4_combineii;
      BuildMI(B, At, DL, HII.get(Opc), Reg)
        .addImm(int32_t(Hi))
        .addImm(int32_t(Lo));
      return Reg;
    }

    // General 64-bit constant.
    BuildMI(B, At, DL, HII.get(Hexagon::CONST64), Reg)
      .addImm(C);
    return Reg;
  }

  // Predicate register: only all-zeros and all-ones are representable.
  if (RC == &Hexagon::PredRegsRegClass) {
    unsigned Opc;
    if (C == 0)
      Opc = Hexagon::PS_false;
    else if ((C & 0xFF) == 0xFF)
      Opc = Hexagon::PS_true;
    else
      return 0;
    BuildMI(B, At, DL, HII.get(Opc), Reg);
    return Reg;
  }

  // Unsupported register class.
  return 0;
}

bool ConstGeneration::processBlock(MachineBasicBlock &B, const RegisterSet&) {
  if (!BT.reached(&B))
    return false;
  bool Changed = false;
  RegisterSet Defs;

  for (auto I = B.begin(), E = B.end(); I != E; ++I) {
    // Skip instructions that already are transfer-immediates.
    if (isTfrConst(*I))
      continue;
    Defs.clear();
    HBS::getInstrDefs(*I, Defs);
    // Only single-def instructions can be replaced by an immediate load.
    if (Defs.count() != 1)
      continue;
    unsigned DR = Defs.find_first();
    if (!TargetRegisterInfo::isVirtualRegister(DR))
      continue;
    uint64_t U;
    const BitTracker::RegisterCell &DRC = BT.lookup(DR);
    // If every bit of DR is a known constant, materialize it directly.
    if (HBS::getConst(DRC, 0, DRC.width(), U)) {
      int64_t C = U;
      DebugLoc DL = I->getDebugLoc();
      // Do not insert non-PHI instructions among PHIs.
      auto At = I->isPHI() ? B.getFirstNonPHI() : I;
      unsigned ImmReg = genTfrConst(MRI.getRegClass(DR), C, B, At, DL);
      if (ImmReg) {
        HBS::replaceReg(DR, ImmReg, MRI);
        BT.put(ImmReg, DRC);
        Changed = true;
      }
    }
  }
  return Changed;
}

namespace {

// Identify pairs of available registers which hold identical values.
// In such cases, only one of them needs to be calculated, the other one
// will be defined as a copy of the first.
class CopyGeneration : public Transformation {
public:
  CopyGeneration(BitTracker &bt, const HexagonInstrInfo &hii,
        const HexagonRegisterInfo &hri, MachineRegisterInfo &mri)
    : Transformation(true), HII(hii), HRI(hri), MRI(mri), BT(bt) {}

  bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;

private:
  // Find a register (or register:sub pair) in AVs whose value equals Inp.
  bool findMatch(const BitTracker::RegisterRef &Inp,
        BitTracker::RegisterRef &Out, const RegisterSet &AVs);

  const HexagonInstrInfo &HII;
  const HexagonRegisterInfo &HRI;
  MachineRegisterInfo &MRI;
  BitTracker &BT;
  // Registers already rewritten in this run; not eligible as copy sources.
  RegisterSet Forbidden;
};

// Eliminate register copies RD = RS, by replacing the uses of RD with
// with uses of RS.
class CopyPropagation : public Transformation {
public:
  CopyPropagation(const HexagonRegisterInfo &hri, MachineRegisterInfo &mri)
    : Transformation(false), HRI(hri), MRI(mri) {}

  bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;

  // Check if Opc behaves as a register copy. With NoConv set, also accept
  // opcodes that only propagateRegCopy knows how to decompose.
  static bool isCopyReg(unsigned Opc, bool NoConv);

private:
  bool propagateRegCopy(MachineInstr &MI);

  const HexagonRegisterInfo &HRI;
  MachineRegisterInfo &MRI;
};

} // end anonymous namespace

/// Check if there is a register in AVs that is identical to Inp. If so,
/// set Out to the found register. The output may be a pair Reg:Sub.
1531 bool CopyGeneration::findMatch(const BitTracker::RegisterRef &Inp, 1532 BitTracker::RegisterRef &Out, const RegisterSet &AVs) { 1533 if (!BT.has(Inp.Reg)) 1534 return false; 1535 const BitTracker::RegisterCell &InpRC = BT.lookup(Inp.Reg); 1536 auto *FRC = HBS::getFinalVRegClass(Inp, MRI); 1537 unsigned B, W; 1538 if (!HBS::getSubregMask(Inp, B, W, MRI)) 1539 return false; 1540 1541 for (unsigned R = AVs.find_first(); R; R = AVs.find_next(R)) { 1542 if (!BT.has(R) || Forbidden[R]) 1543 continue; 1544 const BitTracker::RegisterCell &RC = BT.lookup(R); 1545 unsigned RW = RC.width(); 1546 if (W == RW) { 1547 if (FRC != MRI.getRegClass(R)) 1548 continue; 1549 if (!HBS::isTransparentCopy(R, Inp, MRI)) 1550 continue; 1551 if (!HBS::isEqual(InpRC, B, RC, 0, W)) 1552 continue; 1553 Out.Reg = R; 1554 Out.Sub = 0; 1555 return true; 1556 } 1557 // Check if there is a super-register, whose part (with a subregister) 1558 // is equal to the input. 1559 // Only do double registers for now. 1560 if (W*2 != RW) 1561 continue; 1562 if (MRI.getRegClass(R) != &Hexagon::DoubleRegsRegClass) 1563 continue; 1564 1565 if (HBS::isEqual(InpRC, B, RC, 0, W)) 1566 Out.Sub = Hexagon::isub_lo; 1567 else if (HBS::isEqual(InpRC, B, RC, W, W)) 1568 Out.Sub = Hexagon::isub_hi; 1569 else 1570 continue; 1571 Out.Reg = R; 1572 if (HBS::isTransparentCopy(Out, Inp, MRI)) 1573 return true; 1574 } 1575 return false; 1576 } 1577 1578 bool CopyGeneration::processBlock(MachineBasicBlock &B, 1579 const RegisterSet &AVs) { 1580 if (!BT.reached(&B)) 1581 return false; 1582 RegisterSet AVB(AVs); 1583 bool Changed = false; 1584 RegisterSet Defs; 1585 1586 for (auto I = B.begin(), E = B.end(), NextI = I; I != E; 1587 ++I, AVB.insert(Defs)) { 1588 NextI = std::next(I); 1589 Defs.clear(); 1590 HBS::getInstrDefs(*I, Defs); 1591 1592 unsigned Opc = I->getOpcode(); 1593 if (CopyPropagation::isCopyReg(Opc, false) || 1594 ConstGeneration::isTfrConst(*I)) 1595 continue; 1596 1597 DebugLoc DL = I->getDebugLoc(); 1598 
auto At = I->isPHI() ? B.getFirstNonPHI() : I; 1599 1600 for (unsigned R = Defs.find_first(); R; R = Defs.find_next(R)) { 1601 BitTracker::RegisterRef MR; 1602 auto *FRC = HBS::getFinalVRegClass(R, MRI); 1603 1604 if (findMatch(R, MR, AVB)) { 1605 unsigned NewR = MRI.createVirtualRegister(FRC); 1606 BuildMI(B, At, DL, HII.get(TargetOpcode::COPY), NewR) 1607 .addReg(MR.Reg, 0, MR.Sub); 1608 BT.put(BitTracker::RegisterRef(NewR), BT.get(MR)); 1609 HBS::replaceReg(R, NewR, MRI); 1610 Forbidden.insert(R); 1611 continue; 1612 } 1613 1614 if (FRC == &Hexagon::DoubleRegsRegClass || 1615 FRC == &Hexagon::VecDblRegsRegClass || 1616 FRC == &Hexagon::VecDblRegs128BRegClass) { 1617 // Try to generate REG_SEQUENCE. 1618 unsigned SubLo = HRI.getHexagonSubRegIndex(FRC, Hexagon::ps_sub_lo); 1619 unsigned SubHi = HRI.getHexagonSubRegIndex(FRC, Hexagon::ps_sub_hi); 1620 BitTracker::RegisterRef TL = { R, SubLo }; 1621 BitTracker::RegisterRef TH = { R, SubHi }; 1622 BitTracker::RegisterRef ML, MH; 1623 if (findMatch(TL, ML, AVB) && findMatch(TH, MH, AVB)) { 1624 auto *FRC = HBS::getFinalVRegClass(R, MRI); 1625 unsigned NewR = MRI.createVirtualRegister(FRC); 1626 BuildMI(B, At, DL, HII.get(TargetOpcode::REG_SEQUENCE), NewR) 1627 .addReg(ML.Reg, 0, ML.Sub) 1628 .addImm(SubLo) 1629 .addReg(MH.Reg, 0, MH.Sub) 1630 .addImm(SubHi); 1631 BT.put(BitTracker::RegisterRef(NewR), BT.get(R)); 1632 HBS::replaceReg(R, NewR, MRI); 1633 Forbidden.insert(R); 1634 } 1635 } 1636 } 1637 } 1638 1639 return Changed; 1640 } 1641 1642 bool CopyPropagation::isCopyReg(unsigned Opc, bool NoConv) { 1643 switch (Opc) { 1644 case TargetOpcode::COPY: 1645 case TargetOpcode::REG_SEQUENCE: 1646 case Hexagon::A4_combineir: 1647 case Hexagon::A4_combineri: 1648 return true; 1649 case Hexagon::A2_tfr: 1650 case Hexagon::A2_tfrp: 1651 case Hexagon::A2_combinew: 1652 case Hexagon::V6_vcombine: 1653 case Hexagon::V6_vcombine_128B: 1654 return NoConv; 1655 default: 1656 break; 1657 } 1658 return false; 1659 } 1660 1661 bool 
CopyPropagation::propagateRegCopy(MachineInstr &MI) { 1662 bool Changed = false; 1663 unsigned Opc = MI.getOpcode(); 1664 BitTracker::RegisterRef RD = MI.getOperand(0); 1665 assert(MI.getOperand(0).getSubReg() == 0); 1666 1667 switch (Opc) { 1668 case TargetOpcode::COPY: 1669 case Hexagon::A2_tfr: 1670 case Hexagon::A2_tfrp: { 1671 BitTracker::RegisterRef RS = MI.getOperand(1); 1672 if (!HBS::isTransparentCopy(RD, RS, MRI)) 1673 break; 1674 if (RS.Sub != 0) 1675 Changed = HBS::replaceRegWithSub(RD.Reg, RS.Reg, RS.Sub, MRI); 1676 else 1677 Changed = HBS::replaceReg(RD.Reg, RS.Reg, MRI); 1678 break; 1679 } 1680 case TargetOpcode::REG_SEQUENCE: { 1681 BitTracker::RegisterRef SL, SH; 1682 if (HBS::parseRegSequence(MI, SL, SH, MRI)) { 1683 const TargetRegisterClass *RC = MRI.getRegClass(RD.Reg); 1684 unsigned SubLo = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_lo); 1685 unsigned SubHi = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_hi); 1686 Changed = HBS::replaceSubWithSub(RD.Reg, SubLo, SL.Reg, SL.Sub, MRI); 1687 Changed |= HBS::replaceSubWithSub(RD.Reg, SubHi, SH.Reg, SH.Sub, MRI); 1688 } 1689 break; 1690 } 1691 case Hexagon::A2_combinew: 1692 case Hexagon::V6_vcombine: 1693 case Hexagon::V6_vcombine_128B: { 1694 const TargetRegisterClass *RC = MRI.getRegClass(RD.Reg); 1695 unsigned SubLo = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_lo); 1696 unsigned SubHi = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_hi); 1697 BitTracker::RegisterRef RH = MI.getOperand(1), RL = MI.getOperand(2); 1698 Changed = HBS::replaceSubWithSub(RD.Reg, SubLo, RL.Reg, RL.Sub, MRI); 1699 Changed |= HBS::replaceSubWithSub(RD.Reg, SubHi, RH.Reg, RH.Sub, MRI); 1700 break; 1701 } 1702 case Hexagon::A4_combineir: 1703 case Hexagon::A4_combineri: { 1704 unsigned SrcX = (Opc == Hexagon::A4_combineir) ? 2 : 1; 1705 unsigned Sub = (Opc == Hexagon::A4_combineir) ? 
Hexagon::isub_lo 1706 : Hexagon::isub_hi; 1707 BitTracker::RegisterRef RS = MI.getOperand(SrcX); 1708 Changed = HBS::replaceSubWithSub(RD.Reg, Sub, RS.Reg, RS.Sub, MRI); 1709 break; 1710 } 1711 } 1712 return Changed; 1713 } 1714 1715 bool CopyPropagation::processBlock(MachineBasicBlock &B, const RegisterSet&) { 1716 std::vector<MachineInstr*> Instrs; 1717 for (auto I = B.rbegin(), E = B.rend(); I != E; ++I) 1718 Instrs.push_back(&*I); 1719 1720 bool Changed = false; 1721 for (auto I : Instrs) { 1722 unsigned Opc = I->getOpcode(); 1723 if (!CopyPropagation::isCopyReg(Opc, true)) 1724 continue; 1725 Changed |= propagateRegCopy(*I); 1726 } 1727 1728 return Changed; 1729 } 1730 1731 namespace { 1732 1733 // Recognize patterns that can be simplified and replace them with the 1734 // simpler forms. 1735 // This is by no means complete 1736 class BitSimplification : public Transformation { 1737 public: 1738 BitSimplification(BitTracker &bt, const HexagonInstrInfo &hii, 1739 const HexagonRegisterInfo &hri, MachineRegisterInfo &mri, 1740 MachineFunction &mf) 1741 : Transformation(true), HII(hii), HRI(hri), MRI(mri), MF(mf), BT(bt) {} 1742 1743 bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override; 1744 1745 private: 1746 struct RegHalf : public BitTracker::RegisterRef { 1747 bool Low; // Low/High halfword. 
1748 }; 1749 1750 bool matchHalf(unsigned SelfR, const BitTracker::RegisterCell &RC, 1751 unsigned B, RegHalf &RH); 1752 bool validateReg(BitTracker::RegisterRef R, unsigned Opc, unsigned OpNum); 1753 1754 bool matchPackhl(unsigned SelfR, const BitTracker::RegisterCell &RC, 1755 BitTracker::RegisterRef &Rs, BitTracker::RegisterRef &Rt); 1756 unsigned getCombineOpcode(bool HLow, bool LLow); 1757 1758 bool genStoreUpperHalf(MachineInstr *MI); 1759 bool genStoreImmediate(MachineInstr *MI); 1760 bool genPackhl(MachineInstr *MI, BitTracker::RegisterRef RD, 1761 const BitTracker::RegisterCell &RC); 1762 bool genExtractHalf(MachineInstr *MI, BitTracker::RegisterRef RD, 1763 const BitTracker::RegisterCell &RC); 1764 bool genCombineHalf(MachineInstr *MI, BitTracker::RegisterRef RD, 1765 const BitTracker::RegisterCell &RC); 1766 bool genExtractLow(MachineInstr *MI, BitTracker::RegisterRef RD, 1767 const BitTracker::RegisterCell &RC); 1768 bool simplifyTstbit(MachineInstr *MI, BitTracker::RegisterRef RD, 1769 const BitTracker::RegisterCell &RC); 1770 1771 const HexagonInstrInfo &HII; 1772 const HexagonRegisterInfo &HRI; 1773 MachineRegisterInfo &MRI; 1774 MachineFunction &MF; 1775 BitTracker &BT; 1776 }; 1777 1778 } // end anonymous namespace 1779 1780 // Check if the bits [B..B+16) in register cell RC form a valid halfword, 1781 // i.e. [0..16), [16..32), etc. of some register. If so, return true and 1782 // set the information about the found register in RH. 1783 bool BitSimplification::matchHalf(unsigned SelfR, 1784 const BitTracker::RegisterCell &RC, unsigned B, RegHalf &RH) { 1785 // XXX This could be searching in the set of available registers, in case 1786 // the match is not exact. 1787 1788 // Match 16-bit chunks, where the RC[B..B+15] references exactly one 1789 // register and all the bits B..B+15 match between RC and the register. 1790 // This is meant to match "v1[0-15]", where v1 = { [0]:0 [1-15]:v1... }, 1791 // and RC = { [0]:0 [1-15]:v1[1-15]... }. 
1792 bool Low = false; 1793 unsigned I = B; 1794 while (I < B+16 && RC[I].num()) 1795 I++; 1796 if (I == B+16) 1797 return false; 1798 1799 unsigned Reg = RC[I].RefI.Reg; 1800 unsigned P = RC[I].RefI.Pos; // The RefI.Pos will be advanced by I-B. 1801 if (P < I-B) 1802 return false; 1803 unsigned Pos = P - (I-B); 1804 1805 if (Reg == 0 || Reg == SelfR) // Don't match "self". 1806 return false; 1807 if (!TargetRegisterInfo::isVirtualRegister(Reg)) 1808 return false; 1809 if (!BT.has(Reg)) 1810 return false; 1811 1812 const BitTracker::RegisterCell &SC = BT.lookup(Reg); 1813 if (Pos+16 > SC.width()) 1814 return false; 1815 1816 for (unsigned i = 0; i < 16; ++i) { 1817 const BitTracker::BitValue &RV = RC[i+B]; 1818 if (RV.Type == BitTracker::BitValue::Ref) { 1819 if (RV.RefI.Reg != Reg) 1820 return false; 1821 if (RV.RefI.Pos != i+Pos) 1822 return false; 1823 continue; 1824 } 1825 if (RC[i+B] != SC[i+Pos]) 1826 return false; 1827 } 1828 1829 unsigned Sub = 0; 1830 switch (Pos) { 1831 case 0: 1832 Sub = Hexagon::isub_lo; 1833 Low = true; 1834 break; 1835 case 16: 1836 Sub = Hexagon::isub_lo; 1837 Low = false; 1838 break; 1839 case 32: 1840 Sub = Hexagon::isub_hi; 1841 Low = true; 1842 break; 1843 case 48: 1844 Sub = Hexagon::isub_hi; 1845 Low = false; 1846 break; 1847 default: 1848 return false; 1849 } 1850 1851 RH.Reg = Reg; 1852 RH.Sub = Sub; 1853 RH.Low = Low; 1854 // If the subregister is not valid with the register, set it to 0. 1855 if (!HBS::getFinalVRegClass(RH, MRI)) 1856 RH.Sub = 0; 1857 1858 return true; 1859 } 1860 1861 bool BitSimplification::validateReg(BitTracker::RegisterRef R, unsigned Opc, 1862 unsigned OpNum) { 1863 auto *OpRC = HII.getRegClass(HII.get(Opc), OpNum, &HRI, MF); 1864 auto *RRC = HBS::getFinalVRegClass(R, MRI); 1865 return OpRC->hasSubClassEq(RRC); 1866 } 1867 1868 // Check if RC matches the pattern of a S2_packhl. If so, return true and 1869 // set the inputs Rs and Rt. 
bool BitSimplification::matchPackhl(unsigned SelfR,
      const BitTracker::RegisterCell &RC, BitTracker::RegisterRef &Rs,
      BitTracker::RegisterRef &Rt) {
  RegHalf L1, H1, L2, H2;

  // The 64-bit result must consist of four valid halfwords.
  if (!matchHalf(SelfR, RC, 0, L2)  || !matchHalf(SelfR, RC, 16, L1))
    return false;
  if (!matchHalf(SelfR, RC, 32, H2) || !matchHalf(SelfR, RC, 48, H1))
    return false;

  // Rs = H1.L1, Rt = H2.L2
  if (H1.Reg != L1.Reg || H1.Sub != L1.Sub || H1.Low || !L1.Low)
    return false;
  if (H2.Reg != L2.Reg || H2.Sub != L2.Sub || H2.Low || !L2.Low)
    return false;

  Rs = H1;
  Rt = H2;
  return true;
}

// Select the A2_combine opcode matching the given halfword selections
// (high-operand low/high, low-operand low/high).
unsigned BitSimplification::getCombineOpcode(bool HLow, bool LLow) {
  return HLow ? LLow ? Hexagon::A2_combine_ll
                     : Hexagon::A2_combine_lh
              : LLow ? Hexagon::A2_combine_hl
                     : Hexagon::A2_combine_hh;
}

// If MI stores the upper halfword of a register (potentially obtained via
// shifts or extracts), replace it with a storerf instruction. This could
// cause the "extraction" code to become dead.
bool BitSimplification::genStoreUpperHalf(MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();
  if (Opc != Hexagon::S2_storerh_io)
    return false;

  MachineOperand &ValOp = MI->getOperand(2);
  BitTracker::RegisterRef RS = ValOp;
  if (!BT.has(RS.Reg))
    return false;
  const BitTracker::RegisterCell &RC = BT.lookup(RS.Reg);
  RegHalf H;
  // The stored value must be the high halfword of some other register.
  if (!matchHalf(0, RC, 0, H))
    return false;
  if (H.Low)
    return false;
  // S2_storerf_io stores the upper half of its source operand directly.
  MI->setDesc(HII.get(Hexagon::S2_storerf_io));
  ValOp.setReg(H.Reg);
  ValOp.setSubReg(H.Sub);
  return true;
}

// If MI stores a value known at compile-time, and the value is within a range
// that avoids using constant-extenders, replace it with a store-immediate.
1924 bool BitSimplification::genStoreImmediate(MachineInstr *MI) { 1925 unsigned Opc = MI->getOpcode(); 1926 unsigned Align = 0; 1927 switch (Opc) { 1928 case Hexagon::S2_storeri_io: 1929 Align++; 1930 case Hexagon::S2_storerh_io: 1931 Align++; 1932 case Hexagon::S2_storerb_io: 1933 break; 1934 default: 1935 return false; 1936 } 1937 1938 // Avoid stores to frame-indices (due to an unknown offset). 1939 if (!MI->getOperand(0).isReg()) 1940 return false; 1941 MachineOperand &OffOp = MI->getOperand(1); 1942 if (!OffOp.isImm()) 1943 return false; 1944 1945 int64_t Off = OffOp.getImm(); 1946 // Offset is u6:a. Sadly, there is no isShiftedUInt(n,x). 1947 if (!isUIntN(6+Align, Off) || (Off & ((1<<Align)-1))) 1948 return false; 1949 // Source register: 1950 BitTracker::RegisterRef RS = MI->getOperand(2); 1951 if (!BT.has(RS.Reg)) 1952 return false; 1953 const BitTracker::RegisterCell &RC = BT.lookup(RS.Reg); 1954 uint64_t U; 1955 if (!HBS::getConst(RC, 0, RC.width(), U)) 1956 return false; 1957 1958 // Only consider 8-bit values to avoid constant-extenders. 1959 int V; 1960 switch (Opc) { 1961 case Hexagon::S2_storerb_io: 1962 V = int8_t(U); 1963 break; 1964 case Hexagon::S2_storerh_io: 1965 V = int16_t(U); 1966 break; 1967 case Hexagon::S2_storeri_io: 1968 V = int32_t(U); 1969 break; 1970 } 1971 if (!isInt<8>(V)) 1972 return false; 1973 1974 MI->RemoveOperand(2); 1975 switch (Opc) { 1976 case Hexagon::S2_storerb_io: 1977 MI->setDesc(HII.get(Hexagon::S4_storeirb_io)); 1978 break; 1979 case Hexagon::S2_storerh_io: 1980 MI->setDesc(HII.get(Hexagon::S4_storeirh_io)); 1981 break; 1982 case Hexagon::S2_storeri_io: 1983 MI->setDesc(HII.get(Hexagon::S4_storeiri_io)); 1984 break; 1985 } 1986 MI->addOperand(MachineOperand::CreateImm(V)); 1987 return true; 1988 } 1989 1990 // If MI is equivalent o S2_packhl, generate the S2_packhl. MI could be the 1991 // last instruction in a sequence that results in something equivalent to 1992 // the pack-halfwords. 
The intent is to cause the entire sequence to become 1993 // dead. 1994 bool BitSimplification::genPackhl(MachineInstr *MI, 1995 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) { 1996 unsigned Opc = MI->getOpcode(); 1997 if (Opc == Hexagon::S2_packhl) 1998 return false; 1999 BitTracker::RegisterRef Rs, Rt; 2000 if (!matchPackhl(RD.Reg, RC, Rs, Rt)) 2001 return false; 2002 if (!validateReg(Rs, Hexagon::S2_packhl, 1) || 2003 !validateReg(Rt, Hexagon::S2_packhl, 2)) 2004 return false; 2005 2006 MachineBasicBlock &B = *MI->getParent(); 2007 unsigned NewR = MRI.createVirtualRegister(&Hexagon::DoubleRegsRegClass); 2008 DebugLoc DL = MI->getDebugLoc(); 2009 auto At = MI->isPHI() ? B.getFirstNonPHI() 2010 : MachineBasicBlock::iterator(MI); 2011 BuildMI(B, At, DL, HII.get(Hexagon::S2_packhl), NewR) 2012 .addReg(Rs.Reg, 0, Rs.Sub) 2013 .addReg(Rt.Reg, 0, Rt.Sub); 2014 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI); 2015 BT.put(BitTracker::RegisterRef(NewR), RC); 2016 return true; 2017 } 2018 2019 // If MI produces halfword of the input in the low half of the output, 2020 // replace it with zero-extend or extractu. 2021 bool BitSimplification::genExtractHalf(MachineInstr *MI, 2022 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) { 2023 RegHalf L; 2024 // Check for halfword in low 16 bits, zeros elsewhere. 2025 if (!matchHalf(RD.Reg, RC, 0, L) || !HBS::isZero(RC, 16, 16)) 2026 return false; 2027 2028 unsigned Opc = MI->getOpcode(); 2029 MachineBasicBlock &B = *MI->getParent(); 2030 DebugLoc DL = MI->getDebugLoc(); 2031 2032 // Prefer zxth, since zxth can go in any slot, while extractu only in 2033 // slots 2 and 3. 2034 unsigned NewR = 0; 2035 auto At = MI->isPHI() ? 
B.getFirstNonPHI() 2036 : MachineBasicBlock::iterator(MI); 2037 if (L.Low && Opc != Hexagon::A2_zxth) { 2038 if (validateReg(L, Hexagon::A2_zxth, 1)) { 2039 NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); 2040 BuildMI(B, At, DL, HII.get(Hexagon::A2_zxth), NewR) 2041 .addReg(L.Reg, 0, L.Sub); 2042 } 2043 } else if (!L.Low && Opc != Hexagon::S2_lsr_i_r) { 2044 if (validateReg(L, Hexagon::S2_lsr_i_r, 1)) { 2045 NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); 2046 BuildMI(B, MI, DL, HII.get(Hexagon::S2_lsr_i_r), NewR) 2047 .addReg(L.Reg, 0, L.Sub) 2048 .addImm(16); 2049 } 2050 } 2051 if (NewR == 0) 2052 return false; 2053 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI); 2054 BT.put(BitTracker::RegisterRef(NewR), RC); 2055 return true; 2056 } 2057 2058 // If MI is equivalent to a combine(.L/.H, .L/.H) replace with with the 2059 // combine. 2060 bool BitSimplification::genCombineHalf(MachineInstr *MI, 2061 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) { 2062 RegHalf L, H; 2063 // Check for combine h/l 2064 if (!matchHalf(RD.Reg, RC, 0, L) || !matchHalf(RD.Reg, RC, 16, H)) 2065 return false; 2066 // Do nothing if this is just a reg copy. 2067 if (L.Reg == H.Reg && L.Sub == H.Sub && !H.Low && L.Low) 2068 return false; 2069 2070 unsigned Opc = MI->getOpcode(); 2071 unsigned COpc = getCombineOpcode(H.Low, L.Low); 2072 if (COpc == Opc) 2073 return false; 2074 if (!validateReg(H, COpc, 1) || !validateReg(L, COpc, 2)) 2075 return false; 2076 2077 MachineBasicBlock &B = *MI->getParent(); 2078 DebugLoc DL = MI->getDebugLoc(); 2079 unsigned NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); 2080 auto At = MI->isPHI() ? 
B.getFirstNonPHI() 2081 : MachineBasicBlock::iterator(MI); 2082 BuildMI(B, At, DL, HII.get(COpc), NewR) 2083 .addReg(H.Reg, 0, H.Sub) 2084 .addReg(L.Reg, 0, L.Sub); 2085 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI); 2086 BT.put(BitTracker::RegisterRef(NewR), RC); 2087 return true; 2088 } 2089 2090 // If MI resets high bits of a register and keeps the lower ones, replace it 2091 // with zero-extend byte/half, and-immediate, or extractu, as appropriate. 2092 bool BitSimplification::genExtractLow(MachineInstr *MI, 2093 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) { 2094 unsigned Opc = MI->getOpcode(); 2095 switch (Opc) { 2096 case Hexagon::A2_zxtb: 2097 case Hexagon::A2_zxth: 2098 case Hexagon::S2_extractu: 2099 return false; 2100 } 2101 if (Opc == Hexagon::A2_andir && MI->getOperand(2).isImm()) { 2102 int32_t Imm = MI->getOperand(2).getImm(); 2103 if (isInt<10>(Imm)) 2104 return false; 2105 } 2106 2107 if (MI->hasUnmodeledSideEffects() || MI->isInlineAsm()) 2108 return false; 2109 unsigned W = RC.width(); 2110 while (W > 0 && RC[W-1].is(0)) 2111 W--; 2112 if (W == 0 || W == RC.width()) 2113 return false; 2114 unsigned NewOpc = (W == 8) ? Hexagon::A2_zxtb 2115 : (W == 16) ? Hexagon::A2_zxth 2116 : (W < 10) ? Hexagon::A2_andir 2117 : Hexagon::S2_extractu; 2118 MachineBasicBlock &B = *MI->getParent(); 2119 DebugLoc DL = MI->getDebugLoc(); 2120 2121 for (auto &Op : MI->uses()) { 2122 if (!Op.isReg()) 2123 continue; 2124 BitTracker::RegisterRef RS = Op; 2125 if (!BT.has(RS.Reg)) 2126 continue; 2127 const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg); 2128 unsigned BN, BW; 2129 if (!HBS::getSubregMask(RS, BN, BW, MRI)) 2130 continue; 2131 if (BW < W || !HBS::isEqual(RC, 0, SC, BN, W)) 2132 continue; 2133 if (!validateReg(RS, NewOpc, 1)) 2134 continue; 2135 2136 unsigned NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); 2137 auto At = MI->isPHI() ? 
B.getFirstNonPHI() 2138 : MachineBasicBlock::iterator(MI); 2139 auto MIB = BuildMI(B, At, DL, HII.get(NewOpc), NewR) 2140 .addReg(RS.Reg, 0, RS.Sub); 2141 if (NewOpc == Hexagon::A2_andir) 2142 MIB.addImm((1 << W) - 1); 2143 else if (NewOpc == Hexagon::S2_extractu) 2144 MIB.addImm(W).addImm(0); 2145 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI); 2146 BT.put(BitTracker::RegisterRef(NewR), RC); 2147 return true; 2148 } 2149 return false; 2150 } 2151 2152 // Check for tstbit simplification opportunity, where the bit being checked 2153 // can be tracked back to another register. For example: 2154 // vreg2 = S2_lsr_i_r vreg1, 5 2155 // vreg3 = S2_tstbit_i vreg2, 0 2156 // => 2157 // vreg3 = S2_tstbit_i vreg1, 5 2158 bool BitSimplification::simplifyTstbit(MachineInstr *MI, 2159 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) { 2160 unsigned Opc = MI->getOpcode(); 2161 if (Opc != Hexagon::S2_tstbit_i) 2162 return false; 2163 2164 unsigned BN = MI->getOperand(2).getImm(); 2165 BitTracker::RegisterRef RS = MI->getOperand(1); 2166 unsigned F, W; 2167 DebugLoc DL = MI->getDebugLoc(); 2168 if (!BT.has(RS.Reg) || !HBS::getSubregMask(RS, F, W, MRI)) 2169 return false; 2170 MachineBasicBlock &B = *MI->getParent(); 2171 auto At = MI->isPHI() ? B.getFirstNonPHI() 2172 : MachineBasicBlock::iterator(MI); 2173 2174 const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg); 2175 const BitTracker::BitValue &V = SC[F+BN]; 2176 if (V.Type == BitTracker::BitValue::Ref && V.RefI.Reg != RS.Reg) { 2177 const TargetRegisterClass *TC = MRI.getRegClass(V.RefI.Reg); 2178 // Need to map V.RefI.Reg to a 32-bit register, i.e. if it is 2179 // a double register, need to use a subregister and adjust bit 2180 // number. 
2181 unsigned P = std::numeric_limits<unsigned>::max(); 2182 BitTracker::RegisterRef RR(V.RefI.Reg, 0); 2183 if (TC == &Hexagon::DoubleRegsRegClass) { 2184 P = V.RefI.Pos; 2185 RR.Sub = Hexagon::isub_lo; 2186 if (P >= 32) { 2187 P -= 32; 2188 RR.Sub = Hexagon::isub_hi; 2189 } 2190 } else if (TC == &Hexagon::IntRegsRegClass) { 2191 P = V.RefI.Pos; 2192 } 2193 if (P != std::numeric_limits<unsigned>::max()) { 2194 unsigned NewR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass); 2195 BuildMI(B, At, DL, HII.get(Hexagon::S2_tstbit_i), NewR) 2196 .addReg(RR.Reg, 0, RR.Sub) 2197 .addImm(P); 2198 HBS::replaceReg(RD.Reg, NewR, MRI); 2199 BT.put(NewR, RC); 2200 return true; 2201 } 2202 } else if (V.is(0) || V.is(1)) { 2203 unsigned NewR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass); 2204 unsigned NewOpc = V.is(0) ? Hexagon::PS_false : Hexagon::PS_true; 2205 BuildMI(B, At, DL, HII.get(NewOpc), NewR); 2206 HBS::replaceReg(RD.Reg, NewR, MRI); 2207 return true; 2208 } 2209 2210 return false; 2211 } 2212 2213 bool BitSimplification::processBlock(MachineBasicBlock &B, 2214 const RegisterSet &AVs) { 2215 if (!BT.reached(&B)) 2216 return false; 2217 bool Changed = false; 2218 RegisterSet AVB = AVs; 2219 RegisterSet Defs; 2220 2221 for (auto I = B.begin(), E = B.end(); I != E; ++I, AVB.insert(Defs)) { 2222 MachineInstr *MI = &*I; 2223 Defs.clear(); 2224 HBS::getInstrDefs(*MI, Defs); 2225 2226 unsigned Opc = MI->getOpcode(); 2227 if (Opc == TargetOpcode::COPY || Opc == TargetOpcode::REG_SEQUENCE) 2228 continue; 2229 2230 if (MI->mayStore()) { 2231 bool T = genStoreUpperHalf(MI); 2232 T = T || genStoreImmediate(MI); 2233 Changed |= T; 2234 continue; 2235 } 2236 2237 if (Defs.count() != 1) 2238 continue; 2239 const MachineOperand &Op0 = MI->getOperand(0); 2240 if (!Op0.isReg() || !Op0.isDef()) 2241 continue; 2242 BitTracker::RegisterRef RD = Op0; 2243 if (!BT.has(RD.Reg)) 2244 continue; 2245 const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI); 2246 const 
BitTracker::RegisterCell &RC = BT.lookup(RD.Reg); 2247 2248 if (FRC->getID() == Hexagon::DoubleRegsRegClassID) { 2249 bool T = genPackhl(MI, RD, RC); 2250 Changed |= T; 2251 continue; 2252 } 2253 2254 if (FRC->getID() == Hexagon::IntRegsRegClassID) { 2255 bool T = genExtractHalf(MI, RD, RC); 2256 T = T || genCombineHalf(MI, RD, RC); 2257 T = T || genExtractLow(MI, RD, RC); 2258 Changed |= T; 2259 continue; 2260 } 2261 2262 if (FRC->getID() == Hexagon::PredRegsRegClassID) { 2263 bool T = simplifyTstbit(MI, RD, RC); 2264 Changed |= T; 2265 continue; 2266 } 2267 } 2268 return Changed; 2269 } 2270 2271 bool HexagonBitSimplify::runOnMachineFunction(MachineFunction &MF) { 2272 if (skipFunction(*MF.getFunction())) 2273 return false; 2274 2275 auto &HST = MF.getSubtarget<HexagonSubtarget>(); 2276 auto &HRI = *HST.getRegisterInfo(); 2277 auto &HII = *HST.getInstrInfo(); 2278 2279 MDT = &getAnalysis<MachineDominatorTree>(); 2280 MachineRegisterInfo &MRI = MF.getRegInfo(); 2281 bool Changed; 2282 2283 Changed = DeadCodeElimination(MF, *MDT).run(); 2284 2285 const HexagonEvaluator HE(HRI, MRI, HII, MF); 2286 BitTracker BT(HE, MF); 2287 DEBUG(BT.trace(true)); 2288 BT.run(); 2289 2290 MachineBasicBlock &Entry = MF.front(); 2291 2292 RegisterSet AIG; // Available registers for IG. 2293 ConstGeneration ImmG(BT, HII, MRI); 2294 Changed |= visitBlock(Entry, ImmG, AIG); 2295 2296 RegisterSet ARE; // Available registers for RIE. 2297 RedundantInstrElimination RIE(BT, HII, MRI); 2298 bool Ried = visitBlock(Entry, RIE, ARE); 2299 if (Ried) { 2300 Changed = true; 2301 BT.run(); 2302 } 2303 2304 RegisterSet ACG; // Available registers for CG. 2305 CopyGeneration CopyG(BT, HII, HRI, MRI); 2306 Changed |= visitBlock(Entry, CopyG, ACG); 2307 2308 RegisterSet ACP; // Available registers for CP. 
2309 CopyPropagation CopyP(HRI, MRI); 2310 Changed |= visitBlock(Entry, CopyP, ACP); 2311 2312 Changed = DeadCodeElimination(MF, *MDT).run() || Changed; 2313 2314 BT.run(); 2315 RegisterSet ABS; // Available registers for BS. 2316 BitSimplification BitS(BT, HII, HRI, MRI, MF); 2317 Changed |= visitBlock(Entry, BitS, ABS); 2318 2319 Changed = DeadCodeElimination(MF, *MDT).run() || Changed; 2320 2321 if (Changed) { 2322 for (auto &B : MF) 2323 for (auto &I : B) 2324 I.clearKillInfo(); 2325 DeadCodeElimination(MF, *MDT).run(); 2326 } 2327 return Changed; 2328 } 2329 2330 // Recognize loops where the code at the end of the loop matches the code 2331 // before the entry of the loop, and the matching code is such that is can 2332 // be simplified. This pass relies on the bit simplification above and only 2333 // prepares code in a way that can be handled by the bit simplifcation. 2334 // 2335 // This is the motivating testcase (and explanation): 2336 // 2337 // { 2338 // loop0(.LBB0_2, r1) // %for.body.preheader 2339 // r5:4 = memd(r0++#8) 2340 // } 2341 // { 2342 // r3 = lsr(r4, #16) 2343 // r7:6 = combine(r5, r5) 2344 // } 2345 // { 2346 // r3 = insert(r5, #16, #16) 2347 // r7:6 = vlsrw(r7:6, #16) 2348 // } 2349 // .LBB0_2: 2350 // { 2351 // memh(r2+#4) = r5 2352 // memh(r2+#6) = r6 # R6 is really R5.H 2353 // } 2354 // { 2355 // r2 = add(r2, #8) 2356 // memh(r2+#0) = r4 2357 // memh(r2+#2) = r3 # R3 is really R4.H 2358 // } 2359 // { 2360 // r5:4 = memd(r0++#8) 2361 // } 2362 // { # "Shuffling" code that sets up R3 and R6 2363 // r3 = lsr(r4, #16) # so that their halves can be stored in the 2364 // r7:6 = combine(r5, r5) # next iteration. This could be folded into 2365 // } # the stores if the code was at the beginning 2366 // { # of the loop iteration. Since the same code 2367 // r3 = insert(r5, #16, #16) # precedes the loop, it can actually be moved 2368 // r7:6 = vlsrw(r7:6, #16) # there. 
// }:endloop0
//
//
// The outcome:
//
// {
//   loop0(.LBB0_2, r1)
//   r5:4 = memd(r0++#8)
// }
// .LBB0_2:
// {
//   memh(r2+#4) = r5
//   memh(r2+#6) = r5.h
// }
// {
//   r2 = add(r2, #8)
//   memh(r2+#0) = r4
//   memh(r2+#2) = r4.h
// }
// {
//   r5:4 = memd(r0++#8)
// }:endloop0

namespace llvm {

  FunctionPass *createHexagonLoopRescheduling();
  void initializeHexagonLoopReschedulingPass(PassRegistry&);

} // end namespace llvm

namespace {

  // Machine-function pass that moves chains of "bit-shuffling" instructions
  // from the bottom of a single-block loop to its top, provided equivalent
  // code already exists in the preheader (see motivating example above).
  class HexagonLoopRescheduling : public MachineFunctionPass {
  public:
    static char ID;

    HexagonLoopRescheduling() : MachineFunctionPass(ID),
        HII(nullptr), HRI(nullptr), MRI(nullptr), BTP(nullptr) {
      initializeHexagonLoopReschedulingPass(*PassRegistry::getPassRegistry());
    }

    bool runOnMachineFunction(MachineFunction &MF) override;

  private:
    const HexagonInstrInfo *HII;
    const HexagonRegisterInfo *HRI;
    MachineRegisterInfo *MRI;
    BitTracker *BTP;   // Bit-tracker results for the current function.

    // A candidate loop: LB is the (single) loop block, PB its preheader,
    // EB an epilog block suitable for copy-outs (may be null).
    struct LoopCand {
      LoopCand(MachineBasicBlock *lb, MachineBasicBlock *pb,
            MachineBasicBlock *eb) : LB(lb), PB(pb), EB(eb) {}
      MachineBasicBlock *LB, *PB, *EB;
    };
    typedef std::vector<MachineInstr*> InstrList;
    // A chain of dependent shuffling instructions, with a single live
    // input register (Inp) and a single live output register (Out).
    struct InstrGroup {
      BitTracker::RegisterRef Inp, Out;
      InstrList Ins;
    };
    // Decomposition of a PHI in block B into its loop-carried and
    // preheader incoming values.
    struct PhiInfo {
      PhiInfo(MachineInstr &P, MachineBasicBlock &B);
      unsigned DefR;
      BitTracker::RegisterRef LR, PR; // Loop Register, Preheader Register
      MachineBasicBlock *LB, *PB;     // Loop Block, Preheader Block
    };

    static unsigned getDefReg(const MachineInstr *MI);
    bool isConst(unsigned Reg) const;
    bool isBitShuffle(const MachineInstr *MI, unsigned DefR) const;
    bool isStoreInput(const MachineInstr *MI, unsigned DefR) const;
    bool isShuffleOf(unsigned OutR, unsigned InpR) const;
    bool isSameShuffle(unsigned OutR1, unsigned InpR1, unsigned OutR2,
        unsigned &InpR2) const;
    void moveGroup(InstrGroup &G, MachineBasicBlock &LB, MachineBasicBlock &PB,
        MachineBasicBlock::iterator At, unsigned OldPhiR, unsigned NewPredR);
    bool processLoop(LoopCand &C);
  };

} // end anonymous namespace

char HexagonLoopRescheduling::ID = 0;

INITIALIZE_PASS(HexagonLoopRescheduling, "hexagon-loop-resched",
  "Hexagon Loop Rescheduling", false, false)

// Record, for PHI P in loop block B, the loop-carried value (LR) and the
// value coming from the (non-loop) predecessor (PR/PB).
HexagonLoopRescheduling::PhiInfo::PhiInfo(MachineInstr &P,
      MachineBasicBlock &B) {
  DefR = HexagonLoopRescheduling::getDefReg(&P);
  LB = &B;
  PB = nullptr;
  // PHI operands come in (value, block) pairs starting at index 1.
  for (unsigned i = 1, n = P.getNumOperands(); i < n; i += 2) {
    const MachineOperand &OpB = P.getOperand(i+1);
    if (OpB.getMBB() == &B) {
      LR = P.getOperand(i);
      continue;
    }
    PB = OpB.getMBB();
    PR = P.getOperand(i);
  }
}

// Return the single register defined by MI, or 0 if MI defines none or
// more than one.
unsigned HexagonLoopRescheduling::getDefReg(const MachineInstr *MI) {
  RegisterSet Defs;
  HBS::getInstrDefs(*MI, Defs);
  if (Defs.count() != 1)
    return 0;
  return Defs.find_first();
}

// True if every bit of Reg is a known constant (0 or 1) per the bit
// tracker.
bool HexagonLoopRescheduling::isConst(unsigned Reg) const {
  if (!BTP->has(Reg))
    return false;
  const BitTracker::RegisterCell &RC = BTP->lookup(Reg);
  for (unsigned i = 0, w = RC.width(); i < w; ++i) {
    const BitTracker::BitValue &V = RC[i];
    if (!V.is(0) && !V.is(1))
      return false;
  }
  return true;
}

// True if MI is one of the opcodes this pass treats as "bit shuffling":
// copies, immediate shifts, inserts, logical ops and combines.
bool HexagonLoopRescheduling::isBitShuffle(const MachineInstr *MI,
      unsigned DefR) const {
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
    case TargetOpcode::COPY:
    case Hexagon::S2_lsr_i_r:
    case Hexagon::S2_asr_i_r:
    case Hexagon::S2_asl_i_r:
    case Hexagon::S2_lsr_i_p:
    case Hexagon::S2_asr_i_p:
    case Hexagon::S2_asl_i_p:
    case Hexagon::S2_insert:
    case Hexagon::A2_or:
    case Hexagon::A2_orp:
    case Hexagon::A2_and:
    case Hexagon::A2_andp:
    case Hexagon::A2_combinew:
    case Hexagon::A4_combineri:
    case Hexagon::A4_combineir:
    case Hexagon::A2_combineii:
    case Hexagon::A4_combineii:
    case Hexagon::A2_combine_ll:
    case Hexagon::A2_combine_lh:
    case Hexagon::A2_combine_hl:
    case Hexagon::A2_combine_hh:
      return true;
  }
  return false;
}

// True if the first operand of MI that carries InpR is MI's last operand
// (i.e. InpR feeds MI in the position a store's value operand occupies).
bool HexagonLoopRescheduling::isStoreInput(const MachineInstr *MI,
      unsigned InpR) const {
  for (unsigned i = 0, n = MI->getNumOperands(); i < n; ++i) {
    const MachineOperand &Op = MI->getOperand(i);
    if (!Op.isReg())
      continue;
    if (Op.getReg() == InpR)
      return i == n-1;
  }
  return false;
}

// True if every "reference" bit of OutR refers to InpR, i.e. OutR is some
// rearrangement of InpR's bits (plus constants).
bool HexagonLoopRescheduling::isShuffleOf(unsigned OutR, unsigned InpR) const {
  if (!BTP->has(OutR) || !BTP->has(InpR))
    return false;
  const BitTracker::RegisterCell &OutC = BTP->lookup(OutR);
  for (unsigned i = 0, w = OutC.width(); i < w; ++i) {
    const BitTracker::BitValue &V = OutC[i];
    if (V.Type != BitTracker::BitValue::Ref)
      continue;
    if (V.RefI.Reg != InpR)
      return false;
  }
  return true;
}

// Check whether OutR2 is obtained from some single register by the same
// bit-rearrangement through which OutR1 is obtained from InpR1. On
// success, that register is returned in InpR2.
bool HexagonLoopRescheduling::isSameShuffle(unsigned OutR1, unsigned InpR1,
      unsigned OutR2, unsigned &InpR2) const {
  if (!BTP->has(OutR1) || !BTP->has(InpR1) || !BTP->has(OutR2))
    return false;
  const BitTracker::RegisterCell &OutC1 = BTP->lookup(OutR1);
  const BitTracker::RegisterCell &OutC2 = BTP->lookup(OutR2);
  unsigned W = OutC1.width();
  unsigned MatchR = 0;
  if (W != OutC2.width())
    return false;
  for (unsigned i = 0; i < W; ++i) {
    const BitTracker::BitValue &V1 = OutC1[i], &V2 = OutC2[i];
    // NOTE(review): "One" bits are rejected even when equal in both cells;
    // presumably a conservative restriction — confirm before relaxing.
    if (V1.Type != V2.Type || V1.Type == BitTracker::BitValue::One)
      return false;
    if (V1.Type != BitTracker::BitValue::Ref)
      continue;
    if (V1.RefI.Pos != V2.RefI.Pos)
      return false;
    if (V1.RefI.Reg != InpR1)
      return false;
    if (V2.RefI.Reg == 0 || V2.RefI.Reg == OutR2)
      return false;
    // All reference bits of OutR2 must come from one and the same register.
    if (!MatchR)
      MatchR = V2.RefI.Reg;
    else if (V2.RefI.Reg != MatchR)
      return false;
  }
  InpR2 = MatchR;
  return true;
}

// Clone group G at position At in loop block LB. The clone is fed through a
// new PHI (NewPredR from preheader PB, G.Inp from LB), and all uses of
// OldPhiR are redirected to the clone of G.Out.
void HexagonLoopRescheduling::moveGroup(InstrGroup &G, MachineBasicBlock &LB,
      MachineBasicBlock &PB, MachineBasicBlock::iterator At, unsigned OldPhiR,
      unsigned NewPredR) {
  DenseMap<unsigned,unsigned> RegMap;

  const TargetRegisterClass *PhiRC = MRI->getRegClass(NewPredR);
  unsigned PhiR = MRI->createVirtualRegister(PhiRC);
  BuildMI(LB, At, At->getDebugLoc(), HII->get(TargetOpcode::PHI), PhiR)
    .addReg(NewPredR)
    .addMBB(&PB)
    .addReg(G.Inp.Reg)
    .addMBB(&LB);
  RegMap.insert(std::make_pair(G.Inp.Reg, PhiR));

  // G.Ins was collected bottom-up, so emit the clones in reverse order to
  // get def-before-use; each def gets a fresh vreg recorded in RegMap.
  for (unsigned i = G.Ins.size(); i > 0; --i) {
    const MachineInstr *SI = G.Ins[i-1];
    unsigned DR = getDefReg(SI);
    const TargetRegisterClass *RC = MRI->getRegClass(DR);
    unsigned NewDR = MRI->createVirtualRegister(RC);
    DebugLoc DL = SI->getDebugLoc();

    auto MIB = BuildMI(LB, At, DL, HII->get(SI->getOpcode()), NewDR);
    for (unsigned j = 0, m = SI->getNumOperands(); j < m; ++j) {
      const MachineOperand &Op = SI->getOperand(j);
      if (!Op.isReg()) {
        MIB.addOperand(Op);
        continue;
      }
      if (!Op.isUse())
        continue;
      // Remap uses through the already-cloned defs (or the new PHI).
      unsigned UseR = RegMap[Op.getReg()];
      MIB.addReg(UseR, 0, Op.getSubReg());
    }
    RegMap.insert(std::make_pair(DR, NewDR));
  }

  HBS::replaceReg(OldPhiR, RegMap[G.Out.Reg], *MRI);
}

// Try to reschedule shuffling code in loop candidate C. Returns true if
// any group was moved.
bool HexagonLoopRescheduling::processLoop(LoopCand &C) {
  DEBUG(dbgs() << "Processing loop in BB#" << C.LB->getNumber() << "\n");
  std::vector<PhiInfo> Phis;
  // Collect PHIs whose value is only used inside the loop, and at least
  // once by a shuffling instruction or as a stored value.
  for (auto &I : *C.LB) {
    if (!I.isPHI())
      break;
    unsigned PR = getDefReg(&I);
    if (isConst(PR))
      continue;
    bool BadUse = false, GoodUse = false;
    for (auto UI = MRI->use_begin(PR), UE = MRI->use_end(); UI != UE; ++UI) {
      MachineInstr *UseI = UI->getParent();
      if (UseI->getParent() != C.LB) {
        BadUse = true;
        break;
      }
      if (isBitShuffle(UseI, PR) || isStoreInput(UseI, PR))
        GoodUse = true;
    }
    if (BadUse || !GoodUse)
      continue;

    Phis.push_back(PhiInfo(I, *C.LB));
  }

  DEBUG({
    dbgs() << "Phis: {";
    for (auto &I : Phis) {
      dbgs() << ' ' << PrintReg(I.DefR, HRI) << "=phi("
             << PrintReg(I.PR.Reg, HRI, I.PR.Sub) << ":b" << I.PB->getNumber()
             << ',' << PrintReg(I.LR.Reg, HRI, I.LR.Sub) << ":b"
             << I.LB->getNumber() << ')';
    }
    dbgs() << " }\n";
  });

  if (Phis.empty())
    return false;

  bool Changed = false;
  InstrList ShufIns;

  // Go backwards in the block: for each bit shuffling instruction, check
  // if that instruction could potentially be moved to the front of the loop:
  // the output of the loop cannot be used in a non-shuffling instruction
  // in this loop.
  for (auto I = C.LB->rbegin(), E = C.LB->rend(); I != E; ++I) {
    if (I->isTerminator())
      continue;
    if (I->isPHI())
      break;

    RegisterSet Defs;
    HBS::getInstrDefs(*I, Defs);
    if (Defs.count() != 1)
      continue;
    unsigned DefR = Defs.find_first();
    if (!TargetRegisterInfo::isVirtualRegister(DefR))
      continue;
    if (!isBitShuffle(&*I, DefR))
      continue;

    bool BadUse = false;
    for (auto UI = MRI->use_begin(DefR), UE = MRI->use_end(); UI != UE; ++UI) {
      MachineInstr *UseI = UI->getParent();
      if (UseI->getParent() == C.LB) {
        if (UseI->isPHI()) {
          // If the use is in a phi node in this loop, then it should be
          // the value corresponding to the back edge.
          unsigned Idx = UI.getOperandNo();
          if (UseI->getOperand(Idx+1).getMBB() != C.LB)
            BadUse = true;
        } else {
          // Non-PHI users must themselves be already-collected shuffling
          // instructions (we walk bottom-up, so users were seen first).
          auto F = find(ShufIns, UseI);
          if (F == ShufIns.end())
            BadUse = true;
        }
      } else {
        // There is a use outside of the loop, but there is no epilog block
        // suitable for a copy-out.
        if (C.EB == nullptr)
          BadUse = true;
      }
      if (BadUse)
        break;
    }

    if (BadUse)
      continue;
    ShufIns.push_back(&*I);
  }

  // Partition the list of shuffling instructions into instruction groups,
  // where each group has to be moved as a whole (i.e. a group is a chain of
  // dependent instructions). A group produces a single live output register,
  // which is meant to be the input of the loop phi node (although this is
  // not checked here yet). It also uses a single register as its input,
  // which is some value produced in the loop body. After moving the group
  // to the beginning of the loop, that input register would need to be
  // the loop-carried register (through a phi node) instead of the (currently
  // loop-carried) output register.
  typedef std::vector<InstrGroup> InstrGroupList;
  InstrGroupList Groups;

  for (unsigned i = 0, n = ShufIns.size(); i < n; ++i) {
    MachineInstr *SI = ShufIns[i];
    if (SI == nullptr)
      continue;

    InstrGroup G;
    G.Ins.push_back(SI);
    G.Out.Reg = getDefReg(SI);
    RegisterSet Inputs;
    HBS::getInstrUses(*SI, Inputs);

    // Absorb later (earlier in program order) instructions that define any
    // still-pending input; absorbed entries are nulled out in ShufIns.
    for (unsigned j = i+1; j < n; ++j) {
      MachineInstr *MI = ShufIns[j];
      if (MI == nullptr)
        continue;
      RegisterSet Defs;
      HBS::getInstrDefs(*MI, Defs);
      // If this instruction does not define any pending inputs, skip it.
      if (!Defs.intersects(Inputs))
        continue;
      // Otherwise, add it to the current group and remove the inputs that
      // are defined by MI.
      G.Ins.push_back(MI);
      Inputs.remove(Defs);
      // Then add all registers used by MI.
      HBS::getInstrUses(*MI, Inputs);
      ShufIns[j] = nullptr;
    }

    // Only add a group if it requires at most one register.
    if (Inputs.count() > 1)
      continue;
    auto LoopInpEq = [G] (const PhiInfo &P) -> bool {
      return G.Out.Reg == P.LR.Reg;
    };
    if (llvm::find_if(Phis, LoopInpEq) == Phis.end())
      continue;

    G.Inp.Reg = Inputs.find_first();
    Groups.push_back(G);
  }

  DEBUG({
    for (unsigned i = 0, n = Groups.size(); i < n; ++i) {
      InstrGroup &G = Groups[i];
      dbgs() << "Group[" << i << "] inp: "
             << PrintReg(G.Inp.Reg, HRI, G.Inp.Sub)
             << " out: " << PrintReg(G.Out.Reg, HRI, G.Out.Sub) << "\n";
      for (unsigned j = 0, m = G.Ins.size(); j < m; ++j)
        dbgs() << " " << *G.Ins[j];
    }
  });

  // For each group whose output feeds a loop PHI, find (or synthesize) the
  // matching preheader value and move the group to the top of the loop.
  for (unsigned i = 0, n = Groups.size(); i < n; ++i) {
    InstrGroup &G = Groups[i];
    if (!isShuffleOf(G.Out.Reg, G.Inp.Reg))
      continue;
    auto LoopInpEq = [G] (const PhiInfo &P) -> bool {
      return G.Out.Reg == P.LR.Reg;
    };
    auto F = llvm::find_if(Phis, LoopInpEq);
    if (F == Phis.end())
      continue;
    unsigned PrehR = 0;
    if (!isSameShuffle(G.Out.Reg, G.Inp.Reg, F->PR.Reg, PrehR)) {
      // No matching shuffle in the preheader; the only other accepted case
      // is a preheader value that is the constant zero (any shuffle of
      // zero is zero), possibly re-materialized in the right class.
      const MachineInstr *DefPrehR = MRI->getVRegDef(F->PR.Reg);
      unsigned Opc = DefPrehR->getOpcode();
      if (Opc != Hexagon::A2_tfrsi && Opc != Hexagon::A2_tfrpi)
        continue;
      if (!DefPrehR->getOperand(1).isImm())
        continue;
      if (DefPrehR->getOperand(1).getImm() != 0)
        continue;
      const TargetRegisterClass *RC = MRI->getRegClass(G.Inp.Reg);
      if (RC != MRI->getRegClass(F->PR.Reg)) {
        PrehR = MRI->createVirtualRegister(RC);
        unsigned TfrI = (RC == &Hexagon::IntRegsRegClass) ? Hexagon::A2_tfrsi
                                                          : Hexagon::A2_tfrpi;
        auto T = C.PB->getFirstTerminator();
        DebugLoc DL = (T != C.PB->end()) ? T->getDebugLoc() : DebugLoc();
        BuildMI(*C.PB, T, DL, HII->get(TfrI), PrehR)
          .addImm(0);
      } else {
        PrehR = F->PR.Reg;
      }
    }
    // isSameShuffle could match with PrehR being of a wider class than
    // G.Inp.Reg, for example if G shuffles the low 32 bits of its input,
    // it would match for the input being a 32-bit register, and PrehR
    // being a 64-bit register (where the low 32 bits match). This could
    // be handled, but for now skip these cases.
    if (MRI->getRegClass(PrehR) != MRI->getRegClass(G.Inp.Reg))
      continue;
    moveGroup(G, *F->LB, *F->PB, F->LB->getFirstNonPHI(), F->DefR, PrehR);
    Changed = true;
  }

  return Changed;
}

// Find single-block self-loops (two predecessors: itself and a preheader;
// two successors: itself and an optional epilog) and process each.
bool HexagonLoopRescheduling::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  HII = HST.getInstrInfo();
  HRI = HST.getRegisterInfo();
  MRI = &MF.getRegInfo();
  const HexagonEvaluator HE(*HRI, *MRI, *HII, MF);
  BitTracker BT(HE, MF);
  DEBUG(BT.trace(true));
  BT.run();
  BTP = &BT;

  std::vector<LoopCand> Cand;

  for (auto &B : MF) {
    if (B.pred_size() != 2 || B.succ_size() != 2)
      continue;
    MachineBasicBlock *PB = nullptr;
    bool IsLoop = false;
    for (auto PI = B.pred_begin(), PE = B.pred_end(); PI != PE; ++PI) {
      if (*PI != &B)
        PB = *PI;
      else
        IsLoop = true;
    }
    if (!IsLoop)
      continue;

    MachineBasicBlock *EB = nullptr;
    for (auto SI = B.succ_begin(), SE = B.succ_end(); SI != SE; ++SI) {
      if (*SI == &B)
        continue;
      // Set EB to the epilog block, if it has only 1 predecessor (i.e. the
      // edge from B to EB is non-critical).
      if ((*SI)->pred_size() == 1)
        EB = *SI;
      break;
    }

    Cand.push_back(LoopCand(&B, PB, EB));
  }

  bool Changed = false;
  for (auto &C : Cand)
    Changed |= processLoop(C);

  return Changed;
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createHexagonLoopRescheduling() {
  return new HexagonLoopRescheduling();
}

FunctionPass *llvm::createHexagonBitSimplify() {
  return new HexagonBitSimplify();
}