1 //===--- HexagonBitSimplify.cpp -------------------------------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 10 #define DEBUG_TYPE "hexbit" 11 12 #include "llvm/CodeGen/Passes.h" 13 #include "llvm/CodeGen/MachineDominators.h" 14 #include "llvm/CodeGen/MachineFunctionPass.h" 15 #include "llvm/CodeGen/MachineInstrBuilder.h" 16 #include "llvm/CodeGen/MachineRegisterInfo.h" 17 #include "llvm/Support/CommandLine.h" 18 #include "llvm/Support/Debug.h" 19 #include "llvm/Support/raw_ostream.h" 20 #include "llvm/Target/TargetMachine.h" 21 #include "llvm/Target/TargetInstrInfo.h" 22 #include "HexagonTargetMachine.h" 23 #include "HexagonBitTracker.h" 24 25 using namespace llvm; 26 27 namespace llvm { 28 void initializeHexagonBitSimplifyPass(PassRegistry& Registry); 29 FunctionPass *createHexagonBitSimplify(); 30 } 31 32 namespace { 33 // Set of virtual registers, based on BitVector. 
// A set of virtual registers, implemented on top of BitVector. Registers
// are stored by their dense pack index (TargetRegisterInfo::virtReg2Index),
// so the vector stays small even though virtual register numbers are large.
// Inheritance is private: only a vetted subset of BitVector's interface is
// exposed, with register-number <-> bit-index translation applied throughout.
struct RegisterSet : private BitVector {
  RegisterSet() : BitVector() {}
  explicit RegisterSet(unsigned s, bool t = false) : BitVector(s, t) {}
  RegisterSet(const RegisterSet &RS) : BitVector(RS) {}

  using BitVector::clear;
  using BitVector::count;

  // Return the first register in the set, or 0 if the set is empty.
  // (0 is never a virtual register, so it is a safe sentinel.)
  unsigned find_first() const {
    int First = BitVector::find_first();
    if (First < 0)
      return 0;
    return x2v(First);
  }

  // Return the register following Prev, or 0 if Prev was the last one.
  unsigned find_next(unsigned Prev) const {
    int Next = BitVector::find_next(v2x(Prev));
    if (Next < 0)
      return 0;
    return x2v(Next);
  }

  // Add register R, growing the underlying vector on demand.
  RegisterSet &insert(unsigned R) {
    unsigned Idx = v2x(R);
    ensure(Idx);
    return static_cast<RegisterSet&>(BitVector::set(Idx));
  }
  // Remove register R; removing an absent register is a no-op.
  RegisterSet &remove(unsigned R) {
    unsigned Idx = v2x(R);
    if (Idx >= size())
      return *this;
    return static_cast<RegisterSet&>(BitVector::reset(Idx));
  }

  // Set union: add every register in Rs.
  RegisterSet &insert(const RegisterSet &Rs) {
    return static_cast<RegisterSet&>(BitVector::operator|=(Rs));
  }
  // Set difference: remove every register in Rs.
  RegisterSet &remove(const RegisterSet &Rs) {
    return static_cast<RegisterSet&>(BitVector::reset(Rs));
  }

  // Mutable indexing grows the set on demand; const indexing asserts
  // instead of growing.
  reference operator[](unsigned R) {
    unsigned Idx = v2x(R);
    ensure(Idx);
    return BitVector::operator[](Idx);
  }
  bool operator[](unsigned R) const {
    unsigned Idx = v2x(R);
    assert(Idx < size());
    return BitVector::operator[](Idx);
  }
  // Membership test that tolerates registers beyond the current size.
  bool has(unsigned R) const {
    unsigned Idx = v2x(R);
    if (Idx >= size())
      return false;
    return BitVector::test(Idx);
  }

  bool empty() const {
    return !BitVector::any();
  }
  // True if every register in Rs is also in *this.
  bool includes(const RegisterSet &Rs) const {
    // A.BitVector::test(B) <=> A-B != {}
    return !Rs.BitVector::test(*this);
  }
  // True if the two sets share at least one register.
  bool intersects(const RegisterSet &Rs) const {
    return BitVector::anyCommon(Rs);
  }

private:
  // Grow the vector so that bit Idx exists (minimum size of 32 bits).
  void ensure(unsigned Idx) {
    if (size() <= Idx)
      resize(std::max(Idx+1, 32U));
  }
  // Virtual register number -> dense bit index, and the inverse.
  static inline unsigned v2x(unsigned v) {
    return TargetRegisterInfo::virtReg2Index(v);
  }
  static inline unsigned x2v(unsigned x) {
    return TargetRegisterInfo::index2VirtReg(x);
  }
};


// Stream-printing helper: wraps a RegisterSet so it can be emitted with
// operator<< for debug output.
struct PrintRegSet {
  PrintRegSet(const RegisterSet &S, const TargetRegisterInfo *RI)
    : RS(S), TRI(RI) {}
  friend raw_ostream &operator<< (raw_ostream &OS,
        const PrintRegSet &P);
private:
  const RegisterSet &RS;
  const TargetRegisterInfo *TRI;
};

raw_ostream &operator<< (raw_ostream &OS, const PrintRegSet &P)
  LLVM_ATTRIBUTE_UNUSED;
raw_ostream &operator<< (raw_ostream &OS, const PrintRegSet &P) {
  OS << '{';
  // find_first/find_next return 0 when the set is exhausted.
  for (unsigned R = P.RS.find_first(); R; R = P.RS.find_next(R))
    OS << ' ' << PrintReg(R, P.TRI);
  OS << " }";
  return OS;
}
}


namespace {
class Transformation;

// The machine-function pass driving the bit-level simplifications in this
// file. Requires and preserves the machine dominator tree.
class HexagonBitSimplify : public MachineFunctionPass {
public:
  static char ID;
  HexagonBitSimplify() : MachineFunctionPass(ID), MDT(0) {
    initializeHexagonBitSimplifyPass(*PassRegistry::getPassRegistry());
  }
  virtual const char *getPassName() const {
    return "Hexagon bit simplification";
  }
  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
  virtual bool runOnMachineFunction(MachineFunction &MF);

  // Static helper utilities shared by the transformations defined below.
  static void getInstrDefs(const MachineInstr &MI, RegisterSet &Defs);
  static void getInstrUses(const MachineInstr &MI, RegisterSet &Uses);
  static bool isEqual(const BitTracker::RegisterCell &RC1, uint16_t B1,
      const BitTracker::RegisterCell &RC2, uint16_t B2, uint16_t W);
  static bool isConst(const BitTracker::RegisterCell &RC, uint16_t B,
      uint16_t W);
  static bool isZero(const BitTracker::RegisterCell &RC, uint16_t B,
      uint16_t W);
  static bool getConst(const BitTracker::RegisterCell &RC, uint16_t B,
      uint16_t W, uint64_t &U);
  static bool replaceReg(unsigned OldR, unsigned NewR,
      MachineRegisterInfo &MRI);
  static bool getSubregMask(const BitTracker::RegisterRef &RR,
      unsigned &Begin, unsigned &Width, MachineRegisterInfo &MRI);
  static bool replaceRegWithSub(unsigned OldR, unsigned NewR,
      unsigned NewSR, MachineRegisterInfo &MRI);
  static bool replaceSubWithSub(unsigned OldR, unsigned OldSR,
      unsigned NewR, unsigned NewSR, MachineRegisterInfo &MRI);
  static bool parseRegSequence(const MachineInstr &I,
      BitTracker::RegisterRef &SL, BitTracker::RegisterRef &SH);

  static bool getUsedBitsInStore(unsigned Opc, BitVector &Bits,
      uint16_t Begin);
  static bool getUsedBits(unsigned Opc, unsigned OpN, BitVector &Bits,
      uint16_t Begin, const HexagonInstrInfo &HII);

  static const TargetRegisterClass *getFinalVRegClass(
      const BitTracker::RegisterRef &RR, MachineRegisterInfo &MRI);
  static bool isTransparentCopy(const BitTracker::RegisterRef &RD,
      const BitTracker::RegisterRef &RS, MachineRegisterInfo &MRI);

private:
  MachineDominatorTree *MDT;

  bool visitBlock(MachineBasicBlock &B, Transformation &T, RegisterSet &AVs);
};

char HexagonBitSimplify::ID = 0;
typedef HexagonBitSimplify HBS;


// The purpose of this class is to provide a common facility to traverse
// the function top-down or bottom-up via the dominator tree, and keep
// track of the available registers.
class Transformation {
public:
  bool TopDown;
  Transformation(bool TD) : TopDown(TD) {}
  // Process one basic block. AVs is the set of virtual registers whose
  // definitions dominate the entry of B (i.e. are "available" in B).
  virtual bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) = 0;
  virtual ~Transformation() {}
};
}

INITIALIZE_PASS_BEGIN(HexagonBitSimplify, "hexbit",
      "Hexagon bit simplification", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(HexagonBitSimplify, "hexbit",
      "Hexagon bit simplification", false, false)


// Recursively visit B and its dominator-tree children, applying T before
// the children (top-down) or after them (bottom-up). AVs is the register
// set available on entry to B; definitions made in B are added before
// descending into dominated blocks.
bool HexagonBitSimplify::visitBlock(MachineBasicBlock &B, Transformation &T,
      RegisterSet &AVs) {
  MachineDomTreeNode *N = MDT->getNode(&B);
  typedef GraphTraits<MachineDomTreeNode*> GTN;
  bool Changed = false;

  if (T.TopDown)
    Changed = T.processBlock(B, AVs);

  // Everything defined in B becomes available in the blocks B dominates.
  RegisterSet Defs;
  for (auto &I : B)
    getInstrDefs(I, Defs);
  RegisterSet NewAVs = AVs;
  NewAVs.insert(Defs);

  for (auto I = GTN::child_begin(N), E = GTN::child_end(N); I != E; ++I) {
    MachineBasicBlock *SB = (*I)->getBlock();
    Changed |= visitBlock(*SB, T, NewAVs);
  }
  // Bottom-up: process B only after all dominated blocks were handled.
  if (!T.TopDown)
    Changed |= T.processBlock(B, AVs);

  return Changed;
}

//
// Utility functions:
//

// Collect into Defs all virtual registers defined by MI.
void HexagonBitSimplify::getInstrDefs(const MachineInstr &MI,
      RegisterSet &Defs) {
  for (auto &Op : MI.operands()) {
    if (!Op.isReg() || !Op.isDef())
      continue;
    unsigned R = Op.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(R))
      continue;
    Defs.insert(R);
  }
}

// Collect into Uses all virtual registers used by MI.
void HexagonBitSimplify::getInstrUses(const MachineInstr &MI,
      RegisterSet &Uses) {
  for (auto &Op : MI.operands()) {
    if (!Op.isReg() || !Op.isUse())
      continue;
    unsigned R = Op.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(R))
      continue;
    Uses.insert(R);
  }
}

// Check if the W bits starting at position B1 in cell RC1 are equal to
// the W bits starting at position B2 in cell RC2.
271 bool HexagonBitSimplify::isEqual(const BitTracker::RegisterCell &RC1, 272 uint16_t B1, const BitTracker::RegisterCell &RC2, uint16_t B2, 273 uint16_t W) { 274 for (uint16_t i = 0; i < W; ++i) { 275 // If RC1[i] is "bottom", it cannot be proven equal to RC2[i]. 276 if (RC1[B1+i].Type == BitTracker::BitValue::Ref && RC1[B1+i].RefI.Reg == 0) 277 return false; 278 // Same for RC2[i]. 279 if (RC2[B2+i].Type == BitTracker::BitValue::Ref && RC2[B2+i].RefI.Reg == 0) 280 return false; 281 if (RC1[B1+i] != RC2[B2+i]) 282 return false; 283 } 284 return true; 285 } 286 287 288 bool HexagonBitSimplify::isConst(const BitTracker::RegisterCell &RC, 289 uint16_t B, uint16_t W) { 290 assert(B < RC.width() && B+W <= RC.width()); 291 for (uint16_t i = B; i < B+W; ++i) 292 if (!RC[i].num()) 293 return false; 294 return true; 295 } 296 297 298 bool HexagonBitSimplify::isZero(const BitTracker::RegisterCell &RC, 299 uint16_t B, uint16_t W) { 300 assert(B < RC.width() && B+W <= RC.width()); 301 for (uint16_t i = B; i < B+W; ++i) 302 if (!RC[i].is(0)) 303 return false; 304 return true; 305 } 306 307 308 bool HexagonBitSimplify::getConst(const BitTracker::RegisterCell &RC, 309 uint16_t B, uint16_t W, uint64_t &U) { 310 assert(B < RC.width() && B+W <= RC.width()); 311 int64_t T = 0; 312 for (uint16_t i = B+W; i > B; --i) { 313 const BitTracker::BitValue &BV = RC[i-1]; 314 T <<= 1; 315 if (BV.is(1)) 316 T |= 1; 317 else if (!BV.is(0)) 318 return false; 319 } 320 U = T; 321 return true; 322 } 323 324 325 bool HexagonBitSimplify::replaceReg(unsigned OldR, unsigned NewR, 326 MachineRegisterInfo &MRI) { 327 if (!TargetRegisterInfo::isVirtualRegister(OldR) || 328 !TargetRegisterInfo::isVirtualRegister(NewR)) 329 return false; 330 auto Begin = MRI.use_begin(OldR), End = MRI.use_end(); 331 decltype(End) NextI; 332 for (auto I = Begin; I != End; I = NextI) { 333 NextI = std::next(I); 334 I->setReg(NewR); 335 } 336 return Begin != End; 337 } 338 339 340 bool 
HexagonBitSimplify::replaceRegWithSub(unsigned OldR, unsigned NewR, 341 unsigned NewSR, MachineRegisterInfo &MRI) { 342 if (!TargetRegisterInfo::isVirtualRegister(OldR) || 343 !TargetRegisterInfo::isVirtualRegister(NewR)) 344 return false; 345 auto Begin = MRI.use_begin(OldR), End = MRI.use_end(); 346 decltype(End) NextI; 347 for (auto I = Begin; I != End; I = NextI) { 348 NextI = std::next(I); 349 I->setReg(NewR); 350 I->setSubReg(NewSR); 351 } 352 return Begin != End; 353 } 354 355 356 bool HexagonBitSimplify::replaceSubWithSub(unsigned OldR, unsigned OldSR, 357 unsigned NewR, unsigned NewSR, MachineRegisterInfo &MRI) { 358 if (!TargetRegisterInfo::isVirtualRegister(OldR) || 359 !TargetRegisterInfo::isVirtualRegister(NewR)) 360 return false; 361 auto Begin = MRI.use_begin(OldR), End = MRI.use_end(); 362 decltype(End) NextI; 363 for (auto I = Begin; I != End; I = NextI) { 364 NextI = std::next(I); 365 if (I->getSubReg() != OldSR) 366 continue; 367 I->setReg(NewR); 368 I->setSubReg(NewSR); 369 } 370 return Begin != End; 371 } 372 373 374 // For a register ref (pair Reg:Sub), set Begin to the position of the LSB 375 // of Sub in Reg, and set Width to the size of Sub in bits. Return true, 376 // if this succeeded, otherwise return false. 377 bool HexagonBitSimplify::getSubregMask(const BitTracker::RegisterRef &RR, 378 unsigned &Begin, unsigned &Width, MachineRegisterInfo &MRI) { 379 const TargetRegisterClass *RC = MRI.getRegClass(RR.Reg); 380 if (RC == &Hexagon::IntRegsRegClass) { 381 assert(RR.Sub == 0); 382 Begin = 0; 383 Width = 32; 384 return true; 385 } 386 if (RC == &Hexagon::DoubleRegsRegClass) { 387 if (RR.Sub == 0) { 388 Begin = 0; 389 Width = 64; 390 return true; 391 } 392 assert(RR.Sub == Hexagon::subreg_loreg || RR.Sub == Hexagon::subreg_hireg); 393 Width = 32; 394 Begin = (RR.Sub == Hexagon::subreg_loreg ? 
0 : 32); 395 return true; 396 } 397 return false; 398 } 399 400 401 // For a REG_SEQUENCE, set SL to the low subregister and SH to the high 402 // subregister. 403 bool HexagonBitSimplify::parseRegSequence(const MachineInstr &I, 404 BitTracker::RegisterRef &SL, BitTracker::RegisterRef &SH) { 405 assert(I.getOpcode() == TargetOpcode::REG_SEQUENCE); 406 unsigned Sub1 = I.getOperand(2).getImm(), Sub2 = I.getOperand(4).getImm(); 407 assert(Sub1 != Sub2); 408 if (Sub1 == Hexagon::subreg_loreg && Sub2 == Hexagon::subreg_hireg) { 409 SL = I.getOperand(1); 410 SH = I.getOperand(3); 411 return true; 412 } 413 if (Sub1 == Hexagon::subreg_hireg && Sub2 == Hexagon::subreg_loreg) { 414 SH = I.getOperand(1); 415 SL = I.getOperand(3); 416 return true; 417 } 418 return false; 419 } 420 421 422 // All stores (except 64-bit stores) take a 32-bit register as the source 423 // of the value to be stored. If the instruction stores into a location 424 // that is shorter than 32 bits, some bits of the source register are not 425 // used. For each store instruction, calculate the set of used bits in 426 // the source register, and set appropriate bits in Bits. Return true if 427 // the bits are calculated, false otherwise. 
bool HexagonBitSimplify::getUsedBitsInStore(unsigned Opc, BitVector &Bits,
      uint16_t Begin) {
  using namespace Hexagon;

  // Begin is the bit position of the source operand's LSB within Bits
  // (nonzero when the operand is a subregister of a larger register).
  switch (Opc) {
    // Store byte
    case S2_storerb_io:           // memb(Rs32+#s11:0)=Rt32
    case S2_storerbnew_io:        // memb(Rs32+#s11:0)=Nt8.new
    case S2_pstorerbt_io:         // if (Pv4) memb(Rs32+#u6:0)=Rt32
    case S2_pstorerbf_io:         // if (!Pv4) memb(Rs32+#u6:0)=Rt32
    case S4_pstorerbtnew_io:      // if (Pv4.new) memb(Rs32+#u6:0)=Rt32
    case S4_pstorerbfnew_io:      // if (!Pv4.new) memb(Rs32+#u6:0)=Rt32
    case S2_pstorerbnewt_io:      // if (Pv4) memb(Rs32+#u6:0)=Nt8.new
    case S2_pstorerbnewf_io:      // if (!Pv4) memb(Rs32+#u6:0)=Nt8.new
    case S4_pstorerbnewtnew_io:   // if (Pv4.new) memb(Rs32+#u6:0)=Nt8.new
    case S4_pstorerbnewfnew_io:   // if (!Pv4.new) memb(Rs32+#u6:0)=Nt8.new
    case S2_storerb_pi:           // memb(Rx32++#s4:0)=Rt32
    case S2_storerbnew_pi:        // memb(Rx32++#s4:0)=Nt8.new
    case S2_pstorerbt_pi:         // if (Pv4) memb(Rx32++#s4:0)=Rt32
    case S2_pstorerbf_pi:         // if (!Pv4) memb(Rx32++#s4:0)=Rt32
    case S2_pstorerbtnew_pi:      // if (Pv4.new) memb(Rx32++#s4:0)=Rt32
    case S2_pstorerbfnew_pi:      // if (!Pv4.new) memb(Rx32++#s4:0)=Rt32
    case S2_pstorerbnewt_pi:      // if (Pv4) memb(Rx32++#s4:0)=Nt8.new
    case S2_pstorerbnewf_pi:      // if (!Pv4) memb(Rx32++#s4:0)=Nt8.new
    case S2_pstorerbnewtnew_pi:   // if (Pv4.new) memb(Rx32++#s4:0)=Nt8.new
    case S2_pstorerbnewfnew_pi:   // if (!Pv4.new) memb(Rx32++#s4:0)=Nt8.new
    case S4_storerb_ap:           // memb(Re32=#U6)=Rt32
    case S4_storerbnew_ap:        // memb(Re32=#U6)=Nt8.new
    case S2_storerb_pr:           // memb(Rx32++Mu2)=Rt32
    case S2_storerbnew_pr:        // memb(Rx32++Mu2)=Nt8.new
    case S4_storerb_ur:           // memb(Ru32<<#u2+#U6)=Rt32
    case S4_storerbnew_ur:        // memb(Ru32<<#u2+#U6)=Nt8.new
    case S2_storerb_pbr:          // memb(Rx32++Mu2:brev)=Rt32
    case S2_storerbnew_pbr:       // memb(Rx32++Mu2:brev)=Nt8.new
    case S2_storerb_pci:          // memb(Rx32++#s4:0:circ(Mu2))=Rt32
    case S2_storerbnew_pci:       // memb(Rx32++#s4:0:circ(Mu2))=Nt8.new
    case S2_storerb_pcr:          // memb(Rx32++I:circ(Mu2))=Rt32
    case S2_storerbnew_pcr:       // memb(Rx32++I:circ(Mu2))=Nt8.new
    case S4_storerb_rr:           // memb(Rs32+Ru32<<#u2)=Rt32
    case S4_storerbnew_rr:        // memb(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerbt_rr:         // if (Pv4) memb(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerbf_rr:         // if (!Pv4) memb(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerbtnew_rr:      // if (Pv4.new) memb(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerbfnew_rr:      // if (!Pv4.new) memb(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerbnewt_rr:      // if (Pv4) memb(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerbnewf_rr:      // if (!Pv4) memb(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerbnewtnew_rr:   // if (Pv4.new) memb(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerbnewfnew_rr:   // if (!Pv4.new) memb(Rs32+Ru32<<#u2)=Nt8.new
    case S2_storerbgp:            // memb(gp+#u16:0)=Rt32
    case S2_storerbnewgp:         // memb(gp+#u16:0)=Nt8.new
    case S4_pstorerbt_abs:        // if (Pv4) memb(#u6)=Rt32
    case S4_pstorerbf_abs:        // if (!Pv4) memb(#u6)=Rt32
    case S4_pstorerbtnew_abs:     // if (Pv4.new) memb(#u6)=Rt32
    case S4_pstorerbfnew_abs:     // if (!Pv4.new) memb(#u6)=Rt32
    case S4_pstorerbnewt_abs:     // if (Pv4) memb(#u6)=Nt8.new
    case S4_pstorerbnewf_abs:     // if (!Pv4) memb(#u6)=Nt8.new
    case S4_pstorerbnewtnew_abs:  // if (Pv4.new) memb(#u6)=Nt8.new
    case S4_pstorerbnewfnew_abs:  // if (!Pv4.new) memb(#u6)=Nt8.new
      // Byte store: only the low 8 bits of the source are used.
      Bits.set(Begin, Begin+8);
      return true;

    // Store low half
    case S2_storerh_io:           // memh(Rs32+#s11:1)=Rt32
    case S2_storerhnew_io:        // memh(Rs32+#s11:1)=Nt8.new
    case S2_pstorerht_io:         // if (Pv4) memh(Rs32+#u6:1)=Rt32
    case S2_pstorerhf_io:         // if (!Pv4) memh(Rs32+#u6:1)=Rt32
    case S4_pstorerhtnew_io:      // if (Pv4.new) memh(Rs32+#u6:1)=Rt32
    case S4_pstorerhfnew_io:      // if (!Pv4.new) memh(Rs32+#u6:1)=Rt32
    case S2_pstorerhnewt_io:      // if (Pv4) memh(Rs32+#u6:1)=Nt8.new
    case S2_pstorerhnewf_io:      // if (!Pv4) memh(Rs32+#u6:1)=Nt8.new
    case S4_pstorerhnewtnew_io:   // if (Pv4.new) memh(Rs32+#u6:1)=Nt8.new
    case S4_pstorerhnewfnew_io:   // if (!Pv4.new) memh(Rs32+#u6:1)=Nt8.new
    case S2_storerh_pi:           // memh(Rx32++#s4:1)=Rt32
    case S2_storerhnew_pi:        // memh(Rx32++#s4:1)=Nt8.new
    case S2_pstorerht_pi:         // if (Pv4) memh(Rx32++#s4:1)=Rt32
    case S2_pstorerhf_pi:         // if (!Pv4) memh(Rx32++#s4:1)=Rt32
    case S2_pstorerhtnew_pi:      // if (Pv4.new) memh(Rx32++#s4:1)=Rt32
    case S2_pstorerhfnew_pi:      // if (!Pv4.new) memh(Rx32++#s4:1)=Rt32
    case S2_pstorerhnewt_pi:      // if (Pv4) memh(Rx32++#s4:1)=Nt8.new
    case S2_pstorerhnewf_pi:      // if (!Pv4) memh(Rx32++#s4:1)=Nt8.new
    case S2_pstorerhnewtnew_pi:   // if (Pv4.new) memh(Rx32++#s4:1)=Nt8.new
    case S2_pstorerhnewfnew_pi:   // if (!Pv4.new) memh(Rx32++#s4:1)=Nt8.new
    case S4_storerh_ap:           // memh(Re32=#U6)=Rt32
    case S4_storerhnew_ap:        // memh(Re32=#U6)=Nt8.new
    case S2_storerh_pr:           // memh(Rx32++Mu2)=Rt32
    case S2_storerhnew_pr:        // memh(Rx32++Mu2)=Nt8.new
    case S4_storerh_ur:           // memh(Ru32<<#u2+#U6)=Rt32
    case S4_storerhnew_ur:        // memh(Ru32<<#u2+#U6)=Nt8.new
    case S2_storerh_pbr:          // memh(Rx32++Mu2:brev)=Rt32
    case S2_storerhnew_pbr:       // memh(Rx32++Mu2:brev)=Nt8.new
    case S2_storerh_pci:          // memh(Rx32++#s4:1:circ(Mu2))=Rt32
    case S2_storerhnew_pci:       // memh(Rx32++#s4:1:circ(Mu2))=Nt8.new
    case S2_storerh_pcr:          // memh(Rx32++I:circ(Mu2))=Rt32
    case S2_storerhnew_pcr:       // memh(Rx32++I:circ(Mu2))=Nt8.new
    case S4_storerh_rr:           // memh(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerht_rr:         // if (Pv4) memh(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerhf_rr:         // if (!Pv4) memh(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerhtnew_rr:      // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerhfnew_rr:      // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Rt32
    case S4_storerhnew_rr:        // memh(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerhnewt_rr:      // if (Pv4) memh(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerhnewf_rr:      // if (!Pv4) memh(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerhnewtnew_rr:   // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerhnewfnew_rr:   // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Nt8.new
    case S2_storerhgp:            // memh(gp+#u16:1)=Rt32
    case S2_storerhnewgp:         // memh(gp+#u16:1)=Nt8.new
    case S4_pstorerht_abs:        // if (Pv4) memh(#u6)=Rt32
    case S4_pstorerhf_abs:        // if (!Pv4) memh(#u6)=Rt32
    case S4_pstorerhtnew_abs:     // if (Pv4.new) memh(#u6)=Rt32
    case S4_pstorerhfnew_abs:     // if (!Pv4.new) memh(#u6)=Rt32
    case S4_pstorerhnewt_abs:     // if (Pv4) memh(#u6)=Nt8.new
    case S4_pstorerhnewf_abs:     // if (!Pv4) memh(#u6)=Nt8.new
    case S4_pstorerhnewtnew_abs:  // if (Pv4.new) memh(#u6)=Nt8.new
    case S4_pstorerhnewfnew_abs:  // if (!Pv4.new) memh(#u6)=Nt8.new
      // Halfword store of the low half: low 16 bits of the source.
      Bits.set(Begin, Begin+16);
      return true;

    // Store high half
    case S2_storerf_io:           // memh(Rs32+#s11:1)=Rt.H32
    case S2_pstorerft_io:         // if (Pv4) memh(Rs32+#u6:1)=Rt.H32
    case S2_pstorerff_io:         // if (!Pv4) memh(Rs32+#u6:1)=Rt.H32
    case S4_pstorerftnew_io:      // if (Pv4.new) memh(Rs32+#u6:1)=Rt.H32
    case S4_pstorerffnew_io:      // if (!Pv4.new) memh(Rs32+#u6:1)=Rt.H32
    case S2_storerf_pi:           // memh(Rx32++#s4:1)=Rt.H32
    case S2_pstorerft_pi:         // if (Pv4) memh(Rx32++#s4:1)=Rt.H32
    case S2_pstorerff_pi:         // if (!Pv4) memh(Rx32++#s4:1)=Rt.H32
    case S2_pstorerftnew_pi:      // if (Pv4.new) memh(Rx32++#s4:1)=Rt.H32
    case S2_pstorerffnew_pi:      // if (!Pv4.new) memh(Rx32++#s4:1)=Rt.H32
    case S4_storerf_ap:           // memh(Re32=#U6)=Rt.H32
    case S2_storerf_pr:           // memh(Rx32++Mu2)=Rt.H32
    case S4_storerf_ur:           // memh(Ru32<<#u2+#U6)=Rt.H32
    case S2_storerf_pbr:          // memh(Rx32++Mu2:brev)=Rt.H32
    case S2_storerf_pci:          // memh(Rx32++#s4:1:circ(Mu2))=Rt.H32
    case S2_storerf_pcr:          // memh(Rx32++I:circ(Mu2))=Rt.H32
    case S4_storerf_rr:           // memh(Rs32+Ru32<<#u2)=Rt.H32
    case S4_pstorerft_rr:         // if (Pv4) memh(Rs32+Ru32<<#u2)=Rt.H32
    case S4_pstorerff_rr:         // if (!Pv4) memh(Rs32+Ru32<<#u2)=Rt.H32
    case S4_pstorerftnew_rr:      // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Rt.H32
    case S4_pstorerffnew_rr:      // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Rt.H32
    case S2_storerfgp:            // memh(gp+#u16:1)=Rt.H32
    case S4_pstorerft_abs:        // if (Pv4) memh(#u6)=Rt.H32
    case S4_pstorerff_abs:        // if (!Pv4) memh(#u6)=Rt.H32
    case S4_pstorerftnew_abs:     // if (Pv4.new) memh(#u6)=Rt.H32
    case S4_pstorerffnew_abs:     // if (!Pv4.new) memh(#u6)=Rt.H32
      // Halfword store of the high half: high 16 bits of the source.
      Bits.set(Begin+16, Begin+32);
      return true;
  }

  // Unrecognized opcode: the used bits could not be computed.
  return false;
}


// For an instruction with opcode Opc, calculate the set of bits that it
// uses in a register in operand OpN. This only calculates the set of used
// bits for cases where it does not depend on any operands (as is the case
// in shifts, for example). For concrete instructions from a program, the
// operand may be a subregister of a larger register, while Bits would
// correspond to the larger register in its entirety. Because of that,
// the parameter Begin can be used to indicate which bit of Bits should be
// considered the LSB of the operand.
// Returning false means "cannot compute"; callers must then conservatively
// assume all bits of the operand are used.
bool HexagonBitSimplify::getUsedBits(unsigned Opc, unsigned OpN,
      BitVector &Bits, uint16_t Begin, const HexagonInstrInfo &HII) {
  using namespace Hexagon;

  const MCInstrDesc &D = HII.get(Opc);
  if (D.mayStore()) {
    // By convention the stored value is the last operand of a store.
    if (OpN == D.getNumOperands()-1)
      return getUsedBitsInStore(Opc, Bits, Begin);
    return false;
  }

  switch (Opc) {
    // One register source. Used bits: R1[0-7].
    case A2_sxtb:
    case A2_zxtb:
    case A4_cmpbeqi:
    case A4_cmpbgti:
    case A4_cmpbgtui:
      if (OpN == 1) {
        Bits.set(Begin, Begin+8);
        return true;
      }
      break;

    // One register source. Used bits: R1[0-15].
    case A2_aslh:
    case A2_sxth:
    case A2_zxth:
    case A4_cmpheqi:
    case A4_cmphgti:
    case A4_cmphgtui:
      if (OpN == 1) {
        Bits.set(Begin, Begin+16);
        return true;
      }
      break;

    // One register source. Used bits: R1[16-31].
    case A2_asrh:
      if (OpN == 1) {
        Bits.set(Begin+16, Begin+32);
        return true;
      }
      break;

    // Two register sources. Used bits: R1[0-7], R2[0-7].
    case A4_cmpbeq:
    case A4_cmpbgt:
    case A4_cmpbgtu:
      // Only OpN == 1 is handled; for OpN == 2 this falls through to
      // return false, which is conservative (all bits assumed used).
      if (OpN == 1) {
        Bits.set(Begin, Begin+8);
        return true;
      }
      break;

    // Two register sources. Used bits: R1[0-15], R2[0-15].
    case A4_cmpheq:
    case A4_cmphgt:
    case A4_cmphgtu:
    case A2_addh_h16_ll:
    case A2_addh_h16_sat_ll:
    case A2_addh_l16_ll:
    case A2_addh_l16_sat_ll:
    case A2_combine_ll:
    case A2_subh_h16_ll:
    case A2_subh_h16_sat_ll:
    case A2_subh_l16_ll:
    case A2_subh_l16_sat_ll:
    case M2_mpy_acc_ll_s0:
    case M2_mpy_acc_ll_s1:
    case M2_mpy_acc_sat_ll_s0:
    case M2_mpy_acc_sat_ll_s1:
    case M2_mpy_ll_s0:
    case M2_mpy_ll_s1:
    case M2_mpy_nac_ll_s0:
    case M2_mpy_nac_ll_s1:
    case M2_mpy_nac_sat_ll_s0:
    case M2_mpy_nac_sat_ll_s1:
    case M2_mpy_rnd_ll_s0:
    case M2_mpy_rnd_ll_s1:
    case M2_mpy_sat_ll_s0:
    case M2_mpy_sat_ll_s1:
    case M2_mpy_sat_rnd_ll_s0:
    case M2_mpy_sat_rnd_ll_s1:
    case M2_mpyd_acc_ll_s0:
    case M2_mpyd_acc_ll_s1:
    case M2_mpyd_ll_s0:
    case M2_mpyd_ll_s1:
    case M2_mpyd_nac_ll_s0:
    case M2_mpyd_nac_ll_s1:
    case M2_mpyd_rnd_ll_s0:
    case M2_mpyd_rnd_ll_s1:
    case M2_mpyu_acc_ll_s0:
    case M2_mpyu_acc_ll_s1:
    case M2_mpyu_ll_s0:
    case M2_mpyu_ll_s1:
    case M2_mpyu_nac_ll_s0:
    case M2_mpyu_nac_ll_s1:
    case M2_mpyud_acc_ll_s0:
    case M2_mpyud_acc_ll_s1:
    case M2_mpyud_ll_s0:
    case M2_mpyud_ll_s1:
    case M2_mpyud_nac_ll_s0:
    case M2_mpyud_nac_ll_s1:
      if (OpN == 1 || OpN == 2) {
        Bits.set(Begin, Begin+16);
        return true;
      }
      break;

    // Two register sources. Used bits: R1[0-15], R2[16-31].
    case A2_addh_h16_lh:
    case A2_addh_h16_sat_lh:
    case A2_combine_lh:
    case A2_subh_h16_lh:
    case A2_subh_h16_sat_lh:
    case M2_mpy_acc_lh_s0:
    case M2_mpy_acc_lh_s1:
    case M2_mpy_acc_sat_lh_s0:
    case M2_mpy_acc_sat_lh_s1:
    case M2_mpy_lh_s0:
    case M2_mpy_lh_s1:
    case M2_mpy_nac_lh_s0:
    case M2_mpy_nac_lh_s1:
    case M2_mpy_nac_sat_lh_s0:
    case M2_mpy_nac_sat_lh_s1:
    case M2_mpy_rnd_lh_s0:
    case M2_mpy_rnd_lh_s1:
    case M2_mpy_sat_lh_s0:
    case M2_mpy_sat_lh_s1:
    case M2_mpy_sat_rnd_lh_s0:
    case M2_mpy_sat_rnd_lh_s1:
    case M2_mpyd_acc_lh_s0:
    case M2_mpyd_acc_lh_s1:
    case M2_mpyd_lh_s0:
    case M2_mpyd_lh_s1:
    case M2_mpyd_nac_lh_s0:
    case M2_mpyd_nac_lh_s1:
    case M2_mpyd_rnd_lh_s0:
    case M2_mpyd_rnd_lh_s1:
    case M2_mpyu_acc_lh_s0:
    case M2_mpyu_acc_lh_s1:
    case M2_mpyu_lh_s0:
    case M2_mpyu_lh_s1:
    case M2_mpyu_nac_lh_s0:
    case M2_mpyu_nac_lh_s1:
    case M2_mpyud_acc_lh_s0:
    case M2_mpyud_acc_lh_s1:
    case M2_mpyud_lh_s0:
    case M2_mpyud_lh_s1:
    case M2_mpyud_nac_lh_s0:
    case M2_mpyud_nac_lh_s1:
    // These four are actually LH.
    case A2_addh_l16_hl:
    case A2_addh_l16_sat_hl:
    case A2_subh_l16_hl:
    case A2_subh_l16_sat_hl:
      if (OpN == 1) {
        Bits.set(Begin, Begin+16);
        return true;
      }
      if (OpN == 2) {
        Bits.set(Begin+16, Begin+32);
        return true;
      }
      break;

    // Two register sources, used bits: R1[16-31], R2[0-15].
    case A2_addh_h16_hl:
    case A2_addh_h16_sat_hl:
    case A2_combine_hl:
    case A2_subh_h16_hl:
    case A2_subh_h16_sat_hl:
    case M2_mpy_acc_hl_s0:
    case M2_mpy_acc_hl_s1:
    case M2_mpy_acc_sat_hl_s0:
    case M2_mpy_acc_sat_hl_s1:
    case M2_mpy_hl_s0:
    case M2_mpy_hl_s1:
    case M2_mpy_nac_hl_s0:
    case M2_mpy_nac_hl_s1:
    case M2_mpy_nac_sat_hl_s0:
    case M2_mpy_nac_sat_hl_s1:
    case M2_mpy_rnd_hl_s0:
    case M2_mpy_rnd_hl_s1:
    case M2_mpy_sat_hl_s0:
    case M2_mpy_sat_hl_s1:
    case M2_mpy_sat_rnd_hl_s0:
    case M2_mpy_sat_rnd_hl_s1:
    case M2_mpyd_acc_hl_s0:
    case M2_mpyd_acc_hl_s1:
    case M2_mpyd_hl_s0:
    case M2_mpyd_hl_s1:
    case M2_mpyd_nac_hl_s0:
    case M2_mpyd_nac_hl_s1:
    case M2_mpyd_rnd_hl_s0:
    case M2_mpyd_rnd_hl_s1:
    case M2_mpyu_acc_hl_s0:
    case M2_mpyu_acc_hl_s1:
    case M2_mpyu_hl_s0:
    case M2_mpyu_hl_s1:
    case M2_mpyu_nac_hl_s0:
    case M2_mpyu_nac_hl_s1:
    case M2_mpyud_acc_hl_s0:
    case M2_mpyud_acc_hl_s1:
    case M2_mpyud_hl_s0:
    case M2_mpyud_hl_s1:
    case M2_mpyud_nac_hl_s0:
    case M2_mpyud_nac_hl_s1:
      if (OpN == 1) {
        Bits.set(Begin+16, Begin+32);
        return true;
      }
      if (OpN == 2) {
        Bits.set(Begin, Begin+16);
        return true;
      }
      break;

    // Two register sources, used bits: R1[16-31], R2[16-31].
    case A2_addh_h16_hh:
    case A2_addh_h16_sat_hh:
    case A2_combine_hh:
    case A2_subh_h16_hh:
    case A2_subh_h16_sat_hh:
    case M2_mpy_acc_hh_s0:
    case M2_mpy_acc_hh_s1:
    case M2_mpy_acc_sat_hh_s0:
    case M2_mpy_acc_sat_hh_s1:
    case M2_mpy_hh_s0:
    case M2_mpy_hh_s1:
    case M2_mpy_nac_hh_s0:
    case M2_mpy_nac_hh_s1:
    case M2_mpy_nac_sat_hh_s0:
    case M2_mpy_nac_sat_hh_s1:
    case M2_mpy_rnd_hh_s0:
    case M2_mpy_rnd_hh_s1:
    case M2_mpy_sat_hh_s0:
    case M2_mpy_sat_hh_s1:
    case M2_mpy_sat_rnd_hh_s0:
    case M2_mpy_sat_rnd_hh_s1:
    case M2_mpyd_acc_hh_s0:
    case M2_mpyd_acc_hh_s1:
    case M2_mpyd_hh_s0:
    case M2_mpyd_hh_s1:
    case M2_mpyd_nac_hh_s0:
    case M2_mpyd_nac_hh_s1:
    case M2_mpyd_rnd_hh_s0:
    case M2_mpyd_rnd_hh_s1:
    case M2_mpyu_acc_hh_s0:
    case M2_mpyu_acc_hh_s1:
    case M2_mpyu_hh_s0:
    case M2_mpyu_hh_s1:
    case M2_mpyu_nac_hh_s0:
    case M2_mpyu_nac_hh_s1:
    case M2_mpyud_acc_hh_s0:
    case M2_mpyud_acc_hh_s1:
    case M2_mpyud_hh_s0:
    case M2_mpyud_hh_s1:
    case M2_mpyud_nac_hh_s0:
    case M2_mpyud_nac_hh_s1:
      if (OpN == 1 || OpN == 2) {
        Bits.set(Begin+16, Begin+32);
        return true;
      }
      break;
  }

  return false;
}


// Calculate the register class that matches Reg:Sub. For example, if
// vreg1 is a double register, then vreg1:subreg_hireg would match "int"
// register class.
const TargetRegisterClass *HexagonBitSimplify::getFinalVRegClass(
      const BitTracker::RegisterRef &RR, MachineRegisterInfo &MRI) {
  if (!TargetRegisterInfo::isVirtualRegister(RR.Reg))
    return nullptr;
  auto *RC = MRI.getRegClass(RR.Reg);
  // No subregister: the register's own class is the answer.
  if (RR.Sub == 0)
    return RC;

  auto VerifySR = [] (unsigned Sub) -> void {
    assert(Sub == Hexagon::subreg_hireg || Sub == Hexagon::subreg_loreg);
  };

  // Map each "double" class to the class of its lo/hi halves.
  switch (RC->getID()) {
    case Hexagon::DoubleRegsRegClassID:
      VerifySR(RR.Sub);
      return &Hexagon::IntRegsRegClass;
    case Hexagon::VecDblRegsRegClassID:
      VerifySR(RR.Sub);
      return &Hexagon::VectorRegsRegClass;
    case Hexagon::VecDblRegs128BRegClassID:
      VerifySR(RR.Sub);
      return &Hexagon::VectorRegs128BRegClass;
  }
  // Unknown class with a subregister: no match.
  return nullptr;
}


// Check if RD could be replaced with RS at any possible use of RD.
// For example a predicate register cannot be replaced with a integer
// register, but a 64-bit register with a subregister can be replaced
// with a 32-bit register.
bool HexagonBitSimplify::isTransparentCopy(const BitTracker::RegisterRef &RD,
      const BitTracker::RegisterRef &RS, MachineRegisterInfo &MRI) {
  if (!TargetRegisterInfo::isVirtualRegister(RD.Reg) ||
      !TargetRegisterInfo::isVirtualRegister(RS.Reg))
    return false;
  // Return false if one (or both) classes are nullptr.
  auto *DRC = getFinalVRegClass(RD, MRI);
  if (!DRC)
    return false;

  // The copy is transparent exactly when both sides resolve to the same
  // effective register class (a null RS class also compares unequal).
  return DRC == getFinalVRegClass(RS, MRI);
}


//
// Dead code elimination
//
namespace {
  // Removes trivially dead instructions bottom-up over the dominator tree.
  // Unlike the generic DCE, it deliberately skips lifetime markers.
  class DeadCodeElimination {
  public:
    DeadCodeElimination(MachineFunction &mf, MachineDominatorTree &mdt)
      : MF(mf), HII(*MF.getSubtarget<HexagonSubtarget>().getInstrInfo()),
        MDT(mdt), MRI(mf.getRegInfo()) {}

    bool run() {
      return runOnNode(MDT.getRootNode());
    }

  private:
    bool isDead(unsigned R) const;
    bool runOnNode(MachineDomTreeNode *N);

    MachineFunction &MF;
    const HexagonInstrInfo &HII;
    MachineDominatorTree &MDT;
    MachineRegisterInfo &MRI;
  };
}


// A register is dead if it has no uses other than debug values and PHIs
// that only feed the register back into itself.
bool DeadCodeElimination::isDead(unsigned R) const {
  for (auto I = MRI.use_begin(R), E = MRI.use_end(); I != E; ++I) {
    MachineInstr *UseI = I->getParent();
    if (UseI->isDebugValue())
      continue;
    if (UseI->isPHI()) {
      assert(!UseI->getOperand(0).getSubReg());
      unsigned DR = UseI->getOperand(0).getReg();
      // A PHI defining R itself does not keep R alive.
      if (DR == R)
        continue;
    }
    return false;
  }
  return true;
}


// Process the blocks dominated by N first (children before parent), then
// delete every instruction in N's block whose defs are all dead.
bool DeadCodeElimination::runOnNode(MachineDomTreeNode *N) {
  bool Changed = false;
  typedef GraphTraits<MachineDomTreeNode*> GTN;
  for (auto I = GTN::child_begin(N), E = GTN::child_end(N); I != E; ++I)
    Changed |= runOnNode(*I);

  MachineBasicBlock *B = N->getBlock();
  // Snapshot the instructions in reverse order so erasing while visiting
  // is safe, and defs are seen after their uses within the block.
  std::vector<MachineInstr*> Instrs;
  for (auto I = B->rbegin(), E = B->rend(); I != E; ++I)
    Instrs.push_back(&*I);

  for (auto MI : Instrs) {
    unsigned Opc = MI->getOpcode();
    // Do not touch lifetime markers. This is why the target-independent DCE
    // cannot be used.
    if (Opc == TargetOpcode::LIFETIME_START ||
        Opc == TargetOpcode::LIFETIME_END)
      continue;
    bool Store = false;
    if (MI->isInlineAsm())
      continue;
    // Delete PHIs if possible.
    if (!MI->isPHI() && !MI->isSafeToMove(nullptr, Store))
      continue;

    bool AllDead = true;
    SmallVector<unsigned,2> Regs;
    for (auto &Op : MI->operands()) {
      if (!Op.isReg() || !Op.isDef())
        continue;
      unsigned R = Op.getReg();
      // Any live or physical def keeps the instruction alive.
      if (!TargetRegisterInfo::isVirtualRegister(R) || !isDead(R)) {
        AllDead = false;
        break;
      }
      Regs.push_back(R);
    }
    if (!AllDead)
      continue;

    B->erase(MI);
    // Any DBG_VALUEs referring to the erased defs become undef.
    for (unsigned i = 0, n = Regs.size(); i != n; ++i)
      MRI.markUsesInDebugValueAsUndef(Regs[i]);
    Changed = true;
  }

  return Changed;
}


//
// Eliminate redundant instructions
//
// This transformation will identify instructions where the output register
// is the same as one of its input registers. This only works on instructions
// that define a single register (unlike post-increment loads, for example).
// The equality check is actually more detailed: the code calculates which
// bits of the output are used, and only compares these bits with the input
// registers.
// If the output matches an input, the instruction is replaced with COPY.
// The copies will be removed by another transformation.
namespace {
  // Transformation that replaces an instruction with a COPY when the bits
  // of its output that are actually used are identical to one of its inputs.
  class RedundantInstrElimination : public Transformation {
  public:
    RedundantInstrElimination(BitTracker &bt, const HexagonInstrInfo &hii,
          MachineRegisterInfo &mri)
      : Transformation(true), HII(hii), MRI(mri), BT(bt) {}
    bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
  private:
    // Check if MI is a left/right shift that loses bits of operand OpN;
    // on success, [LostB, LostE) is the range of lost bit indices.
    bool isLossyShiftLeft(const MachineInstr &MI, unsigned OpN,
          unsigned &LostB, unsigned &LostE);
    bool isLossyShiftRight(const MachineInstr &MI, unsigned OpN,
          unsigned &LostB, unsigned &LostE);
    // Compute which bits of Reg (or of operand OpN of MI, offset by Begin)
    // are ever used; return false if this cannot be determined.
    bool computeUsedBits(unsigned Reg, BitVector &Bits);
    bool computeUsedBits(const MachineInstr &MI, unsigned OpN, BitVector &Bits,
          uint16_t Begin);
    // Check if the used bits of RD match the corresponding bits of RS.
    bool usedBitsEqual(BitTracker::RegisterRef RD, BitTracker::RegisterRef RS);

    const HexagonInstrInfo &HII;
    MachineRegisterInfo &MRI;
    BitTracker &BT;
  };
}


// Check if the instruction is a lossy shift left, where the input being
// shifted is the operand OpN of MI. If true, [LostB, LostE) is the range
// of bit indices that are lost.
1039 bool RedundantInstrElimination::isLossyShiftLeft(const MachineInstr &MI, 1040 unsigned OpN, unsigned &LostB, unsigned &LostE) { 1041 using namespace Hexagon; 1042 unsigned Opc = MI.getOpcode(); 1043 unsigned ImN, RegN, Width; 1044 switch (Opc) { 1045 case S2_asl_i_p: 1046 ImN = 2; 1047 RegN = 1; 1048 Width = 64; 1049 break; 1050 case S2_asl_i_p_acc: 1051 case S2_asl_i_p_and: 1052 case S2_asl_i_p_nac: 1053 case S2_asl_i_p_or: 1054 case S2_asl_i_p_xacc: 1055 ImN = 3; 1056 RegN = 2; 1057 Width = 64; 1058 break; 1059 case S2_asl_i_r: 1060 ImN = 2; 1061 RegN = 1; 1062 Width = 32; 1063 break; 1064 case S2_addasl_rrri: 1065 case S4_andi_asl_ri: 1066 case S4_ori_asl_ri: 1067 case S4_addi_asl_ri: 1068 case S4_subi_asl_ri: 1069 case S2_asl_i_r_acc: 1070 case S2_asl_i_r_and: 1071 case S2_asl_i_r_nac: 1072 case S2_asl_i_r_or: 1073 case S2_asl_i_r_sat: 1074 case S2_asl_i_r_xacc: 1075 ImN = 3; 1076 RegN = 2; 1077 Width = 32; 1078 break; 1079 default: 1080 return false; 1081 } 1082 1083 if (RegN != OpN) 1084 return false; 1085 1086 assert(MI.getOperand(ImN).isImm()); 1087 unsigned S = MI.getOperand(ImN).getImm(); 1088 if (S == 0) 1089 return false; 1090 LostB = Width-S; 1091 LostE = Width; 1092 return true; 1093 } 1094 1095 1096 // Check if the instruction is a lossy shift right, where the input being 1097 // shifted is the operand OpN of MI. If true, [LostB, LostE) is the range 1098 // of bit indices that are lost. 
1099 bool RedundantInstrElimination::isLossyShiftRight(const MachineInstr &MI, 1100 unsigned OpN, unsigned &LostB, unsigned &LostE) { 1101 using namespace Hexagon; 1102 unsigned Opc = MI.getOpcode(); 1103 unsigned ImN, RegN; 1104 switch (Opc) { 1105 case S2_asr_i_p: 1106 case S2_lsr_i_p: 1107 ImN = 2; 1108 RegN = 1; 1109 break; 1110 case S2_asr_i_p_acc: 1111 case S2_asr_i_p_and: 1112 case S2_asr_i_p_nac: 1113 case S2_asr_i_p_or: 1114 case S2_lsr_i_p_acc: 1115 case S2_lsr_i_p_and: 1116 case S2_lsr_i_p_nac: 1117 case S2_lsr_i_p_or: 1118 case S2_lsr_i_p_xacc: 1119 ImN = 3; 1120 RegN = 2; 1121 break; 1122 case S2_asr_i_r: 1123 case S2_lsr_i_r: 1124 ImN = 2; 1125 RegN = 1; 1126 break; 1127 case S4_andi_lsr_ri: 1128 case S4_ori_lsr_ri: 1129 case S4_addi_lsr_ri: 1130 case S4_subi_lsr_ri: 1131 case S2_asr_i_r_acc: 1132 case S2_asr_i_r_and: 1133 case S2_asr_i_r_nac: 1134 case S2_asr_i_r_or: 1135 case S2_lsr_i_r_acc: 1136 case S2_lsr_i_r_and: 1137 case S2_lsr_i_r_nac: 1138 case S2_lsr_i_r_or: 1139 case S2_lsr_i_r_xacc: 1140 ImN = 3; 1141 RegN = 2; 1142 break; 1143 1144 default: 1145 return false; 1146 } 1147 1148 if (RegN != OpN) 1149 return false; 1150 1151 assert(MI.getOperand(ImN).isImm()); 1152 unsigned S = MI.getOperand(ImN).getImm(); 1153 LostB = 0; 1154 LostE = S; 1155 return true; 1156 } 1157 1158 1159 // Calculate the bit vector that corresponds to the used bits of register Reg. 1160 // The vector Bits has the same size, as the size of Reg in bits. If the cal- 1161 // culation fails (i.e. the used bits are unknown), it returns false. Other- 1162 // wise, it returns true and sets the corresponding bits in Bits. 
bool RedundantInstrElimination::computeUsedBits(unsigned Reg, BitVector &Bits) {
  BitVector Used(Bits.size());
  RegisterSet Visited;
  std::vector<unsigned> Pending;
  Pending.push_back(Reg);

  // Worklist traversal: a bit of Reg is used if any register whose value
  // flows from Reg (through PHIs/COPYs) has that bit used.
  for (unsigned i = 0; i < Pending.size(); ++i) {
    unsigned R = Pending[i];
    if (Visited.has(R))
      continue;
    Visited.insert(R);
    for (auto I = MRI.use_begin(R), E = MRI.use_end(); I != E; ++I) {
      BitTracker::RegisterRef UR = *I;
      // B is the bit offset of the used (sub-)register within the full
      // register, W is its width.
      unsigned B, W;
      if (!HBS::getSubregMask(UR, B, W, MRI))
        return false;
      MachineInstr &UseI = *I->getParent();
      if (UseI.isPHI() || UseI.isCopy()) {
        // Value flows through unchanged: follow the destination register.
        unsigned DefR = UseI.getOperand(0).getReg();
        if (!TargetRegisterInfo::isVirtualRegister(DefR))
          return false;
        Pending.push_back(DefR);
      } else {
        // Accumulate the bits this use actually consumes; fail if unknown.
        if (!computeUsedBits(UseI, I.getOperandNo(), Used, B))
          return false;
      }
    }
  }
  // Only merge into the output after the whole traversal succeeded.
  Bits |= Used;
  return true;
}


// Calculate the bits used by instruction MI in a register in operand OpN.
// Return true/false if the calculation succeeds/fails. If is succeeds, set
// used bits in Bits. This function does not reset any bits in Bits, so
// subsequent calls over different instructions will result in the union
// of the used bits in all these instructions.
// The register in question may be used with a sub-register, whereas Bits
// holds the bits for the entire register. To keep track of that, the
// argument Begin indicates where in Bits is the lowest-significant bit
// of the register used in operand OpN. For example, in instruction:
//   vreg1 = S2_lsr_i_r vreg2:subreg_hireg, 10
// the operand 1 is a 32-bit register, which happens to be a subregister
// of the 64-bit register vreg2, and that subregister starts at position 32.
// In this case Begin=32, since Bits[32] would be the lowest-significant bit
// of vreg2:subreg_hireg.
bool RedundantInstrElimination::computeUsedBits(const MachineInstr &MI,
      unsigned OpN, BitVector &Bits, uint16_t Begin) {
  unsigned Opc = MI.getOpcode();
  BitVector T(Bits.size());
  bool GotBits = HBS::getUsedBits(Opc, OpN, T, Begin, HII);
  // Even if we don't have bits yet, we could still provide some information
  // if the instruction is a lossy shift: the lost bits will be marked as
  // not used.
  unsigned LB, LE;
  if (isLossyShiftLeft(MI, OpN, LB, LE) || isLossyShiftRight(MI, OpN, LB, LE)) {
    assert(MI.getOperand(OpN).isReg());
    BitTracker::RegisterRef RR = MI.getOperand(OpN);
    const TargetRegisterClass *RC = HBS::getFinalVRegClass(RR, MRI);
    uint16_t Width = RC->getSize()*8;

    if (!GotBits)
      // Pessimistically assume every bit of the operand is used...
      T.set(Begin, Begin+Width);
    assert(LB <= LE && LB < Width && LE <= Width);
    // ...except the ones that the shift discards.
    T.reset(Begin+LB, Begin+LE);
    GotBits = true;
  }
  if (GotBits)
    Bits |= T;
  return GotBits;
}


// Calculates the used bits in RD ("defined register"), and checks if these
// bits in RS ("used register") and RD are identical.
1239 bool RedundantInstrElimination::usedBitsEqual(BitTracker::RegisterRef RD, 1240 BitTracker::RegisterRef RS) { 1241 const BitTracker::RegisterCell &DC = BT.lookup(RD.Reg); 1242 const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg); 1243 1244 unsigned DB, DW; 1245 if (!HBS::getSubregMask(RD, DB, DW, MRI)) 1246 return false; 1247 unsigned SB, SW; 1248 if (!HBS::getSubregMask(RS, SB, SW, MRI)) 1249 return false; 1250 if (SW != DW) 1251 return false; 1252 1253 BitVector Used(DC.width()); 1254 if (!computeUsedBits(RD.Reg, Used)) 1255 return false; 1256 1257 for (unsigned i = 0; i != DW; ++i) 1258 if (Used[i+DB] && DC[DB+i] != SC[SB+i]) 1259 return false; 1260 return true; 1261 } 1262 1263 1264 bool RedundantInstrElimination::processBlock(MachineBasicBlock &B, 1265 const RegisterSet&) { 1266 bool Changed = false; 1267 1268 for (auto I = B.begin(), E = B.end(), NextI = I; I != E; ++I) { 1269 NextI = std::next(I); 1270 MachineInstr *MI = &*I; 1271 1272 if (MI->getOpcode() == TargetOpcode::COPY) 1273 continue; 1274 if (MI->hasUnmodeledSideEffects() || MI->isInlineAsm()) 1275 continue; 1276 unsigned NumD = MI->getDesc().getNumDefs(); 1277 if (NumD != 1) 1278 continue; 1279 1280 BitTracker::RegisterRef RD = MI->getOperand(0); 1281 if (!BT.has(RD.Reg)) 1282 continue; 1283 const BitTracker::RegisterCell &DC = BT.lookup(RD.Reg); 1284 auto At = MI->isPHI() ? B.getFirstNonPHI() 1285 : MachineBasicBlock::iterator(MI); 1286 1287 // Find a source operand that is equal to the result. 
1288 for (auto &Op : MI->uses()) { 1289 if (!Op.isReg()) 1290 continue; 1291 BitTracker::RegisterRef RS = Op; 1292 if (!BT.has(RS.Reg)) 1293 continue; 1294 if (!HBS::isTransparentCopy(RD, RS, MRI)) 1295 continue; 1296 1297 unsigned BN, BW; 1298 if (!HBS::getSubregMask(RS, BN, BW, MRI)) 1299 continue; 1300 1301 const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg); 1302 if (!usedBitsEqual(RD, RS) && !HBS::isEqual(DC, 0, SC, BN, BW)) 1303 continue; 1304 1305 // If found, replace the instruction with a COPY. 1306 DebugLoc DL = MI->getDebugLoc(); 1307 const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI); 1308 unsigned NewR = MRI.createVirtualRegister(FRC); 1309 BuildMI(B, At, DL, HII.get(TargetOpcode::COPY), NewR) 1310 .addReg(RS.Reg, 0, RS.Sub); 1311 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI); 1312 BT.put(BitTracker::RegisterRef(NewR), SC); 1313 Changed = true; 1314 break; 1315 } 1316 } 1317 1318 return Changed; 1319 } 1320 1321 1322 // 1323 // Const generation 1324 // 1325 // Recognize instructions that produce constant values known at compile-time. 1326 // Replace them with register definitions that load these constants directly. 
1327 namespace { 1328 class ConstGeneration : public Transformation { 1329 public: 1330 ConstGeneration(BitTracker &bt, const HexagonInstrInfo &hii, 1331 MachineRegisterInfo &mri) 1332 : Transformation(true), HII(hii), MRI(mri), BT(bt) {} 1333 bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override; 1334 private: 1335 bool isTfrConst(const MachineInstr *MI) const; 1336 bool isConst(unsigned R, int64_t &V) const; 1337 unsigned genTfrConst(const TargetRegisterClass *RC, int64_t C, 1338 MachineBasicBlock &B, MachineBasicBlock::iterator At, DebugLoc &DL); 1339 1340 const HexagonInstrInfo &HII; 1341 MachineRegisterInfo &MRI; 1342 BitTracker &BT; 1343 }; 1344 } 1345 1346 bool ConstGeneration::isConst(unsigned R, int64_t &C) const { 1347 if (!BT.has(R)) 1348 return false; 1349 const BitTracker::RegisterCell &RC = BT.lookup(R); 1350 int64_t T = 0; 1351 for (unsigned i = RC.width(); i > 0; --i) { 1352 const BitTracker::BitValue &V = RC[i-1]; 1353 T <<= 1; 1354 if (V.is(1)) 1355 T |= 1; 1356 else if (!V.is(0)) 1357 return false; 1358 } 1359 C = T; 1360 return true; 1361 } 1362 1363 1364 bool ConstGeneration::isTfrConst(const MachineInstr *MI) const { 1365 unsigned Opc = MI->getOpcode(); 1366 switch (Opc) { 1367 case Hexagon::A2_combineii: 1368 case Hexagon::A4_combineii: 1369 case Hexagon::A2_tfrsi: 1370 case Hexagon::A2_tfrpi: 1371 case Hexagon::TFR_PdTrue: 1372 case Hexagon::TFR_PdFalse: 1373 case Hexagon::CONST32_Int_Real: 1374 case Hexagon::CONST64_Int_Real: 1375 return true; 1376 } 1377 return false; 1378 } 1379 1380 1381 // Generate a transfer-immediate instruction that is appropriate for the 1382 // register class and the actual value being transferred. 
unsigned ConstGeneration::genTfrConst(const TargetRegisterClass *RC, int64_t C,
      MachineBasicBlock &B, MachineBasicBlock::iterator At, DebugLoc &DL) {
  unsigned Reg = MRI.createVirtualRegister(RC);
  // 32-bit integer register: a single transfer-immediate suffices.
  if (RC == &Hexagon::IntRegsRegClass) {
    BuildMI(B, At, DL, HII.get(Hexagon::A2_tfrsi), Reg)
        .addImm(int32_t(C));
    return Reg;
  }

  if (RC == &Hexagon::DoubleRegsRegClass) {
    // Small values fit in a single 64-bit transfer-immediate.
    if (isInt<8>(C)) {
      BuildMI(B, At, DL, HII.get(Hexagon::A2_tfrpi), Reg)
          .addImm(C);
      return Reg;
    }

    // Otherwise try to combine the two 32-bit halves; at least one of
    // them must be small enough to avoid a constant extender.
    unsigned Lo = Lo_32(C), Hi = Hi_32(C);
    if (isInt<8>(Lo) || isInt<8>(Hi)) {
      unsigned Opc = isInt<8>(Lo) ? Hexagon::A2_combineii
                                  : Hexagon::A4_combineii;
      BuildMI(B, At, DL, HII.get(Opc), Reg)
          .addImm(int32_t(Hi))
          .addImm(int32_t(Lo));
      return Reg;
    }

    // Fall back to the pseudo that materializes an arbitrary 64-bit value.
    BuildMI(B, At, DL, HII.get(Hexagon::CONST64_Int_Real), Reg)
        .addImm(C);
    return Reg;
  }

  if (RC == &Hexagon::PredRegsRegClass) {
    // Predicate registers can only be set to all-false or all-true.
    unsigned Opc;
    if (C == 0)
      Opc = Hexagon::TFR_PdFalse;
    else if ((C & 0xFF) == 0xFF)
      Opc = Hexagon::TFR_PdTrue;
    else
      return 0;
    BuildMI(B, At, DL, HII.get(Opc), Reg);
    return Reg;
  }

  // Unsupported register class.
  return 0;
}


bool ConstGeneration::processBlock(MachineBasicBlock &B, const RegisterSet&) {
  bool Changed = false;
  RegisterSet Defs;

  for (auto I = B.begin(), E = B.end(); I != E; ++I) {
    // Skip instructions that are already transfer-immediates.
    if (isTfrConst(I))
      continue;
    Defs.clear();
    HBS::getInstrDefs(*I, Defs);
    // Only handle instructions defining exactly one register.
    if (Defs.count() != 1)
      continue;
    unsigned DR = Defs.find_first();
    if (!TargetRegisterInfo::isVirtualRegister(DR))
      continue;
    int64_t C;
    if (isConst(DR, C)) {
      DebugLoc DL = I->getDebugLoc();
      // Non-PHI instructions must be inserted after all PHIs in the block.
      auto At = I->isPHI() ? B.getFirstNonPHI() : I;
      unsigned ImmReg = genTfrConst(MRI.getRegClass(DR), C, B, At, DL);
      if (ImmReg) {
        HBS::replaceReg(DR, ImmReg, MRI);
        BT.put(ImmReg, BT.lookup(DR));
        Changed = true;
      }
    }
  }
  return Changed;
}


//
// Copy generation
//
// Identify pairs of available registers which hold identical values.
// In such cases, only one of them needs to be calculated, the other one
// will be defined as a copy of the first.
//
// Copy propagation
//
// Eliminate register copies RD = RS, by replacing the uses of RD with
// uses of RS.
namespace {
  // Inserts COPY instructions from already-available registers that hold
  // the same value as a newly defined register.
  class CopyGeneration : public Transformation {
  public:
    CopyGeneration(BitTracker &bt, const HexagonInstrInfo &hii,
          MachineRegisterInfo &mri)
      : Transformation(true), HII(hii), MRI(mri), BT(bt) {}
    bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
  private:
    // Find a register in AVs identical to Inp; on success set Out.
    bool findMatch(const BitTracker::RegisterRef &Inp,
        BitTracker::RegisterRef &Out, const RegisterSet &AVs);

    const HexagonInstrInfo &HII;
    MachineRegisterInfo &MRI;
    BitTracker &BT;
  };

  // Forwards the sources of register copies (and copy-like instructions)
  // directly to the users of the copied registers.
  class CopyPropagation : public Transformation {
  public:
    CopyPropagation(const HexagonRegisterInfo &hri, MachineRegisterInfo &mri)
      : Transformation(false), MRI(mri) {}
    bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
    static bool isCopyReg(unsigned Opc);
  private:
    bool propagateRegCopy(MachineInstr &MI);

    MachineRegisterInfo &MRI;
  };

}


/// Check if there is a register in AVs that is identical to Inp. If so,
/// set Out to the found register. The output may be a pair Reg:Sub.
1504 bool CopyGeneration::findMatch(const BitTracker::RegisterRef &Inp, 1505 BitTracker::RegisterRef &Out, const RegisterSet &AVs) { 1506 if (!BT.has(Inp.Reg)) 1507 return false; 1508 const BitTracker::RegisterCell &InpRC = BT.lookup(Inp.Reg); 1509 unsigned B, W; 1510 if (!HBS::getSubregMask(Inp, B, W, MRI)) 1511 return false; 1512 1513 for (unsigned R = AVs.find_first(); R; R = AVs.find_next(R)) { 1514 if (!BT.has(R) || !HBS::isTransparentCopy(R, Inp, MRI)) 1515 continue; 1516 const BitTracker::RegisterCell &RC = BT.lookup(R); 1517 unsigned RW = RC.width(); 1518 if (W == RW) { 1519 if (MRI.getRegClass(Inp.Reg) != MRI.getRegClass(R)) 1520 continue; 1521 if (!HBS::isEqual(InpRC, B, RC, 0, W)) 1522 continue; 1523 Out.Reg = R; 1524 Out.Sub = 0; 1525 return true; 1526 } 1527 // Check if there is a super-register, whose part (with a subregister) 1528 // is equal to the input. 1529 // Only do double registers for now. 1530 if (W*2 != RW) 1531 continue; 1532 if (MRI.getRegClass(R) != &Hexagon::DoubleRegsRegClass) 1533 continue; 1534 1535 if (HBS::isEqual(InpRC, B, RC, 0, W)) 1536 Out.Sub = Hexagon::subreg_loreg; 1537 else if (HBS::isEqual(InpRC, B, RC, W, W)) 1538 Out.Sub = Hexagon::subreg_hireg; 1539 else 1540 continue; 1541 Out.Reg = R; 1542 return true; 1543 } 1544 return false; 1545 } 1546 1547 1548 bool CopyGeneration::processBlock(MachineBasicBlock &B, 1549 const RegisterSet &AVs) { 1550 RegisterSet AVB(AVs); 1551 bool Changed = false; 1552 RegisterSet Defs; 1553 1554 for (auto I = B.begin(), E = B.end(), NextI = I; I != E; 1555 ++I, AVB.insert(Defs)) { 1556 NextI = std::next(I); 1557 Defs.clear(); 1558 HBS::getInstrDefs(*I, Defs); 1559 1560 unsigned Opc = I->getOpcode(); 1561 if (CopyPropagation::isCopyReg(Opc)) 1562 continue; 1563 1564 for (unsigned R = Defs.find_first(); R; R = Defs.find_next(R)) { 1565 BitTracker::RegisterRef MR; 1566 if (!findMatch(R, MR, AVB)) 1567 continue; 1568 DebugLoc DL = I->getDebugLoc(); 1569 auto *FRC = HBS::getFinalVRegClass(MR, MRI); 
1570 unsigned NewR = MRI.createVirtualRegister(FRC); 1571 auto At = I->isPHI() ? B.getFirstNonPHI() : I; 1572 BuildMI(B, At, DL, HII.get(TargetOpcode::COPY), NewR) 1573 .addReg(MR.Reg, 0, MR.Sub); 1574 BT.put(BitTracker::RegisterRef(NewR), BT.get(MR)); 1575 } 1576 } 1577 1578 return Changed; 1579 } 1580 1581 1582 bool CopyPropagation::isCopyReg(unsigned Opc) { 1583 switch (Opc) { 1584 case TargetOpcode::COPY: 1585 case TargetOpcode::REG_SEQUENCE: 1586 case Hexagon::A2_tfr: 1587 case Hexagon::A2_tfrp: 1588 case Hexagon::A2_combinew: 1589 case Hexagon::A4_combineir: 1590 case Hexagon::A4_combineri: 1591 return true; 1592 default: 1593 break; 1594 } 1595 return false; 1596 } 1597 1598 1599 bool CopyPropagation::propagateRegCopy(MachineInstr &MI) { 1600 bool Changed = false; 1601 unsigned Opc = MI.getOpcode(); 1602 BitTracker::RegisterRef RD = MI.getOperand(0); 1603 assert(MI.getOperand(0).getSubReg() == 0); 1604 1605 switch (Opc) { 1606 case TargetOpcode::COPY: 1607 case Hexagon::A2_tfr: 1608 case Hexagon::A2_tfrp: { 1609 BitTracker::RegisterRef RS = MI.getOperand(1); 1610 if (!HBS::isTransparentCopy(RD, RS, MRI)) 1611 break; 1612 if (RS.Sub != 0) 1613 Changed = HBS::replaceRegWithSub(RD.Reg, RS.Reg, RS.Sub, MRI); 1614 else 1615 Changed = HBS::replaceReg(RD.Reg, RS.Reg, MRI); 1616 break; 1617 } 1618 case TargetOpcode::REG_SEQUENCE: { 1619 BitTracker::RegisterRef SL, SH; 1620 if (HBS::parseRegSequence(MI, SL, SH)) { 1621 Changed = HBS::replaceSubWithSub(RD.Reg, Hexagon::subreg_loreg, 1622 SL.Reg, SL.Sub, MRI); 1623 Changed |= HBS::replaceSubWithSub(RD.Reg, Hexagon::subreg_hireg, 1624 SH.Reg, SH.Sub, MRI); 1625 } 1626 break; 1627 } 1628 case Hexagon::A2_combinew: { 1629 BitTracker::RegisterRef RH = MI.getOperand(1), RL = MI.getOperand(2); 1630 Changed = HBS::replaceSubWithSub(RD.Reg, Hexagon::subreg_loreg, 1631 RL.Reg, RL.Sub, MRI); 1632 Changed |= HBS::replaceSubWithSub(RD.Reg, Hexagon::subreg_hireg, 1633 RH.Reg, RH.Sub, MRI); 1634 break; 1635 } 1636 case 
Hexagon::A4_combineir: 1637 case Hexagon::A4_combineri: { 1638 unsigned SrcX = (Opc == Hexagon::A4_combineir) ? 2 : 1; 1639 unsigned Sub = (Opc == Hexagon::A4_combineir) ? Hexagon::subreg_loreg 1640 : Hexagon::subreg_hireg; 1641 BitTracker::RegisterRef RS = MI.getOperand(SrcX); 1642 Changed = HBS::replaceSubWithSub(RD.Reg, Sub, RS.Reg, RS.Sub, MRI); 1643 break; 1644 } 1645 } 1646 return Changed; 1647 } 1648 1649 1650 bool CopyPropagation::processBlock(MachineBasicBlock &B, const RegisterSet&) { 1651 std::vector<MachineInstr*> Instrs; 1652 for (auto I = B.rbegin(), E = B.rend(); I != E; ++I) 1653 Instrs.push_back(&*I); 1654 1655 bool Changed = false; 1656 for (auto I : Instrs) { 1657 unsigned Opc = I->getOpcode(); 1658 if (!CopyPropagation::isCopyReg(Opc)) 1659 continue; 1660 Changed |= propagateRegCopy(*I); 1661 } 1662 1663 return Changed; 1664 } 1665 1666 1667 // 1668 // Bit simplification 1669 // 1670 // Recognize patterns that can be simplified and replace them with the 1671 // simpler forms. 1672 // This is by no means complete 1673 namespace { 1674 class BitSimplification : public Transformation { 1675 public: 1676 BitSimplification(BitTracker &bt, const HexagonInstrInfo &hii, 1677 MachineRegisterInfo &mri) 1678 : Transformation(true), HII(hii), MRI(mri), BT(bt) {} 1679 bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override; 1680 private: 1681 struct RegHalf : public BitTracker::RegisterRef { 1682 bool Low; // Low/High halfword. 
1683 }; 1684 1685 bool matchHalf(unsigned SelfR, const BitTracker::RegisterCell &RC, 1686 unsigned B, RegHalf &RH); 1687 1688 bool matchPackhl(unsigned SelfR, const BitTracker::RegisterCell &RC, 1689 BitTracker::RegisterRef &Rs, BitTracker::RegisterRef &Rt); 1690 unsigned getCombineOpcode(bool HLow, bool LLow); 1691 1692 bool genStoreUpperHalf(MachineInstr *MI); 1693 bool genStoreImmediate(MachineInstr *MI); 1694 bool genPackhl(MachineInstr *MI, BitTracker::RegisterRef RD, 1695 const BitTracker::RegisterCell &RC); 1696 bool genExtractHalf(MachineInstr *MI, BitTracker::RegisterRef RD, 1697 const BitTracker::RegisterCell &RC); 1698 bool genCombineHalf(MachineInstr *MI, BitTracker::RegisterRef RD, 1699 const BitTracker::RegisterCell &RC); 1700 bool genExtractLow(MachineInstr *MI, BitTracker::RegisterRef RD, 1701 const BitTracker::RegisterCell &RC); 1702 bool simplifyTstbit(MachineInstr *MI, BitTracker::RegisterRef RD, 1703 const BitTracker::RegisterCell &RC); 1704 1705 const HexagonInstrInfo &HII; 1706 MachineRegisterInfo &MRI; 1707 BitTracker &BT; 1708 }; 1709 } 1710 1711 1712 // Check if the bits [B..B+16) in register cell RC form a valid halfword, 1713 // i.e. [0..16), [16..32), etc. of some register. If so, return true and 1714 // set the information about the found register in RH. 1715 bool BitSimplification::matchHalf(unsigned SelfR, 1716 const BitTracker::RegisterCell &RC, unsigned B, RegHalf &RH) { 1717 // XXX This could be searching in the set of available registers, in case 1718 // the match is not exact. 1719 1720 // Match 16-bit chunks, where the RC[B..B+15] references exactly one 1721 // register and all the bits B..B+15 match between RC and the register. 1722 // This is meant to match "v1[0-15]", where v1 = { [0]:0 [1-15]:v1... }, 1723 // and RC = { [0]:0 [1-15]:v1[1-15]... }. 
1724 bool Low = false; 1725 unsigned I = B; 1726 while (I < B+16 && RC[I].num()) 1727 I++; 1728 if (I == B+16) 1729 return false; 1730 1731 unsigned Reg = RC[I].RefI.Reg; 1732 unsigned P = RC[I].RefI.Pos; // The RefI.Pos will be advanced by I-B. 1733 if (P < I-B) 1734 return false; 1735 unsigned Pos = P - (I-B); 1736 1737 if (Reg == 0 || Reg == SelfR) // Don't match "self". 1738 return false; 1739 if (!TargetRegisterInfo::isVirtualRegister(Reg)) 1740 return false; 1741 if (!BT.has(Reg)) 1742 return false; 1743 1744 const BitTracker::RegisterCell &SC = BT.lookup(Reg); 1745 if (Pos+16 > SC.width()) 1746 return false; 1747 1748 for (unsigned i = 0; i < 16; ++i) { 1749 const BitTracker::BitValue &RV = RC[i+B]; 1750 if (RV.Type == BitTracker::BitValue::Ref) { 1751 if (RV.RefI.Reg != Reg) 1752 return false; 1753 if (RV.RefI.Pos != i+Pos) 1754 return false; 1755 continue; 1756 } 1757 if (RC[i+B] != SC[i+Pos]) 1758 return false; 1759 } 1760 1761 unsigned Sub = 0; 1762 switch (Pos) { 1763 case 0: 1764 Sub = Hexagon::subreg_loreg; 1765 Low = true; 1766 break; 1767 case 16: 1768 Sub = Hexagon::subreg_loreg; 1769 Low = false; 1770 break; 1771 case 32: 1772 Sub = Hexagon::subreg_hireg; 1773 Low = true; 1774 break; 1775 case 48: 1776 Sub = Hexagon::subreg_hireg; 1777 Low = false; 1778 break; 1779 default: 1780 return false; 1781 } 1782 1783 RH.Reg = Reg; 1784 RH.Sub = Sub; 1785 RH.Low = Low; 1786 // If the subregister is not valid with the register, set it to 0. 1787 if (!HBS::getFinalVRegClass(RH, MRI)) 1788 RH.Sub = 0; 1789 1790 return true; 1791 } 1792 1793 1794 // Check if RC matches the pattern of a S2_packhl. If so, return true and 1795 // set the inputs Rs and Rt. 
1796 bool BitSimplification::matchPackhl(unsigned SelfR, 1797 const BitTracker::RegisterCell &RC, BitTracker::RegisterRef &Rs, 1798 BitTracker::RegisterRef &Rt) { 1799 RegHalf L1, H1, L2, H2; 1800 1801 if (!matchHalf(SelfR, RC, 0, L2) || !matchHalf(SelfR, RC, 16, L1)) 1802 return false; 1803 if (!matchHalf(SelfR, RC, 32, H2) || !matchHalf(SelfR, RC, 48, H1)) 1804 return false; 1805 1806 // Rs = H1.L1, Rt = H2.L2 1807 if (H1.Reg != L1.Reg || H1.Sub != L1.Sub || H1.Low || !L1.Low) 1808 return false; 1809 if (H2.Reg != L2.Reg || H2.Sub != L2.Sub || H2.Low || !L2.Low) 1810 return false; 1811 1812 Rs = H1; 1813 Rt = H2; 1814 return true; 1815 } 1816 1817 1818 unsigned BitSimplification::getCombineOpcode(bool HLow, bool LLow) { 1819 return HLow ? LLow ? Hexagon::A2_combine_ll 1820 : Hexagon::A2_combine_lh 1821 : LLow ? Hexagon::A2_combine_hl 1822 : Hexagon::A2_combine_hh; 1823 } 1824 1825 1826 // If MI stores the upper halfword of a register (potentially obtained via 1827 // shifts or extracts), replace it with a storerf instruction. This could 1828 // cause the "extraction" code to become dead. 1829 bool BitSimplification::genStoreUpperHalf(MachineInstr *MI) { 1830 unsigned Opc = MI->getOpcode(); 1831 if (Opc != Hexagon::S2_storerh_io) 1832 return false; 1833 1834 MachineOperand &ValOp = MI->getOperand(2); 1835 BitTracker::RegisterRef RS = ValOp; 1836 if (!BT.has(RS.Reg)) 1837 return false; 1838 const BitTracker::RegisterCell &RC = BT.lookup(RS.Reg); 1839 RegHalf H; 1840 if (!matchHalf(0, RC, 0, H)) 1841 return false; 1842 if (H.Low) 1843 return false; 1844 MI->setDesc(HII.get(Hexagon::S2_storerf_io)); 1845 ValOp.setReg(H.Reg); 1846 ValOp.setSubReg(H.Sub); 1847 return true; 1848 } 1849 1850 1851 // If MI stores a value known at compile-time, and the value is within a range 1852 // that avoids using constant-extenders, replace it with a store-immediate. 
1853 bool BitSimplification::genStoreImmediate(MachineInstr *MI) { 1854 unsigned Opc = MI->getOpcode(); 1855 unsigned Align = 0; 1856 switch (Opc) { 1857 case Hexagon::S2_storeri_io: 1858 Align++; 1859 case Hexagon::S2_storerh_io: 1860 Align++; 1861 case Hexagon::S2_storerb_io: 1862 break; 1863 default: 1864 return false; 1865 } 1866 1867 // Avoid stores to frame-indices (due to an unknown offset). 1868 if (!MI->getOperand(0).isReg()) 1869 return false; 1870 MachineOperand &OffOp = MI->getOperand(1); 1871 if (!OffOp.isImm()) 1872 return false; 1873 1874 int64_t Off = OffOp.getImm(); 1875 // Offset is u6:a. Sadly, there is no isShiftedUInt(n,x). 1876 if (!isUIntN(6+Align, Off) || (Off & ((1<<Align)-1))) 1877 return false; 1878 // Source register: 1879 BitTracker::RegisterRef RS = MI->getOperand(2); 1880 if (!BT.has(RS.Reg)) 1881 return false; 1882 const BitTracker::RegisterCell &RC = BT.lookup(RS.Reg); 1883 uint64_t U; 1884 if (!HBS::getConst(RC, 0, RC.width(), U)) 1885 return false; 1886 1887 // Only consider 8-bit values to avoid constant-extenders. 1888 int V; 1889 switch (Opc) { 1890 case Hexagon::S2_storerb_io: 1891 V = int8_t(U); 1892 break; 1893 case Hexagon::S2_storerh_io: 1894 V = int16_t(U); 1895 break; 1896 case Hexagon::S2_storeri_io: 1897 V = int32_t(U); 1898 break; 1899 } 1900 if (!isInt<8>(V)) 1901 return false; 1902 1903 MI->RemoveOperand(2); 1904 switch (Opc) { 1905 case Hexagon::S2_storerb_io: 1906 MI->setDesc(HII.get(Hexagon::S4_storeirb_io)); 1907 break; 1908 case Hexagon::S2_storerh_io: 1909 MI->setDesc(HII.get(Hexagon::S4_storeirh_io)); 1910 break; 1911 case Hexagon::S2_storeri_io: 1912 MI->setDesc(HII.get(Hexagon::S4_storeiri_io)); 1913 break; 1914 } 1915 MI->addOperand(MachineOperand::CreateImm(V)); 1916 return true; 1917 } 1918 1919 1920 // If MI is equivalent o S2_packhl, generate the S2_packhl. MI could be the 1921 // last instruction in a sequence that results in something equivalent to 1922 // the pack-halfwords. 
The intent is to cause the entire sequence to become 1923 // dead. 1924 bool BitSimplification::genPackhl(MachineInstr *MI, 1925 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) { 1926 unsigned Opc = MI->getOpcode(); 1927 if (Opc == Hexagon::S2_packhl) 1928 return false; 1929 BitTracker::RegisterRef Rs, Rt; 1930 if (!matchPackhl(RD.Reg, RC, Rs, Rt)) 1931 return false; 1932 1933 MachineBasicBlock &B = *MI->getParent(); 1934 unsigned NewR = MRI.createVirtualRegister(&Hexagon::DoubleRegsRegClass); 1935 DebugLoc DL = MI->getDebugLoc(); 1936 auto At = MI->isPHI() ? B.getFirstNonPHI() 1937 : MachineBasicBlock::iterator(MI); 1938 BuildMI(B, At, DL, HII.get(Hexagon::S2_packhl), NewR) 1939 .addReg(Rs.Reg, 0, Rs.Sub) 1940 .addReg(Rt.Reg, 0, Rt.Sub); 1941 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI); 1942 BT.put(BitTracker::RegisterRef(NewR), RC); 1943 return true; 1944 } 1945 1946 1947 // If MI produces halfword of the input in the low half of the output, 1948 // replace it with zero-extend or extractu. 1949 bool BitSimplification::genExtractHalf(MachineInstr *MI, 1950 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) { 1951 RegHalf L; 1952 // Check for halfword in low 16 bits, zeros elsewhere. 1953 if (!matchHalf(RD.Reg, RC, 0, L) || !HBS::isZero(RC, 16, 16)) 1954 return false; 1955 1956 unsigned Opc = MI->getOpcode(); 1957 MachineBasicBlock &B = *MI->getParent(); 1958 DebugLoc DL = MI->getDebugLoc(); 1959 1960 // Prefer zxth, since zxth can go in any slot, while extractu only in 1961 // slots 2 and 3. 1962 unsigned NewR = 0; 1963 auto At = MI->isPHI() ? 
B.getFirstNonPHI() 1964 : MachineBasicBlock::iterator(MI); 1965 if (L.Low && Opc != Hexagon::A2_zxth) { 1966 NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); 1967 BuildMI(B, At, DL, HII.get(Hexagon::A2_zxth), NewR) 1968 .addReg(L.Reg, 0, L.Sub); 1969 } else if (!L.Low && Opc != Hexagon::S2_lsr_i_r) { 1970 NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); 1971 BuildMI(B, MI, DL, HII.get(Hexagon::S2_lsr_i_r), NewR) 1972 .addReg(L.Reg, 0, L.Sub) 1973 .addImm(16); 1974 } 1975 if (NewR == 0) 1976 return false; 1977 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI); 1978 BT.put(BitTracker::RegisterRef(NewR), RC); 1979 return true; 1980 } 1981 1982 1983 // If MI is equivalent to a combine(.L/.H, .L/.H) replace with with the 1984 // combine. 1985 bool BitSimplification::genCombineHalf(MachineInstr *MI, 1986 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) { 1987 RegHalf L, H; 1988 // Check for combine h/l 1989 if (!matchHalf(RD.Reg, RC, 0, L) || !matchHalf(RD.Reg, RC, 16, H)) 1990 return false; 1991 // Do nothing if this is just a reg copy. 1992 if (L.Reg == H.Reg && L.Sub == H.Sub && !H.Low && L.Low) 1993 return false; 1994 1995 unsigned Opc = MI->getOpcode(); 1996 unsigned COpc = getCombineOpcode(H.Low, L.Low); 1997 if (COpc == Opc) 1998 return false; 1999 2000 MachineBasicBlock &B = *MI->getParent(); 2001 DebugLoc DL = MI->getDebugLoc(); 2002 unsigned NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); 2003 auto At = MI->isPHI() ? B.getFirstNonPHI() 2004 : MachineBasicBlock::iterator(MI); 2005 BuildMI(B, At, DL, HII.get(COpc), NewR) 2006 .addReg(H.Reg, 0, H.Sub) 2007 .addReg(L.Reg, 0, L.Sub); 2008 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI); 2009 BT.put(BitTracker::RegisterRef(NewR), RC); 2010 return true; 2011 } 2012 2013 2014 // If MI resets high bits of a register and keeps the lower ones, replace it 2015 // with zero-extend byte/half, and-immediate, or extractu, as appropriate. 
bool BitSimplification::genExtractLow(MachineInstr *MI,
      BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
  unsigned Opc = MI->getOpcode();
  // These opcodes are already in the desired (minimal) form.
  switch (Opc) {
    case Hexagon::A2_zxtb:
    case Hexagon::A2_zxth:
    case Hexagon::S2_extractu:
      return false;
  }
  // An and-immediate with a short (s10) immediate is already as cheap as
  // anything this transformation could generate.
  if (Opc == Hexagon::A2_andir && MI->getOperand(2).isImm()) {
    int32_t Imm = MI->getOperand(2).getImm();
    if (isInt<10>(Imm))
      return false;
  }

  // The original instruction will be rendered dead by the rewrite; do not
  // remove instructions with side effects this way.
  if (MI->hasUnmodeledSideEffects() || MI->isInlineAsm())
    return false;
  // W = number of significant (non-zero) low bits in the output cell.
  unsigned W = RC.width();
  while (W > 0 && RC[W-1].is(0))
    W--;
  // W == 0: all zeros (not handled here); W == width: no high zeros.
  if (W == 0 || W == RC.width())
    return false;
  // Pick the cheapest instruction that zero-extends the low W bits.
  unsigned NewOpc = (W == 8)  ? Hexagon::A2_zxtb
                  : (W == 16) ? Hexagon::A2_zxth
                  : (W < 10)  ? Hexagon::A2_andir
                  : Hexagon::S2_extractu;
  MachineBasicBlock &B = *MI->getParent();
  DebugLoc DL = MI->getDebugLoc();

  // Look for a source operand whose low W tracked bits equal the low W
  // bits of the output; if found, extract directly from that source.
  for (auto &Op : MI->uses()) {
    if (!Op.isReg())
      continue;
    BitTracker::RegisterRef RS = Op;
    if (!BT.has(RS.Reg))
      continue;
    const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg);
    unsigned BN, BW;
    if (!HBS::getSubregMask(RS, BN, BW, MRI))
      continue;
    if (BW < W || !HBS::isEqual(RC, 0, SC, BN, W))
      continue;

    unsigned NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
    // Do not insert a non-PHI instruction into the PHI sequence.
    auto At = MI->isPHI() ? B.getFirstNonPHI()
                          : MachineBasicBlock::iterator(MI);
    auto MIB = BuildMI(B, At, DL, HII.get(NewOpc), NewR)
                  .addReg(RS.Reg, 0, RS.Sub);
    // andir needs the low-W-bit mask; extractu needs (width, offset).
    if (NewOpc == Hexagon::A2_andir)
      MIB.addImm((1 << W) - 1);
    else if (NewOpc == Hexagon::S2_extractu)
      MIB.addImm(W).addImm(0);
    HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
    BT.put(BitTracker::RegisterRef(NewR), RC);
    return true;
  }
  return false;
}


// Check for tstbit simplification opportunity, where the bit being checked
// can be tracked back to another register.
// For example:
//   vreg2 = S2_lsr_i_r vreg1, 5
//   vreg3 = S2_tstbit_i vreg2, 0
// =>
//   vreg3 = S2_tstbit_i vreg1, 5
bool BitSimplification::simplifyTstbit(MachineInstr *MI,
      BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
  unsigned Opc = MI->getOpcode();
  if (Opc != Hexagon::S2_tstbit_i)
    return false;

  // BN = bit number being tested, RS = register it is tested in.
  unsigned BN = MI->getOperand(2).getImm();
  BitTracker::RegisterRef RS = MI->getOperand(1);
  unsigned F, W;
  DebugLoc DL = MI->getDebugLoc();
  if (!BT.has(RS.Reg) || !HBS::getSubregMask(RS, F, W, MRI))
    return false;
  MachineBasicBlock &B = *MI->getParent();
  // Do not insert a non-PHI instruction into the PHI sequence.
  auto At = MI->isPHI() ? B.getFirstNonPHI()
                        : MachineBasicBlock::iterator(MI);

  // V = tracked value of the tested bit (F adjusts for the subregister).
  const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg);
  const BitTracker::BitValue &V = SC[F+BN];
  if (V.Type == BitTracker::BitValue::Ref && V.RefI.Reg != RS.Reg) {
    // The bit is a copy of a bit in some other register: test that
    // register directly.
    const TargetRegisterClass *TC = MRI.getRegClass(V.RefI.Reg);
    // Need to map V.RefI.Reg to a 32-bit register, i.e. if it is
    // a double register, need to use a subregister and adjust bit
    // number.
    unsigned P = UINT_MAX;
    BitTracker::RegisterRef RR(V.RefI.Reg, 0);
    if (TC == &Hexagon::DoubleRegsRegClass) {
      P = V.RefI.Pos;
      RR.Sub = Hexagon::subreg_loreg;
      if (P >= 32) {
        P -= 32;
        RR.Sub = Hexagon::subreg_hireg;
      }
    } else if (TC == &Hexagon::IntRegsRegClass) {
      P = V.RefI.Pos;
    }
    // P remains UINT_MAX for register classes other than the two above.
    if (P != UINT_MAX) {
      unsigned NewR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
      BuildMI(B, At, DL, HII.get(Hexagon::S2_tstbit_i), NewR)
          .addReg(RR.Reg, 0, RR.Sub)
          .addImm(P);
      HBS::replaceReg(RD.Reg, NewR, MRI);
      BT.put(NewR, RC);
      return true;
    }
  } else if (V.is(0) || V.is(1)) {
    // The bit is a known constant: replace the test with a constant
    // predicate transfer.
    unsigned NewR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
    unsigned NewOpc = V.is(0) ? Hexagon::TFR_PdFalse : Hexagon::TFR_PdTrue;
    BuildMI(B, At, DL, HII.get(NewOpc), NewR);
    HBS::replaceReg(RD.Reg, NewR, MRI);
    return true;
  }

  return false;
}


// Dispatch the per-instruction simplifications over block B. AVs is the
// set of registers available on entry (maintained by the visitBlock
// driver); AVB additionally accumulates the defs seen so far in B.
bool BitSimplification::processBlock(MachineBasicBlock &B,
      const RegisterSet &AVs) {
  bool Changed = false;
  RegisterSet AVB = AVs;
  RegisterSet Defs;

  // Defs of each instruction are added to AVB after it is processed.
  for (auto I = B.begin(), E = B.end(); I != E; ++I, AVB.insert(Defs)) {
    MachineInstr *MI = &*I;
    Defs.clear();
    HBS::getInstrDefs(*MI, Defs);

    unsigned Opc = MI->getOpcode();
    // Copies and reg-sequences are handled by other subpasses.
    if (Opc == TargetOpcode::COPY || Opc == TargetOpcode::REG_SEQUENCE)
      continue;

    if (MI->mayStore()) {
      bool T = genStoreUpperHalf(MI);
      // Short-circuit: try the second rewrite only if the first failed.
      T = T || genStoreImmediate(MI);
      Changed |= T;
      continue;
    }

    // The remaining rewrites only apply to single-def instructions whose
    // output cell is known to the bit tracker.
    if (Defs.count() != 1)
      continue;
    const MachineOperand &Op0 = MI->getOperand(0);
    if (!Op0.isReg() || !Op0.isDef())
      continue;
    BitTracker::RegisterRef RD = Op0;
    if (!BT.has(RD.Reg))
      continue;
    const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI);
    const BitTracker::RegisterCell &RC = BT.lookup(RD.Reg);

    // Select the applicable rewrites by the width/kind of the destination.
    if (FRC->getID() == Hexagon::DoubleRegsRegClassID) {
      bool T = genPackhl(MI, RD, RC);
      Changed |= T;
      continue;
    }

    if (FRC->getID() == Hexagon::IntRegsRegClassID) {
      bool T = genExtractHalf(MI, RD, RC);
      T = T || genCombineHalf(MI, RD, RC);
      T = T || genExtractLow(MI, RD, RC);
      Changed |= T;
      continue;
    }

    if (FRC->getID() == Hexagon::PredRegsRegClassID) {
      bool T = simplifyTstbit(MI, RD, RC);
      Changed |= T;
      continue;
    }
  }
  return Changed;
}


// Run the bit-simplification subpasses in sequence, with dead-code
// elimination between phases and a bit-tracker re-run before the final
// bit-simplification phase (earlier phases may invalidate tracked cells).
bool HexagonBitSimplify::runOnMachineFunction(MachineFunction &MF) {
  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  auto &HRI = *HST.getRegisterInfo();
  auto &HII = *HST.getInstrInfo();

  MDT = &getAnalysis<MachineDominatorTree>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  bool Changed;

  Changed = DeadCodeElimination(MF, *MDT).run();

  const HexagonEvaluator HE(HRI, MRI, HII, MF);
  BitTracker BT(HE, MF);
  DEBUG(BT.trace(true));
  BT.run();

  MachineBasicBlock &Entry = MF.front();

  RegisterSet AIG;  // Available registers for IG.
  ConstGeneration ImmG(BT, HII, MRI);
  Changed |= visitBlock(Entry, ImmG, AIG);

  RegisterSet ARE;  // Available registers for RIE.
  RedundantInstrElimination RIE(BT, HII, MRI);
  Changed |= visitBlock(Entry, RIE, ARE);

  RegisterSet ACG;  // Available registers for CG.
  CopyGeneration CopyG(BT, HII, MRI);
  Changed |= visitBlock(Entry, CopyG, ACG);

  RegisterSet ACP;  // Available registers for CP.
  CopyPropagation CopyP(HRI, MRI);
  Changed |= visitBlock(Entry, CopyP, ACP);

  Changed = DeadCodeElimination(MF, *MDT).run() || Changed;

  // Recompute the bit cells: the phases above may have changed them.
  BT.run();
  RegisterSet ABS;  // Available registers for BS.
  BitSimplification BitS(BT, HII, MRI);
  Changed |= visitBlock(Entry, BitS, ABS);

  Changed = DeadCodeElimination(MF, *MDT).run() || Changed;

  if (Changed) {
    // Kill flags may be stale after the rewrites; clear them all and
    // run DCE once more to clean up.
    for (auto &B : MF)
      for (auto &I : B)
        I.clearKillInfo();
    DeadCodeElimination(MF, *MDT).run();
  }
  return Changed;
}


// Recognize loops where the code at the end of the loop matches the code
// before the entry of the loop, and the matching code is such that is can
// be simplified. This pass relies on the bit simplification above and only
// prepares code in a way that can be handled by the bit simplification.
2251 // 2252 // This is the motivating testcase (and explanation): 2253 // 2254 // { 2255 // loop0(.LBB0_2, r1) // %for.body.preheader 2256 // r5:4 = memd(r0++#8) 2257 // } 2258 // { 2259 // r3 = lsr(r4, #16) 2260 // r7:6 = combine(r5, r5) 2261 // } 2262 // { 2263 // r3 = insert(r5, #16, #16) 2264 // r7:6 = vlsrw(r7:6, #16) 2265 // } 2266 // .LBB0_2: 2267 // { 2268 // memh(r2+#4) = r5 2269 // memh(r2+#6) = r6 # R6 is really R5.H 2270 // } 2271 // { 2272 // r2 = add(r2, #8) 2273 // memh(r2+#0) = r4 2274 // memh(r2+#2) = r3 # R3 is really R4.H 2275 // } 2276 // { 2277 // r5:4 = memd(r0++#8) 2278 // } 2279 // { # "Shuffling" code that sets up R3 and R6 2280 // r3 = lsr(r4, #16) # so that their halves can be stored in the 2281 // r7:6 = combine(r5, r5) # next iteration. This could be folded into 2282 // } # the stores if the code was at the beginning 2283 // { # of the loop iteration. Since the same code 2284 // r3 = insert(r5, #16, #16) # precedes the loop, it can actually be moved 2285 // r7:6 = vlsrw(r7:6, #16) # there. 
2286 // }:endloop0 2287 // 2288 // 2289 // The outcome: 2290 // 2291 // { 2292 // loop0(.LBB0_2, r1) 2293 // r5:4 = memd(r0++#8) 2294 // } 2295 // .LBB0_2: 2296 // { 2297 // memh(r2+#4) = r5 2298 // memh(r2+#6) = r5.h 2299 // } 2300 // { 2301 // r2 = add(r2, #8) 2302 // memh(r2+#0) = r4 2303 // memh(r2+#2) = r4.h 2304 // } 2305 // { 2306 // r5:4 = memd(r0++#8) 2307 // }:endloop0 2308 2309 namespace llvm { 2310 FunctionPass *createHexagonLoopRescheduling(); 2311 void initializeHexagonLoopReschedulingPass(PassRegistry&); 2312 } 2313 2314 namespace { 2315 class HexagonLoopRescheduling : public MachineFunctionPass { 2316 public: 2317 static char ID; 2318 HexagonLoopRescheduling() : MachineFunctionPass(ID), 2319 HII(0), HRI(0), MRI(0), BTP(0) { 2320 initializeHexagonLoopReschedulingPass(*PassRegistry::getPassRegistry()); 2321 } 2322 2323 bool runOnMachineFunction(MachineFunction &MF) override; 2324 2325 private: 2326 const HexagonInstrInfo *HII; 2327 const HexagonRegisterInfo *HRI; 2328 MachineRegisterInfo *MRI; 2329 BitTracker *BTP; 2330 2331 struct LoopCand { 2332 LoopCand(MachineBasicBlock *lb, MachineBasicBlock *pb, 2333 MachineBasicBlock *eb) : LB(lb), PB(pb), EB(eb) {} 2334 MachineBasicBlock *LB, *PB, *EB; 2335 }; 2336 typedef std::vector<MachineInstr*> InstrList; 2337 struct InstrGroup { 2338 BitTracker::RegisterRef Inp, Out; 2339 InstrList Ins; 2340 }; 2341 struct PhiInfo { 2342 PhiInfo(MachineInstr &P, MachineBasicBlock &B); 2343 unsigned DefR; 2344 BitTracker::RegisterRef LR, PR; 2345 MachineBasicBlock *LB, *PB; 2346 }; 2347 2348 static unsigned getDefReg(const MachineInstr *MI); 2349 bool isConst(unsigned Reg) const; 2350 bool isBitShuffle(const MachineInstr *MI, unsigned DefR) const; 2351 bool isStoreInput(const MachineInstr *MI, unsigned DefR) const; 2352 bool isShuffleOf(unsigned OutR, unsigned InpR) const; 2353 bool isSameShuffle(unsigned OutR1, unsigned InpR1, unsigned OutR2, 2354 unsigned &InpR2) const; 2355 void moveGroup(InstrGroup &G, 
MachineBasicBlock &LB, MachineBasicBlock &PB, 2356 MachineBasicBlock::iterator At, unsigned OldPhiR, unsigned NewPredR); 2357 bool processLoop(LoopCand &C); 2358 }; 2359 } 2360 2361 char HexagonLoopRescheduling::ID = 0; 2362 2363 INITIALIZE_PASS(HexagonLoopRescheduling, "hexagon-loop-resched", 2364 "Hexagon Loop Rescheduling", false, false) 2365 2366 2367 HexagonLoopRescheduling::PhiInfo::PhiInfo(MachineInstr &P, 2368 MachineBasicBlock &B) { 2369 DefR = HexagonLoopRescheduling::getDefReg(&P); 2370 LB = &B; 2371 PB = nullptr; 2372 for (unsigned i = 1, n = P.getNumOperands(); i < n; i += 2) { 2373 const MachineOperand &OpB = P.getOperand(i+1); 2374 if (OpB.getMBB() == &B) { 2375 LR = P.getOperand(i); 2376 continue; 2377 } 2378 PB = OpB.getMBB(); 2379 PR = P.getOperand(i); 2380 } 2381 } 2382 2383 2384 unsigned HexagonLoopRescheduling::getDefReg(const MachineInstr *MI) { 2385 RegisterSet Defs; 2386 HBS::getInstrDefs(*MI, Defs); 2387 if (Defs.count() != 1) 2388 return 0; 2389 return Defs.find_first(); 2390 } 2391 2392 2393 bool HexagonLoopRescheduling::isConst(unsigned Reg) const { 2394 if (!BTP->has(Reg)) 2395 return false; 2396 const BitTracker::RegisterCell &RC = BTP->lookup(Reg); 2397 for (unsigned i = 0, w = RC.width(); i < w; ++i) { 2398 const BitTracker::BitValue &V = RC[i]; 2399 if (!V.is(0) && !V.is(1)) 2400 return false; 2401 } 2402 return true; 2403 } 2404 2405 2406 bool HexagonLoopRescheduling::isBitShuffle(const MachineInstr *MI, 2407 unsigned DefR) const { 2408 unsigned Opc = MI->getOpcode(); 2409 switch (Opc) { 2410 case TargetOpcode::COPY: 2411 case Hexagon::S2_lsr_i_r: 2412 case Hexagon::S2_asr_i_r: 2413 case Hexagon::S2_asl_i_r: 2414 case Hexagon::S2_lsr_i_p: 2415 case Hexagon::S2_asr_i_p: 2416 case Hexagon::S2_asl_i_p: 2417 case Hexagon::S2_insert: 2418 case Hexagon::A2_or: 2419 case Hexagon::A2_orp: 2420 case Hexagon::A2_and: 2421 case Hexagon::A2_andp: 2422 case Hexagon::A2_combinew: 2423 case Hexagon::A4_combineri: 2424 case Hexagon::A4_combineir: 
2425 case Hexagon::A2_combineii: 2426 case Hexagon::A4_combineii: 2427 case Hexagon::A2_combine_ll: 2428 case Hexagon::A2_combine_lh: 2429 case Hexagon::A2_combine_hl: 2430 case Hexagon::A2_combine_hh: 2431 return true; 2432 } 2433 return false; 2434 } 2435 2436 2437 bool HexagonLoopRescheduling::isStoreInput(const MachineInstr *MI, 2438 unsigned InpR) const { 2439 for (unsigned i = 0, n = MI->getNumOperands(); i < n; ++i) { 2440 const MachineOperand &Op = MI->getOperand(i); 2441 if (!Op.isReg()) 2442 continue; 2443 if (Op.getReg() == InpR) 2444 return i == n-1; 2445 } 2446 return false; 2447 } 2448 2449 2450 bool HexagonLoopRescheduling::isShuffleOf(unsigned OutR, unsigned InpR) const { 2451 if (!BTP->has(OutR) || !BTP->has(InpR)) 2452 return false; 2453 const BitTracker::RegisterCell &OutC = BTP->lookup(OutR); 2454 for (unsigned i = 0, w = OutC.width(); i < w; ++i) { 2455 const BitTracker::BitValue &V = OutC[i]; 2456 if (V.Type != BitTracker::BitValue::Ref) 2457 continue; 2458 if (V.RefI.Reg != InpR) 2459 return false; 2460 } 2461 return true; 2462 } 2463 2464 2465 bool HexagonLoopRescheduling::isSameShuffle(unsigned OutR1, unsigned InpR1, 2466 unsigned OutR2, unsigned &InpR2) const { 2467 if (!BTP->has(OutR1) || !BTP->has(InpR1) || !BTP->has(OutR2)) 2468 return false; 2469 const BitTracker::RegisterCell &OutC1 = BTP->lookup(OutR1); 2470 const BitTracker::RegisterCell &OutC2 = BTP->lookup(OutR2); 2471 unsigned W = OutC1.width(); 2472 unsigned MatchR = 0; 2473 if (W != OutC2.width()) 2474 return false; 2475 for (unsigned i = 0; i < W; ++i) { 2476 const BitTracker::BitValue &V1 = OutC1[i], &V2 = OutC2[i]; 2477 if (V1.Type != V2.Type || V1.Type == BitTracker::BitValue::One) 2478 return false; 2479 if (V1.Type != BitTracker::BitValue::Ref) 2480 continue; 2481 if (V1.RefI.Pos != V2.RefI.Pos) 2482 return false; 2483 if (V1.RefI.Reg != InpR1) 2484 return false; 2485 if (V2.RefI.Reg == 0 || V2.RefI.Reg == OutR2) 2486 return false; 2487 if (!MatchR) 2488 MatchR = 
V2.RefI.Reg; 2489 else if (V2.RefI.Reg != MatchR) 2490 return false; 2491 } 2492 InpR2 = MatchR; 2493 return true; 2494 } 2495 2496 2497 void HexagonLoopRescheduling::moveGroup(InstrGroup &G, MachineBasicBlock &LB, 2498 MachineBasicBlock &PB, MachineBasicBlock::iterator At, unsigned OldPhiR, 2499 unsigned NewPredR) { 2500 DenseMap<unsigned,unsigned> RegMap; 2501 2502 const TargetRegisterClass *PhiRC = MRI->getRegClass(NewPredR); 2503 unsigned PhiR = MRI->createVirtualRegister(PhiRC); 2504 BuildMI(LB, At, At->getDebugLoc(), HII->get(TargetOpcode::PHI), PhiR) 2505 .addReg(NewPredR) 2506 .addMBB(&PB) 2507 .addReg(G.Inp.Reg) 2508 .addMBB(&LB); 2509 RegMap.insert(std::make_pair(G.Inp.Reg, PhiR)); 2510 2511 for (unsigned i = G.Ins.size(); i > 0; --i) { 2512 const MachineInstr *SI = G.Ins[i-1]; 2513 unsigned DR = getDefReg(SI); 2514 const TargetRegisterClass *RC = MRI->getRegClass(DR); 2515 unsigned NewDR = MRI->createVirtualRegister(RC); 2516 DebugLoc DL = SI->getDebugLoc(); 2517 2518 auto MIB = BuildMI(LB, At, DL, HII->get(SI->getOpcode()), NewDR); 2519 for (unsigned j = 0, m = SI->getNumOperands(); j < m; ++j) { 2520 const MachineOperand &Op = SI->getOperand(j); 2521 if (!Op.isReg()) { 2522 MIB.addOperand(Op); 2523 continue; 2524 } 2525 if (!Op.isUse()) 2526 continue; 2527 unsigned UseR = RegMap[Op.getReg()]; 2528 MIB.addReg(UseR, 0, Op.getSubReg()); 2529 } 2530 RegMap.insert(std::make_pair(DR, NewDR)); 2531 } 2532 2533 HBS::replaceReg(OldPhiR, RegMap[G.Out.Reg], *MRI); 2534 } 2535 2536 2537 bool HexagonLoopRescheduling::processLoop(LoopCand &C) { 2538 DEBUG(dbgs() << "Processing loop in BB#" << C.LB->getNumber() << "\n"); 2539 std::vector<PhiInfo> Phis; 2540 for (auto &I : *C.LB) { 2541 if (!I.isPHI()) 2542 break; 2543 unsigned PR = getDefReg(&I); 2544 if (isConst(PR)) 2545 continue; 2546 bool BadUse = false, GoodUse = false; 2547 for (auto UI = MRI->use_begin(PR), UE = MRI->use_end(); UI != UE; ++UI) { 2548 MachineInstr *UseI = UI->getParent(); 2549 if 
(UseI->getParent() != C.LB) { 2550 BadUse = true; 2551 break; 2552 } 2553 if (isBitShuffle(UseI, PR) || isStoreInput(UseI, PR)) 2554 GoodUse = true; 2555 } 2556 if (BadUse || !GoodUse) 2557 continue; 2558 2559 Phis.push_back(PhiInfo(I, *C.LB)); 2560 } 2561 2562 DEBUG({ 2563 dbgs() << "Phis: {"; 2564 for (auto &I : Phis) { 2565 dbgs() << ' ' << PrintReg(I.DefR, HRI) << "=phi(" 2566 << PrintReg(I.PR.Reg, HRI, I.PR.Sub) << ":b" << I.PB->getNumber() 2567 << ',' << PrintReg(I.LR.Reg, HRI, I.LR.Sub) << ":b" 2568 << I.LB->getNumber() << ')'; 2569 } 2570 dbgs() << " }\n"; 2571 }); 2572 2573 if (Phis.empty()) 2574 return false; 2575 2576 bool Changed = false; 2577 InstrList ShufIns; 2578 2579 // Go backwards in the block: for each bit shuffling instruction, check 2580 // if that instruction could potentially be moved to the front of the loop: 2581 // the output of the loop cannot be used in a non-shuffling instruction 2582 // in this loop. 2583 for (auto I = C.LB->rbegin(), E = C.LB->rend(); I != E; ++I) { 2584 if (I->isTerminator()) 2585 continue; 2586 if (I->isPHI()) 2587 break; 2588 2589 RegisterSet Defs; 2590 HBS::getInstrDefs(*I, Defs); 2591 if (Defs.count() != 1) 2592 continue; 2593 unsigned DefR = Defs.find_first(); 2594 if (!TargetRegisterInfo::isVirtualRegister(DefR)) 2595 continue; 2596 if (!isBitShuffle(&*I, DefR)) 2597 continue; 2598 2599 bool BadUse = false; 2600 for (auto UI = MRI->use_begin(DefR), UE = MRI->use_end(); UI != UE; ++UI) { 2601 MachineInstr *UseI = UI->getParent(); 2602 if (UseI->getParent() == C.LB) { 2603 if (UseI->isPHI()) { 2604 // If the use is in a phi node in this loop, then it should be 2605 // the value corresponding to the back edge. 
2606 unsigned Idx = UI.getOperandNo(); 2607 if (UseI->getOperand(Idx+1).getMBB() != C.LB) 2608 BadUse = true; 2609 } else { 2610 auto F = std::find(ShufIns.begin(), ShufIns.end(), UseI); 2611 if (F == ShufIns.end()) 2612 BadUse = true; 2613 } 2614 } else { 2615 // There is a use outside of the loop, but there is no epilog block 2616 // suitable for a copy-out. 2617 if (C.EB == nullptr) 2618 BadUse = true; 2619 } 2620 if (BadUse) 2621 break; 2622 } 2623 2624 if (BadUse) 2625 continue; 2626 ShufIns.push_back(&*I); 2627 } 2628 2629 // Partition the list of shuffling instructions into instruction groups, 2630 // where each group has to be moved as a whole (i.e. a group is a chain of 2631 // dependent instructions). A group produces a single live output register, 2632 // which is meant to be the input of the loop phi node (although this is 2633 // not checked here yet). It also uses a single register as its input, 2634 // which is some value produced in the loop body. After moving the group 2635 // to the beginning of the loop, that input register would need to be 2636 // the loop-carried register (through a phi node) instead of the (currently 2637 // loop-carried) output register. 2638 typedef std::vector<InstrGroup> InstrGroupList; 2639 InstrGroupList Groups; 2640 2641 for (unsigned i = 0, n = ShufIns.size(); i < n; ++i) { 2642 MachineInstr *SI = ShufIns[i]; 2643 if (SI == nullptr) 2644 continue; 2645 2646 InstrGroup G; 2647 G.Ins.push_back(SI); 2648 G.Out.Reg = getDefReg(SI); 2649 RegisterSet Inputs; 2650 HBS::getInstrUses(*SI, Inputs); 2651 2652 for (unsigned j = i+1; j < n; ++j) { 2653 MachineInstr *MI = ShufIns[j]; 2654 if (MI == nullptr) 2655 continue; 2656 RegisterSet Defs; 2657 HBS::getInstrDefs(*MI, Defs); 2658 // If this instruction does not define any pending inputs, skip it. 2659 if (!Defs.intersects(Inputs)) 2660 continue; 2661 // Otherwise, add it to the current group and remove the inputs that 2662 // are defined by MI. 
2663 G.Ins.push_back(MI); 2664 Inputs.remove(Defs); 2665 // Then add all registers used by MI. 2666 HBS::getInstrUses(*MI, Inputs); 2667 ShufIns[j] = nullptr; 2668 } 2669 2670 // Only add a group if it requires at most one register. 2671 if (Inputs.count() > 1) 2672 continue; 2673 auto LoopInpEq = [G] (const PhiInfo &P) -> bool { 2674 return G.Out.Reg == P.LR.Reg; 2675 }; 2676 if (std::find_if(Phis.begin(), Phis.end(), LoopInpEq) == Phis.end()) 2677 continue; 2678 2679 G.Inp.Reg = Inputs.find_first(); 2680 Groups.push_back(G); 2681 } 2682 2683 DEBUG({ 2684 for (unsigned i = 0, n = Groups.size(); i < n; ++i) { 2685 InstrGroup &G = Groups[i]; 2686 dbgs() << "Group[" << i << "] inp: " 2687 << PrintReg(G.Inp.Reg, HRI, G.Inp.Sub) 2688 << " out: " << PrintReg(G.Out.Reg, HRI, G.Out.Sub) << "\n"; 2689 for (unsigned j = 0, m = G.Ins.size(); j < m; ++j) 2690 dbgs() << " " << *G.Ins[j]; 2691 } 2692 }); 2693 2694 for (unsigned i = 0, n = Groups.size(); i < n; ++i) { 2695 InstrGroup &G = Groups[i]; 2696 if (!isShuffleOf(G.Out.Reg, G.Inp.Reg)) 2697 continue; 2698 auto LoopInpEq = [G] (const PhiInfo &P) -> bool { 2699 return G.Out.Reg == P.LR.Reg; 2700 }; 2701 auto F = std::find_if(Phis.begin(), Phis.end(), LoopInpEq); 2702 if (F == Phis.end()) 2703 continue; 2704 unsigned PredR = 0; 2705 if (!isSameShuffle(G.Out.Reg, G.Inp.Reg, F->PR.Reg, PredR)) { 2706 const MachineInstr *DefPredR = MRI->getVRegDef(F->PR.Reg); 2707 unsigned Opc = DefPredR->getOpcode(); 2708 if (Opc != Hexagon::A2_tfrsi && Opc != Hexagon::A2_tfrpi) 2709 continue; 2710 if (!DefPredR->getOperand(1).isImm()) 2711 continue; 2712 if (DefPredR->getOperand(1).getImm() != 0) 2713 continue; 2714 const TargetRegisterClass *RC = MRI->getRegClass(G.Inp.Reg); 2715 if (RC != MRI->getRegClass(F->PR.Reg)) { 2716 PredR = MRI->createVirtualRegister(RC); 2717 unsigned TfrI = (RC == &Hexagon::IntRegsRegClass) ? 
Hexagon::A2_tfrsi 2718 : Hexagon::A2_tfrpi; 2719 auto T = C.PB->getFirstTerminator(); 2720 DebugLoc DL = (T != C.PB->end()) ? T->getDebugLoc() : DebugLoc(); 2721 BuildMI(*C.PB, T, DL, HII->get(TfrI), PredR) 2722 .addImm(0); 2723 } else { 2724 PredR = F->PR.Reg; 2725 } 2726 } 2727 assert(MRI->getRegClass(PredR) == MRI->getRegClass(G.Inp.Reg)); 2728 moveGroup(G, *F->LB, *F->PB, F->LB->getFirstNonPHI(), F->DefR, PredR); 2729 Changed = true; 2730 } 2731 2732 return Changed; 2733 } 2734 2735 2736 bool HexagonLoopRescheduling::runOnMachineFunction(MachineFunction &MF) { 2737 auto &HST = MF.getSubtarget<HexagonSubtarget>(); 2738 HII = HST.getInstrInfo(); 2739 HRI = HST.getRegisterInfo(); 2740 MRI = &MF.getRegInfo(); 2741 const HexagonEvaluator HE(*HRI, *MRI, *HII, MF); 2742 BitTracker BT(HE, MF); 2743 DEBUG(BT.trace(true)); 2744 BT.run(); 2745 BTP = &BT; 2746 2747 std::vector<LoopCand> Cand; 2748 2749 for (auto &B : MF) { 2750 if (B.pred_size() != 2 || B.succ_size() != 2) 2751 continue; 2752 MachineBasicBlock *PB = nullptr; 2753 bool IsLoop = false; 2754 for (auto PI = B.pred_begin(), PE = B.pred_end(); PI != PE; ++PI) { 2755 if (*PI != &B) 2756 PB = *PI; 2757 else 2758 IsLoop = true; 2759 } 2760 if (!IsLoop) 2761 continue; 2762 2763 MachineBasicBlock *EB = nullptr; 2764 for (auto SI = B.succ_begin(), SE = B.succ_end(); SI != SE; ++SI) { 2765 if (*SI == &B) 2766 continue; 2767 // Set EP to the epilog block, if it has only 1 predecessor (i.e. the 2768 // edge from B to EP is non-critical. 
2769 if ((*SI)->pred_size() == 1) 2770 EB = *SI; 2771 break; 2772 } 2773 2774 Cand.push_back(LoopCand(&B, PB, EB)); 2775 } 2776 2777 bool Changed = false; 2778 for (auto &C : Cand) 2779 Changed |= processLoop(C); 2780 2781 return Changed; 2782 } 2783 2784 //===----------------------------------------------------------------------===// 2785 // Public Constructor Functions 2786 //===----------------------------------------------------------------------===// 2787 2788 FunctionPass *llvm::createHexagonLoopRescheduling() { 2789 return new HexagonLoopRescheduling(); 2790 } 2791 2792 FunctionPass *llvm::createHexagonBitSimplify() { 2793 return new HexagonBitSimplify(); 2794 } 2795 2796