1 //===--- HexagonBitSimplify.cpp -------------------------------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 10 #define DEBUG_TYPE "hexbit" 11 12 #include "HexagonBitTracker.h" 13 #include "HexagonTargetMachine.h" 14 #include "llvm/ADT/BitVector.h" 15 #include "llvm/ADT/DenseMap.h" 16 #include "llvm/ADT/STLExtras.h" 17 #include "llvm/ADT/SmallVector.h" 18 #include "llvm/ADT/StringRef.h" 19 #include "llvm/CodeGen/MachineBasicBlock.h" 20 #include "llvm/CodeGen/MachineDominators.h" 21 #include "llvm/CodeGen/MachineFunction.h" 22 #include "llvm/CodeGen/MachineFunctionPass.h" 23 #include "llvm/CodeGen/MachineInstr.h" 24 #include "llvm/CodeGen/MachineInstrBuilder.h" 25 #include "llvm/CodeGen/MachineOperand.h" 26 #include "llvm/CodeGen/MachineRegisterInfo.h" 27 #include "llvm/CodeGen/Passes.h" 28 #include "llvm/IR/DebugLoc.h" 29 #include "llvm/MC/MCInstrDesc.h" 30 #include "llvm/Pass.h" 31 #include "llvm/Support/CommandLine.h" 32 #include "llvm/Support/Compiler.h" 33 #include "llvm/Support/Debug.h" 34 #include "llvm/Support/MathExtras.h" 35 #include "llvm/Support/raw_ostream.h" 36 #include "llvm/Target/TargetRegisterInfo.h" 37 #include <algorithm> 38 #include <cassert> 39 #include <cstdint> 40 #include <iterator> 41 #include <limits> 42 #include <utility> 43 #include <vector> 44 45 using namespace llvm; 46 47 static cl::opt<bool> PreserveTiedOps("hexbit-keep-tied", cl::Hidden, 48 cl::init(true), cl::desc("Preserve subregisters in tied operands")); 49 static cl::opt<bool> GenExtract("hexbit-extract", cl::Hidden, 50 cl::init(true), cl::desc("Generate extract instructions")); 51 static cl::opt<bool> GenBitSplit("hexbit-bitsplit", cl::Hidden, 52 cl::init(true), cl::desc("Generate bitsplit instructions")); 53 54 static cl::opt<unsigned> 
MaxExtract("hexbit-max-extract", cl::Hidden, cl::init(UINT_MAX));
static unsigned CountExtract = 0;
static cl::opt<unsigned> MaxBitSplit("hexbit-max-bitsplit", cl::Hidden,
  cl::init(UINT_MAX));
static unsigned CountBitSplit = 0;

namespace llvm {

  void initializeHexagonBitSimplifyPass(PassRegistry& Registry);
  FunctionPass *createHexagonBitSimplify();

} // end namespace llvm

namespace {

  // Set of virtual registers, based on BitVector. Virtual register numbers
  // are mapped to bit indices via virtReg2Index, so the set grows on demand
  // as registers with higher indices are inserted.
  struct RegisterSet : private BitVector {
    RegisterSet() = default;
    explicit RegisterSet(unsigned s, bool t = false) : BitVector(s, t) {}
    RegisterSet(const RegisterSet &RS) = default;

    using BitVector::clear;
    using BitVector::count;

    // Return the first virtual register in the set, or 0 if the set is
    // empty (0 is never a valid virtual register).
    unsigned find_first() const {
      int First = BitVector::find_first();
      if (First < 0)
        return 0;
      return x2v(First);
    }

    // Return the virtual register following Prev in the set, or 0 if Prev
    // was the last one.
    unsigned find_next(unsigned Prev) const {
      int Next = BitVector::find_next(v2x(Prev));
      if (Next < 0)
        return 0;
      return x2v(Next);
    }

    RegisterSet &insert(unsigned R) {
      unsigned Idx = v2x(R);
      ensure(Idx);
      return static_cast<RegisterSet&>(BitVector::set(Idx));
    }
    RegisterSet &remove(unsigned R) {
      unsigned Idx = v2x(R);
      if (Idx >= size())
        return *this;
      return static_cast<RegisterSet&>(BitVector::reset(Idx));
    }

    // Set union / difference.
    RegisterSet &insert(const RegisterSet &Rs) {
      return static_cast<RegisterSet&>(BitVector::operator|=(Rs));
    }
    RegisterSet &remove(const RegisterSet &Rs) {
      return static_cast<RegisterSet&>(BitVector::reset(Rs));
    }

    reference operator[](unsigned R) {
      unsigned Idx = v2x(R);
      ensure(Idx);
      return BitVector::operator[](Idx);
    }
    bool operator[](unsigned R) const {
      unsigned Idx = v2x(R);
      assert(Idx < size());
      return BitVector::operator[](Idx);
    }
    // Like operator[], but safe for registers beyond the current size.
    bool has(unsigned R) const {
      unsigned Idx = v2x(R);
      if (Idx >= size())
        return false;
      return BitVector::test(Idx);
    }

    bool empty() const {
      return !BitVector::any();
    }
    // True if this set is a superset of Rs.
    bool includes(const RegisterSet &Rs) const {
      // A.BitVector::test(B) <=> A-B != {}
      return !Rs.BitVector::test(*this);
    }
    bool intersects(const RegisterSet &Rs) const {
      return BitVector::anyCommon(Rs);
    }

  private:
    // Grow the underlying bit vector so that Idx is a valid position.
    void ensure(unsigned Idx) {
      if (size() <= Idx)
        resize(std::max(Idx+1, 32U));
    }

    // Virtual register number <-> zero-based bit index.
    static inline unsigned v2x(unsigned v) {
      return TargetRegisterInfo::virtReg2Index(v);
    }

    static inline unsigned x2v(unsigned x) {
      return TargetRegisterInfo::index2VirtReg(x);
    }
  };

  // Helper for printing a RegisterSet with target register names via
  // operator<< (debug output only).
  struct PrintRegSet {
    PrintRegSet(const RegisterSet &S, const TargetRegisterInfo *RI)
      : RS(S), TRI(RI) {}

    friend raw_ostream &operator<< (raw_ostream &OS,
          const PrintRegSet &P);

  private:
    const RegisterSet &RS;
    const TargetRegisterInfo *TRI;
  };

  raw_ostream &operator<< (raw_ostream &OS, const PrintRegSet &P)
    LLVM_ATTRIBUTE_UNUSED;
  raw_ostream &operator<< (raw_ostream &OS, const PrintRegSet &P) {
    OS << '{';
    for (unsigned R = P.RS.find_first(); R; R = P.RS.find_next(R))
      OS << ' ' << PrintReg(R, P.TRI);
    OS << " }";
    return OS;
  }

  class Transformation;

  class HexagonBitSimplify : public MachineFunctionPass {
  public:
    static char ID;

    HexagonBitSimplify() : MachineFunctionPass(ID), MDT(nullptr) {
      initializeHexagonBitSimplifyPass(*PassRegistry::getPassRegistry());
    }

    StringRef getPassName() const override {
      return "Hexagon bit simplification";
    }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    bool runOnMachineFunction(MachineFunction &MF) override;

    // Collect the virtual registers defined/used by MI into Defs/Uses.
    static void getInstrDefs(const MachineInstr &MI, RegisterSet &Defs);
    static void getInstrUses(const MachineInstr &MI, RegisterSet &Uses);
    // Bit-cell queries used by the individual transformations below.
    static bool isEqual(const BitTracker::RegisterCell &RC1, uint16_t B1,
        const BitTracker::RegisterCell &RC2, uint16_t B2, uint16_t W);
    static bool isZero(const BitTracker::RegisterCell &RC, uint16_t B,
        uint16_t W);
    static bool getConst(const BitTracker::RegisterCell &RC, uint16_t B,
        uint16_t W, uint64_t &U);
    // Use-list rewriting helpers (with/without subregister adjustment).
    static bool replaceReg(unsigned OldR, unsigned NewR,
        MachineRegisterInfo &MRI);
    static bool getSubregMask(const BitTracker::RegisterRef &RR,
        unsigned &Begin, unsigned &Width, MachineRegisterInfo &MRI);
    static bool replaceRegWithSub(unsigned OldR, unsigned NewR,
        unsigned NewSR, MachineRegisterInfo &MRI);
    static bool replaceSubWithSub(unsigned OldR, unsigned OldSR,
        unsigned NewR, unsigned NewSR, MachineRegisterInfo &MRI);
    static bool parseRegSequence(const MachineInstr &I,
        BitTracker::RegisterRef &SL, BitTracker::RegisterRef &SH,
        const MachineRegisterInfo &MRI);

    static bool getUsedBitsInStore(unsigned Opc, BitVector &Bits,
        uint16_t Begin);
    static bool getUsedBits(unsigned Opc, unsigned OpN, BitVector &Bits,
        uint16_t Begin, const HexagonInstrInfo &HII);

    static const TargetRegisterClass *getFinalVRegClass(
        const BitTracker::RegisterRef &RR, MachineRegisterInfo &MRI);
    static bool isTransparentCopy(const BitTracker::RegisterRef &RD,
        const BitTracker::RegisterRef &RS, MachineRegisterInfo &MRI);

  private:
    MachineDominatorTree *MDT;

    bool visitBlock(MachineBasicBlock &B, Transformation &T, RegisterSet &AVs);
    static bool hasTiedUse(unsigned Reg, MachineRegisterInfo &MRI,
        unsigned NewSub = Hexagon::NoSubRegister);
  };

  char HexagonBitSimplify::ID = 0;
  typedef HexagonBitSimplify HBS;

  // The purpose of this class is to provide a common facility to traverse
  // the function top-down or bottom-up via the dominator tree, and keep
  // track of the available registers.
  // Abstract base for the individual rewrites. A Transformation is applied
  // to every block either before (TopDown) or after (bottom-up) the
  // dominated blocks are visited; AVs is the set of registers whose
  // definitions dominate the block.
  class Transformation {
  public:
    bool TopDown;

    Transformation(bool TD) : TopDown(TD) {}
    virtual ~Transformation() = default;

    virtual bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) = 0;
  };

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(HexagonBitSimplify, "hexbit",
      "Hexagon bit simplification", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(HexagonBitSimplify, "hexbit",
      "Hexagon bit simplification", false, false)

// Apply transformation T to block B and, recursively, to all blocks that B
// dominates. AVs is the set of registers available (defined in dominators)
// on entry to B; the registers defined in B are added before recursing.
bool HexagonBitSimplify::visitBlock(MachineBasicBlock &B, Transformation &T,
      RegisterSet &AVs) {
  bool Changed = false;

  // Top-down transformations process a block before its dominatees.
  if (T.TopDown)
    Changed = T.processBlock(B, AVs);

  RegisterSet Defs;
  for (auto &I : B)
    getInstrDefs(I, Defs);
  RegisterSet NewAVs = AVs;
  NewAVs.insert(Defs);

  for (auto *DTN : children<MachineDomTreeNode*>(MDT->getNode(&B)))
    Changed |= visitBlock(*(DTN->getBlock()), T, NewAVs);

  // Bottom-up transformations process a block after its dominatees.
  if (!T.TopDown)
    Changed |= T.processBlock(B, AVs);

  return Changed;
}

//
// Utility functions:
//
// Collect the virtual registers defined by MI into Defs. Physical register
// defs are ignored.
void HexagonBitSimplify::getInstrDefs(const MachineInstr &MI,
      RegisterSet &Defs) {
  for (auto &Op : MI.operands()) {
    if (!Op.isReg() || !Op.isDef())
      continue;
    unsigned R = Op.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(R))
      continue;
    Defs.insert(R);
  }
}

// Collect the virtual registers used by MI into Uses. Physical register
// uses are ignored.
void HexagonBitSimplify::getInstrUses(const MachineInstr &MI,
      RegisterSet &Uses) {
  for (auto &Op : MI.operands()) {
    if (!Op.isReg() || !Op.isUse())
      continue;
    unsigned R = Op.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(R))
      continue;
    Uses.insert(R);
  }
}

// Check if all the bits in range [B, E) in both cells are equal.
311 bool HexagonBitSimplify::isEqual(const BitTracker::RegisterCell &RC1, 312 uint16_t B1, const BitTracker::RegisterCell &RC2, uint16_t B2, 313 uint16_t W) { 314 for (uint16_t i = 0; i < W; ++i) { 315 // If RC1[i] is "bottom", it cannot be proven equal to RC2[i]. 316 if (RC1[B1+i].Type == BitTracker::BitValue::Ref && RC1[B1+i].RefI.Reg == 0) 317 return false; 318 // Same for RC2[i]. 319 if (RC2[B2+i].Type == BitTracker::BitValue::Ref && RC2[B2+i].RefI.Reg == 0) 320 return false; 321 if (RC1[B1+i] != RC2[B2+i]) 322 return false; 323 } 324 return true; 325 } 326 327 bool HexagonBitSimplify::isZero(const BitTracker::RegisterCell &RC, 328 uint16_t B, uint16_t W) { 329 assert(B < RC.width() && B+W <= RC.width()); 330 for (uint16_t i = B; i < B+W; ++i) 331 if (!RC[i].is(0)) 332 return false; 333 return true; 334 } 335 336 bool HexagonBitSimplify::getConst(const BitTracker::RegisterCell &RC, 337 uint16_t B, uint16_t W, uint64_t &U) { 338 assert(B < RC.width() && B+W <= RC.width()); 339 int64_t T = 0; 340 for (uint16_t i = B+W; i > B; --i) { 341 const BitTracker::BitValue &BV = RC[i-1]; 342 T <<= 1; 343 if (BV.is(1)) 344 T |= 1; 345 else if (!BV.is(0)) 346 return false; 347 } 348 U = T; 349 return true; 350 } 351 352 bool HexagonBitSimplify::replaceReg(unsigned OldR, unsigned NewR, 353 MachineRegisterInfo &MRI) { 354 if (!TargetRegisterInfo::isVirtualRegister(OldR) || 355 !TargetRegisterInfo::isVirtualRegister(NewR)) 356 return false; 357 auto Begin = MRI.use_begin(OldR), End = MRI.use_end(); 358 decltype(End) NextI; 359 for (auto I = Begin; I != End; I = NextI) { 360 NextI = std::next(I); 361 I->setReg(NewR); 362 } 363 return Begin != End; 364 } 365 366 bool HexagonBitSimplify::replaceRegWithSub(unsigned OldR, unsigned NewR, 367 unsigned NewSR, MachineRegisterInfo &MRI) { 368 if (!TargetRegisterInfo::isVirtualRegister(OldR) || 369 !TargetRegisterInfo::isVirtualRegister(NewR)) 370 return false; 371 if (hasTiedUse(OldR, MRI, NewSR)) 372 return false; 373 auto Begin = 
MRI.use_begin(OldR), End = MRI.use_end(); 374 decltype(End) NextI; 375 for (auto I = Begin; I != End; I = NextI) { 376 NextI = std::next(I); 377 I->setReg(NewR); 378 I->setSubReg(NewSR); 379 } 380 return Begin != End; 381 } 382 383 bool HexagonBitSimplify::replaceSubWithSub(unsigned OldR, unsigned OldSR, 384 unsigned NewR, unsigned NewSR, MachineRegisterInfo &MRI) { 385 if (!TargetRegisterInfo::isVirtualRegister(OldR) || 386 !TargetRegisterInfo::isVirtualRegister(NewR)) 387 return false; 388 if (OldSR != NewSR && hasTiedUse(OldR, MRI, NewSR)) 389 return false; 390 auto Begin = MRI.use_begin(OldR), End = MRI.use_end(); 391 decltype(End) NextI; 392 for (auto I = Begin; I != End; I = NextI) { 393 NextI = std::next(I); 394 if (I->getSubReg() != OldSR) 395 continue; 396 I->setReg(NewR); 397 I->setSubReg(NewSR); 398 } 399 return Begin != End; 400 } 401 402 // For a register ref (pair Reg:Sub), set Begin to the position of the LSB 403 // of Sub in Reg, and set Width to the size of Sub in bits. Return true, 404 // if this succeeded, otherwise return false. 405 bool HexagonBitSimplify::getSubregMask(const BitTracker::RegisterRef &RR, 406 unsigned &Begin, unsigned &Width, MachineRegisterInfo &MRI) { 407 const TargetRegisterClass *RC = MRI.getRegClass(RR.Reg); 408 if (RR.Sub == 0) { 409 Begin = 0; 410 Width = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC); 411 return true; 412 } 413 414 Begin = 0; 415 416 switch (RC->getID()) { 417 case Hexagon::DoubleRegsRegClassID: 418 case Hexagon::VecDblRegsRegClassID: 419 case Hexagon::VecDblRegs128BRegClassID: 420 Width = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC) / 2; 421 if (RR.Sub == Hexagon::isub_hi || RR.Sub == Hexagon::vsub_hi) 422 Begin = Width; 423 break; 424 default: 425 return false; 426 } 427 return true; 428 } 429 430 431 // For a REG_SEQUENCE, set SL to the low subregister and SH to the high 432 // subregister. 
433 bool HexagonBitSimplify::parseRegSequence(const MachineInstr &I, 434 BitTracker::RegisterRef &SL, BitTracker::RegisterRef &SH, 435 const MachineRegisterInfo &MRI) { 436 assert(I.getOpcode() == TargetOpcode::REG_SEQUENCE); 437 unsigned Sub1 = I.getOperand(2).getImm(), Sub2 = I.getOperand(4).getImm(); 438 auto *DstRC = MRI.getRegClass(I.getOperand(0).getReg()); 439 auto &HRI = static_cast<const HexagonRegisterInfo&>( 440 *MRI.getTargetRegisterInfo()); 441 unsigned SubLo = HRI.getHexagonSubRegIndex(DstRC, Hexagon::ps_sub_lo); 442 unsigned SubHi = HRI.getHexagonSubRegIndex(DstRC, Hexagon::ps_sub_hi); 443 assert((Sub1 == SubLo && Sub2 == SubHi) || (Sub1 == SubHi && Sub2 == SubLo)); 444 if (Sub1 == SubLo && Sub2 == SubHi) { 445 SL = I.getOperand(1); 446 SH = I.getOperand(3); 447 return true; 448 } 449 if (Sub1 == SubHi && Sub2 == SubLo) { 450 SH = I.getOperand(1); 451 SL = I.getOperand(3); 452 return true; 453 } 454 return false; 455 } 456 457 // All stores (except 64-bit stores) take a 32-bit register as the source 458 // of the value to be stored. If the instruction stores into a location 459 // that is shorter than 32 bits, some bits of the source register are not 460 // used. For each store instruction, calculate the set of used bits in 461 // the source register, and set appropriate bits in Bits. Return true if 462 // the bits are calculated, false otherwise. 
// Given a store opcode, mark in Bits which bits of the 32-bit source
// register the store actually consumes (8, 16, or the high 16), offset by
// Begin. Returns false for opcodes it does not recognize (e.g. 64-bit
// stores), meaning no claim is made about the used bits.
bool HexagonBitSimplify::getUsedBitsInStore(unsigned Opc, BitVector &Bits,
      uint16_t Begin) {
  using namespace Hexagon;

  switch (Opc) {
    // Store byte
    case S2_storerb_io:           // memb(Rs32+#s11:0)=Rt32
    case S2_storerbnew_io:        // memb(Rs32+#s11:0)=Nt8.new
    case S2_pstorerbt_io:         // if (Pv4) memb(Rs32+#u6:0)=Rt32
    case S2_pstorerbf_io:         // if (!Pv4) memb(Rs32+#u6:0)=Rt32
    case S4_pstorerbtnew_io:      // if (Pv4.new) memb(Rs32+#u6:0)=Rt32
    case S4_pstorerbfnew_io:      // if (!Pv4.new) memb(Rs32+#u6:0)=Rt32
    case S2_pstorerbnewt_io:      // if (Pv4) memb(Rs32+#u6:0)=Nt8.new
    case S2_pstorerbnewf_io:      // if (!Pv4) memb(Rs32+#u6:0)=Nt8.new
    case S4_pstorerbnewtnew_io:   // if (Pv4.new) memb(Rs32+#u6:0)=Nt8.new
    case S4_pstorerbnewfnew_io:   // if (!Pv4.new) memb(Rs32+#u6:0)=Nt8.new
    case S2_storerb_pi:           // memb(Rx32++#s4:0)=Rt32
    case S2_storerbnew_pi:        // memb(Rx32++#s4:0)=Nt8.new
    case S2_pstorerbt_pi:         // if (Pv4) memb(Rx32++#s4:0)=Rt32
    case S2_pstorerbf_pi:         // if (!Pv4) memb(Rx32++#s4:0)=Rt32
    case S2_pstorerbtnew_pi:      // if (Pv4.new) memb(Rx32++#s4:0)=Rt32
    case S2_pstorerbfnew_pi:      // if (!Pv4.new) memb(Rx32++#s4:0)=Rt32
    case S2_pstorerbnewt_pi:      // if (Pv4) memb(Rx32++#s4:0)=Nt8.new
    case S2_pstorerbnewf_pi:      // if (!Pv4) memb(Rx32++#s4:0)=Nt8.new
    case S2_pstorerbnewtnew_pi:   // if (Pv4.new) memb(Rx32++#s4:0)=Nt8.new
    case S2_pstorerbnewfnew_pi:   // if (!Pv4.new) memb(Rx32++#s4:0)=Nt8.new
    case S4_storerb_ap:           // memb(Re32=#U6)=Rt32
    case S4_storerbnew_ap:        // memb(Re32=#U6)=Nt8.new
    case S2_storerb_pr:           // memb(Rx32++Mu2)=Rt32
    case S2_storerbnew_pr:        // memb(Rx32++Mu2)=Nt8.new
    case S4_storerb_ur:           // memb(Ru32<<#u2+#U6)=Rt32
    case S4_storerbnew_ur:        // memb(Ru32<<#u2+#U6)=Nt8.new
    case S2_storerb_pbr:          // memb(Rx32++Mu2:brev)=Rt32
    case S2_storerbnew_pbr:       // memb(Rx32++Mu2:brev)=Nt8.new
    case S2_storerb_pci:          // memb(Rx32++#s4:0:circ(Mu2))=Rt32
    case S2_storerbnew_pci:       // memb(Rx32++#s4:0:circ(Mu2))=Nt8.new
    case S2_storerb_pcr:          // memb(Rx32++I:circ(Mu2))=Rt32
    case S2_storerbnew_pcr:       // memb(Rx32++I:circ(Mu2))=Nt8.new
    case S4_storerb_rr:           // memb(Rs32+Ru32<<#u2)=Rt32
    case S4_storerbnew_rr:        // memb(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerbt_rr:         // if (Pv4) memb(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerbf_rr:         // if (!Pv4) memb(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerbtnew_rr:      // if (Pv4.new) memb(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerbfnew_rr:      // if (!Pv4.new) memb(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerbnewt_rr:      // if (Pv4) memb(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerbnewf_rr:      // if (!Pv4) memb(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerbnewtnew_rr:   // if (Pv4.new) memb(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerbnewfnew_rr:   // if (!Pv4.new) memb(Rs32+Ru32<<#u2)=Nt8.new
    case S2_storerbgp:            // memb(gp+#u16:0)=Rt32
    case S2_storerbnewgp:         // memb(gp+#u16:0)=Nt8.new
    case S4_pstorerbt_abs:        // if (Pv4) memb(#u6)=Rt32
    case S4_pstorerbf_abs:        // if (!Pv4) memb(#u6)=Rt32
    case S4_pstorerbtnew_abs:     // if (Pv4.new) memb(#u6)=Rt32
    case S4_pstorerbfnew_abs:     // if (!Pv4.new) memb(#u6)=Rt32
    case S4_pstorerbnewt_abs:     // if (Pv4) memb(#u6)=Nt8.new
    case S4_pstorerbnewf_abs:     // if (!Pv4) memb(#u6)=Nt8.new
    case S4_pstorerbnewtnew_abs:  // if (Pv4.new) memb(#u6)=Nt8.new
    case S4_pstorerbnewfnew_abs:  // if (!Pv4.new) memb(#u6)=Nt8.new
      Bits.set(Begin, Begin+8);
      return true;

    // Store low half
    case S2_storerh_io:           // memh(Rs32+#s11:1)=Rt32
    case S2_storerhnew_io:        // memh(Rs32+#s11:1)=Nt8.new
    case S2_pstorerht_io:         // if (Pv4) memh(Rs32+#u6:1)=Rt32
    case S2_pstorerhf_io:         // if (!Pv4) memh(Rs32+#u6:1)=Rt32
    case S4_pstorerhtnew_io:      // if (Pv4.new) memh(Rs32+#u6:1)=Rt32
    case S4_pstorerhfnew_io:      // if (!Pv4.new) memh(Rs32+#u6:1)=Rt32
    case S2_pstorerhnewt_io:      // if (Pv4) memh(Rs32+#u6:1)=Nt8.new
    case S2_pstorerhnewf_io:      // if (!Pv4) memh(Rs32+#u6:1)=Nt8.new
    case S4_pstorerhnewtnew_io:   // if (Pv4.new) memh(Rs32+#u6:1)=Nt8.new
    case S4_pstorerhnewfnew_io:   // if (!Pv4.new) memh(Rs32+#u6:1)=Nt8.new
    case S2_storerh_pi:           // memh(Rx32++#s4:1)=Rt32
    case S2_storerhnew_pi:        // memh(Rx32++#s4:1)=Nt8.new
    case S2_pstorerht_pi:         // if (Pv4) memh(Rx32++#s4:1)=Rt32
    case S2_pstorerhf_pi:         // if (!Pv4) memh(Rx32++#s4:1)=Rt32
    case S2_pstorerhtnew_pi:      // if (Pv4.new) memh(Rx32++#s4:1)=Rt32
    case S2_pstorerhfnew_pi:      // if (!Pv4.new) memh(Rx32++#s4:1)=Rt32
    case S2_pstorerhnewt_pi:      // if (Pv4) memh(Rx32++#s4:1)=Nt8.new
    case S2_pstorerhnewf_pi:      // if (!Pv4) memh(Rx32++#s4:1)=Nt8.new
    case S2_pstorerhnewtnew_pi:   // if (Pv4.new) memh(Rx32++#s4:1)=Nt8.new
    case S2_pstorerhnewfnew_pi:   // if (!Pv4.new) memh(Rx32++#s4:1)=Nt8.new
    case S4_storerh_ap:           // memh(Re32=#U6)=Rt32
    case S4_storerhnew_ap:        // memh(Re32=#U6)=Nt8.new
    case S2_storerh_pr:           // memh(Rx32++Mu2)=Rt32
    case S2_storerhnew_pr:        // memh(Rx32++Mu2)=Nt8.new
    case S4_storerh_ur:           // memh(Ru32<<#u2+#U6)=Rt32
    case S4_storerhnew_ur:        // memh(Ru32<<#u2+#U6)=Nt8.new
    case S2_storerh_pbr:          // memh(Rx32++Mu2:brev)=Rt32
    case S2_storerhnew_pbr:       // memh(Rx32++Mu2:brev)=Nt8.new
    case S2_storerh_pci:          // memh(Rx32++#s4:1:circ(Mu2))=Rt32
    case S2_storerhnew_pci:       // memh(Rx32++#s4:1:circ(Mu2))=Nt8.new
    case S2_storerh_pcr:          // memh(Rx32++I:circ(Mu2))=Rt32
    case S2_storerhnew_pcr:       // memh(Rx32++I:circ(Mu2))=Nt8.new
    case S4_storerh_rr:           // memh(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerht_rr:         // if (Pv4) memh(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerhf_rr:         // if (!Pv4) memh(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerhtnew_rr:      // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerhfnew_rr:      // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Rt32
    case S4_storerhnew_rr:        // memh(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerhnewt_rr:      // if (Pv4) memh(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerhnewf_rr:      // if (!Pv4) memh(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerhnewtnew_rr:   // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerhnewfnew_rr:   // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Nt8.new
    case S2_storerhgp:            // memh(gp+#u16:1)=Rt32
    case S2_storerhnewgp:         // memh(gp+#u16:1)=Nt8.new
    case S4_pstorerht_abs:        // if (Pv4) memh(#u6)=Rt32
    case S4_pstorerhf_abs:        // if (!Pv4) memh(#u6)=Rt32
    case S4_pstorerhtnew_abs:     // if (Pv4.new) memh(#u6)=Rt32
    case S4_pstorerhfnew_abs:     // if (!Pv4.new) memh(#u6)=Rt32
    case S4_pstorerhnewt_abs:     // if (Pv4) memh(#u6)=Nt8.new
    case S4_pstorerhnewf_abs:     // if (!Pv4) memh(#u6)=Nt8.new
    case S4_pstorerhnewtnew_abs:  // if (Pv4.new) memh(#u6)=Nt8.new
    case S4_pstorerhnewfnew_abs:  // if (!Pv4.new) memh(#u6)=Nt8.new
      Bits.set(Begin, Begin+16);
      return true;

    // Store high half
    case S2_storerf_io:           // memh(Rs32+#s11:1)=Rt.H32
    case S2_pstorerft_io:         // if (Pv4) memh(Rs32+#u6:1)=Rt.H32
    case S2_pstorerff_io:         // if (!Pv4) memh(Rs32+#u6:1)=Rt.H32
    case S4_pstorerftnew_io:      // if (Pv4.new) memh(Rs32+#u6:1)=Rt.H32
    case S4_pstorerffnew_io:      // if (!Pv4.new) memh(Rs32+#u6:1)=Rt.H32
    case S2_storerf_pi:           // memh(Rx32++#s4:1)=Rt.H32
    case S2_pstorerft_pi:         // if (Pv4) memh(Rx32++#s4:1)=Rt.H32
    case S2_pstorerff_pi:         // if (!Pv4) memh(Rx32++#s4:1)=Rt.H32
    case S2_pstorerftnew_pi:      // if (Pv4.new) memh(Rx32++#s4:1)=Rt.H32
    case S2_pstorerffnew_pi:      // if (!Pv4.new) memh(Rx32++#s4:1)=Rt.H32
    case S4_storerf_ap:           // memh(Re32=#U6)=Rt.H32
    case S2_storerf_pr:           // memh(Rx32++Mu2)=Rt.H32
    case S4_storerf_ur:           // memh(Ru32<<#u2+#U6)=Rt.H32
    case S2_storerf_pbr:          // memh(Rx32++Mu2:brev)=Rt.H32
    case S2_storerf_pci:          // memh(Rx32++#s4:1:circ(Mu2))=Rt.H32
    case S2_storerf_pcr:          // memh(Rx32++I:circ(Mu2))=Rt.H32
    case S4_storerf_rr:           // memh(Rs32+Ru32<<#u2)=Rt.H32
    case S4_pstorerft_rr:         // if (Pv4) memh(Rs32+Ru32<<#u2)=Rt.H32
    case S4_pstorerff_rr:         // if (!Pv4) memh(Rs32+Ru32<<#u2)=Rt.H32
    case S4_pstorerftnew_rr:      // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Rt.H32
    case S4_pstorerffnew_rr:      // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Rt.H32
    case S2_storerfgp:            // memh(gp+#u16:1)=Rt.H32
    case S4_pstorerft_abs:        // if (Pv4) memh(#u6)=Rt.H32
    case S4_pstorerff_abs:        // if (!Pv4) memh(#u6)=Rt.H32
    case S4_pstorerftnew_abs:     // if (Pv4.new) memh(#u6)=Rt.H32
    case S4_pstorerffnew_abs:     // if (!Pv4.new) memh(#u6)=Rt.H32
      Bits.set(Begin+16, Begin+32);
      return true;
  }

  return false;
}

// For an instruction with opcode Opc, calculate the set of bits that it
// uses in a register in operand OpN. This only calculates the set of used
// bits for cases where it does not depend on any operands (as is the case
// in shifts, for example). For concrete instructions from a program, the
// operand may be a subregister of a larger register, while Bits would
// correspond to the larger register in its entirety. Because of that,
// the parameter Begin can be used to indicate which bit of Bits should be
// considered the LSB of the operand.
bool HexagonBitSimplify::getUsedBits(unsigned Opc, unsigned OpN,
      BitVector &Bits, uint16_t Begin, const HexagonInstrInfo &HII) {
  using namespace Hexagon;

  const MCInstrDesc &D = HII.get(Opc);
  if (D.mayStore()) {
    // The stored value is the last operand of a store.
    if (OpN == D.getNumOperands()-1)
      return getUsedBitsInStore(Opc, Bits, Begin);
    return false;
  }

  switch (Opc) {
    // One register source. Used bits: R1[0-7].
    case A2_sxtb:
    case A2_zxtb:
    case A4_cmpbeqi:
    case A4_cmpbgti:
    case A4_cmpbgtui:
      if (OpN == 1) {
        Bits.set(Begin, Begin+8);
        return true;
      }
      break;

    // One register source. Used bits: R1[0-15].
    case A2_aslh:
    case A2_sxth:
    case A2_zxth:
    case A4_cmpheqi:
    case A4_cmphgti:
    case A4_cmphgtui:
      if (OpN == 1) {
        Bits.set(Begin, Begin+16);
        return true;
      }
      break;

    // One register source. Used bits: R1[16-31].
    case A2_asrh:
      if (OpN == 1) {
        Bits.set(Begin+16, Begin+32);
        return true;
      }
      break;

    // Two register sources. Used bits: R1[0-7], R2[0-7].
    case A4_cmpbeq:
    case A4_cmpbgt:
    case A4_cmpbgtu:
      if (OpN == 1) {
        Bits.set(Begin, Begin+8);
        return true;
      }
      break;

    // Two register sources. Used bits: R1[0-15], R2[0-15].
    case A4_cmpheq:
    case A4_cmphgt:
    case A4_cmphgtu:
    case A2_addh_h16_ll:
    case A2_addh_h16_sat_ll:
    case A2_addh_l16_ll:
    case A2_addh_l16_sat_ll:
    case A2_combine_ll:
    case A2_subh_h16_ll:
    case A2_subh_h16_sat_ll:
    case A2_subh_l16_ll:
    case A2_subh_l16_sat_ll:
    case M2_mpy_acc_ll_s0:
    case M2_mpy_acc_ll_s1:
    case M2_mpy_acc_sat_ll_s0:
    case M2_mpy_acc_sat_ll_s1:
    case M2_mpy_ll_s0:
    case M2_mpy_ll_s1:
    case M2_mpy_nac_ll_s0:
    case M2_mpy_nac_ll_s1:
    case M2_mpy_nac_sat_ll_s0:
    case M2_mpy_nac_sat_ll_s1:
    case M2_mpy_rnd_ll_s0:
    case M2_mpy_rnd_ll_s1:
    case M2_mpy_sat_ll_s0:
    case M2_mpy_sat_ll_s1:
    case M2_mpy_sat_rnd_ll_s0:
    case M2_mpy_sat_rnd_ll_s1:
    case M2_mpyd_acc_ll_s0:
    case M2_mpyd_acc_ll_s1:
    case M2_mpyd_ll_s0:
    case M2_mpyd_ll_s1:
    case M2_mpyd_nac_ll_s0:
    case M2_mpyd_nac_ll_s1:
    case M2_mpyd_rnd_ll_s0:
    case M2_mpyd_rnd_ll_s1:
    case M2_mpyu_acc_ll_s0:
    case M2_mpyu_acc_ll_s1:
    case M2_mpyu_ll_s0:
    case M2_mpyu_ll_s1:
    case M2_mpyu_nac_ll_s0:
    case M2_mpyu_nac_ll_s1:
    case M2_mpyud_acc_ll_s0:
    case M2_mpyud_acc_ll_s1:
    case M2_mpyud_ll_s0:
    case M2_mpyud_ll_s1:
    case M2_mpyud_nac_ll_s0:
    case M2_mpyud_nac_ll_s1:
      if (OpN == 1 || OpN == 2) {
        Bits.set(Begin, Begin+16);
        return true;
      }
      break;

    // Two register sources. Used bits: R1[0-15], R2[16-31].
    case A2_addh_h16_lh:
    case A2_addh_h16_sat_lh:
    case A2_combine_lh:
    case A2_subh_h16_lh:
    case A2_subh_h16_sat_lh:
    case M2_mpy_acc_lh_s0:
    case M2_mpy_acc_lh_s1:
    case M2_mpy_acc_sat_lh_s0:
    case M2_mpy_acc_sat_lh_s1:
    case M2_mpy_lh_s0:
    case M2_mpy_lh_s1:
    case M2_mpy_nac_lh_s0:
    case M2_mpy_nac_lh_s1:
    case M2_mpy_nac_sat_lh_s0:
    case M2_mpy_nac_sat_lh_s1:
    case M2_mpy_rnd_lh_s0:
    case M2_mpy_rnd_lh_s1:
    case M2_mpy_sat_lh_s0:
    case M2_mpy_sat_lh_s1:
    case M2_mpy_sat_rnd_lh_s0:
    case M2_mpy_sat_rnd_lh_s1:
    case M2_mpyd_acc_lh_s0:
    case M2_mpyd_acc_lh_s1:
    case M2_mpyd_lh_s0:
    case M2_mpyd_lh_s1:
    case M2_mpyd_nac_lh_s0:
    case M2_mpyd_nac_lh_s1:
    case M2_mpyd_rnd_lh_s0:
    case M2_mpyd_rnd_lh_s1:
    case M2_mpyu_acc_lh_s0:
    case M2_mpyu_acc_lh_s1:
    case M2_mpyu_lh_s0:
    case M2_mpyu_lh_s1:
    case M2_mpyu_nac_lh_s0:
    case M2_mpyu_nac_lh_s1:
    case M2_mpyud_acc_lh_s0:
    case M2_mpyud_acc_lh_s1:
    case M2_mpyud_lh_s0:
    case M2_mpyud_lh_s1:
    case M2_mpyud_nac_lh_s0:
    case M2_mpyud_nac_lh_s1:
    // These four are actually LH.
    case A2_addh_l16_hl:
    case A2_addh_l16_sat_hl:
    case A2_subh_l16_hl:
    case A2_subh_l16_sat_hl:
      if (OpN == 1) {
        Bits.set(Begin, Begin+16);
        return true;
      }
      if (OpN == 2) {
        Bits.set(Begin+16, Begin+32);
        return true;
      }
      break;

    // Two register sources, used bits: R1[16-31], R2[0-15].
    case A2_addh_h16_hl:
    case A2_addh_h16_sat_hl:
    case A2_combine_hl:
    case A2_subh_h16_hl:
    case A2_subh_h16_sat_hl:
    case M2_mpy_acc_hl_s0:
    case M2_mpy_acc_hl_s1:
    case M2_mpy_acc_sat_hl_s0:
    case M2_mpy_acc_sat_hl_s1:
    case M2_mpy_hl_s0:
    case M2_mpy_hl_s1:
    case M2_mpy_nac_hl_s0:
    case M2_mpy_nac_hl_s1:
    case M2_mpy_nac_sat_hl_s0:
    case M2_mpy_nac_sat_hl_s1:
    case M2_mpy_rnd_hl_s0:
    case M2_mpy_rnd_hl_s1:
    case M2_mpy_sat_hl_s0:
    case M2_mpy_sat_hl_s1:
    case M2_mpy_sat_rnd_hl_s0:
    case M2_mpy_sat_rnd_hl_s1:
    case M2_mpyd_acc_hl_s0:
    case M2_mpyd_acc_hl_s1:
    case M2_mpyd_hl_s0:
    case M2_mpyd_hl_s1:
    case M2_mpyd_nac_hl_s0:
    case M2_mpyd_nac_hl_s1:
    case M2_mpyd_rnd_hl_s0:
    case M2_mpyd_rnd_hl_s1:
    case M2_mpyu_acc_hl_s0:
    case M2_mpyu_acc_hl_s1:
    case M2_mpyu_hl_s0:
    case M2_mpyu_hl_s1:
    case M2_mpyu_nac_hl_s0:
    case M2_mpyu_nac_hl_s1:
    case M2_mpyud_acc_hl_s0:
    case M2_mpyud_acc_hl_s1:
    case M2_mpyud_hl_s0:
    case M2_mpyud_hl_s1:
    case M2_mpyud_nac_hl_s0:
    case M2_mpyud_nac_hl_s1:
      if (OpN == 1) {
        Bits.set(Begin+16, Begin+32);
        return true;
      }
      if (OpN == 2) {
        Bits.set(Begin, Begin+16);
        return true;
      }
      break;

    // Two register sources, used bits: R1[16-31], R2[16-31].
    case A2_addh_h16_hh:
    case A2_addh_h16_sat_hh:
    case A2_combine_hh:
    case A2_subh_h16_hh:
    case A2_subh_h16_sat_hh:
    case M2_mpy_acc_hh_s0:
    case M2_mpy_acc_hh_s1:
    case M2_mpy_acc_sat_hh_s0:
    case M2_mpy_acc_sat_hh_s1:
    case M2_mpy_hh_s0:
    case M2_mpy_hh_s1:
    case M2_mpy_nac_hh_s0:
    case M2_mpy_nac_hh_s1:
    case M2_mpy_nac_sat_hh_s0:
    case M2_mpy_nac_sat_hh_s1:
    case M2_mpy_rnd_hh_s0:
    case M2_mpy_rnd_hh_s1:
    case M2_mpy_sat_hh_s0:
    case M2_mpy_sat_hh_s1:
    case M2_mpy_sat_rnd_hh_s0:
    case M2_mpy_sat_rnd_hh_s1:
    case M2_mpyd_acc_hh_s0:
    case M2_mpyd_acc_hh_s1:
    case M2_mpyd_hh_s0:
    case M2_mpyd_hh_s1:
    case M2_mpyd_nac_hh_s0:
    case M2_mpyd_nac_hh_s1:
    case M2_mpyd_rnd_hh_s0:
    case M2_mpyd_rnd_hh_s1:
    case M2_mpyu_acc_hh_s0:
    case M2_mpyu_acc_hh_s1:
    case M2_mpyu_hh_s0:
    case M2_mpyu_hh_s1:
    case M2_mpyu_nac_hh_s0:
    case M2_mpyu_nac_hh_s1:
    case M2_mpyud_acc_hh_s0:
    case M2_mpyud_acc_hh_s1:
    case M2_mpyud_hh_s0:
    case M2_mpyud_hh_s1:
    case M2_mpyud_nac_hh_s0:
    case M2_mpyud_nac_hh_s1:
      if (OpN == 1 || OpN == 2) {
        Bits.set(Begin+16, Begin+32);
        return true;
      }
      break;
  }

  return false;
}

// Calculate the register class that matches Reg:Sub. For example, if
// vreg1 is a double register, then vreg1:isub_hi would match the "int"
// register class.
const TargetRegisterClass *HexagonBitSimplify::getFinalVRegClass(
      const BitTracker::RegisterRef &RR, MachineRegisterInfo &MRI) {
  // Only virtual registers have a register class recorded in MRI.
  if (!TargetRegisterInfo::isVirtualRegister(RR.Reg))
    return nullptr;
  auto *RC = MRI.getRegClass(RR.Reg);
  // No subregister: the class of the whole register is the answer.
  if (RR.Sub == 0)
    return RC;
  auto &HRI = static_cast<const HexagonRegisterInfo&>(
                  *MRI.getTargetRegisterInfo());

  // Sanity check: the only subregister indices expected here are the
  // low/high halves of a register-pair class.
  auto VerifySR = [&HRI] (const TargetRegisterClass *RC, unsigned Sub) -> void {
    (void)HRI;   // Silence "unused capture" in no-asserts builds.
    assert(Sub == HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_lo) ||
           Sub == HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_hi));
  };

  // Map each pair class to the class of one of its halves.
  switch (RC->getID()) {
    case Hexagon::DoubleRegsRegClassID:
      VerifySR(RC, RR.Sub);
      return &Hexagon::IntRegsRegClass;
    case Hexagon::VecDblRegsRegClassID:
      VerifySR(RC, RR.Sub);
      return &Hexagon::VectorRegsRegClass;
    case Hexagon::VecDblRegs128BRegClassID:
      VerifySR(RC, RR.Sub);
      return &Hexagon::VectorRegs128BRegClass;
  }
  // Unknown class/subregister combination.
  return nullptr;
}

// Check if RD could be replaced with RS at any possible use of RD.
// For example a predicate register cannot be replaced with an integer
// register, but a 64-bit register with a subregister can be replaced
// with a 32-bit register.
bool HexagonBitSimplify::isTransparentCopy(const BitTracker::RegisterRef &RD,
      const BitTracker::RegisterRef &RS, MachineRegisterInfo &MRI) {
  // Both sides must be virtual registers to be rewritable.
  if (!TargetRegisterInfo::isVirtualRegister(RD.Reg) ||
      !TargetRegisterInfo::isVirtualRegister(RS.Reg))
    return false;
  // Return false if one (or both) classes are nullptr.
  auto *DRC = getFinalVRegClass(RD, MRI);
  if (!DRC)
    return false;

  // Transparent iff both Reg:Sub pairs resolve to the same final class.
  return DRC == getFinalVRegClass(RS, MRI);
}

// Check whether Reg has a tied use with a subregister other than NewSub.
// Used to avoid rewriting subregisters in tied operands when the option
// "hexbit-keep-tied" is set (the default).
bool HexagonBitSimplify::hasTiedUse(unsigned Reg, MachineRegisterInfo &MRI,
      unsigned NewSub) {
  if (!PreserveTiedOps)
    return false;
  return llvm::any_of(MRI.use_operands(Reg),
                      [NewSub] (const MachineOperand &Op) -> bool {
                        return Op.getSubReg() != NewSub && Op.isTied();
                      });
}

namespace {

// Simple dead-code elimination driven by the machine dominator tree.
// Kept local to this pass because the target-independent DCE does not
// honor lifetime markers (see runOnNode below).
class DeadCodeElimination {
public:
  DeadCodeElimination(MachineFunction &mf, MachineDominatorTree &mdt)
    : MF(mf), HII(*MF.getSubtarget<HexagonSubtarget>().getInstrInfo()),
      MDT(mdt), MRI(mf.getRegInfo()) {}

  // Run DCE over the whole function, children-first in the dom tree.
  bool run() {
    return runOnNode(MDT.getRootNode());
  }

private:
  bool isDead(unsigned R) const;
  bool runOnNode(MachineDomTreeNode *N);

  MachineFunction &MF;
  const HexagonInstrInfo &HII;
  MachineDominatorTree &MDT;
  MachineRegisterInfo &MRI;
};

} // end anonymous namespace

// A register is dead if its only uses are debug values, or PHIs that
// define the register itself (a self-referential PHI cycle).
bool DeadCodeElimination::isDead(unsigned R) const {
  for (auto I = MRI.use_begin(R), E = MRI.use_end(); I != E; ++I) {
    MachineInstr *UseI = I->getParent();
    if (UseI->isDebugValue())
      continue;
    if (UseI->isPHI()) {
      assert(!UseI->getOperand(0).getSubReg());
      unsigned DR = UseI->getOperand(0).getReg();
      // A PHI whose result is R itself does not keep R alive.
      if (DR == R)
        continue;
    }
    return false;
  }
  return true;
}

// Process dominated blocks first so uses are removed before their defs
// are examined; within a block, scan bottom-up for the same reason.
bool DeadCodeElimination::runOnNode(MachineDomTreeNode *N) {
  bool Changed = false;

  for (auto *DTN : children<MachineDomTreeNode*>(N))
    Changed |= runOnNode(DTN);

  MachineBasicBlock *B = N->getBlock();
  // Snapshot the instructions (in reverse order) since we erase as we go.
  std::vector<MachineInstr*> Instrs;
  for (auto I = B->rbegin(), E = B->rend(); I != E; ++I)
    Instrs.push_back(&*I);

  for (auto MI : Instrs) {
    unsigned Opc = MI->getOpcode();
    // Do not touch lifetime markers. This is why the target-independent DCE
    // cannot be used.
    if (Opc == TargetOpcode::LIFETIME_START ||
        Opc == TargetOpcode::LIFETIME_END)
      continue;
    bool Store = false;
    if (MI->isInlineAsm())
      continue;
    // Delete PHIs if possible. Other instructions must be safe to move
    // (i.e. free of side effects) before they can be deleted.
    if (!MI->isPHI() && !MI->isSafeToMove(nullptr, Store))
      continue;

    // The instruction is deletable only if every register it defines
    // is a dead virtual register.
    bool AllDead = true;
    SmallVector<unsigned,2> Regs;
    for (auto &Op : MI->operands()) {
      if (!Op.isReg() || !Op.isDef())
        continue;
      unsigned R = Op.getReg();
      if (!TargetRegisterInfo::isVirtualRegister(R) || !isDead(R)) {
        AllDead = false;
        break;
      }
      Regs.push_back(R);
    }
    if (!AllDead)
      continue;

    B->erase(MI);
    // Keep DBG_VALUEs consistent with the deleted defs.
    for (unsigned i = 0, n = Regs.size(); i != n; ++i)
      MRI.markUsesInDebugValueAsUndef(Regs[i]);
    Changed = true;
  }

  return Changed;
}

namespace {

// Eliminate redundant instructions
//
// This transformation will identify instructions where the output register
// is the same as one of its input registers. This only works on instructions
// that define a single register (unlike post-increment loads, for example).
// The equality check is actually more detailed: the code calculates which
// bits of the output are used, and only compares these bits with the input
// registers.
// If the output matches an input, the instruction is replaced with COPY.
// The copies will be removed by another transformation.
class RedundantInstrElimination : public Transformation {
public:
  RedundantInstrElimination(BitTracker &bt, const HexagonInstrInfo &hii,
        const HexagonRegisterInfo &hri, MachineRegisterInfo &mri)
      : Transformation(true), HII(hii), HRI(hri), MRI(mri), BT(bt) {}

  // Scan one basic block; returns true if any instruction was replaced.
  bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;

private:
  // Recognize shifts that discard high (left) or low (right) bits of the
  // operand OpN; [LostB, LostE) is the discarded bit range.
  bool isLossyShiftLeft(const MachineInstr &MI, unsigned OpN,
        unsigned &LostB, unsigned &LostE);
  bool isLossyShiftRight(const MachineInstr &MI, unsigned OpN,
        unsigned &LostB, unsigned &LostE);
  // Compute which bits of a register (or of an operand) are ever used.
  bool computeUsedBits(unsigned Reg, BitVector &Bits);
  bool computeUsedBits(const MachineInstr &MI, unsigned OpN, BitVector &Bits,
        uint16_t Begin);
  // Check if the used bits of RD equal the corresponding bits of RS.
  bool usedBitsEqual(BitTracker::RegisterRef RD, BitTracker::RegisterRef RS);

  const HexagonInstrInfo &HII;
  const HexagonRegisterInfo &HRI;
  MachineRegisterInfo &MRI;
  BitTracker &BT;
};

} // end anonymous namespace

// Check if the instruction is a lossy shift left, where the input being
// shifted is the operand OpN of MI. If true, [LostB, LostE) is the range
// of bit indices that are lost.
bool RedundantInstrElimination::isLossyShiftLeft(const MachineInstr &MI,
      unsigned OpN, unsigned &LostB, unsigned &LostE) {
  using namespace Hexagon;

  unsigned Opc = MI.getOpcode();
  // ImN: operand index of the shift amount immediate.
  // RegN: operand index of the shifted register.
  // Width: bit width of the shifted register.
  unsigned ImN, RegN, Width;
  switch (Opc) {
    case S2_asl_i_p:
      ImN = 2;
      RegN = 1;
      Width = 64;
      break;
    case S2_asl_i_p_acc:
    case S2_asl_i_p_and:
    case S2_asl_i_p_nac:
    case S2_asl_i_p_or:
    case S2_asl_i_p_xacc:
      // Accumulating forms: operand 1 is the accumulator, 2 the source.
      ImN = 3;
      RegN = 2;
      Width = 64;
      break;
    case S2_asl_i_r:
      ImN = 2;
      RegN = 1;
      Width = 32;
      break;
    case S2_addasl_rrri:
    case S4_andi_asl_ri:
    case S4_ori_asl_ri:
    case S4_addi_asl_ri:
    case S4_subi_asl_ri:
    case S2_asl_i_r_acc:
    case S2_asl_i_r_and:
    case S2_asl_i_r_nac:
    case S2_asl_i_r_or:
    case S2_asl_i_r_sat:
    case S2_asl_i_r_xacc:
      ImN = 3;
      RegN = 2;
      Width = 32;
      break;
    default:
      return false;
  }

  // Only report lossiness for the operand that is actually shifted.
  if (RegN != OpN)
    return false;

  assert(MI.getOperand(ImN).isImm());
  unsigned S = MI.getOperand(ImN).getImm();
  // A shift by zero loses nothing.
  if (S == 0)
    return false;
  // Shifting left by S drops the top S bits.
  LostB = Width-S;
  LostE = Width;
  return true;
}

// Check if the instruction is a lossy shift right, where the input being
// shifted is the operand OpN of MI. If true, [LostB, LostE) is the range
// of bit indices that are lost.
bool RedundantInstrElimination::isLossyShiftRight(const MachineInstr &MI,
      unsigned OpN, unsigned &LostB, unsigned &LostE) {
  using namespace Hexagon;

  unsigned Opc = MI.getOpcode();
  // ImN: operand index of the shift amount immediate.
  // RegN: operand index of the shifted register. (The register width is
  // not needed here: a right shift always drops the low bits.)
  unsigned ImN, RegN;
  switch (Opc) {
    case S2_asr_i_p:
    case S2_lsr_i_p:
      ImN = 2;
      RegN = 1;
      break;
    case S2_asr_i_p_acc:
    case S2_asr_i_p_and:
    case S2_asr_i_p_nac:
    case S2_asr_i_p_or:
    case S2_lsr_i_p_acc:
    case S2_lsr_i_p_and:
    case S2_lsr_i_p_nac:
    case S2_lsr_i_p_or:
    case S2_lsr_i_p_xacc:
      // Accumulating forms: operand 1 is the accumulator, 2 the source.
      ImN = 3;
      RegN = 2;
      break;
    case S2_asr_i_r:
    case S2_lsr_i_r:
      ImN = 2;
      RegN = 1;
      break;
    case S4_andi_lsr_ri:
    case S4_ori_lsr_ri:
    case S4_addi_lsr_ri:
    case S4_subi_lsr_ri:
    case S2_asr_i_r_acc:
    case S2_asr_i_r_and:
    case S2_asr_i_r_nac:
    case S2_asr_i_r_or:
    case S2_lsr_i_r_acc:
    case S2_lsr_i_r_and:
    case S2_lsr_i_r_nac:
    case S2_lsr_i_r_or:
    case S2_lsr_i_r_xacc:
      ImN = 3;
      RegN = 2;
      break;

    default:
      return false;
  }

  // Only report lossiness for the operand that is actually shifted.
  if (RegN != OpN)
    return false;

  assert(MI.getOperand(ImN).isImm());
  unsigned S = MI.getOperand(ImN).getImm();
  // Shifting right by S drops the low S bits. For S == 0 the returned
  // range [0, 0) is empty, so callers mark no bits as lost.
  LostB = 0;
  LostE = S;
  return true;
}

// Calculate the bit vector that corresponds to the used bits of register Reg.
// The vector Bits has the same size, as the size of Reg in bits. If the cal-
// culation fails (i.e. the used bits are unknown), it returns false. Other-
// wise, it returns true and sets the corresponding bits in Bits.
bool RedundantInstrElimination::computeUsedBits(unsigned Reg, BitVector &Bits) {
  // Worklist traversal: start at Reg and follow COPY/PHI results, so that
  // the "used bits" of Reg include the bits used through any register that
  // merely forwards Reg's value.
  BitVector Used(Bits.size());
  RegisterSet Visited;
  std::vector<unsigned> Pending;
  Pending.push_back(Reg);

  for (unsigned i = 0; i < Pending.size(); ++i) {
    unsigned R = Pending[i];
    if (Visited.has(R))
      continue;
    Visited.insert(R);
    for (auto I = MRI.use_begin(R), E = MRI.use_end(); I != E; ++I) {
      BitTracker::RegisterRef UR = *I;
      // B is the position of UR's subregister within the full register,
      // W is its width; fail if the subregister cannot be resolved.
      unsigned B, W;
      if (!HBS::getSubregMask(UR, B, W, MRI))
        return false;
      MachineInstr &UseI = *I->getParent();
      if (UseI.isPHI() || UseI.isCopy()) {
        // The value flows through unchanged: chase the destination.
        unsigned DefR = UseI.getOperand(0).getReg();
        if (!TargetRegisterInfo::isVirtualRegister(DefR))
          return false;
        Pending.push_back(DefR);
      } else {
        // A real use: ask the per-instruction analysis which bits it reads.
        if (!computeUsedBits(UseI, I.getOperandNo(), Used, B))
          return false;
      }
    }
  }
  // Only merge into Bits once the whole traversal has succeeded.
  Bits |= Used;
  return true;
}

// Calculate the bits used by instruction MI in a register in operand OpN.
// Return true/false if the calculation succeeds/fails. If is succeeds, set
// used bits in Bits. This function does not reset any bits in Bits, so
// subsequent calls over different instructions will result in the union
// of the used bits in all these instructions.
// The register in question may be used with a sub-register, whereas Bits
// holds the bits for the entire register. To keep track of that, the
// argument Begin indicates where in Bits is the lowest-significant bit
// of the register used in operand OpN. For example, in instruction:
//   vreg1 = S2_lsr_i_r vreg2:isub_hi, 10
// the operand 1 is a 32-bit register, which happens to be a subregister
// of the 64-bit register vreg2, and that subregister starts at position 32.
// In this case Begin=32, since Bits[32] would be the lowest-significant bit
// of vreg2:isub_hi.
bool RedundantInstrElimination::computeUsedBits(const MachineInstr &MI,
      unsigned OpN, BitVector &Bits, uint16_t Begin) {
  unsigned Opc = MI.getOpcode();
  BitVector T(Bits.size());
  bool GotBits = HBS::getUsedBits(Opc, OpN, T, Begin, HII);
  // Even if we don't have bits yet, we could still provide some information
  // if the instruction is a lossy shift: the lost bits will be marked as
  // not used.
  unsigned LB, LE;
  if (isLossyShiftLeft(MI, OpN, LB, LE) || isLossyShiftRight(MI, OpN, LB, LE)) {
    assert(MI.getOperand(OpN).isReg());
    BitTracker::RegisterRef RR = MI.getOperand(OpN);
    // NOTE(review): getFinalVRegClass can return nullptr (e.g. for an
    // unrecognized class/subregister pairing); this assumes shift operands
    // always resolve to a valid class — confirm.
    const TargetRegisterClass *RC = HBS::getFinalVRegClass(RR, MRI);
    uint16_t Width = HRI.getRegSizeInBits(*RC);

    if (!GotBits)
      // Start from "all bits used" and subtract the lost range below.
      T.set(Begin, Begin+Width);
    assert(LB <= LE && LB < Width && LE <= Width);
    T.reset(Begin+LB, Begin+LE);
    GotBits = true;
  }
  // Accumulate: Bits is the union over all calls (never reset here).
  if (GotBits)
    Bits |= T;
  return GotBits;
}

// Calculates the used bits in RD ("defined register"), and checks if these
// bits in RS ("used register") and RD are identical.
1281 bool RedundantInstrElimination::usedBitsEqual(BitTracker::RegisterRef RD, 1282 BitTracker::RegisterRef RS) { 1283 const BitTracker::RegisterCell &DC = BT.lookup(RD.Reg); 1284 const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg); 1285 1286 unsigned DB, DW; 1287 if (!HBS::getSubregMask(RD, DB, DW, MRI)) 1288 return false; 1289 unsigned SB, SW; 1290 if (!HBS::getSubregMask(RS, SB, SW, MRI)) 1291 return false; 1292 if (SW != DW) 1293 return false; 1294 1295 BitVector Used(DC.width()); 1296 if (!computeUsedBits(RD.Reg, Used)) 1297 return false; 1298 1299 for (unsigned i = 0; i != DW; ++i) 1300 if (Used[i+DB] && DC[DB+i] != SC[SB+i]) 1301 return false; 1302 return true; 1303 } 1304 1305 bool RedundantInstrElimination::processBlock(MachineBasicBlock &B, 1306 const RegisterSet&) { 1307 if (!BT.reached(&B)) 1308 return false; 1309 bool Changed = false; 1310 1311 for (auto I = B.begin(), E = B.end(), NextI = I; I != E; ++I) { 1312 NextI = std::next(I); 1313 MachineInstr *MI = &*I; 1314 1315 if (MI->getOpcode() == TargetOpcode::COPY) 1316 continue; 1317 if (MI->hasUnmodeledSideEffects() || MI->isInlineAsm()) 1318 continue; 1319 unsigned NumD = MI->getDesc().getNumDefs(); 1320 if (NumD != 1) 1321 continue; 1322 1323 BitTracker::RegisterRef RD = MI->getOperand(0); 1324 if (!BT.has(RD.Reg)) 1325 continue; 1326 const BitTracker::RegisterCell &DC = BT.lookup(RD.Reg); 1327 auto At = MI->isPHI() ? B.getFirstNonPHI() 1328 : MachineBasicBlock::iterator(MI); 1329 1330 // Find a source operand that is equal to the result. 
1331 for (auto &Op : MI->uses()) { 1332 if (!Op.isReg()) 1333 continue; 1334 BitTracker::RegisterRef RS = Op; 1335 if (!BT.has(RS.Reg)) 1336 continue; 1337 if (!HBS::isTransparentCopy(RD, RS, MRI)) 1338 continue; 1339 1340 unsigned BN, BW; 1341 if (!HBS::getSubregMask(RS, BN, BW, MRI)) 1342 continue; 1343 1344 const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg); 1345 if (!usedBitsEqual(RD, RS) && !HBS::isEqual(DC, 0, SC, BN, BW)) 1346 continue; 1347 1348 // If found, replace the instruction with a COPY. 1349 const DebugLoc &DL = MI->getDebugLoc(); 1350 const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI); 1351 unsigned NewR = MRI.createVirtualRegister(FRC); 1352 MachineInstr *CopyI = 1353 BuildMI(B, At, DL, HII.get(TargetOpcode::COPY), NewR) 1354 .addReg(RS.Reg, 0, RS.Sub); 1355 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI); 1356 // This pass can create copies between registers that don't have the 1357 // exact same values. Updating the tracker has to involve updating 1358 // all dependent cells. Example: 1359 // vreg1 = inst vreg2 ; vreg1 != vreg2, but used bits are equal 1360 // 1361 // vreg3 = copy vreg2 ; <- inserted 1362 // ... = vreg3 ; <- replaced from vreg2 1363 // Indirectly, we can create a "copy" between vreg1 and vreg2 even 1364 // though their exact values do not match. 1365 BT.visit(*CopyI); 1366 Changed = true; 1367 break; 1368 } 1369 } 1370 1371 return Changed; 1372 } 1373 1374 namespace { 1375 1376 // Recognize instructions that produce constant values known at compile-time. 1377 // Replace them with register definitions that load these constants directly. 
class ConstGeneration : public Transformation {
public:
  ConstGeneration(BitTracker &bt, const HexagonInstrInfo &hii,
      MachineRegisterInfo &mri)
    : Transformation(true), HII(hii), MRI(mri), BT(bt) {}

  bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
  // True for instructions that are already constant transfers.
  static bool isTfrConst(const MachineInstr &MI);

private:
  // Emit the cheapest transfer-immediate for C; returns the new virtual
  // register, or 0 if no suitable instruction exists.
  unsigned genTfrConst(const TargetRegisterClass *RC, int64_t C,
      MachineBasicBlock &B, MachineBasicBlock::iterator At, DebugLoc &DL);

  const HexagonInstrInfo &HII;
  MachineRegisterInfo &MRI;
  BitTracker &BT;
};

} // end anonymous namespace

bool ConstGeneration::isTfrConst(const MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  switch (Opc) {
    case Hexagon::A2_combineii:
    case Hexagon::A4_combineii:
    case Hexagon::A2_tfrsi:
    case Hexagon::A2_tfrpi:
    case Hexagon::PS_true:
    case Hexagon::PS_false:
    case Hexagon::CONST32:
    case Hexagon::CONST64:
      return true;
  }
  return false;
}

// Generate a transfer-immediate instruction that is appropriate for the
// register class and the actual value being transferred.
unsigned ConstGeneration::genTfrConst(const TargetRegisterClass *RC, int64_t C,
      MachineBasicBlock &B, MachineBasicBlock::iterator At, DebugLoc &DL) {
  unsigned Reg = MRI.createVirtualRegister(RC);
  // 32-bit integer register: a single tfrsi always works.
  if (RC == &Hexagon::IntRegsRegClass) {
    BuildMI(B, At, DL, HII.get(Hexagon::A2_tfrsi), Reg)
        .addImm(int32_t(C));
    return Reg;
  }

  if (RC == &Hexagon::DoubleRegsRegClass) {
    // Small value: tfrpi takes an s8 immediate.
    if (isInt<8>(C)) {
      BuildMI(B, At, DL, HII.get(Hexagon::A2_tfrpi), Reg)
          .addImm(C);
      return Reg;
    }

    // Build the pair from its halves; A2_combineii needs s8 in the low
    // word, A4_combineii needs s8 in the high word.
    unsigned Lo = Lo_32(C), Hi = Hi_32(C);
    if (isInt<8>(Lo) || isInt<8>(Hi)) {
      unsigned Opc = isInt<8>(Lo) ? Hexagon::A2_combineii
                                  : Hexagon::A4_combineii;
      BuildMI(B, At, DL, HII.get(Opc), Reg)
          .addImm(int32_t(Hi))
          .addImm(int32_t(Lo));
      return Reg;
    }

    // Fall back to a constant-extended 64-bit load.
    BuildMI(B, At, DL, HII.get(Hexagon::CONST64), Reg)
        .addImm(C);
    return Reg;
  }

  if (RC == &Hexagon::PredRegsRegClass) {
    // Predicates are only materializable as all-false or all-true.
    unsigned Opc;
    if (C == 0)
      Opc = Hexagon::PS_false;
    else if ((C & 0xFF) == 0xFF)
      Opc = Hexagon::PS_true;
    else
      return 0;
    BuildMI(B, At, DL, HII.get(Opc), Reg);
    return Reg;
  }

  // No transfer-immediate is available for this register class.
  return 0;
}

bool ConstGeneration::processBlock(MachineBasicBlock &B, const RegisterSet&) {
  if (!BT.reached(&B))
    return false;
  bool Changed = false;
  RegisterSet Defs;

  for (auto I = B.begin(), E = B.end(); I != E; ++I) {
    // Skip instructions that are already constant transfers.
    if (isTfrConst(*I))
      continue;
    Defs.clear();
    HBS::getInstrDefs(*I, Defs);
    // Only single-def instructions are candidates.
    if (Defs.count() != 1)
      continue;
    unsigned DR = Defs.find_first();
    if (!TargetRegisterInfo::isVirtualRegister(DR))
      continue;
    uint64_t U;
    const BitTracker::RegisterCell &DRC = BT.lookup(DR);
    if (HBS::getConst(DRC, 0, DRC.width(), U)) {
      int64_t C = U;
      DebugLoc DL = I->getDebugLoc();
      // New instructions must not be inserted among PHIs.
      auto At = I->isPHI() ? B.getFirstNonPHI() : I;
      unsigned ImmReg = genTfrConst(MRI.getRegClass(DR), C, B, At, DL);
      if (ImmReg) {
        HBS::replaceReg(DR, ImmReg, MRI);
        // The new register holds the same bits as the old one.
        BT.put(ImmReg, DRC);
        Changed = true;
      }
    }
  }
  return Changed;
}

namespace {

// Identify pairs of available registers which hold identical values.
// In such cases, only one of them needs to be calculated, the other one
// will be defined as a copy of the first.
class CopyGeneration : public Transformation {
public:
  CopyGeneration(BitTracker &bt, const HexagonInstrInfo &hii,
      const HexagonRegisterInfo &hri, MachineRegisterInfo &mri)
    : Transformation(true), HII(hii), HRI(hri), MRI(mri), BT(bt) {}

  bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;

private:
  // Find a register in AVs whose bits equal Inp's; result goes in Out.
  bool findMatch(const BitTracker::RegisterRef &Inp,
      BitTracker::RegisterRef &Out, const RegisterSet &AVs);

  const HexagonInstrInfo &HII;
  const HexagonRegisterInfo &HRI;
  MachineRegisterInfo &MRI;
  BitTracker &BT;
  // Registers already replaced by a copy; never used as a match source
  // again to avoid replacement cycles.
  RegisterSet Forbidden;
};

// Eliminate register copies RD = RS, by replacing the uses of RD with
// with uses of RS.
class CopyPropagation : public Transformation {
public:
  CopyPropagation(const HexagonRegisterInfo &hri, MachineRegisterInfo &mri)
      : Transformation(false), HRI(hri), MRI(mri) {}

  bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;

  // True for copy-like opcodes; NoConv additionally accepts transfers and
  // combines that would require conversion when treated as copies.
  static bool isCopyReg(unsigned Opc, bool NoConv);

private:
  bool propagateRegCopy(MachineInstr &MI);

  const HexagonRegisterInfo &HRI;
  MachineRegisterInfo &MRI;
};

} // end anonymous namespace

/// Check if there is a register in AVs that is identical to Inp. If so,
/// set Out to the found register. The output may be a pair Reg:Sub.
1541 bool CopyGeneration::findMatch(const BitTracker::RegisterRef &Inp, 1542 BitTracker::RegisterRef &Out, const RegisterSet &AVs) { 1543 if (!BT.has(Inp.Reg)) 1544 return false; 1545 const BitTracker::RegisterCell &InpRC = BT.lookup(Inp.Reg); 1546 auto *FRC = HBS::getFinalVRegClass(Inp, MRI); 1547 unsigned B, W; 1548 if (!HBS::getSubregMask(Inp, B, W, MRI)) 1549 return false; 1550 1551 for (unsigned R = AVs.find_first(); R; R = AVs.find_next(R)) { 1552 if (!BT.has(R) || Forbidden[R]) 1553 continue; 1554 const BitTracker::RegisterCell &RC = BT.lookup(R); 1555 unsigned RW = RC.width(); 1556 if (W == RW) { 1557 if (FRC != MRI.getRegClass(R)) 1558 continue; 1559 if (!HBS::isTransparentCopy(R, Inp, MRI)) 1560 continue; 1561 if (!HBS::isEqual(InpRC, B, RC, 0, W)) 1562 continue; 1563 Out.Reg = R; 1564 Out.Sub = 0; 1565 return true; 1566 } 1567 // Check if there is a super-register, whose part (with a subregister) 1568 // is equal to the input. 1569 // Only do double registers for now. 1570 if (W*2 != RW) 1571 continue; 1572 if (MRI.getRegClass(R) != &Hexagon::DoubleRegsRegClass) 1573 continue; 1574 1575 if (HBS::isEqual(InpRC, B, RC, 0, W)) 1576 Out.Sub = Hexagon::isub_lo; 1577 else if (HBS::isEqual(InpRC, B, RC, W, W)) 1578 Out.Sub = Hexagon::isub_hi; 1579 else 1580 continue; 1581 Out.Reg = R; 1582 if (HBS::isTransparentCopy(Out, Inp, MRI)) 1583 return true; 1584 } 1585 return false; 1586 } 1587 1588 bool CopyGeneration::processBlock(MachineBasicBlock &B, 1589 const RegisterSet &AVs) { 1590 if (!BT.reached(&B)) 1591 return false; 1592 RegisterSet AVB(AVs); 1593 bool Changed = false; 1594 RegisterSet Defs; 1595 1596 for (auto I = B.begin(), E = B.end(), NextI = I; I != E; 1597 ++I, AVB.insert(Defs)) { 1598 NextI = std::next(I); 1599 Defs.clear(); 1600 HBS::getInstrDefs(*I, Defs); 1601 1602 unsigned Opc = I->getOpcode(); 1603 if (CopyPropagation::isCopyReg(Opc, false) || 1604 ConstGeneration::isTfrConst(*I)) 1605 continue; 1606 1607 DebugLoc DL = I->getDebugLoc(); 1608 
auto At = I->isPHI() ? B.getFirstNonPHI() : I; 1609 1610 for (unsigned R = Defs.find_first(); R; R = Defs.find_next(R)) { 1611 BitTracker::RegisterRef MR; 1612 auto *FRC = HBS::getFinalVRegClass(R, MRI); 1613 1614 if (findMatch(R, MR, AVB)) { 1615 unsigned NewR = MRI.createVirtualRegister(FRC); 1616 BuildMI(B, At, DL, HII.get(TargetOpcode::COPY), NewR) 1617 .addReg(MR.Reg, 0, MR.Sub); 1618 BT.put(BitTracker::RegisterRef(NewR), BT.get(MR)); 1619 HBS::replaceReg(R, NewR, MRI); 1620 Forbidden.insert(R); 1621 continue; 1622 } 1623 1624 if (FRC == &Hexagon::DoubleRegsRegClass || 1625 FRC == &Hexagon::VecDblRegsRegClass || 1626 FRC == &Hexagon::VecDblRegs128BRegClass) { 1627 // Try to generate REG_SEQUENCE. 1628 unsigned SubLo = HRI.getHexagonSubRegIndex(FRC, Hexagon::ps_sub_lo); 1629 unsigned SubHi = HRI.getHexagonSubRegIndex(FRC, Hexagon::ps_sub_hi); 1630 BitTracker::RegisterRef TL = { R, SubLo }; 1631 BitTracker::RegisterRef TH = { R, SubHi }; 1632 BitTracker::RegisterRef ML, MH; 1633 if (findMatch(TL, ML, AVB) && findMatch(TH, MH, AVB)) { 1634 auto *FRC = HBS::getFinalVRegClass(R, MRI); 1635 unsigned NewR = MRI.createVirtualRegister(FRC); 1636 BuildMI(B, At, DL, HII.get(TargetOpcode::REG_SEQUENCE), NewR) 1637 .addReg(ML.Reg, 0, ML.Sub) 1638 .addImm(SubLo) 1639 .addReg(MH.Reg, 0, MH.Sub) 1640 .addImm(SubHi); 1641 BT.put(BitTracker::RegisterRef(NewR), BT.get(R)); 1642 HBS::replaceReg(R, NewR, MRI); 1643 Forbidden.insert(R); 1644 } 1645 } 1646 } 1647 } 1648 1649 return Changed; 1650 } 1651 1652 bool CopyPropagation::isCopyReg(unsigned Opc, bool NoConv) { 1653 switch (Opc) { 1654 case TargetOpcode::COPY: 1655 case TargetOpcode::REG_SEQUENCE: 1656 case Hexagon::A4_combineir: 1657 case Hexagon::A4_combineri: 1658 return true; 1659 case Hexagon::A2_tfr: 1660 case Hexagon::A2_tfrp: 1661 case Hexagon::A2_combinew: 1662 case Hexagon::V6_vcombine: 1663 case Hexagon::V6_vcombine_128B: 1664 return NoConv; 1665 default: 1666 break; 1667 } 1668 return false; 1669 } 1670 1671 bool 
CopyPropagation::propagateRegCopy(MachineInstr &MI) { 1672 bool Changed = false; 1673 unsigned Opc = MI.getOpcode(); 1674 BitTracker::RegisterRef RD = MI.getOperand(0); 1675 assert(MI.getOperand(0).getSubReg() == 0); 1676 1677 switch (Opc) { 1678 case TargetOpcode::COPY: 1679 case Hexagon::A2_tfr: 1680 case Hexagon::A2_tfrp: { 1681 BitTracker::RegisterRef RS = MI.getOperand(1); 1682 if (!HBS::isTransparentCopy(RD, RS, MRI)) 1683 break; 1684 if (RS.Sub != 0) 1685 Changed = HBS::replaceRegWithSub(RD.Reg, RS.Reg, RS.Sub, MRI); 1686 else 1687 Changed = HBS::replaceReg(RD.Reg, RS.Reg, MRI); 1688 break; 1689 } 1690 case TargetOpcode::REG_SEQUENCE: { 1691 BitTracker::RegisterRef SL, SH; 1692 if (HBS::parseRegSequence(MI, SL, SH, MRI)) { 1693 const TargetRegisterClass *RC = MRI.getRegClass(RD.Reg); 1694 unsigned SubLo = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_lo); 1695 unsigned SubHi = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_hi); 1696 Changed = HBS::replaceSubWithSub(RD.Reg, SubLo, SL.Reg, SL.Sub, MRI); 1697 Changed |= HBS::replaceSubWithSub(RD.Reg, SubHi, SH.Reg, SH.Sub, MRI); 1698 } 1699 break; 1700 } 1701 case Hexagon::A2_combinew: 1702 case Hexagon::V6_vcombine: 1703 case Hexagon::V6_vcombine_128B: { 1704 const TargetRegisterClass *RC = MRI.getRegClass(RD.Reg); 1705 unsigned SubLo = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_lo); 1706 unsigned SubHi = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_hi); 1707 BitTracker::RegisterRef RH = MI.getOperand(1), RL = MI.getOperand(2); 1708 Changed = HBS::replaceSubWithSub(RD.Reg, SubLo, RL.Reg, RL.Sub, MRI); 1709 Changed |= HBS::replaceSubWithSub(RD.Reg, SubHi, RH.Reg, RH.Sub, MRI); 1710 break; 1711 } 1712 case Hexagon::A4_combineir: 1713 case Hexagon::A4_combineri: { 1714 unsigned SrcX = (Opc == Hexagon::A4_combineir) ? 2 : 1; 1715 unsigned Sub = (Opc == Hexagon::A4_combineir) ? 
Hexagon::isub_lo 1716 : Hexagon::isub_hi; 1717 BitTracker::RegisterRef RS = MI.getOperand(SrcX); 1718 Changed = HBS::replaceSubWithSub(RD.Reg, Sub, RS.Reg, RS.Sub, MRI); 1719 break; 1720 } 1721 } 1722 return Changed; 1723 } 1724 1725 bool CopyPropagation::processBlock(MachineBasicBlock &B, const RegisterSet&) { 1726 std::vector<MachineInstr*> Instrs; 1727 for (auto I = B.rbegin(), E = B.rend(); I != E; ++I) 1728 Instrs.push_back(&*I); 1729 1730 bool Changed = false; 1731 for (auto I : Instrs) { 1732 unsigned Opc = I->getOpcode(); 1733 if (!CopyPropagation::isCopyReg(Opc, true)) 1734 continue; 1735 Changed |= propagateRegCopy(*I); 1736 } 1737 1738 return Changed; 1739 } 1740 1741 namespace { 1742 1743 // Recognize patterns that can be simplified and replace them with the 1744 // simpler forms. 1745 // This is by no means complete 1746 class BitSimplification : public Transformation { 1747 public: 1748 BitSimplification(BitTracker &bt, const MachineDominatorTree &mdt, 1749 const HexagonInstrInfo &hii, const HexagonRegisterInfo &hri, 1750 MachineRegisterInfo &mri, MachineFunction &mf) 1751 : Transformation(true), MDT(mdt), HII(hii), HRI(hri), MRI(mri), 1752 MF(mf), BT(bt) {} 1753 1754 bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override; 1755 1756 private: 1757 struct RegHalf : public BitTracker::RegisterRef { 1758 bool Low; // Low/High halfword. 
1759 }; 1760 1761 bool matchHalf(unsigned SelfR, const BitTracker::RegisterCell &RC, 1762 unsigned B, RegHalf &RH); 1763 bool validateReg(BitTracker::RegisterRef R, unsigned Opc, unsigned OpNum); 1764 1765 bool matchPackhl(unsigned SelfR, const BitTracker::RegisterCell &RC, 1766 BitTracker::RegisterRef &Rs, BitTracker::RegisterRef &Rt); 1767 unsigned getCombineOpcode(bool HLow, bool LLow); 1768 1769 bool genStoreUpperHalf(MachineInstr *MI); 1770 bool genStoreImmediate(MachineInstr *MI); 1771 bool genPackhl(MachineInstr *MI, BitTracker::RegisterRef RD, 1772 const BitTracker::RegisterCell &RC); 1773 bool genExtractHalf(MachineInstr *MI, BitTracker::RegisterRef RD, 1774 const BitTracker::RegisterCell &RC); 1775 bool genCombineHalf(MachineInstr *MI, BitTracker::RegisterRef RD, 1776 const BitTracker::RegisterCell &RC); 1777 bool genExtractLow(MachineInstr *MI, BitTracker::RegisterRef RD, 1778 const BitTracker::RegisterCell &RC); 1779 bool genBitSplit(MachineInstr *MI, BitTracker::RegisterRef RD, 1780 const BitTracker::RegisterCell &RC, const RegisterSet &AVs); 1781 bool simplifyTstbit(MachineInstr *MI, BitTracker::RegisterRef RD, 1782 const BitTracker::RegisterCell &RC); 1783 bool simplifyExtractLow(MachineInstr *MI, BitTracker::RegisterRef RD, 1784 const BitTracker::RegisterCell &RC, const RegisterSet &AVs); 1785 1786 // Cache of created instructions to avoid creating duplicates. 1787 // XXX Currently only used by genBitSplit. 1788 std::vector<MachineInstr*> NewMIs; 1789 1790 const MachineDominatorTree &MDT; 1791 const HexagonInstrInfo &HII; 1792 const HexagonRegisterInfo &HRI; 1793 MachineRegisterInfo &MRI; 1794 MachineFunction &MF; 1795 BitTracker &BT; 1796 }; 1797 1798 } // end anonymous namespace 1799 1800 // Check if the bits [B..B+16) in register cell RC form a valid halfword, 1801 // i.e. [0..16), [16..32), etc. of some register. If so, return true and 1802 // set the information about the found register in RH. 
bool BitSimplification::matchHalf(unsigned SelfR,
      const BitTracker::RegisterCell &RC, unsigned B, RegHalf &RH) {
  // XXX This could be searching in the set of available registers, in case
  // the match is not exact.

  // Match 16-bit chunks, where the RC[B..B+15] references exactly one
  // register and all the bits B..B+15 match between RC and the register.
  // This is meant to match "v1[0-15]", where v1 = { [0]:0 [1-15]:v1... },
  // and RC = { [0]:0 [1-15]:v1[1-15]... }.
  bool Low = false;
  // Skip over leading constant (0/1) bits to find the first reference bit.
  unsigned I = B;
  while (I < B+16 && RC[I].num())
    I++;
  // All 16 bits are constants: there is no register to match.
  if (I == B+16)
    return false;

  unsigned Reg = RC[I].RefI.Reg;
  unsigned P = RC[I].RefI.Pos;    // The RefI.Pos will be advanced by I-B.
  if (P < I-B)
    return false;
  // Pos: position within Reg corresponding to bit B of RC.
  unsigned Pos = P - (I-B);

  if (Reg == 0 || Reg == SelfR)    // Don't match "self".
    return false;
  if (!TargetRegisterInfo::isVirtualRegister(Reg))
    return false;
  if (!BT.has(Reg))
    return false;

  const BitTracker::RegisterCell &SC = BT.lookup(Reg);
  if (Pos+16 > SC.width())
    return false;

  // Verify all 16 bits: reference bits must point at Reg[Pos+i], and
  // constant bits must equal the corresponding bit of Reg.
  for (unsigned i = 0; i < 16; ++i) {
    const BitTracker::BitValue &RV = RC[i+B];
    if (RV.Type == BitTracker::BitValue::Ref) {
      if (RV.RefI.Reg != Reg)
        return false;
      if (RV.RefI.Pos != i+Pos)
        return false;
      continue;
    }
    if (RC[i+B] != SC[i+Pos])
      return false;
  }

  // Translate the bit position into (subregister, low/high half):
  //    0 -> low half of the low word, 16 -> high half of the low word,
  //   32 -> low half of the high word, 48 -> high half of the high word.
  unsigned Sub = 0;
  switch (Pos) {
    case 0:
      Sub = Hexagon::isub_lo;
      Low = true;
      break;
    case 16:
      Sub = Hexagon::isub_lo;
      Low = false;
      break;
    case 32:
      Sub = Hexagon::isub_hi;
      Low = true;
      break;
    case 48:
      Sub = Hexagon::isub_hi;
      Low = false;
      break;
    default:
      // Not halfword-aligned: no match.
      return false;
  }

  RH.Reg = Reg;
  RH.Sub = Sub;
  RH.Low = Low;
  // If the subregister is not valid with the register, set it to 0.
  if (!HBS::getFinalVRegClass(RH, MRI))
    RH.Sub = 0;

  return true;
}

// Check that register R is acceptable as operand OpNum of an instruction
// with opcode Opc (i.e. R's class is a subclass of the operand's class).
bool BitSimplification::validateReg(BitTracker::RegisterRef R, unsigned Opc,
      unsigned OpNum) {
  // NOTE(review): both getRegClass and getFinalVRegClass can return
  // nullptr; this assumes the queried operand always has a register class
  // — confirm.
  auto *OpRC = HII.getRegClass(HII.get(Opc), OpNum, &HRI, MF);
  auto *RRC = HBS::getFinalVRegClass(R, MRI);
  return OpRC->hasSubClassEq(RRC);
}

// Check if RC matches the pattern of a S2_packhl. If so, return true and
// set the inputs Rs and Rt.
bool BitSimplification::matchPackhl(unsigned SelfR,
      const BitTracker::RegisterCell &RC, BitTracker::RegisterRef &Rs,
      BitTracker::RegisterRef &Rt) {
  RegHalf L1, H1, L2, H2;

  // packhl(Rs, Rt) = { Rs.h, Rt.h, Rs.l, Rt.l } from MSB to LSB.
  if (!matchHalf(SelfR, RC, 0, L2)  || !matchHalf(SelfR, RC, 16, L1))
    return false;
  if (!matchHalf(SelfR, RC, 32, H2) || !matchHalf(SelfR, RC, 48, H1))
    return false;

  // Rs = H1.L1, Rt = H2.L2
  if (H1.Reg != L1.Reg || H1.Sub != L1.Sub || H1.Low || !L1.Low)
    return false;
  if (H2.Reg != L2.Reg || H2.Sub != L2.Sub || H2.Low || !L2.Low)
    return false;

  Rs = H1;
  Rt = H2;
  return true;
}

// Select the A2_combine opcode matching which halves (low/high) of the
// two sources are being combined.
unsigned BitSimplification::getCombineOpcode(bool HLow, bool LLow) {
  return HLow ? LLow ? Hexagon::A2_combine_ll
                     : Hexagon::A2_combine_lh
              : LLow ? Hexagon::A2_combine_hl
                     : Hexagon::A2_combine_hh;
}

// If MI stores the upper halfword of a register (potentially obtained via
// shifts or extracts), replace it with a storerf instruction. This could
// cause the "extraction" code to become dead.
1921 bool BitSimplification::genStoreUpperHalf(MachineInstr *MI) { 1922 unsigned Opc = MI->getOpcode(); 1923 if (Opc != Hexagon::S2_storerh_io) 1924 return false; 1925 1926 MachineOperand &ValOp = MI->getOperand(2); 1927 BitTracker::RegisterRef RS = ValOp; 1928 if (!BT.has(RS.Reg)) 1929 return false; 1930 const BitTracker::RegisterCell &RC = BT.lookup(RS.Reg); 1931 RegHalf H; 1932 if (!matchHalf(0, RC, 0, H)) 1933 return false; 1934 if (H.Low) 1935 return false; 1936 MI->setDesc(HII.get(Hexagon::S2_storerf_io)); 1937 ValOp.setReg(H.Reg); 1938 ValOp.setSubReg(H.Sub); 1939 return true; 1940 } 1941 1942 // If MI stores a value known at compile-time, and the value is within a range 1943 // that avoids using constant-extenders, replace it with a store-immediate. 1944 bool BitSimplification::genStoreImmediate(MachineInstr *MI) { 1945 unsigned Opc = MI->getOpcode(); 1946 unsigned Align = 0; 1947 switch (Opc) { 1948 case Hexagon::S2_storeri_io: 1949 Align++; 1950 case Hexagon::S2_storerh_io: 1951 Align++; 1952 case Hexagon::S2_storerb_io: 1953 break; 1954 default: 1955 return false; 1956 } 1957 1958 // Avoid stores to frame-indices (due to an unknown offset). 1959 if (!MI->getOperand(0).isReg()) 1960 return false; 1961 MachineOperand &OffOp = MI->getOperand(1); 1962 if (!OffOp.isImm()) 1963 return false; 1964 1965 int64_t Off = OffOp.getImm(); 1966 // Offset is u6:a. Sadly, there is no isShiftedUInt(n,x). 1967 if (!isUIntN(6+Align, Off) || (Off & ((1<<Align)-1))) 1968 return false; 1969 // Source register: 1970 BitTracker::RegisterRef RS = MI->getOperand(2); 1971 if (!BT.has(RS.Reg)) 1972 return false; 1973 const BitTracker::RegisterCell &RC = BT.lookup(RS.Reg); 1974 uint64_t U; 1975 if (!HBS::getConst(RC, 0, RC.width(), U)) 1976 return false; 1977 1978 // Only consider 8-bit values to avoid constant-extenders. 
1979 int V; 1980 switch (Opc) { 1981 case Hexagon::S2_storerb_io: 1982 V = int8_t(U); 1983 break; 1984 case Hexagon::S2_storerh_io: 1985 V = int16_t(U); 1986 break; 1987 case Hexagon::S2_storeri_io: 1988 V = int32_t(U); 1989 break; 1990 } 1991 if (!isInt<8>(V)) 1992 return false; 1993 1994 MI->RemoveOperand(2); 1995 switch (Opc) { 1996 case Hexagon::S2_storerb_io: 1997 MI->setDesc(HII.get(Hexagon::S4_storeirb_io)); 1998 break; 1999 case Hexagon::S2_storerh_io: 2000 MI->setDesc(HII.get(Hexagon::S4_storeirh_io)); 2001 break; 2002 case Hexagon::S2_storeri_io: 2003 MI->setDesc(HII.get(Hexagon::S4_storeiri_io)); 2004 break; 2005 } 2006 MI->addOperand(MachineOperand::CreateImm(V)); 2007 return true; 2008 } 2009 2010 // If MI is equivalent o S2_packhl, generate the S2_packhl. MI could be the 2011 // last instruction in a sequence that results in something equivalent to 2012 // the pack-halfwords. The intent is to cause the entire sequence to become 2013 // dead. 2014 bool BitSimplification::genPackhl(MachineInstr *MI, 2015 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) { 2016 unsigned Opc = MI->getOpcode(); 2017 if (Opc == Hexagon::S2_packhl) 2018 return false; 2019 BitTracker::RegisterRef Rs, Rt; 2020 if (!matchPackhl(RD.Reg, RC, Rs, Rt)) 2021 return false; 2022 if (!validateReg(Rs, Hexagon::S2_packhl, 1) || 2023 !validateReg(Rt, Hexagon::S2_packhl, 2)) 2024 return false; 2025 2026 MachineBasicBlock &B = *MI->getParent(); 2027 unsigned NewR = MRI.createVirtualRegister(&Hexagon::DoubleRegsRegClass); 2028 DebugLoc DL = MI->getDebugLoc(); 2029 auto At = MI->isPHI() ? 
B.getFirstNonPHI() 2030 : MachineBasicBlock::iterator(MI); 2031 BuildMI(B, At, DL, HII.get(Hexagon::S2_packhl), NewR) 2032 .addReg(Rs.Reg, 0, Rs.Sub) 2033 .addReg(Rt.Reg, 0, Rt.Sub); 2034 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI); 2035 BT.put(BitTracker::RegisterRef(NewR), RC); 2036 return true; 2037 } 2038 2039 // If MI produces halfword of the input in the low half of the output, 2040 // replace it with zero-extend or extractu. 2041 bool BitSimplification::genExtractHalf(MachineInstr *MI, 2042 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) { 2043 RegHalf L; 2044 // Check for halfword in low 16 bits, zeros elsewhere. 2045 if (!matchHalf(RD.Reg, RC, 0, L) || !HBS::isZero(RC, 16, 16)) 2046 return false; 2047 2048 unsigned Opc = MI->getOpcode(); 2049 MachineBasicBlock &B = *MI->getParent(); 2050 DebugLoc DL = MI->getDebugLoc(); 2051 2052 // Prefer zxth, since zxth can go in any slot, while extractu only in 2053 // slots 2 and 3. 2054 unsigned NewR = 0; 2055 auto At = MI->isPHI() ? B.getFirstNonPHI() 2056 : MachineBasicBlock::iterator(MI); 2057 if (L.Low && Opc != Hexagon::A2_zxth) { 2058 if (validateReg(L, Hexagon::A2_zxth, 1)) { 2059 NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); 2060 BuildMI(B, At, DL, HII.get(Hexagon::A2_zxth), NewR) 2061 .addReg(L.Reg, 0, L.Sub); 2062 } 2063 } else if (!L.Low && Opc != Hexagon::S2_lsr_i_r) { 2064 if (validateReg(L, Hexagon::S2_lsr_i_r, 1)) { 2065 NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); 2066 BuildMI(B, MI, DL, HII.get(Hexagon::S2_lsr_i_r), NewR) 2067 .addReg(L.Reg, 0, L.Sub) 2068 .addImm(16); 2069 } 2070 } 2071 if (NewR == 0) 2072 return false; 2073 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI); 2074 BT.put(BitTracker::RegisterRef(NewR), RC); 2075 return true; 2076 } 2077 2078 // If MI is equivalent to a combine(.L/.H, .L/.H) replace with with the 2079 // combine. 
bool BitSimplification::genCombineHalf(MachineInstr *MI,
      BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
  RegHalf L, H;
  // Check for combine h/l
  if (!matchHalf(RD.Reg, RC, 0, L) || !matchHalf(RD.Reg, RC, 16, H))
    return false;
  // Do nothing if this is just a reg copy.
  if (L.Reg == H.Reg && L.Sub == H.Sub && !H.Low && L.Low)
    return false;

  unsigned Opc = MI->getOpcode();
  unsigned COpc = getCombineOpcode(H.Low, L.Low);
  // Don't replace an instruction with itself.
  if (COpc == Opc)
    return false;
  if (!validateReg(H, COpc, 1) || !validateReg(L, COpc, 2))
    return false;

  MachineBasicBlock &B = *MI->getParent();
  DebugLoc DL = MI->getDebugLoc();
  unsigned NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
  // Never insert a non-PHI instruction among the PHI nodes.
  auto At = MI->isPHI() ? B.getFirstNonPHI()
                        : MachineBasicBlock::iterator(MI);
  BuildMI(B, At, DL, HII.get(COpc), NewR)
      .addReg(H.Reg, 0, H.Sub)
      .addReg(L.Reg, 0, L.Sub);
  HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
  BT.put(BitTracker::RegisterRef(NewR), RC);
  return true;
}

// If MI resets high bits of a register and keeps the lower ones, replace it
// with zero-extend byte/half, and-immediate, or extractu, as appropriate.
bool BitSimplification::genExtractLow(MachineInstr *MI,
      BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
  unsigned Opc = MI->getOpcode();
  // These already are the instructions we would generate; nothing to do.
  switch (Opc) {
    case Hexagon::A2_zxtb:
    case Hexagon::A2_zxth:
    case Hexagon::S2_extractu:
      return false;
  }
  // A small and-immediate is already as cheap as it gets.
  if (Opc == Hexagon::A2_andir && MI->getOperand(2).isImm()) {
    int32_t Imm = MI->getOperand(2).getImm();
    if (isInt<10>(Imm))
      return false;
  }

  if (MI->hasUnmodeledSideEffects() || MI->isInlineAsm())
    return false;
  // W becomes the width of the non-zero low field.
  unsigned W = RC.width();
  while (W > 0 && RC[W-1].is(0))
    W--;
  // All-zero or no zero high bits: not an "extract low" pattern.
  if (W == 0 || W == RC.width())
    return false;
  unsigned NewOpc = (W == 8)  ? Hexagon::A2_zxtb
                  : (W == 16) ? Hexagon::A2_zxth
                  : (W < 10)  ? Hexagon::A2_andir
                  : Hexagon::S2_extractu;
  MachineBasicBlock &B = *MI->getParent();
  DebugLoc DL = MI->getDebugLoc();

  // Find a source operand whose low W bits equal the low W bits of RD.
  for (auto &Op : MI->uses()) {
    if (!Op.isReg())
      continue;
    BitTracker::RegisterRef RS = Op;
    if (!BT.has(RS.Reg))
      continue;
    const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg);
    unsigned BN, BW;
    if (!HBS::getSubregMask(RS, BN, BW, MRI))
      continue;
    if (BW < W || !HBS::isEqual(RC, 0, SC, BN, W))
      continue;
    if (!validateReg(RS, NewOpc, 1))
      continue;

    unsigned NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
    auto At = MI->isPHI() ? B.getFirstNonPHI()
                          : MachineBasicBlock::iterator(MI);
    auto MIB = BuildMI(B, At, DL, HII.get(NewOpc), NewR)
                  .addReg(RS.Reg, 0, RS.Sub);
    if (NewOpc == Hexagon::A2_andir)
      MIB.addImm((1 << W) - 1);
    else if (NewOpc == Hexagon::S2_extractu)
      MIB.addImm(W).addImm(0);
    HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
    BT.put(BitTracker::RegisterRef(NewR), RC);
    return true;
  }
  return false;
}

// Try to replace a pair of complementary bitfields (RD and some available
// register) with a single A4_bitspliti of their common source register.
bool BitSimplification::genBitSplit(MachineInstr *MI,
      BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC,
      const RegisterSet &AVs) {
  if (!GenBitSplit)
    return false;
  // Debugging aid: cap the number of bitsplits generated.
  if (MaxBitSplit.getNumOccurrences()) {
    if (CountBitSplit >= MaxBitSplit)
      return false;
  }

  unsigned Opc = MI->getOpcode();
  switch (Opc) {
    case Hexagon::A4_bitsplit:
    case Hexagon::A4_bitspliti:
      return false;
  }

  unsigned W = RC.width();
  if (W != 32)
    return false;

  // Count-leading-zeros over a register cell.
  auto ctlz = [] (const BitTracker::RegisterCell &C) -> unsigned {
    unsigned Z = C.width();
    while (Z > 0 && C[Z-1].is(0))
      --Z;
    return C.width() - Z;
  };

  // Count the number of leading zeros in the target RC.
  unsigned Z = ctlz(RC);
  if (Z == 0 || Z == W)
    return false;

  // A simplistic analysis: assume the source register (the one being split)
  // is fully unknown, and that all its bits are self-references.
  const BitTracker::BitValue &B0 = RC[0];
  if (B0.Type != BitTracker::BitValue::Ref)
    return false;

  unsigned SrcR = B0.RefI.Reg;
  unsigned SrcSR = 0;
  unsigned Pos = B0.RefI.Pos;

  // All the non-zero bits should be consecutive bits from the same register.
  for (unsigned i = 1; i < W-Z; ++i) {
    const BitTracker::BitValue &V = RC[i];
    if (V.Type != BitTracker::BitValue::Ref)
      return false;
    if (V.RefI.Reg != SrcR || V.RefI.Pos != Pos+i)
      return false;
  }

  // Now, find the other bitfield among AVs.
  for (unsigned S = AVs.find_first(); S; S = AVs.find_next(S)) {
    // The number of leading zeros here should be the number of trailing
    // non-zeros in RC.
    if (!BT.has(S))
      continue;
    const BitTracker::RegisterCell &SC = BT.lookup(S);
    if (SC.width() != W || ctlz(SC) != W-Z)
      continue;
    // The Z lower bits should now match SrcR.
    const BitTracker::BitValue &S0 = SC[0];
    if (S0.Type != BitTracker::BitValue::Ref || S0.RefI.Reg != SrcR)
      continue;
    unsigned P = S0.RefI.Pos;

    // The two fields must be adjacent in the source register.
    if (Pos <= P && (Pos + W-Z) != P)
      continue;
    if (P < Pos && (P + Z) != Pos)
      continue;
    // The starting bitfield position must be at a subregister boundary.
    if (std::min(P, Pos) != 0 && std::min(P, Pos) != 32)
      continue;

    // The Z bits of S must also be consecutive bits of SrcR.
    unsigned I;
    for (I = 1; I < Z; ++I) {
      const BitTracker::BitValue &V = SC[I];
      if (V.Type != BitTracker::BitValue::Ref)
        break;
      if (V.RefI.Reg != SrcR || V.RefI.Pos != P+I)
        break;
    }
    if (I != Z)
      continue;

    // Generate bitsplit where S is defined.
    if (MaxBitSplit.getNumOccurrences())
      CountBitSplit++;
    // Insert at the definition point of S (skipping PHIs) so that the
    // result dominates both replaced registers.
    MachineInstr *DefS = MRI.getVRegDef(S);
    assert(DefS != nullptr);
    DebugLoc DL = DefS->getDebugLoc();
    MachineBasicBlock &B = *DefS->getParent();
    auto At = DefS->isPHI() ? B.getFirstNonPHI()
                            : MachineBasicBlock::iterator(DefS);
    // If the source is a register pair, pick the 32-bit half the fields
    // come from.
    if (MRI.getRegClass(SrcR)->getID() == Hexagon::DoubleRegsRegClassID)
      SrcSR = (std::min(Pos, P) == 32) ? Hexagon::isub_hi : Hexagon::isub_lo;
    if (!validateReg({SrcR,SrcSR}, Hexagon::A4_bitspliti, 1))
      continue;
    unsigned ImmOp = Pos <= P ? W-Z : Z;

    // Find an existing bitsplit instruction if one already exists.
    unsigned NewR = 0;
    for (MachineInstr *In : NewMIs) {
      if (In->getOpcode() != Hexagon::A4_bitspliti)
        continue;
      MachineOperand &Op1 = In->getOperand(1);
      if (Op1.getReg() != SrcR || Op1.getSubReg() != SrcSR)
        continue;
      if (In->getOperand(2).getImm() != ImmOp)
        continue;
      // Check if the target register is available here.
      MachineOperand &Op0 = In->getOperand(0);
      MachineInstr *DefI = MRI.getVRegDef(Op0.getReg());
      assert(DefI != nullptr);
      if (!MDT.dominates(DefI, &*At))
        continue;

      // Found one that can be reused.
      assert(Op0.getSubReg() == 0);
      NewR = Op0.getReg();
      break;
    }
    if (!NewR) {
      NewR = MRI.createVirtualRegister(&Hexagon::DoubleRegsRegClass);
      auto NewBS = BuildMI(B, At, DL, HII.get(Hexagon::A4_bitspliti), NewR)
                      .addReg(SrcR, 0, SrcSR)
                      .addImm(ImmOp);
      // Remember the new instruction for potential reuse on later matches.
      NewMIs.push_back(NewBS);
    }
    // The lower-positioned field becomes the low subregister of the pair.
    if (Pos <= P) {
      HBS::replaceRegWithSub(RD.Reg, NewR, Hexagon::isub_lo, MRI);
      HBS::replaceRegWithSub(S, NewR, Hexagon::isub_hi, MRI);
    } else {
      HBS::replaceRegWithSub(S, NewR, Hexagon::isub_lo, MRI);
      HBS::replaceRegWithSub(RD.Reg, NewR, Hexagon::isub_hi, MRI);
    }
    return true;
  }

  return false;
}

// Check for tstbit simplification opportunity, where the bit being checked
// can be tracked back to another register. For example:
//   vreg2 = S2_lsr_i_r vreg1, 5
//   vreg3 = S2_tstbit_i vreg2, 0
// =>
//   vreg3 = S2_tstbit_i vreg1, 5
bool BitSimplification::simplifyTstbit(MachineInstr *MI,
      BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
  unsigned Opc = MI->getOpcode();
  if (Opc != Hexagon::S2_tstbit_i)
    return false;

  unsigned BN = MI->getOperand(2).getImm();
  BitTracker::RegisterRef RS = MI->getOperand(1);
  unsigned F, W;
  DebugLoc DL = MI->getDebugLoc();
  if (!BT.has(RS.Reg) || !HBS::getSubregMask(RS, F, W, MRI))
    return false;
  MachineBasicBlock &B = *MI->getParent();
  // Never insert a non-PHI instruction among the PHI nodes.
  auto At = MI->isPHI() ? B.getFirstNonPHI()
                        : MachineBasicBlock::iterator(MI);

  const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg);
  const BitTracker::BitValue &V = SC[F+BN];
  if (V.Type == BitTracker::BitValue::Ref && V.RefI.Reg != RS.Reg) {
    const TargetRegisterClass *TC = MRI.getRegClass(V.RefI.Reg);
    // Need to map V.RefI.Reg to a 32-bit register, i.e. if it is
    // a double register, need to use a subregister and adjust bit
    // number.
    // P == max() means "no usable 32-bit mapping was found".
    unsigned P = std::numeric_limits<unsigned>::max();
    BitTracker::RegisterRef RR(V.RefI.Reg, 0);
    if (TC == &Hexagon::DoubleRegsRegClass) {
      P = V.RefI.Pos;
      RR.Sub = Hexagon::isub_lo;
      if (P >= 32) {
        P -= 32;
        RR.Sub = Hexagon::isub_hi;
      }
    } else if (TC == &Hexagon::IntRegsRegClass) {
      P = V.RefI.Pos;
    }
    if (P != std::numeric_limits<unsigned>::max()) {
      unsigned NewR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
      BuildMI(B, At, DL, HII.get(Hexagon::S2_tstbit_i), NewR)
          .addReg(RR.Reg, 0, RR.Sub)
          .addImm(P);
      HBS::replaceReg(RD.Reg, NewR, MRI);
      BT.put(NewR, RC);
      return true;
    }
  } else if (V.is(0) || V.is(1)) {
    // The tested bit is a known constant: replace with a constant predicate.
    unsigned NewR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
    unsigned NewOpc = V.is(0) ? Hexagon::PS_false : Hexagon::PS_true;
    BuildMI(B, At, DL, HII.get(NewOpc), NewR);
    HBS::replaceReg(RD.Reg, NewR, MRI);
    return true;
  }

  return false;
}

// Detect whether RD is a bitfield extract (sign- or zero-extended) of
// some register from the AVs set. Create a new corresponding instruction
// at the location of MI. The intent is to recognize situations where
// a sequence of instructions performs an operation that is equivalent to
// an extract operation, such as a shift left followed by a shift right.
bool BitSimplification::simplifyExtractLow(MachineInstr *MI,
      BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC,
      const RegisterSet &AVs) {
  if (!GenExtract)
    return false;
  // Debugging aid: cap the number of extract attempts.
  if (MaxExtract.getNumOccurrences()) {
    if (CountExtract >= MaxExtract)
      return false;
    CountExtract++;
  }

  unsigned W = RC.width();
  unsigned RW = W;
  unsigned Len;
  bool Signed;

  // The code is mostly class-independent, except for the part that generates
  // the extract instruction, and establishes the source register (in case it
  // needs to use a subregister).
  const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI);
  if (FRC != &Hexagon::IntRegsRegClass && FRC != &Hexagon::DoubleRegsRegClass)
    return false;
  assert(RD.Sub == 0);

  // Observation:
  // If the cell has a form of 00..0xx..x with k zeros and n remaining
  // bits, this could be an extractu of the n bits, but it could also be
  // an extractu of a longer field which happens to have 0s in the top
  // bit positions.
  // The same logic applies to sign-extended fields.
  //
  // Do not check for the extended extracts, since it would expand the
  // search space quite a bit. The search may be expensive as it is.

  const BitTracker::BitValue &TopV = RC[W-1];

  // Eliminate candidates that have self-referential bits, since they
  // cannot be extracts from other registers. Also, skip registers that
  // have compile-time constant values.
  bool IsConst = true;
  for (unsigned I = 0; I != W; ++I) {
    const BitTracker::BitValue &V = RC[I];
    if (V.Type == BitTracker::BitValue::Ref && V.RefI.Reg == RD.Reg)
      return false;
    IsConst = IsConst && (V.is(0) || V.is(1));
  }
  if (IsConst)
    return false;

  if (TopV.is(0) || TopV.is(1)) {
    // Zero- or constant-sign-extended: strip the replicated top bits to
    // find the field length.
    bool S = TopV.is(1);
    for (--W; W > 0 && RC[W-1].is(S); --W)
      ;
    Len = W;
    Signed = S;
    // The sign bit must be a part of the field being extended.
    if (Signed)
      ++Len;
  } else {
    // This could still be a sign-extended extract.
    assert(TopV.Type == BitTracker::BitValue::Ref);
    if (TopV.RefI.Reg == RD.Reg || TopV.RefI.Pos == W-1)
      return false;
    for (--W; W > 0 && RC[W-1] == TopV; --W)
      ;
    // The top bits of RC are copies of TopV. One occurrence of TopV will
    // be a part of the field.
    Len = W + 1;
    Signed = true;
  }

  // This would be just a copy. It should be handled elsewhere.
  if (Len == RW)
    return false;

  DEBUG({
    dbgs() << __func__ << " on reg: " << PrintReg(RD.Reg, &HRI, RD.Sub)
           << ", MI: " << *MI;
    dbgs() << "Cell: " << RC << '\n';
    dbgs() << "Expected bitfield size: " << Len << " bits, "
           << (Signed ? "sign" : "zero") << "-extended\n";
  });

  bool Changed = false;

  // Search the available registers for one that contains the field.
  for (unsigned R = AVs.find_first(); R != 0; R = AVs.find_next(R)) {
    if (!BT.has(R))
      continue;
    const BitTracker::RegisterCell &SC = BT.lookup(R);
    unsigned SW = SC.width();

    // The source can be longer than the destination, as long as its size is
    // a multiple of the size of the destination. Also, we would need to be
    // able to refer to the subregister in the source that would be of the
    // same size as the destination, but only check the sizes here.
    if (SW < RW || (SW % RW) != 0)
      continue;

    // The field can start at any offset in SC as long as it contains Len
    // bits and does not cross subregister boundary (if the source register
    // is longer than the destination).
    unsigned Off = 0;
    while (Off <= SW-Len) {
      unsigned OE = (Off+Len)/RW;
      if (OE != Off/RW) {
        // The assumption here is that if the source (R) is longer than the
        // destination, then the destination is a sequence of words of
        // size RW, and each such word in R can be accessed via a subregister.
        //
        // If the beginning and the end of the field cross the subregister
        // boundary, advance to the next subregister.
        Off = OE*RW;
        continue;
      }
      if (HBS::isEqual(RC, 0, SC, Off, Len))
        break;
      ++Off;
    }

    if (Off > SW-Len)
      continue;

    // Found match.
    // Prefer the cheaper single-slot forms (zxtb/zxth/andir) when the
    // field starts at bit 0.
    unsigned ExtOpc = 0;
    if (Off == 0) {
      if (Len == 8)
        ExtOpc = Signed ? Hexagon::A2_sxtb : Hexagon::A2_zxtb;
      else if (Len == 16)
        ExtOpc = Signed ? Hexagon::A2_sxth : Hexagon::A2_zxth;
      else if (Len < 10 && !Signed)
        ExtOpc = Hexagon::A2_andir;
    }
    if (ExtOpc == 0) {
      ExtOpc =
        Signed ? (RW == 32 ? Hexagon::S4_extract  : Hexagon::S4_extractp)
               : (RW == 32 ? Hexagon::S2_extractu : Hexagon::S2_extractup);
    }
    unsigned SR = 0;
    // This only recognizes isub_lo and isub_hi.
    if (RW != SW && RW*2 != SW)
      continue;
    if (RW != SW)
      SR = (Off/RW == 0) ? Hexagon::isub_lo : Hexagon::isub_hi;
    Off = Off % RW;

    if (!validateReg({R,SR}, ExtOpc, 1))
      continue;

    // Don't generate the same instruction as the one being optimized.
    if (MI->getOpcode() == ExtOpc) {
      // All possible ExtOpc's have the source in operand(1).
      const MachineOperand &SrcOp = MI->getOperand(1);
      if (SrcOp.getReg() == R)
        continue;
    }

    DebugLoc DL = MI->getDebugLoc();
    MachineBasicBlock &B = *MI->getParent();
    unsigned NewR = MRI.createVirtualRegister(FRC);
    // Never insert a non-PHI instruction among the PHI nodes.
    auto At = MI->isPHI() ? B.getFirstNonPHI()
                          : MachineBasicBlock::iterator(MI);
    auto MIB = BuildMI(B, At, DL, HII.get(ExtOpc), NewR)
                  .addReg(R, 0, SR);
    // Append the operands the chosen opcode requires beyond the source.
    switch (ExtOpc) {
      case Hexagon::A2_sxtb:
      case Hexagon::A2_zxtb:
      case Hexagon::A2_sxth:
      case Hexagon::A2_zxth:
        break;
      case Hexagon::A2_andir:
        MIB.addImm((1u << Len) - 1);
        break;
      case Hexagon::S4_extract:
      case Hexagon::S2_extractu:
      case Hexagon::S4_extractp:
      case Hexagon::S2_extractup:
        MIB.addImm(Len)
           .addImm(Off);
        break;
      default:
        llvm_unreachable("Unexpected opcode");
    }

    HBS::replaceReg(RD.Reg, NewR, MRI);
    BT.put(BitTracker::RegisterRef(NewR), RC);
    Changed = true;
    break;
  }

  return Changed;
}

// Dispatch the applicable simplifications to each instruction in block B,
// based on the register class of the (single) defined register.
bool BitSimplification::processBlock(MachineBasicBlock &B,
      const RegisterSet &AVs) {
  if (!BT.reached(&B))
    return false;
  bool Changed = false;
  RegisterSet AVB = AVs;
  RegisterSet Defs;

  // Defs from each instruction become available to the subsequent ones.
  for (auto I = B.begin(), E = B.end(); I != E; ++I, AVB.insert(Defs)) {
    MachineInstr *MI = &*I;
    Defs.clear();
    HBS::getInstrDefs(*MI, Defs);

    unsigned Opc = MI->getOpcode();
    if (Opc == TargetOpcode::COPY || Opc == TargetOpcode::REG_SEQUENCE)
      continue;

    if (MI->mayStore()) {
      bool T = genStoreUpperHalf(MI);
      T = T || genStoreImmediate(MI);
      Changed |= T;
      continue;
    }

    if (Defs.count() != 1)
      continue;
    const MachineOperand &Op0 = MI->getOperand(0);
    if (!Op0.isReg() || !Op0.isDef())
      continue;
    BitTracker::RegisterRef RD = Op0;
    if (!BT.has(RD.Reg))
      continue;
    const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI);
    const BitTracker::RegisterCell &RC = BT.lookup(RD.Reg);

    if (FRC->getID() == Hexagon::DoubleRegsRegClassID) {
      bool T = genPackhl(MI, RD, RC);
      T = T || simplifyExtractLow(MI, RD, RC, AVB);
      Changed |= T;
      continue;
    }

    if (FRC->getID() == Hexagon::IntRegsRegClassID) {
      bool T = genBitSplit(MI, RD, RC, AVB);
      T = T || simplifyExtractLow(MI, RD, RC, AVB);
      T = T || genExtractHalf(MI, RD, RC);
      T = T || genCombineHalf(MI, RD, RC);
      T = T || genExtractLow(MI, RD, RC);
      Changed |= T;
      continue;
    }

    if (FRC->getID() == Hexagon::PredRegsRegClassID) {
      bool T = simplifyTstbit(MI, RD, RC);
      Changed |= T;
      continue;
    }
  }
  return Changed;
}

// Pass driver: runs DCE, constant generation, redundant-instruction
// elimination, copy generation/propagation, and bit simplification, with
// bit-tracker reruns after phases that invalidate the tracked bits.
bool HexagonBitSimplify::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  auto &HRI = *HST.getRegisterInfo();
  auto &HII = *HST.getInstrInfo();

  MDT = &getAnalysis<MachineDominatorTree>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  bool Changed;

  Changed = DeadCodeElimination(MF, *MDT).run();

  const HexagonEvaluator HE(HRI, MRI, HII, MF);
  BitTracker BT(HE, MF);
  DEBUG(BT.trace(true));
  BT.run();

  MachineBasicBlock &Entry = MF.front();

  RegisterSet AIG;  // Available registers for IG.
  ConstGeneration ImmG(BT, HII, MRI);
  Changed |= visitBlock(Entry, ImmG, AIG);

  RegisterSet ARE;  // Available registers for RIE.
  RedundantInstrElimination RIE(BT, HII, HRI, MRI);
  bool Ried = visitBlock(Entry, RIE, ARE);
  if (Ried) {
    Changed = true;
    // RIE rewrote instructions; recompute the tracked bits.
    BT.run();
  }

  RegisterSet ACG;  // Available registers for CG.
  CopyGeneration CopyG(BT, HII, HRI, MRI);
  Changed |= visitBlock(Entry, CopyG, ACG);

  RegisterSet ACP;  // Available registers for CP.
  CopyPropagation CopyP(HRI, MRI);
  Changed |= visitBlock(Entry, CopyP, ACP);

  Changed = DeadCodeElimination(MF, *MDT).run() || Changed;

  // Recompute the tracked bits before the main bit-simplification phase.
  BT.run();
  RegisterSet ABS;  // Available registers for BS.
  BitSimplification BitS(BT, *MDT, HII, HRI, MRI, MF);
  Changed |= visitBlock(Entry, BitS, ABS);

  Changed = DeadCodeElimination(MF, *MDT).run() || Changed;

  if (Changed) {
    // Register replacements may have invalidated kill flags; clear them
    // and run one final DCE.
    for (auto &B : MF)
      for (auto &I : B)
        I.clearKillInfo();
    DeadCodeElimination(MF, *MDT).run();
  }
  return Changed;
}

// Recognize loops where the code at the end of the loop matches the code
// before the entry of the loop, and the matching code is such that it can
// be simplified. This pass relies on the bit simplification above and only
// prepares code in a way that can be handled by the bit simplification.
//
// This is the motivating testcase (and explanation):
//
// {
//     loop0(.LBB0_2, r1)      // %for.body.preheader
//     r5:4 = memd(r0++#8)
// }
// {
//     r3 = lsr(r4, #16)
//     r7:6 = combine(r5, r5)
// }
// {
//     r3 = insert(r5, #16, #16)
//     r7:6 = vlsrw(r7:6, #16)
// }
// .LBB0_2:
// {
//     memh(r2+#4) = r5
//     memh(r2+#6) = r6          # R6 is really R5.H
// }
// {
//     r2 = add(r2, #8)
//     memh(r2+#0) = r4
//     memh(r2+#2) = r3          # R3 is really R4.H
// }
// {
//     r5:4 = memd(r0++#8)
// }
// {                             # "Shuffling" code that sets up R3 and R6
//     r3 = lsr(r4, #16)         # so that their halves can be stored in the
//     r7:6 = combine(r5, r5)    # next iteration. This could be folded into
// }                             # the stores if the code was at the beginning
// {                             # of the loop iteration. Since the same code
//     r3 = insert(r5, #16, #16) # precedes the loop, it can actually be moved
//     r7:6 = vlsrw(r7:6, #16)   # there.
// }:endloop0
//
//
// The outcome:
//
// {
//     loop0(.LBB0_2, r1)
//     r5:4 = memd(r0++#8)
// }
// .LBB0_2:
// {
//     memh(r2+#4) = r5
//     memh(r2+#6) = r5.h
// }
// {
//     r2 = add(r2, #8)
//     memh(r2+#0) = r4
//     memh(r2+#2) = r4.h
// }
// {
//     r5:4 = memd(r0++#8)
// }:endloop0

namespace llvm {

FunctionPass *createHexagonLoopRescheduling();
void initializeHexagonLoopReschedulingPass(PassRegistry&);

} // end namespace llvm

namespace {

class HexagonLoopRescheduling : public MachineFunctionPass {
public:
  static char ID;

  HexagonLoopRescheduling() : MachineFunctionPass(ID),
      HII(nullptr), HRI(nullptr), MRI(nullptr), BTP(nullptr) {
    initializeHexagonLoopReschedulingPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

private:
  const HexagonInstrInfo *HII;
  const HexagonRegisterInfo *HRI;
  MachineRegisterInfo *MRI;
  BitTracker *BTP;

  // A candidate loop: loop block, preheader block, and (optional)
  // epilog/exit block.
  struct LoopCand {
    LoopCand(MachineBasicBlock *lb, MachineBasicBlock *pb,
          MachineBasicBlock *eb) : LB(lb), PB(pb), EB(eb) {}
    MachineBasicBlock *LB, *PB, *EB;
  };
  typedef std::vector<MachineInstr*> InstrList;
  // A chain of dependent bit-shuffling instructions that has a single
  // input register and a single live output register.
  struct InstrGroup {
    BitTracker::RegisterRef Inp, Out;
    InstrList Ins;
  };
  // Decomposition of a PHI node into its loop-carried and preheader inputs.
  struct PhiInfo {
    PhiInfo(MachineInstr &P, MachineBasicBlock &B);
    unsigned DefR;
    BitTracker::RegisterRef LR, PR;  // Loop Register, Preheader Register
    MachineBasicBlock *LB, *PB;      // Loop Block, Preheader Block
  };

  static unsigned getDefReg(const MachineInstr *MI);
  bool isConst(unsigned Reg) const;
  bool isBitShuffle(const MachineInstr *MI, unsigned DefR) const;
  bool isStoreInput(const MachineInstr *MI, unsigned DefR) const;
  bool isShuffleOf(unsigned OutR, unsigned InpR) const;
  bool isSameShuffle(unsigned OutR1, unsigned InpR1, unsigned OutR2,
      unsigned &InpR2) const;
  void moveGroup(InstrGroup &G, MachineBasicBlock &LB, MachineBasicBlock &PB,
      MachineBasicBlock::iterator At, unsigned OldPhiR, unsigned NewPredR);
  bool processLoop(LoopCand &C);
};

} // end anonymous namespace

char HexagonLoopRescheduling::ID = 0;

INITIALIZE_PASS(HexagonLoopRescheduling, "hexagon-loop-resched",
  "Hexagon Loop Rescheduling", false, false)

// Split the PHI operands of P into the value coming from block B (the loop
// back edge) and the value coming from any other predecessor (preheader).
HexagonLoopRescheduling::PhiInfo::PhiInfo(MachineInstr &P,
      MachineBasicBlock &B) {
  DefR = HexagonLoopRescheduling::getDefReg(&P);
  LB = &B;
  PB = nullptr;
  // PHI operands come in (value, block) pairs starting at operand 1.
  for (unsigned i = 1, n = P.getNumOperands(); i < n; i += 2) {
    const MachineOperand &OpB = P.getOperand(i+1);
    if (OpB.getMBB() == &B) {
      LR = P.getOperand(i);
      continue;
    }
    PB = OpB.getMBB();
    PR = P.getOperand(i);
  }
}

// Return the register defined by MI if it defines exactly one, 0 otherwise.
unsigned HexagonLoopRescheduling::getDefReg(const MachineInstr *MI) {
  RegisterSet Defs;
  HBS::getInstrDefs(*MI, Defs);
  if (Defs.count() != 1)
    return 0;
  return Defs.find_first();
}

// Check if all tracked bits of Reg are compile-time constants.
bool HexagonLoopRescheduling::isConst(unsigned Reg) const {
  if (!BTP->has(Reg))
    return false;
  const BitTracker::RegisterCell &RC = BTP->lookup(Reg);
  for (unsigned i = 0, w = RC.width(); i < w; ++i) {
    const BitTracker::BitValue &V = RC[i];
    if (!V.is(0) && !V.is(1))
      return false;
  }
  return true;
}

// Check if MI is one of the opcodes treated as pure bit rearrangement
// (shifts, inserts, or/and, combines) — i.e. movable shuffling code.
bool HexagonLoopRescheduling::isBitShuffle(const MachineInstr *MI,
      unsigned DefR) const {
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
    case TargetOpcode::COPY:
    case Hexagon::S2_lsr_i_r:
    case Hexagon::S2_asr_i_r:
    case Hexagon::S2_asl_i_r:
    case Hexagon::S2_lsr_i_p:
    case Hexagon::S2_asr_i_p:
    case Hexagon::S2_asl_i_p:
    case Hexagon::S2_insert:
    case Hexagon::A2_or:
    case Hexagon::A2_orp:
    case Hexagon::A2_and:
    case Hexagon::A2_andp:
    case Hexagon::A2_combinew:
    case Hexagon::A4_combineri:
    case Hexagon::A4_combineir:
    case Hexagon::A2_combineii:
    case Hexagon::A4_combineii:
    case Hexagon::A2_combine_ll:
    case Hexagon::A2_combine_lh:
    case Hexagon::A2_combine_hl:
    case Hexagon::A2_combine_hh:
      return true;
  }
  return false;
}

// Check if InpR is used by MI as the stored value, i.e. as its last
// operand (the value operand of a store).
bool HexagonLoopRescheduling::isStoreInput(const MachineInstr *MI,
      unsigned InpR) const {
  for (unsigned i = 0, n = MI->getNumOperands(); i < n; ++i) {
    const MachineOperand &Op = MI->getOperand(i);
    if (!Op.isReg())
      continue;
    if (Op.getReg() == InpR)
      return i == n-1;
  }
  return false;
}

// Check if every referenced bit of OutR comes from InpR (constant bits
// are allowed).
bool HexagonLoopRescheduling::isShuffleOf(unsigned OutR, unsigned InpR) const {
  if (!BTP->has(OutR) || !BTP->has(InpR))
    return false;
  const BitTracker::RegisterCell &OutC = BTP->lookup(OutR);
  for (unsigned i = 0, w = OutC.width(); i < w; ++i) {
    const BitTracker::BitValue &V = OutC[i];
    if (V.Type != BitTracker::BitValue::Ref)
      continue;
    if (V.RefI.Reg != InpR)
      return false;
  }
  return true;
}

// Check if OutR2 applies the same bit rearrangement to some register as
// OutR1 applies to InpR1. On success, set InpR2 to that source register.
bool HexagonLoopRescheduling::isSameShuffle(unsigned OutR1, unsigned InpR1,
      unsigned OutR2, unsigned &InpR2) const {
  if (!BTP->has(OutR1) || !BTP->has(InpR1) || !BTP->has(OutR2))
    return false;
  const BitTracker::RegisterCell &OutC1 = BTP->lookup(OutR1);
  const BitTracker::RegisterCell &OutC2 = BTP->lookup(OutR2);
  unsigned W = OutC1.width();
  unsigned MatchR = 0;
  if (W != OutC2.width())
    return false;
  for (unsigned i = 0; i < W; ++i) {
    const BitTracker::BitValue &V1 = OutC1[i], &V2 = OutC2[i];
    if (V1.Type != V2.Type || V1.Type == BitTracker::BitValue::One)
      return false;
    if (V1.Type != BitTracker::BitValue::Ref)
      continue;
    // Referenced bits must come from the same position...
    if (V1.RefI.Pos != V2.RefI.Pos)
      return false;
    if (V1.RefI.Reg != InpR1)
      return false;
    if (V2.RefI.Reg == 0 || V2.RefI.Reg == OutR2)
      return false;
    // ...and from a single, consistent source register in OutC2.
    if (!MatchR)
      MatchR = V2.RefI.Reg;
    else if (V2.RefI.Reg != MatchR)
      return false;
  }
  InpR2 = MatchR;
  return true;
}

// Clone the instruction group G at At in the loop block, feeding it through
// a new PHI (NewPredR from the preheader, G's input on the back edge), and
// redirect users of OldPhiR to the clone of G's output.
void HexagonLoopRescheduling::moveGroup(InstrGroup &G, MachineBasicBlock &LB,
      MachineBasicBlock &PB, MachineBasicBlock::iterator At, unsigned OldPhiR,
      unsigned NewPredR) {
  DenseMap<unsigned,unsigned> RegMap;

  const TargetRegisterClass *PhiRC = MRI->getRegClass(NewPredR);
  unsigned PhiR = MRI->createVirtualRegister(PhiRC);
  BuildMI(LB, At, At->getDebugLoc(), HII->get(TargetOpcode::PHI), PhiR)
    .addReg(NewPredR)
    .addMBB(&PB)
    .addReg(G.Inp.Reg)
    .addMBB(&LB);
  RegMap.insert(std::make_pair(G.Inp.Reg, PhiR));

  // G.Ins is kept in reverse (use-before-def) order; clone back-to-front.
  for (unsigned i = G.Ins.size(); i > 0; --i) {
    const MachineInstr *SI = G.Ins[i-1];
    unsigned DR = getDefReg(SI);
    const TargetRegisterClass *RC = MRI->getRegClass(DR);
    unsigned NewDR = MRI->createVirtualRegister(RC);
    DebugLoc DL = SI->getDebugLoc();

    auto MIB = BuildMI(LB, At, DL, HII->get(SI->getOpcode()), NewDR);
    for (unsigned j = 0, m = SI->getNumOperands(); j < m; ++j) {
      const MachineOperand &Op = SI->getOperand(j);
      if (!Op.isReg()) {
        MIB.add(Op);
        continue;
      }
      if (!Op.isUse())
        continue;
      // Remap each use to the already-cloned definition.
      unsigned UseR = RegMap[Op.getReg()];
      MIB.addReg(UseR, 0, Op.getSubReg());
    }
    RegMap.insert(std::make_pair(DR, NewDR));
  }

  HBS::replaceReg(OldPhiR, RegMap[G.Out.Reg], *MRI);
}

bool HexagonLoopRescheduling::processLoop(LoopCand &C) {
  DEBUG(dbgs() << "Processing loop in BB#" << C.LB->getNumber() << "\n");
  std::vector<PhiInfo> Phis;
  // Collect PHIs whose value is only used inside the loop, and at least
  // once by a bit-shuffling instruction or as a stored value.
  for (auto &I : *C.LB) {
    if (!I.isPHI())
      break;
    unsigned PR = getDefReg(&I);
    if (isConst(PR))
      continue;
    bool BadUse = false, GoodUse = false;
    for (auto UI = MRI->use_begin(PR), UE = MRI->use_end(); UI != UE; ++UI) {
      MachineInstr *UseI = UI->getParent();
      if (UseI->getParent() != C.LB) {
        BadUse = true;
        break;
      }
      if (isBitShuffle(UseI, PR) || isStoreInput(UseI, PR))
        GoodUse = true;
    }
    if (BadUse || !GoodUse)
      continue;

    Phis.push_back(PhiInfo(I, *C.LB));
  }

  DEBUG({
    dbgs() << "Phis: {";
    for (auto &I : Phis) {
      dbgs() << ' ' << PrintReg(I.DefR, HRI) << "=phi("
             << PrintReg(I.PR.Reg, HRI, I.PR.Sub) << ":b" << I.PB->getNumber()
             << ',' << PrintReg(I.LR.Reg, HRI, I.LR.Sub) << ":b"
             << I.LB->getNumber() << ')';
    }
    dbgs() << " }\n";
  });

  if (Phis.empty())
    return false;

  bool Changed = false;
  InstrList ShufIns;

  // Go backwards in the block: for each bit shuffling instruction, check
  // if that instruction could potentially be moved to the front of the loop:
  // the output of the loop cannot be used in a non-shuffling instruction
  // in this loop.
  for (auto I = C.LB->rbegin(), E = C.LB->rend(); I != E; ++I) {
    if (I->isTerminator())
      continue;
    if (I->isPHI())
      break;

    RegisterSet Defs;
    HBS::getInstrDefs(*I, Defs);
    if (Defs.count() != 1)
      continue;
    unsigned DefR = Defs.find_first();
    if (!TargetRegisterInfo::isVirtualRegister(DefR))
      continue;
    if (!isBitShuffle(&*I, DefR))
      continue;

    bool BadUse = false;
    for (auto UI = MRI->use_begin(DefR), UE = MRI->use_end(); UI != UE; ++UI) {
      MachineInstr *UseI = UI->getParent();
      if (UseI->getParent() == C.LB) {
        if (UseI->isPHI()) {
          // If the use is in a phi node in this loop, then it should be
          // the value corresponding to the back edge.
3048 unsigned Idx = UI.getOperandNo(); 3049 if (UseI->getOperand(Idx+1).getMBB() != C.LB) 3050 BadUse = true; 3051 } else { 3052 auto F = find(ShufIns, UseI); 3053 if (F == ShufIns.end()) 3054 BadUse = true; 3055 } 3056 } else { 3057 // There is a use outside of the loop, but there is no epilog block 3058 // suitable for a copy-out. 3059 if (C.EB == nullptr) 3060 BadUse = true; 3061 } 3062 if (BadUse) 3063 break; 3064 } 3065 3066 if (BadUse) 3067 continue; 3068 ShufIns.push_back(&*I); 3069 } 3070 3071 // Partition the list of shuffling instructions into instruction groups, 3072 // where each group has to be moved as a whole (i.e. a group is a chain of 3073 // dependent instructions). A group produces a single live output register, 3074 // which is meant to be the input of the loop phi node (although this is 3075 // not checked here yet). It also uses a single register as its input, 3076 // which is some value produced in the loop body. After moving the group 3077 // to the beginning of the loop, that input register would need to be 3078 // the loop-carried register (through a phi node) instead of the (currently 3079 // loop-carried) output register. 3080 typedef std::vector<InstrGroup> InstrGroupList; 3081 InstrGroupList Groups; 3082 3083 for (unsigned i = 0, n = ShufIns.size(); i < n; ++i) { 3084 MachineInstr *SI = ShufIns[i]; 3085 if (SI == nullptr) 3086 continue; 3087 3088 InstrGroup G; 3089 G.Ins.push_back(SI); 3090 G.Out.Reg = getDefReg(SI); 3091 RegisterSet Inputs; 3092 HBS::getInstrUses(*SI, Inputs); 3093 3094 for (unsigned j = i+1; j < n; ++j) { 3095 MachineInstr *MI = ShufIns[j]; 3096 if (MI == nullptr) 3097 continue; 3098 RegisterSet Defs; 3099 HBS::getInstrDefs(*MI, Defs); 3100 // If this instruction does not define any pending inputs, skip it. 3101 if (!Defs.intersects(Inputs)) 3102 continue; 3103 // Otherwise, add it to the current group and remove the inputs that 3104 // are defined by MI. 
3105 G.Ins.push_back(MI); 3106 Inputs.remove(Defs); 3107 // Then add all registers used by MI. 3108 HBS::getInstrUses(*MI, Inputs); 3109 ShufIns[j] = nullptr; 3110 } 3111 3112 // Only add a group if it requires at most one register. 3113 if (Inputs.count() > 1) 3114 continue; 3115 auto LoopInpEq = [G] (const PhiInfo &P) -> bool { 3116 return G.Out.Reg == P.LR.Reg; 3117 }; 3118 if (llvm::find_if(Phis, LoopInpEq) == Phis.end()) 3119 continue; 3120 3121 G.Inp.Reg = Inputs.find_first(); 3122 Groups.push_back(G); 3123 } 3124 3125 DEBUG({ 3126 for (unsigned i = 0, n = Groups.size(); i < n; ++i) { 3127 InstrGroup &G = Groups[i]; 3128 dbgs() << "Group[" << i << "] inp: " 3129 << PrintReg(G.Inp.Reg, HRI, G.Inp.Sub) 3130 << " out: " << PrintReg(G.Out.Reg, HRI, G.Out.Sub) << "\n"; 3131 for (unsigned j = 0, m = G.Ins.size(); j < m; ++j) 3132 dbgs() << " " << *G.Ins[j]; 3133 } 3134 }); 3135 3136 for (unsigned i = 0, n = Groups.size(); i < n; ++i) { 3137 InstrGroup &G = Groups[i]; 3138 if (!isShuffleOf(G.Out.Reg, G.Inp.Reg)) 3139 continue; 3140 auto LoopInpEq = [G] (const PhiInfo &P) -> bool { 3141 return G.Out.Reg == P.LR.Reg; 3142 }; 3143 auto F = llvm::find_if(Phis, LoopInpEq); 3144 if (F == Phis.end()) 3145 continue; 3146 unsigned PrehR = 0; 3147 if (!isSameShuffle(G.Out.Reg, G.Inp.Reg, F->PR.Reg, PrehR)) { 3148 const MachineInstr *DefPrehR = MRI->getVRegDef(F->PR.Reg); 3149 unsigned Opc = DefPrehR->getOpcode(); 3150 if (Opc != Hexagon::A2_tfrsi && Opc != Hexagon::A2_tfrpi) 3151 continue; 3152 if (!DefPrehR->getOperand(1).isImm()) 3153 continue; 3154 if (DefPrehR->getOperand(1).getImm() != 0) 3155 continue; 3156 const TargetRegisterClass *RC = MRI->getRegClass(G.Inp.Reg); 3157 if (RC != MRI->getRegClass(F->PR.Reg)) { 3158 PrehR = MRI->createVirtualRegister(RC); 3159 unsigned TfrI = (RC == &Hexagon::IntRegsRegClass) ? Hexagon::A2_tfrsi 3160 : Hexagon::A2_tfrpi; 3161 auto T = C.PB->getFirstTerminator(); 3162 DebugLoc DL = (T != C.PB->end()) ? 
T->getDebugLoc() : DebugLoc(); 3163 BuildMI(*C.PB, T, DL, HII->get(TfrI), PrehR) 3164 .addImm(0); 3165 } else { 3166 PrehR = F->PR.Reg; 3167 } 3168 } 3169 // isSameShuffle could match with PrehR being of a wider class than 3170 // G.Inp.Reg, for example if G shuffles the low 32 bits of its input, 3171 // it would match for the input being a 32-bit register, and PrehR 3172 // being a 64-bit register (where the low 32 bits match). This could 3173 // be handled, but for now skip these cases. 3174 if (MRI->getRegClass(PrehR) != MRI->getRegClass(G.Inp.Reg)) 3175 continue; 3176 moveGroup(G, *F->LB, *F->PB, F->LB->getFirstNonPHI(), F->DefR, PrehR); 3177 Changed = true; 3178 } 3179 3180 return Changed; 3181 } 3182 3183 bool HexagonLoopRescheduling::runOnMachineFunction(MachineFunction &MF) { 3184 if (skipFunction(*MF.getFunction())) 3185 return false; 3186 3187 auto &HST = MF.getSubtarget<HexagonSubtarget>(); 3188 HII = HST.getInstrInfo(); 3189 HRI = HST.getRegisterInfo(); 3190 MRI = &MF.getRegInfo(); 3191 const HexagonEvaluator HE(*HRI, *MRI, *HII, MF); 3192 BitTracker BT(HE, MF); 3193 DEBUG(BT.trace(true)); 3194 BT.run(); 3195 BTP = &BT; 3196 3197 std::vector<LoopCand> Cand; 3198 3199 for (auto &B : MF) { 3200 if (B.pred_size() != 2 || B.succ_size() != 2) 3201 continue; 3202 MachineBasicBlock *PB = nullptr; 3203 bool IsLoop = false; 3204 for (auto PI = B.pred_begin(), PE = B.pred_end(); PI != PE; ++PI) { 3205 if (*PI != &B) 3206 PB = *PI; 3207 else 3208 IsLoop = true; 3209 } 3210 if (!IsLoop) 3211 continue; 3212 3213 MachineBasicBlock *EB = nullptr; 3214 for (auto SI = B.succ_begin(), SE = B.succ_end(); SI != SE; ++SI) { 3215 if (*SI == &B) 3216 continue; 3217 // Set EP to the epilog block, if it has only 1 predecessor (i.e. the 3218 // edge from B to EP is non-critical. 
3219 if ((*SI)->pred_size() == 1) 3220 EB = *SI; 3221 break; 3222 } 3223 3224 Cand.push_back(LoopCand(&B, PB, EB)); 3225 } 3226 3227 bool Changed = false; 3228 for (auto &C : Cand) 3229 Changed |= processLoop(C); 3230 3231 return Changed; 3232 } 3233 3234 //===----------------------------------------------------------------------===// 3235 // Public Constructor Functions 3236 //===----------------------------------------------------------------------===// 3237 3238 FunctionPass *llvm::createHexagonLoopRescheduling() { 3239 return new HexagonLoopRescheduling(); 3240 } 3241 3242 FunctionPass *llvm::createHexagonBitSimplify() { 3243 return new HexagonBitSimplify(); 3244 } 3245