//===-- Thumb2SizeReduction.cpp - Thumb2 code size reduction pass -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "Thumb2InstrInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/IR/Function.h" // To access Function attributes
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <utility>
using namespace llvm;

#define DEBUG_TYPE "t2-reduce-size"

STATISTIC(NumNarrows, "Number of 32-bit instrs reduced to 16-bit ones");
STATISTIC(Num2Addrs,  "Number of 32-bit instrs reduced to 2addr 16-bit ones");
STATISTIC(NumLdSts,   "Number of 32-bit load / store reduced to 16-bit ones");

static cl::opt<int> ReduceLimit("t2-reduce-limit",
                                cl::init(-1), cl::Hidden);
static cl::opt<int> ReduceLimit2Addr("t2-reduce-limit2",
                                     cl::init(-1), cl::Hidden);
static cl::opt<int> ReduceLimitLdSt("t2-reduce-limit3",
                                    cl::init(-1), cl::Hidden);

namespace {
  /// ReduceTable - A static table with information on mapping from wide
  /// opcodes to narrow ones.
  struct ReduceEntry {
    uint16_t WideOpc;      // Wide opcode
    uint16_t NarrowOpc1;   // Narrow opcode to transform to
    uint16_t NarrowOpc2;   // Narrow opcode when it's two-address
    uint8_t  Imm1Limit;    // Limit of immediate field (bits)
    uint8_t  Imm2Limit;    // Limit of immediate field when it's two-address
    unsigned LowRegs1 : 1; // Only possible if low-registers are used
    unsigned LowRegs2 : 1; // Only possible if low-registers are used (2addr)
    unsigned PredCC1  : 2; // 0 - If predicated, cc is on and vice versa.
                           // 1 - No cc field.
                           // 2 - Always set CPSR.
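    // PredCC2 encodes the same three states as PredCC1, but for the
    // two-address (NarrowOpc2) form of the instruction.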
    unsigned PredCC2  : 2;
    unsigned PartFlag : 1; // 16-bit instruction does partial flag update
    unsigned Special  : 1; // Needs to be dealt with specially
    unsigned AvoidMovs: 1; // Avoid movs with shifter operand (for Swift)
  };

  static const ReduceEntry ReduceTable[] = {
    // Wide,          Narrow1,      Narrow2,      imm1,imm2, lo1, lo2, P/C,PF,S,AM
    { ARM::t2ADCrr,   0,            ARM::tADC,     0,   0,    0,   1,  0,0, 0,0,0 },
    { ARM::t2ADDri,   ARM::tADDi3,  ARM::tADDi8,   3,   8,    1,   1,  0,0, 0,1,0 },
    { ARM::t2ADDrr,   ARM::tADDrr,  ARM::tADDhirr, 0,   0,    1,   0,  0,1, 0,0,0 },
    { ARM::t2ADDSri,  ARM::tADDi3,  ARM::tADDi8,   3,   8,    1,   1,  2,2, 0,1,0 },
    { ARM::t2ADDSrr,  ARM::tADDrr,  0,             0,   0,    1,   0,  2,0, 0,1,0 },
    { ARM::t2ANDrr,   0,            ARM::tAND,     0,   0,    0,   1,  0,0, 1,0,0 },
    { ARM::t2ASRri,   ARM::tASRri,  0,             5,   0,    1,   0,  0,0, 1,0,1 },
    { ARM::t2ASRrr,   0,            ARM::tASRrr,   0,   0,    0,   1,  0,0, 1,0,1 },
    { ARM::t2BICrr,   0,            ARM::tBIC,     0,   0,    0,   1,  0,0, 1,0,0 },
    //FIXME: Disable CMN, as CCodes are backwards from compare expectations
    //{ ARM::t2CMNrr, ARM::tCMN,    0,             0,   0,    1,   0,  2,0, 0,0,0 },
    { ARM::t2CMNzrr,  ARM::tCMNz,   0,             0,   0,    1,   0,  2,0, 0,0,0 },
    { ARM::t2CMPri,   ARM::tCMPi8,  0,             8,   0,    1,   0,  2,0, 0,0,0 },
    { ARM::t2CMPrr,   ARM::tCMPhir, 0,             0,   0,    0,   0,  2,0, 0,1,0 },
    { ARM::t2EORrr,   0,            ARM::tEOR,     0,   0,    0,   1,  0,0, 1,0,0 },
    // FIXME: adr.n immediate offset must be multiple of 4.
    //{ ARM::t2LEApcrelJT,ARM::tLEApcrelJT, 0,     0,   0,    1,   0,  1,0, 0,0,0 },
    { ARM::t2LSLri,   ARM::tLSLri,  0,             5,   0,    1,   0,  0,0, 1,0,1 },
    { ARM::t2LSLrr,   0,            ARM::tLSLrr,   0,   0,    0,   1,  0,0, 1,0,1 },
    { ARM::t2LSRri,   ARM::tLSRri,  0,             5,   0,    1,   0,  0,0, 1,0,1 },
    { ARM::t2LSRrr,   0,            ARM::tLSRrr,   0,   0,    0,   1,  0,0, 1,0,1 },
    { ARM::t2MOVi,    ARM::tMOVi8,  0,             8,   0,    1,   0,  0,0, 1,0,0 },
    { ARM::t2MOVi16,  ARM::tMOVi8,  0,             8,   0,    1,   0,  0,0, 1,1,0 },
    // FIXME: Do we need the 16-bit 'S' variant?
    { ARM::t2MOVr,    ARM::tMOVr,   0,             0,   0,    0,   0,  1,0, 0,0,0 },
    { ARM::t2MUL,     0,            ARM::tMUL,     0,   0,    0,   1,  0,0, 1,0,0 },
    { ARM::t2MVNr,    ARM::tMVN,    0,             0,   0,    1,   0,  0,0, 0,0,0 },
    { ARM::t2ORRrr,   0,            ARM::tORR,     0,   0,    0,   1,  0,0, 1,0,0 },
    { ARM::t2REV,     ARM::tREV,    0,             0,   0,    1,   0,  1,0, 0,0,0 },
    { ARM::t2REV16,   ARM::tREV16,  0,             0,   0,    1,   0,  1,0, 0,0,0 },
    { ARM::t2REVSH,   ARM::tREVSH,  0,             0,   0,    1,   0,  1,0, 0,0,0 },
    { ARM::t2RORrr,   0,            ARM::tROR,     0,   0,    0,   1,  0,0, 1,0,0 },
    { ARM::t2RSBri,   ARM::tRSB,    0,             0,   0,    1,   0,  0,0, 0,1,0 },
    { ARM::t2RSBSri,  ARM::tRSB,    0,             0,   0,    1,   0,  2,0, 0,1,0 },
    { ARM::t2SBCrr,   0,            ARM::tSBC,     0,   0,    0,   1,  0,0, 0,0,0 },
    { ARM::t2SUBri,   ARM::tSUBi3,  ARM::tSUBi8,   3,   8,    1,   1,  0,0, 0,0,0 },
    { ARM::t2SUBrr,   ARM::tSUBrr,  0,             0,   0,    1,   0,  0,0, 0,0,0 },
    { ARM::t2SUBSri,  ARM::tSUBi3,  ARM::tSUBi8,   3,   8,    1,   1,  2,2, 0,0,0 },
    { ARM::t2SUBSrr,  ARM::tSUBrr,  0,             0,   0,    1,   0,  2,0, 0,0,0 },
    { ARM::t2SXTB,    ARM::tSXTB,   0,             0,   0,    1,   0,  1,0, 0,1,0 },
    { ARM::t2SXTH,    ARM::tSXTH,   0,             0,   0,    1,   0,  1,0, 0,1,0 },
    { ARM::t2TSTrr,   ARM::tTST,    0,             0,   0,    1,   0,  2,0, 0,0,0 },
    { ARM::t2UXTB,    ARM::tUXTB,   0,             0,   0,    1,   0,  1,0, 0,1,0 },
    { ARM::t2UXTH,    ARM::tUXTH,   0,             0,   0,    1,   0,  1,0, 0,1,0 },

    // FIXME: Clean this up after splitting each Thumb load / store opcode
    // into multiple ones.
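    // Load / store table entries. Imm1Limit / Imm2Limit give the width in bits
    // of the narrow immediate field; ReduceLoadStore scales byte offsets
    // (by 4 for word, by 2 for halfword accesses) before checking the limit.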
    { ARM::t2LDRi12,  ARM::tLDRi,   ARM::tLDRspi,  5,   8,    1,   0,  0,0, 0,1,0 },
    { ARM::t2LDRs,    ARM::tLDRr,   0,             0,   0,    1,   0,  0,0, 0,1,0 },
    { ARM::t2LDRBi12, ARM::tLDRBi,  0,             5,   0,    1,   0,  0,0, 0,1,0 },
    { ARM::t2LDRBs,   ARM::tLDRBr,  0,             0,   0,    1,   0,  0,0, 0,1,0 },
    { ARM::t2LDRHi12, ARM::tLDRHi,  0,             5,   0,    1,   0,  0,0, 0,1,0 },
    { ARM::t2LDRHs,   ARM::tLDRHr,  0,             0,   0,    1,   0,  0,0, 0,1,0 },
    { ARM::t2LDRSBs,  ARM::tLDRSB,  0,             0,   0,    1,   0,  0,0, 0,1,0 },
    { ARM::t2LDRSHs,  ARM::tLDRSH,  0,             0,   0,    1,   0,  0,0, 0,1,0 },
    { ARM::t2LDR_POST,ARM::tLDMIA_UPD, 0,          0,   0,    1,   0,  0,0, 0,1,0 },
    { ARM::t2STRi12,  ARM::tSTRi,   ARM::tSTRspi,  5,   8,    1,   0,  0,0, 0,1,0 },
    { ARM::t2STRs,    ARM::tSTRr,   0,             0,   0,    1,   0,  0,0, 0,1,0 },
    { ARM::t2STRBi12, ARM::tSTRBi,  0,             5,   0,    1,   0,  0,0, 0,1,0 },
    { ARM::t2STRBs,   ARM::tSTRBr,  0,             0,   0,    1,   0,  0,0, 0,1,0 },
    { ARM::t2STRHi12, ARM::tSTRHi,  0,             5,   0,    1,   0,  0,0, 0,1,0 },
    { ARM::t2STRHs,   ARM::tSTRHr,  0,             0,   0,    1,   0,  0,0, 0,1,0 },
    { ARM::t2STR_POST,ARM::tSTMIA_UPD, 0,          0,   0,    1,   0,  0,0, 0,1,0 },

    { ARM::t2LDMIA,   ARM::tLDMIA,  0,             0,   0,    1,   1,  1,1, 0,1,0 },
    { ARM::t2LDMIA_RET, 0,          ARM::tPOP_RET, 0,   0,    1,   1,  1,1, 0,1,0 },
    { ARM::t2LDMIA_UPD, ARM::tLDMIA_UPD, ARM::tPOP,0,   0,    1,   1,  1,1, 0,1,0 },
    // ARM::t2STMIA (with no basereg writeback) has no Thumb1 equivalent.
    // tSTMIA_UPD is a change in semantics which can only be used if the base
    // register is killed. This difference is correctly handled elsewhere.
    { ARM::t2STMIA,   ARM::tSTMIA_UPD, 0,          0,   0,    1,   1,  1,1, 0,1,0 },
    { ARM::t2STMIA_UPD, ARM::tSTMIA_UPD, 0,        0,   0,    1,   1,  1,1, 0,1,0 },
    { ARM::t2STMDB_UPD, 0,          ARM::tPUSH,    0,   0,    1,   1,  1,1, 0,1,0 }
  };

  class Thumb2SizeReduce : public MachineFunctionPass {
  public:
    static char ID;
    Thumb2SizeReduce(std::function<bool(const Function &)> Ftor);

    const Thumb2InstrInfo *TII;
    const ARMSubtarget *STI;

    bool runOnMachineFunction(MachineFunction &MF) override;

    MachineFunctionProperties getRequiredProperties() const override {
      return MachineFunctionProperties().set(
          MachineFunctionProperties::Property::NoVRegs);
    }

    StringRef getPassName() const override {
      return "Thumb2 instruction size reduction pass";
    }

  private:
    /// ReduceOpcodeMap - Maps wide opcode to index of entry in ReduceTable.
    DenseMap<unsigned, unsigned> ReduceOpcodeMap;

    bool canAddPseudoFlagDep(MachineInstr *Use, bool IsSelfLoop);

    bool VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
                         bool is2Addr, ARMCC::CondCodes Pred,
                         bool LiveCPSR, bool &HasCC, bool &CCDead);

    bool ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
                         const ReduceEntry &Entry);

    bool ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
                       const ReduceEntry &Entry, bool LiveCPSR, bool IsSelfLoop);

    /// ReduceTo2Addr - Reduce a 32-bit instruction to a 16-bit two-address
    /// instruction.
    bool ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
                       const ReduceEntry &Entry, bool LiveCPSR,
                       bool IsSelfLoop);

    /// ReduceToNarrow - Reduce a 32-bit instruction to a 16-bit
    /// non-two-address instruction.
    bool ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
                        const ReduceEntry &Entry, bool LiveCPSR,
                        bool IsSelfLoop);

    /// ReduceMI - Attempt to reduce MI, return true on success.
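    /// Dispatches to ReduceSpecial for entries marked Special; otherwise tries
    /// the two-address (NarrowOpc2) form first, then the NarrowOpc1 form.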
    bool ReduceMI(MachineBasicBlock &MBB, MachineInstr *MI,
                  bool LiveCPSR, bool IsSelfLoop);

    /// ReduceMBB - Reduce width of instructions in the specified basic block.
    bool ReduceMBB(MachineBasicBlock &MBB);

    bool OptimizeSize;
    bool MinimizeSize;

    // Last instruction to define CPSR in the current block.
    MachineInstr *CPSRDef;
    // Was CPSR last defined by a high latency instruction?
    // When CPSRDef is null, this refers to CPSR defs in predecessors.
    bool HighLatencyCPSR;

    struct MBBInfo {
      // The flags leaving this block have high latency.
      bool HighLatencyCPSR;
      // Has this block been visited yet?
      bool Visited;

      MBBInfo() : HighLatencyCPSR(false), Visited(false) {}
    };

    SmallVector<MBBInfo, 8> BlockInfo;

    std::function<bool(const Function &)> PredicateFtor;
  };
  char Thumb2SizeReduce::ID = 0;
}

Thumb2SizeReduce::Thumb2SizeReduce(std::function<bool(const Function &)> Ftor)
    : MachineFunctionPass(ID), PredicateFtor(std::move(Ftor)) {
  OptimizeSize = MinimizeSize = false;
  for (unsigned i = 0, e = array_lengthof(ReduceTable); i != e; ++i) {
    unsigned FromOpc = ReduceTable[i].WideOpc;
    if (!ReduceOpcodeMap.insert(std::make_pair(FromOpc, i)).second)
      llvm_unreachable("Duplicated entries?");
  }
}

static bool HasImplicitCPSRDef(const MCInstrDesc &MCID) {
  for (const MCPhysReg *Regs = MCID.getImplicitDefs(); *Regs; ++Regs)
    if (*Regs == ARM::CPSR)
      return true;
  return false;
}

// Check for a likely high-latency flag def.
static bool isHighLatencyCPSR(MachineInstr *Def) {
  switch(Def->getOpcode()) {
  case ARM::FMSTAT:
  case ARM::tMUL:
    return true;
  }
  return false;
}

/// canAddPseudoFlagDep - For A9 (and other out-of-order) implementations,
/// the 's' 16-bit instructions partially update CPSR. Abort the
/// transformation to avoid adding a false dependency on the last CPSR-setting
/// instruction, which hurts the out-of-order execution engine's ability
/// to do register renaming magic.
/// This function checks if there is a read-after-write dependency between the
/// last instruction that defines the CPSR and the current instruction. If
/// there is, then there is no harm done since the instruction cannot be
/// retired before the CPSR-setting instruction anyway.
/// Note, we are not doing full dependency analysis here for the sake of
/// compile time. We're not looking for cases like:
///   r0 = muls ...
///   r1 = add.w r0, ...
///   ...
///      = mul.w r1
/// In this case it would have been ok to narrow the mul.w to muls since there
/// is an indirect RAW dependency between the muls and the mul.w.
bool
Thumb2SizeReduce::canAddPseudoFlagDep(MachineInstr *Use, bool FirstInSelfLoop) {
  // Disable the check for -Oz (aka OptimizeForSizeHarder).
  if (MinimizeSize || !STI->avoidCPSRPartialUpdate())
    return false;

  if (!CPSRDef)
    // If this BB loops back to itself, conservatively avoid narrowing the
    // first instruction that does partial flag update.
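    // With no CPSR def seen in this block yet, also stay conservative when a
    // predecessor left CPSR set by a high-latency instruction.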
    return HighLatencyCPSR || FirstInSelfLoop;

  SmallSet<unsigned, 2> Defs;
  for (const MachineOperand &MO : CPSRDef->operands()) {
    if (!MO.isReg() || MO.isUndef() || MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == ARM::CPSR)
      continue;
    Defs.insert(Reg);
  }

  for (const MachineOperand &MO : Use->operands()) {
    if (!MO.isReg() || MO.isUndef() || MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    if (Defs.count(Reg))
      return false;
  }

  // If the current CPSR has high latency, try to avoid the false dependency.
  if (HighLatencyCPSR)
    return true;

  // tMOVi8 usually doesn't start long dependency chains, and there are a lot
  // of them, so always shrink them when CPSR doesn't have high latency.
  if (Use->getOpcode() == ARM::t2MOVi ||
      Use->getOpcode() == ARM::t2MOVi16)
    return false;

  // No read-after-write dependency. The narrowing will add false dependency.
  return true;
}

bool
Thumb2SizeReduce::VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
                                  bool is2Addr, ARMCC::CondCodes Pred,
                                  bool LiveCPSR, bool &HasCC, bool &CCDead) {
  if ((is2Addr  && Entry.PredCC2 == 0) ||
      (!is2Addr && Entry.PredCC1 == 0)) {
    if (Pred == ARMCC::AL) {
      // Not predicated, must set CPSR.
      if (!HasCC) {
        // Original instruction was not setting CPSR, but CPSR is not
        // currently live anyway. It's ok to set it. The CPSR def is
        // dead though.
        if (!LiveCPSR) {
          HasCC = true;
          CCDead = true;
          return true;
        }
        return false;
      }
    } else {
      // Predicated, must not set CPSR.
      if (HasCC)
        return false;
    }
  } else if ((is2Addr  && Entry.PredCC2 == 2) ||
             (!is2Addr && Entry.PredCC1 == 2)) {
    /// Old opcode has an optional def of CPSR.
    if (HasCC)
      return true;
    // If old opcode does not implicitly define CPSR, then it's not ok since
    // these new opcodes' CPSR def is not meant to be thrown away. e.g. CMP.
    if (!HasImplicitCPSRDef(MI->getDesc()))
      return false;
    HasCC = true;
  } else {
    // 16-bit instruction does not set CPSR.
    if (HasCC)
      return false;
  }

  return true;
}

static bool VerifyLowRegs(MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();
  bool isPCOk = (Opc == ARM::t2LDMIA_RET || Opc == ARM::t2LDMIA_UPD);
  bool isLROk = (Opc == ARM::t2STMDB_UPD);
  bool isSPOk = isPCOk || isLROk;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isImplicit())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == ARM::CPSR)
      continue;
    if (isPCOk && Reg == ARM::PC)
      continue;
    if (isLROk && Reg == ARM::LR)
      continue;
    if (Reg == ARM::SP) {
      if (isSPOk)
        continue;
      if (i == 1 && (Opc == ARM::t2LDRi12 || Opc == ARM::t2STRi12))
        // Special case for these ldr / str with sp as base register.
        continue;
    }
    if (!isARMLowRegister(Reg))
      return false;
  }
  return true;
}

bool
Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
                                  const ReduceEntry &Entry) {
  if (ReduceLimitLdSt != -1 && ((int)NumLdSts >= ReduceLimitLdSt))
    return false;

  unsigned Scale = 1;
  bool HasImmOffset = false;
  bool HasShift = false;
  bool HasOffReg = true;
  bool isLdStMul = false;
  unsigned Opc = Entry.NarrowOpc1;
  unsigned OpNum = 3; // First 'rest' of operands.
  uint8_t  ImmLimit = Entry.Imm1Limit;

  switch (Entry.WideOpc) {
  default:
    llvm_unreachable("Unexpected Thumb2 load / store opcode!");
  case ARM::t2LDRi12:
  case ARM::t2STRi12:
    if (MI->getOperand(1).getReg() == ARM::SP) {
      Opc = Entry.NarrowOpc2;
      ImmLimit = Entry.Imm2Limit;
    }

    Scale = 4;
    HasImmOffset = true;
    HasOffReg = false;
    break;
  case ARM::t2LDRBi12:
  case ARM::t2STRBi12:
    HasImmOffset = true;
    HasOffReg = false;
    break;
  case ARM::t2LDRHi12:
  case ARM::t2STRHi12:
    Scale = 2;
    HasImmOffset = true;
    HasOffReg = false;
    break;
  case ARM::t2LDRs:
  case ARM::t2LDRBs:
  case ARM::t2LDRHs:
  case ARM::t2LDRSBs:
  case ARM::t2LDRSHs:
  case ARM::t2STRs:
  case ARM::t2STRBs:
  case ARM::t2STRHs:
    HasShift = true;
    OpNum = 4;
    break;
  case ARM::t2LDR_POST:
  case ARM::t2STR_POST: {
    if (!MBB.getParent()->getFunction()->optForMinSize())
      return false;

    if (!MI->hasOneMemOperand() ||
        (*MI->memoperands_begin())->getAlignment() < 4)
      return false;

    // We're creating a completely different type of load/store - LDM from LDR.
    // For this reason we can't reuse the logic at the end of this function; we
    // have to implement the MI building here.
    bool IsStore = Entry.WideOpc == ARM::t2STR_POST;
    unsigned Rt = MI->getOperand(IsStore ? 1 : 0).getReg();
    unsigned Rn = MI->getOperand(IsStore ? 0 : 1).getReg();
    unsigned Offset = MI->getOperand(3).getImm();
    unsigned PredImm = MI->getOperand(4).getImm();
    unsigned PredReg = MI->getOperand(5).getReg();
    assert(isARMLowRegister(Rt));
    assert(isARMLowRegister(Rn));

    if (Offset != 4)
      return false;

    // Add the 16-bit load / store instruction.
    DebugLoc dl = MI->getDebugLoc();
    auto MIB = BuildMI(MBB, MI, dl, TII->get(Entry.NarrowOpc1))
                   .addReg(Rn, RegState::Define)
                   .addReg(Rn)
                   .addImm(PredImm)
                   .addReg(PredReg)
                   .addReg(Rt, IsStore ? 0 : RegState::Define);

    // Transfer memoperands.
    MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());

    // Transfer MI flags.
    MIB.setMIFlags(MI->getFlags());

    // Kill the old instruction.
    MI->eraseFromBundle();
    ++NumLdSts;
    return true;
  }
  case ARM::t2LDMIA: {
    unsigned BaseReg = MI->getOperand(0).getReg();
    assert(isARMLowRegister(BaseReg));

    // For the non-writeback version (this one), the base register must be
    // one of the registers being loaded.
    bool isOK = false;
    for (unsigned i = 3; i < MI->getNumOperands(); ++i) {
      if (MI->getOperand(i).getReg() == BaseReg) {
        isOK = true;
        break;
      }
    }

    if (!isOK)
      return false;

    OpNum = 0;
    isLdStMul = true;
    break;
  }
  case ARM::t2STMIA: {
    // If the base register is killed, we don't care what its value is after
    // the instruction, so we can use an updating STMIA.
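    // (The writeback def added when building the tSTMIA_UPD below is marked
    // dead for the same reason.)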
    if (!MI->getOperand(0).isKill())
      return false;

    break;
  }
  case ARM::t2LDMIA_RET: {
    unsigned BaseReg = MI->getOperand(1).getReg();
    if (BaseReg != ARM::SP)
      return false;
    Opc = Entry.NarrowOpc2; // tPOP_RET
    OpNum = 2;
    isLdStMul = true;
    break;
  }
  case ARM::t2LDMIA_UPD:
  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD: {
    OpNum = 0;

    unsigned BaseReg = MI->getOperand(1).getReg();
    if (BaseReg == ARM::SP &&
        (Entry.WideOpc == ARM::t2LDMIA_UPD ||
         Entry.WideOpc == ARM::t2STMDB_UPD)) {
      Opc = Entry.NarrowOpc2; // tPOP or tPUSH
      OpNum = 2;
    } else if (!isARMLowRegister(BaseReg) ||
               (Entry.WideOpc != ARM::t2LDMIA_UPD &&
                Entry.WideOpc != ARM::t2STMIA_UPD)) {
      return false;
    }

    isLdStMul = true;
    break;
  }
  }

  unsigned OffsetReg = 0;
  bool OffsetKill = false;
  bool OffsetInternal = false;
  if (HasShift) {
    OffsetReg = MI->getOperand(2).getReg();
    OffsetKill = MI->getOperand(2).isKill();
    OffsetInternal = MI->getOperand(2).isInternalRead();

    if (MI->getOperand(3).getImm())
      // Thumb1 addressing mode doesn't support shift.
      return false;
  }

  unsigned OffsetImm = 0;
  if (HasImmOffset) {
    OffsetImm = MI->getOperand(2).getImm();
    unsigned MaxOffset = ((1 << ImmLimit) - 1) * Scale;

    if ((OffsetImm & (Scale - 1)) || OffsetImm > MaxOffset)
      // Make sure the immediate field fits.
      return false;
  }

  // Add the 16-bit load / store instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, TII->get(Opc));

  // tSTMIA_UPD takes a defining register operand. We've already checked that
  // the register is killed, so mark it as dead here.
  if (Entry.WideOpc == ARM::t2STMIA)
    MIB.addReg(MI->getOperand(0).getReg(), RegState::Define | RegState::Dead);

  if (!isLdStMul) {
    MIB.addOperand(MI->getOperand(0));
    MIB.addOperand(MI->getOperand(1));

    if (HasImmOffset)
      MIB.addImm(OffsetImm / Scale);

    assert((!HasShift || OffsetReg) && "Invalid so_reg load / store address!");

    if (HasOffReg)
      MIB.addReg(OffsetReg, getKillRegState(OffsetKill) |
                            getInternalReadRegState(OffsetInternal));
  }

  // Transfer the rest of operands.
  for (unsigned e = MI->getNumOperands(); OpNum != e; ++OpNum)
    MIB.addOperand(MI->getOperand(OpNum));

  // Transfer memoperands.
  MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());

  // Transfer MI flags.
  MIB.setMIFlags(MI->getFlags());

  DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);

  MBB.erase_instr(MI);
  ++NumLdSts;
  return true;
}

bool
Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
                                const ReduceEntry &Entry,
                                bool LiveCPSR, bool IsSelfLoop) {
  unsigned Opc = MI->getOpcode();
  if (Opc == ARM::t2ADDri) {
    // If the source register is SP, try to reduce to tADDrSPi, otherwise
    // it's a normal reduce.
    if (MI->getOperand(1).getReg() != ARM::SP) {
      if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
        return true;
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
    }
    // Try to reduce to tADDrSPi.
    unsigned Imm = MI->getOperand(2).getImm();
    // The immediate must be in range, the destination register must be a low
    // reg, the predicate must be "always" and the condition flags must not
    // be being set.
    if (Imm & 3 || Imm > 1020)
      return false;
    if (!isARMLowRegister(MI->getOperand(0).getReg()))
      return false;
    if (MI->getOperand(3).getImm() != ARMCC::AL)
      return false;
    const MCInstrDesc &MCID = MI->getDesc();
    if (MCID.hasOptionalDef() &&
        MI->getOperand(MCID.getNumOperands()-1).getReg() == ARM::CPSR)
      return false;

    MachineInstrBuilder MIB = BuildMI(MBB, MI, MI->getDebugLoc(),
                                      TII->get(ARM::tADDrSPi))
      .addOperand(MI->getOperand(0))
      .addOperand(MI->getOperand(1))
      .addImm(Imm / 4); // The tADDrSPi has an implied scale by four.
    AddDefaultPred(MIB);

    // Transfer MI flags.
    MIB.setMIFlags(MI->getFlags());

    DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);

    MBB.erase_instr(MI);
    ++NumNarrows;
    return true;
  }

  if (Entry.LowRegs1 && !VerifyLowRegs(MI))
    return false;

  if (MI->mayLoadOrStore())
    return ReduceLoadStore(MBB, MI, Entry);

  switch (Opc) {
  default: break;
  case ARM::t2ADDSri:
  case ARM::t2ADDSrr: {
    unsigned PredReg = 0;
    if (getInstrPredicate(*MI, PredReg) == ARMCC::AL) {
      switch (Opc) {
      default: break;
      case ARM::t2ADDSri: {
        if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
          return true;
        LLVM_FALLTHROUGH;
      }
      case ARM::t2ADDSrr:
        return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
      }
    }
    break;
  }
  case ARM::t2RSBri:
  case ARM::t2RSBSri:
  case ARM::t2SXTB:
  case ARM::t2SXTH:
  case ARM::t2UXTB:
  case ARM::t2UXTH:
    if (MI->getOperand(2).getImm() == 0)
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
    break;
  case ARM::t2MOVi16:
    // Can convert only 'pure' immediate operands, not immediates obtained as
    // globals' addresses.
    if (MI->getOperand(1).isImm())
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
    break;
  case ARM::t2CMPrr: {
    // Try to reduce to the lo-reg only version first. Why there are two
    // versions of the instruction is a mystery.
    // It would be nice to just have two entries in the master table that
    // are prioritized, but the table assumes a unique entry for each
    // source insn opcode. So for now, we hack a local entry record to use.
    static const ReduceEntry NarrowEntry =
      { ARM::t2CMPrr,ARM::tCMPr, 0, 0, 0, 1, 1,2, 0, 0,1,0 };
    if (ReduceToNarrow(MBB, MI, NarrowEntry, LiveCPSR, IsSelfLoop))
      return true;
    return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
  }
  }
  return false;
}

bool
Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
                                const ReduceEntry &Entry,
                                bool LiveCPSR, bool IsSelfLoop) {

  if (ReduceLimit2Addr != -1 && ((int)Num2Addrs >= ReduceLimit2Addr))
    return false;

  if (!OptimizeSize && Entry.AvoidMovs && STI->avoidMOVsShifterOperand())
    // Don't issue movs with shifter operand for some CPUs unless we
    // are optimizing for size.
    return false;

  unsigned Reg0 = MI->getOperand(0).getReg();
  unsigned Reg1 = MI->getOperand(1).getReg();
  // t2MUL is "special". The tied source operand is second, not first.
  if (MI->getOpcode() == ARM::t2MUL) {
    unsigned Reg2 = MI->getOperand(2).getReg();
    // Early exit if the regs aren't all low regs.
    if (!isARMLowRegister(Reg0) || !isARMLowRegister(Reg1)
        || !isARMLowRegister(Reg2))
      return false;
    if (Reg0 != Reg2) {
      // If the other operand also isn't the same as the destination, we
      // can't reduce.
      if (Reg1 != Reg0)
        return false;
      // Try to commute the operands to make it a 2-address instruction.
      MachineInstr *CommutedMI = TII->commuteInstruction(*MI);
      if (!CommutedMI)
        return false;
    }
  } else if (Reg0 != Reg1) {
    // Try to commute the operands to make it a 2-address instruction.
    unsigned CommOpIdx1 = 1;
    unsigned CommOpIdx2 = TargetInstrInfo::CommuteAnyOperandIndex;
    if (!TII->findCommutedOpIndices(*MI, CommOpIdx1, CommOpIdx2) ||
        MI->getOperand(CommOpIdx2).getReg() != Reg0)
      return false;
    MachineInstr *CommutedMI =
        TII->commuteInstruction(*MI, false, CommOpIdx1, CommOpIdx2);
    if (!CommutedMI)
      return false;
  }
  if (Entry.LowRegs2 && !isARMLowRegister(Reg0))
    return false;
  if (Entry.Imm2Limit) {
    unsigned Imm = MI->getOperand(2).getImm();
    unsigned Limit = (1 << Entry.Imm2Limit) - 1;
    if (Imm > Limit)
      return false;
  } else {
    unsigned Reg2 = MI->getOperand(2).getReg();
    if (Entry.LowRegs2 && !isARMLowRegister(Reg2))
      return false;
  }

  // Check if it's possible / necessary to transfer the predicate.
  const MCInstrDesc &NewMCID = TII->get(Entry.NarrowOpc2);
  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(*MI, PredReg);
  bool SkipPred = false;
  if (Pred != ARMCC::AL) {
    if (!NewMCID.isPredicable())
      // Can't transfer predicate, fail.
      return false;
  } else {
    SkipPred = !NewMCID.isPredicable();
  }

  bool HasCC = false;
  bool CCDead = false;
  const MCInstrDesc &MCID = MI->getDesc();
  if (MCID.hasOptionalDef()) {
    unsigned NumOps = MCID.getNumOperands();
    HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
    if (HasCC && MI->getOperand(NumOps-1).isDead())
      CCDead = true;
  }
  if (!VerifyPredAndCC(MI, Entry, true, Pred, LiveCPSR, HasCC, CCDead))
    return false;

  // Avoid adding a false dependency on partial flag update by some 16-bit
  // instructions which have the 's' bit set.
  if (Entry.PartFlag && NewMCID.hasOptionalDef() && HasCC &&
      canAddPseudoFlagDep(MI, IsSelfLoop))
    return false;

  // Add the 16-bit instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID);
  MIB.addOperand(MI->getOperand(0));
  if (NewMCID.hasOptionalDef()) {
    if (HasCC)
      AddDefaultT1CC(MIB, CCDead);
    else
      AddNoT1CC(MIB);
  }

  // Transfer the rest of operands.
  unsigned NumOps = MCID.getNumOperands();
  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    if (i < NumOps && MCID.OpInfo[i].isOptionalDef())
      continue;
    if (SkipPred && MCID.OpInfo[i].isPredicate())
      continue;
    MIB.addOperand(MI->getOperand(i));
  }

  // Transfer MI flags.
  MIB.setMIFlags(MI->getFlags());

  DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);

  MBB.erase_instr(MI);
  ++Num2Addrs;
  return true;
}

bool
Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
                                 const ReduceEntry &Entry,
                                 bool LiveCPSR, bool IsSelfLoop) {
  if (ReduceLimit != -1 && ((int)NumNarrows >= ReduceLimit))
    return false;

  if (!OptimizeSize && Entry.AvoidMovs && STI->avoidMOVsShifterOperand())
    // Don't issue movs with shifter operand for some CPUs unless we
    // are optimizing for size.
    return false;

  unsigned Limit = ~0U;
  if (Entry.Imm1Limit)
    Limit = (1 << Entry.Imm1Limit) - 1;

  const MCInstrDesc &MCID = MI->getDesc();
  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate())
      continue;
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg()) {
      unsigned Reg = MO.getReg();
      if (!Reg || Reg == ARM::CPSR)
        continue;
      if (Entry.LowRegs1 && !isARMLowRegister(Reg))
        return false;
    } else if (MO.isImm() &&
               !MCID.OpInfo[i].isPredicate()) {
      if (((unsigned)MO.getImm()) > Limit)
        return false;
    }
  }

  // Check if it's possible / necessary to transfer the predicate.
  const MCInstrDesc &NewMCID = TII->get(Entry.NarrowOpc1);
  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(*MI, PredReg);
  bool SkipPred = false;
  if (Pred != ARMCC::AL) {
    if (!NewMCID.isPredicable())
      // Can't transfer predicate, fail.
      return false;
  } else {
    SkipPred = !NewMCID.isPredicable();
  }

  bool HasCC = false;
  bool CCDead = false;
  if (MCID.hasOptionalDef()) {
    unsigned NumOps = MCID.getNumOperands();
    HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
    if (HasCC && MI->getOperand(NumOps-1).isDead())
      CCDead = true;
  }
  if (!VerifyPredAndCC(MI, Entry, false, Pred, LiveCPSR, HasCC, CCDead))
    return false;

  // Avoid adding a false dependency on partial flag update by some 16-bit
  // instructions which have the 's' bit set.
  if (Entry.PartFlag && NewMCID.hasOptionalDef() && HasCC &&
      canAddPseudoFlagDep(MI, IsSelfLoop))
    return false;

  // Add the 16-bit instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID);
  MIB.addOperand(MI->getOperand(0));
  if (NewMCID.hasOptionalDef()) {
    if (HasCC)
      AddDefaultT1CC(MIB, CCDead);
    else
      AddNoT1CC(MIB);
  }

  // Transfer the rest of operands.
  unsigned NumOps = MCID.getNumOperands();
  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    if (i < NumOps && MCID.OpInfo[i].isOptionalDef())
      continue;
    if ((MCID.getOpcode() == ARM::t2RSBSri ||
         MCID.getOpcode() == ARM::t2RSBri ||
         MCID.getOpcode() == ARM::t2SXTB ||
         MCID.getOpcode() == ARM::t2SXTH ||
         MCID.getOpcode() == ARM::t2UXTB ||
         MCID.getOpcode() == ARM::t2UXTH) && i == 2)
      // Skip the zero immediate operand, it's now implicit.
      continue;
    bool isPred = (i < NumOps && MCID.OpInfo[i].isPredicate());
    if (SkipPred && isPred)
      continue;
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isImplicit() && MO.getReg() == ARM::CPSR)
      // Skip implicit def of CPSR. Either it's modeled as an optional
      // def now or it's already an implicit def on the new instruction.
      continue;
    MIB.addOperand(MO);
  }
  if (!MCID.isPredicable() && NewMCID.isPredicable())
    AddDefaultPred(MIB);

  // Transfer MI flags.
  MIB.setMIFlags(MI->getFlags());

  DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);

  MBB.erase_instr(MI);
  ++NumNarrows;
  return true;
}

static bool UpdateCPSRDef(MachineInstr &MI, bool LiveCPSR, bool &DefCPSR) {
  bool HasDef = false;
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg() || MO.isUndef() || MO.isUse())
      continue;
    if (MO.getReg() != ARM::CPSR)
      continue;

    DefCPSR = true;
    if (!MO.isDead())
      HasDef = true;
  }

  return HasDef || LiveCPSR;
}

static bool UpdateCPSRUse(MachineInstr &MI, bool LiveCPSR) {
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg() || MO.isUndef() || MO.isDef())
      continue;
    if (MO.getReg() != ARM::CPSR)
      continue;
    assert(LiveCPSR && "CPSR liveness tracking is wrong!");
    if (MO.isKill()) {
      LiveCPSR = false;
      break;
    }
  }

  return LiveCPSR;
}

bool Thumb2SizeReduce::ReduceMI(MachineBasicBlock &MBB, MachineInstr *MI,
                                bool LiveCPSR, bool IsSelfLoop) {
  unsigned Opcode = MI->getOpcode();
  DenseMap<unsigned, unsigned>::iterator OPI = ReduceOpcodeMap.find(Opcode);
  if (OPI == ReduceOpcodeMap.end())
    return false;
  const ReduceEntry &Entry = ReduceTable[OPI->second];

  // Don't attempt normal reductions on "special" cases for now.
  if (Entry.Special)
    return ReduceSpecial(MBB, MI, Entry, LiveCPSR, IsSelfLoop);

  // Try to transform to a 16-bit two-address instruction.
  if (Entry.NarrowOpc2 &&
      ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
    return true;

  // Try to transform to a 16-bit non-two-address instruction.
  if (Entry.NarrowOpc1 &&
      ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
    return true;

  return false;
}

bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
  bool Modified = false;

  // Yes, CPSR could be livein.
  bool LiveCPSR = MBB.isLiveIn(ARM::CPSR);
  MachineInstr *BundleMI = nullptr;

  CPSRDef = nullptr;
  HighLatencyCPSR = false;

  // Check predecessors for the latest CPSRDef.
  for (auto *Pred : MBB.predecessors()) {
    const MBBInfo &PInfo = BlockInfo[Pred->getNumber()];
    if (!PInfo.Visited) {
      // Since blocks are visited in RPO, this must be a back-edge.
      continue;
    }
    if (PInfo.HighLatencyCPSR) {
      HighLatencyCPSR = true;
      break;
    }
  }

  // If this BB loops back to itself, conservatively avoid narrowing the
  // first instruction that does partial flag update.
  bool IsSelfLoop = MBB.isSuccessor(&MBB);
  MachineBasicBlock::instr_iterator MII = MBB.instr_begin(),E = MBB.instr_end();
  MachineBasicBlock::instr_iterator NextMII;
  for (; MII != E; MII = NextMII) {
    NextMII = std::next(MII);

    MachineInstr *MI = &*MII;
    if (MI->isBundle()) {
      BundleMI = MI;
      continue;
    }
    if (MI->isDebugValue())
      continue;

    LiveCPSR = UpdateCPSRUse(*MI, LiveCPSR);

    // Does NextMII belong to the same bundle as MI?
    bool NextInSameBundle = NextMII != E && NextMII->isBundledWithPred();

    if (ReduceMI(MBB, MI, LiveCPSR, IsSelfLoop)) {
      Modified = true;
      MachineBasicBlock::instr_iterator I = std::prev(NextMII);
      MI = &*I;
      // Removing and reinserting the first instruction in a bundle will break
      // up the bundle. Fix the bundling if it was broken.
      if (NextInSameBundle && !NextMII->isBundledWithPred())
        NextMII->bundleWithPred();
    }

    if (BundleMI && !NextInSameBundle && MI->isInsideBundle()) {
      // FIXME: Since post-ra scheduler operates on bundles, the CPSR kill
      // marker is only on the BUNDLE instruction. Process the BUNDLE
      // instruction as we finish with the bundled instruction to work around
      // the inconsistency.
      if (BundleMI->killsRegister(ARM::CPSR))
        LiveCPSR = false;
      MachineOperand *MO = BundleMI->findRegisterDefOperand(ARM::CPSR);
      if (MO && !MO->isDead())
        LiveCPSR = true;
      MO = BundleMI->findRegisterUseOperand(ARM::CPSR);
      if (MO && !MO->isKill())
        LiveCPSR = true;
    }

    bool DefCPSR = false;
    LiveCPSR = UpdateCPSRDef(*MI, LiveCPSR, DefCPSR);
    if (MI->isCall()) {
      // Calls don't really set CPSR.
      CPSRDef = nullptr;
      HighLatencyCPSR = false;
      IsSelfLoop = false;
    } else if (DefCPSR) {
      // This is the last CPSR defining instruction.
      CPSRDef = MI;
      HighLatencyCPSR = isHighLatencyCPSR(CPSRDef);
      IsSelfLoop = false;
    }
  }

  MBBInfo &Info = BlockInfo[MBB.getNumber()];
  Info.HighLatencyCPSR = HighLatencyCPSR;
  Info.Visited = true;
  return Modified;
}

bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) {
  if (PredicateFtor && !PredicateFtor(*MF.getFunction()))
    return false;

  STI = &static_cast<const ARMSubtarget &>(MF.getSubtarget());
  if (STI->isThumb1Only() || STI->prefers32BitThumb())
    return false;

  TII = static_cast<const Thumb2InstrInfo *>(STI->getInstrInfo());

  // Optimizing / minimizing size? Minimizing size implies optimizing for size.
  OptimizeSize = MF.getFunction()->optForSize();
  MinimizeSize = MF.getFunction()->optForMinSize();

  BlockInfo.clear();
  BlockInfo.resize(MF.getNumBlockIDs());

  // Visit blocks in reverse post-order so LastCPSRDef is known for all
  // predecessors.
  ReversePostOrderTraversal<MachineFunction*> RPOT(&MF);
  bool Modified = false;
  for (ReversePostOrderTraversal<MachineFunction*>::rpo_iterator
       I = RPOT.begin(), E = RPOT.end(); I != E; ++I)
    Modified |= ReduceMBB(**I);
  return Modified;
}

/// createThumb2SizeReductionPass - Returns an instance of the Thumb2 size
/// reduction pass.
FunctionPass *llvm::createThumb2SizeReductionPass(
    std::function<bool(const Function &)> Ftor) {
  return new Thumb2SizeReduce(std::move(Ftor));
}