//===- Thumb2InstrInfo.cpp - Thumb-2 Instruction Information --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Thumb-2 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "Thumb2InstrInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>

using namespace llvm;

// Hidden debugging knob: fall back to the old Thumb-2 if-conversion
// heuristics. Defaults to off; readers of this flag live elsewhere in the
// ARM backend.
static cl::opt<bool>
OldT2IfCvt("old-thumb2-ifcvt", cl::Hidden,
           cl::desc("Use old-style Thumb2 if-conversion heuristics"),
           cl::init(false));

// All shared state lives in ARMBaseInstrInfo; the Thumb-2 subclass only
// overrides behavior, so the constructor just forwards the subtarget.
Thumb2InstrInfo::Thumb2InstrInfo(const ARMSubtarget &STI)
    : ARMBaseInstrInfo(STI) {}

/// Return the noop instruction to use for a noop.
void Thumb2InstrInfo::getNoop(MCInst &NopInst) const {
  // Thumb-2 NOP is encoded as the hint instruction with imm 0, followed by
  // the standard two-operand predicate (condition code + predicate register).
  NopInst.setOpcode(ARM::tHINT);
  NopInst.addOperand(MCOperand::createImm(0));
  NopInst.addOperand(MCOperand::createImm(ARMCC::AL));
  NopInst.addOperand(MCOperand::createReg(0));
}

/// No unindexed form is provided for any Thumb-2 opcode yet; returning 0
/// tells callers that no conversion is available.
unsigned Thumb2InstrInfo::getUnindexedOpcode(unsigned Opc) const {
  // FIXME
  return 0;
}

/// Replace the conditional tail (everything from \p Tail to the end of its
/// block) with an unconditional branch to \p NewDest, then repair any IT
/// block that covered the removed instructions.
void
Thumb2InstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  // No IT blocks yet (pre-ITBlockPass), or the tail is a branch (which is
  // never inside an IT block here): the generic implementation suffices.
  if (!AFI->hasITBlocks() || Tail->isBranch()) {
    TargetInstrInfo::ReplaceTailWithBranchTo(Tail, NewDest);
    return;
  }

  // If the first instruction of Tail is predicated, we may have to update
  // the IT instruction.
  unsigned PredReg = 0;
  ARMCC::CondCodes CC = getInstrPredicate(*Tail, PredReg);
  MachineBasicBlock::iterator MBBI = Tail;
  if (CC != ARMCC::AL)
    // Expecting at least the t2IT instruction before it.
    --MBBI;

  // Actually replace the tail.
  TargetInstrInfo::ReplaceTailWithBranchTo(Tail, NewDest);

  // Fix up IT.
  if (CC != ARMCC::AL) {
    // Scan backwards for the t2IT that predicated the removed tail.
    MachineBasicBlock::iterator E = MBB->begin();
    unsigned Count = 4; // At most 4 instructions in an IT block.
    while (Count && MBBI != E) {
      // Debug values are not counted against the 4-instruction IT limit.
      if (MBBI->isDebugValue()) {
        --MBBI;
        continue;
      }
      if (MBBI->getOpcode() == ARM::t2IT) {
        unsigned Mask = MBBI->getOperand(1).getImm();
        if (Count == 4)
          // The replaced tail was the only instruction in the IT block;
          // drop the now-empty IT entirely.
          MBBI->eraseFromParent();
        else {
          // Truncate the IT mask so the block ends just before the
          // removed tail: keep the high (earlier-slot) bits, set the new
          // terminating bit.
          unsigned MaskOn = 1 << Count;
          unsigned MaskOff = ~(MaskOn - 1);
          MBBI->getOperand(1).setImm((Mask & MaskOff) | MaskOn);
        }
        return;
      }
      --MBBI;
      --Count;
    }

    // Ctrl flow can reach here if branch folding is run before IT block
    // formation pass.
  }
}

/// Splitting is only legal at an unpredicated instruction: an MBB must not
/// be cut in the middle of an IT block.
bool
Thumb2InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI) const {
  // Skip debug values; if only debug values remain, refuse the split.
  while (MBBI->isDebugValue()) {
    ++MBBI;
    if (MBBI == MBB.end())
      return false;
  }

  unsigned PredReg = 0;
  return getITInstrPredicate(*MBBI, PredReg) == ARMCC::AL;
}

void Thumb2InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I,
                                  const DebugLoc &DL, unsigned DestReg,
                                  unsigned SrcReg, bool KillSrc) const {
  // Handle SPR, DPR, and QPR copies.
  if (!ARM::GPRRegClass.contains(DestReg, SrcReg))
    return ARMBaseInstrInfo::copyPhysReg(MBB, I, DL, DestReg, SrcReg, KillSrc);

  // GPR-to-GPR: the 16-bit Thumb MOV handles all GPRs (including SP/PC).
  BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc))
      .add(predOps(ARMCC::AL));
}

/// Spill \p SrcReg to frame index \p FI. GPR classes use t2STRi12; GPR
/// pairs use t2STRDi8; everything else defers to the ARM base class.
void Thumb2InstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC,
                    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();

  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  // Memory operand describing the spill slot (size/alignment from the
  // frame object) so later passes can reason about the access.
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
      MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));

  if (RC == &ARM::GPRRegClass || RC == &ARM::tGPRRegClass ||
      RC == &ARM::tcGPRRegClass || RC == &ARM::rGPRRegClass ||
      RC == &ARM::GPRnopcRegClass) {
    BuildMI(MBB, I, DL, get(ARM::t2STRi12))
        .addReg(SrcReg, getKillRegState(isKill))
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO)
        .add(predOps(ARMCC::AL));
    return;
  }

  if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
    // Thumb2 STRD expects its dest-registers to be in rGPR. Not a problem for
    // gsub_0, but needs an extra constraint for gsub_1 (which could be sp
    // otherwise).
    if (TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      MachineRegisterInfo *MRI = &MF.getRegInfo();
      MRI->constrainRegClass(SrcReg, &ARM::GPRPair_with_gsub_1_in_rGPRRegClass);
    }

    // Store the two halves of the pair with a single STRD.
    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2STRDi8));
    AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
    AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
    MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));
    return;
  }

  ARMBaseInstrInfo::storeRegToStackSlot(MBB, I, SrcReg, isKill, FI, RC, TRI);
}

/// Reload \p DestReg from frame index \p FI. Mirrors storeRegToStackSlot:
/// GPR classes use t2LDRi12, GPR pairs use t2LDRDi8, otherwise defer to the
/// ARM base class.
void Thumb2InstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     unsigned DestReg, int FI,
                     const TargetRegisterClass *RC,
                     const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
      MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (RC == &ARM::GPRRegClass || RC == &ARM::tGPRRegClass ||
      RC == &ARM::tcGPRRegClass || RC == &ARM::rGPRRegClass ||
      RC == &ARM::GPRnopcRegClass) {
    BuildMI(MBB, I, DL, get(ARM::t2LDRi12), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO)
        .add(predOps(ARMCC::AL));
    return;
  }

  if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
    // Thumb2 LDRD expects its dest-registers to be in rGPR. Not a problem for
    // gsub_0, but needs an extra constraint for gsub_1 (which could be sp
    // otherwise).
    if (TargetRegisterInfo::isVirtualRegister(DestReg)) {
      MachineRegisterInfo *MRI = &MF.getRegInfo();
      MRI->constrainRegClass(DestReg,
                             &ARM::GPRPair_with_gsub_1_in_rGPRRegClass);
    }

    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2LDRDi8));
    AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
    AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
    MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));

    // For a physical pair, also mark the super-register as defined so the
    // two sub-register defs are seen as covering it.
    if (TargetRegisterInfo::isPhysicalRegister(DestReg))
      MIB.addReg(DestReg, RegState::ImplicitDefine);
    return;
  }

  ARMBaseInstrInfo::loadRegFromStackSlot(MBB, I, DestReg, FI, RC, TRI);
}

/// Expand the stack-guard load pseudo using the PIC or static Thumb-2
/// address-materialization opcode, depending on the relocation model.
void Thumb2InstrInfo::expandLoadStackGuard(
    MachineBasicBlock::iterator MI) const {
  MachineFunction &MF = *MI->getParent()->getParent();
  if (MF.getTarget().isPositionIndependent())
    expandLoadStackGuardBase(MI, ARM::t2MOV_ga_pcrel, ARM::t2LDRi12);
  else
    expandLoadStackGuardBase(MI, ARM::t2MOVi32imm, ARM::t2LDRi12);
}

/// Emit instructions computing DestReg = BaseReg + NumBytes using Thumb-2
/// encodings, splitting the constant across several ADD/SUBs (or a
/// movw/movt + register add) when it does not fit a single immediate.
void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator &MBBI,
                                  const DebugLoc &dl, unsigned DestReg,
                                  unsigned BaseReg, int NumBytes,
                                  ARMCC::CondCodes Pred, unsigned PredReg,
                                  const ARMBaseInstrInfo &TII,
                                  unsigned MIFlags) {
  // Offset of zero between distinct registers is just a register move.
  if (NumBytes == 0 && DestReg != BaseReg) {
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
      .addReg(BaseReg, RegState::Kill)
      .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
    return;
  }

  bool isSub = NumBytes < 0;
  if (isSub) NumBytes = -NumBytes;

  // If profitable, use a movw or movt to materialize the offset.
  // FIXME: Use the scavenger to grab a scratch register.
  if (DestReg != ARM::SP && DestReg != BaseReg &&
      NumBytes >= 4096 &&
      ARM_AM::getT2SOImmVal(NumBytes) == -1) {
    bool Fits = false;
    if (NumBytes < 65536) {
      // Use a movw to materialize the 16-bit constant.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi16), DestReg)
        .addImm(NumBytes)
        .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
      Fits = true;
    } else if ((NumBytes & 0xffff) == 0) {
      // Use a movt to materialize the 32-bit constant.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVTi16), DestReg)
        .addReg(DestReg)
        .addImm(NumBytes >> 16)
        .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
      Fits = true;
    }

    if (Fits) {
      if (isSub) {
        BuildMI(MBB, MBBI, dl, TII.get(ARM::t2SUBrr), DestReg)
            .addReg(BaseReg)
            .addReg(DestReg, RegState::Kill)
            .add(predOps(Pred, PredReg))
            .add(condCodeOp())
            .setMIFlags(MIFlags);
      } else {
        // Here we know that DestReg is not SP but we do not
        // know anything about BaseReg. t2ADDrr is an invalid
        // instruction is SP is used as the second argument, but
        // is fine if SP is the first argument. To be sure we
        // do not generate invalid encoding, put BaseReg first.
        BuildMI(MBB, MBBI, dl, TII.get(ARM::t2ADDrr), DestReg)
            .addReg(BaseReg)
            .addReg(DestReg, RegState::Kill)
            .add(predOps(Pred, PredReg))
            .add(condCodeOp())
            .setMIFlags(MIFlags);
      }
      return;
    }
  }

  // Peel off as much of the constant as each iteration can encode until
  // nothing remains.
  while (NumBytes) {
    unsigned ThisVal = NumBytes;
    unsigned Opc = 0;
    if (DestReg == ARM::SP && BaseReg != ARM::SP) {
      // mov sp, rn. Note t2MOVr cannot be used.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
          .addReg(BaseReg)
          .setMIFlags(MIFlags)
          .add(predOps(ARMCC::AL));
      BaseReg = ARM::SP;
      continue;
    }

    bool HasCCOut = true;
    if (BaseReg == ARM::SP) {
      // sub sp, sp, #imm7
      if (DestReg == ARM::SP && (ThisVal < ((1 << 7)-1) * 4)) {
        assert((ThisVal & 3) == 0 && "Stack update is not multiple of 4?");
        Opc = isSub ? ARM::tSUBspi : ARM::tADDspi;
        BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
            .addReg(BaseReg)
            .addImm(ThisVal / 4)
            .setMIFlags(MIFlags)
            .add(predOps(ARMCC::AL));
        NumBytes = 0;
        continue;
      }

      // sub rd, sp, so_imm
      Opc = isSub ? ARM::t2SUBri : ARM::t2ADDri;
      if (ARM_AM::getT2SOImmVal(NumBytes) != -1) {
        NumBytes = 0;
      } else {
        // Extract the topmost 8 contiguous bits so this chunk is a valid
        // T2 modified immediate; the remainder is handled next iteration.
        // FIXME: Move this to ARMAddressingModes.h?
        unsigned RotAmt = countLeadingZeros(ThisVal);
        ThisVal = ThisVal & ARM_AM::rotr32(0xff000000U, RotAmt);
        NumBytes &= ~ThisVal;
        assert(ARM_AM::getT2SOImmVal(ThisVal) != -1 &&
               "Bit extraction didn't work?");
      }
    } else {
      assert(DestReg != ARM::SP && BaseReg != ARM::SP);
      Opc = isSub ? ARM::t2SUBri : ARM::t2ADDri;
      if (ARM_AM::getT2SOImmVal(NumBytes) != -1) {
        NumBytes = 0;
      } else if (ThisVal < 4096) {
        // Fits the 12-bit immediate form, which has no cc_out operand.
        Opc = isSub ? ARM::t2SUBri12 : ARM::t2ADDri12;
        HasCCOut = false;
        NumBytes = 0;
      } else {
        // FIXME: Move this to ARMAddressingModes.h?
        unsigned RotAmt = countLeadingZeros(ThisVal);
        ThisVal = ThisVal & ARM_AM::rotr32(0xff000000U, RotAmt);
        NumBytes &= ~ThisVal;
        assert(ARM_AM::getT2SOImmVal(ThisVal) != -1 &&
               "Bit extraction didn't work?");
      }
    }

    // Build the new ADD / SUB.
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
                                  .addReg(BaseReg, RegState::Kill)
                                  .addImm(ThisVal)
                                  .add(predOps(ARMCC::AL))
                                  .setMIFlags(MIFlags);
    if (HasCCOut)
      MIB.add(condCodeOp());

    // Subsequent chunks accumulate on top of what we just computed.
    BaseReg = DestReg;
  }
}

/// Map an i12 (positive-offset) load/store/PLD opcode to its i8
/// (negative-offset) counterpart; i8 opcodes map to themselves; anything
/// else returns 0.
static unsigned
negativeOffsetOpcode(unsigned opcode)
{
  switch (opcode) {
  case ARM::t2LDRi12:   return ARM::t2LDRi8;
  case ARM::t2LDRHi12:  return ARM::t2LDRHi8;
  case ARM::t2LDRBi12:  return ARM::t2LDRBi8;
  case ARM::t2LDRSHi12: return ARM::t2LDRSHi8;
  case ARM::t2LDRSBi12: return ARM::t2LDRSBi8;
  case ARM::t2STRi12:   return ARM::t2STRi8;
  case ARM::t2STRBi12:  return ARM::t2STRBi8;
  case ARM::t2STRHi12:  return ARM::t2STRHi8;
  case ARM::t2PLDi12:   return ARM::t2PLDi8;

  case ARM::t2LDRi8:
  case ARM::t2LDRHi8:
  case ARM::t2LDRBi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRSBi8:
  case ARM::t2STRi8:
  case ARM::t2STRBi8:
  case ARM::t2STRHi8:
  case ARM::t2PLDi8:
    return opcode;

  default:
    break;
  }

  return 0;
}

/// Map an i8 (negative-offset) load/store/PLD opcode to its i12
/// (positive-offset) counterpart; i12 opcodes map to themselves; anything
/// else returns 0.
static unsigned
positiveOffsetOpcode(unsigned opcode)
{
  switch (opcode) {
  case ARM::t2LDRi8:   return ARM::t2LDRi12;
  case ARM::t2LDRHi8:  return ARM::t2LDRHi12;
  case ARM::t2LDRBi8:  return ARM::t2LDRBi12;
  case ARM::t2LDRSHi8: return ARM::t2LDRSHi12;
  case ARM::t2LDRSBi8: return ARM::t2LDRSBi12;
  case ARM::t2STRi8:   return ARM::t2STRi12;
  case ARM::t2STRBi8:  return ARM::t2STRBi12;
  case ARM::t2STRHi8:  return ARM::t2STRHi12;
  case ARM::t2PLDi8:   return ARM::t2PLDi12;

  case ARM::t2LDRi12:
  case ARM::t2LDRHi12:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:
  case ARM::t2LDRSBi12:
  case ARM::t2STRi12:
  case ARM::t2STRBi12:
  case ARM::t2STRHi12:
  case ARM::t2PLDi12:
    return opcode;

  default:
    break;
  }

  return 0;
}

/// Map a register-offset ("s") load/store/PLD opcode to its i12 immediate
/// form; immediate (i8/i12) opcodes map to themselves; anything else
/// returns 0.
static unsigned
immediateOffsetOpcode(unsigned opcode)
{
  switch (opcode) {
  case ARM::t2LDRs:   return ARM::t2LDRi12;
  case ARM::t2LDRHs:  return ARM::t2LDRHi12;
  case ARM::t2LDRBs:  return ARM::t2LDRBi12;
  case ARM::t2LDRSHs: return ARM::t2LDRSHi12;
  case ARM::t2LDRSBs: return ARM::t2LDRSBi12;
  case ARM::t2STRs:   return ARM::t2STRi12;
  case ARM::t2STRBs:  return ARM::t2STRBi12;
  case ARM::t2STRHs:  return ARM::t2STRHi12;
  case ARM::t2PLDs:   return ARM::t2PLDi12;

  case ARM::t2LDRi12:
  case ARM::t2LDRHi12:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:
  case ARM::t2LDRSBi12:
  case ARM::t2STRi12:
  case ARM::t2STRBi12:
  case ARM::t2STRHi12:
  case ARM::t2PLDi12:
  case ARM::t2LDRi8:
  case ARM::t2LDRHi8:
  case ARM::t2LDRBi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRSBi8:
  case ARM::t2STRi8:
  case ARM::t2STRBi8:
  case ARM::t2STRHi8:
  case ARM::t2PLDi8:
    return opcode;

  default:
    break;
  }

  return 0;
}

/// Rewrite MI's frame-index operand at \p FrameRegIdx to use \p FrameReg
/// plus \p Offset, folding as much of the offset into the instruction's
/// immediate as its addressing mode allows. On return, \p Offset holds the
/// residue that could not be folded (signed). Returns true iff the whole
/// offset was folded.
bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                               unsigned FrameReg, int &Offset,
                               const ARMBaseInstrInfo &TII) {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  bool isSub = false;

  // Memory operands in inline assembly always use AddrModeT2_i12.
  if (Opcode == ARM::INLINEASM)
    AddrMode = ARMII::AddrModeT2_i12; // FIXME. mode for thumb2?

  if (Opcode == ARM::t2ADDri || Opcode == ARM::t2ADDri12) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();

    unsigned PredReg;
    if (Offset == 0 && getInstrPredicate(MI, PredReg) == ARMCC::AL) {
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::tMOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      // Remove offset and remaining explicit predicate operands.
      do MI.RemoveOperand(FrameRegIdx+1);
      while (MI.getNumOperands() > FrameRegIdx+1);
      MachineInstrBuilder MIB(*MI.getParent()->getParent(), &MI);
      MIB.add(predOps(ARMCC::AL));
      return true;
    }

    // t2ADDri12 has no cc_out operand; t2ADDri does.
    bool HasCCOut = Opcode != ARM::t2ADDri12;

    if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
      MI.setDesc(TII.get(ARM::t2SUBri));
    } else {
      MI.setDesc(TII.get(ARM::t2ADDri));
    }

    // Common case: small offset, fits into instruction.
    if (ARM_AM::getT2SOImmVal(Offset) != -1) {
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      // Add cc_out operand if the original instruction did not have one.
      if (!HasCCOut)
        MI.addOperand(MachineOperand::CreateReg(0, false));
      Offset = 0;
      return true;
    }
    // Another common case: imm12. Only usable if no live cc_out (the ri12
    // encodings cannot set flags).
    if (Offset < 4096 &&
        (!HasCCOut || MI.getOperand(MI.getNumOperands()-1).getReg() == 0)) {
      unsigned NewOpc = isSub ? ARM::t2SUBri12 : ARM::t2ADDri12;
      MI.setDesc(TII.get(NewOpc));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      // Remove the cc_out operand.
      if (HasCCOut)
        MI.RemoveOperand(MI.getNumOperands()-1);
      Offset = 0;
      return true;
    }

    // Otherwise, extract 8 adjacent bits from the immediate into this
    // t2ADDri/t2SUBri.
    unsigned RotAmt = countLeadingZeros<unsigned>(Offset);
    unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xff000000U, RotAmt);

    // We will handle these bits from offset, clear them.
    Offset &= ~ThisImmVal;

    assert(ARM_AM::getT2SOImmVal(ThisImmVal) != -1 &&
           "Bit extraction didn't work?");
    MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
    // Add cc_out operand if the original instruction did not have one.
    if (!HasCCOut)
      MI.addOperand(MachineOperand::CreateReg(0, false));
  } else {
    // AddrMode4 and AddrMode6 cannot handle any offset.
    if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
      return false;

    // AddrModeT2_so cannot handle any offset. If there is no offset
    // register then we change to an immediate version.
    unsigned NewOpc = Opcode;
    if (AddrMode == ARMII::AddrModeT2_so) {
      unsigned OffsetReg = MI.getOperand(FrameRegIdx+1).getReg();
      if (OffsetReg != 0) {
        MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
        return Offset == 0;
      }

      // Drop the (zero) offset register and re-materialize the operand as
      // an immediate 0 for the i12 form.
      MI.RemoveOperand(FrameRegIdx+1);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(0);
      NewOpc = immediateOffsetOpcode(Opcode);
      AddrMode = ARMII::AddrModeT2_i12;
    }

    unsigned NumBits = 0;
    unsigned Scale = 1;
    if (AddrMode == ARMII::AddrModeT2_i8 || AddrMode == ARMII::AddrModeT2_i12) {
      // i8 supports only negative, and i12 supports only positive, so
      // based on Offset sign convert Opcode to the appropriate
      // instruction
      Offset += MI.getOperand(FrameRegIdx+1).getImm();
      if (Offset < 0) {
        NewOpc = negativeOffsetOpcode(Opcode);
        NumBits = 8;
        isSub = true;
        Offset = -Offset;
      } else {
        NewOpc = positiveOffsetOpcode(Opcode);
        NumBits = 12;
      }
    } else if (AddrMode == ARMII::AddrMode5) {
      // VFP address mode.
      const MachineOperand &OffOp = MI.getOperand(FrameRegIdx+1);
      int InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
      if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 4;
      Offset += InstrOffs * 4;
      assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
      if (Offset < 0) {
        Offset = -Offset;
        isSub = true;
      }
    } else if (AddrMode == ARMII::AddrModeT2_i8s4) {
      Offset += MI.getOperand(FrameRegIdx + 1).getImm() * 4;
      NumBits = 10; // 8 bits scaled by 4
      // MCInst operand expects already scaled value.
      Scale = 1;
      assert((Offset & 3) == 0 && "Can't encode this offset!");
    } else {
      llvm_unreachable("Unsupported addressing mode!");
    }

    if (NewOpc != Opcode)
      MI.setDesc(TII.get(NewOpc));

    MachineOperand &ImmOp = MI.getOperand(FrameRegIdx+1);

    // Attempt to fold address computation
    // Common case: small offset, fits into instruction.
    int ImmedOffset = Offset / Scale;
    unsigned Mask = (1 << NumBits) - 1;
    if ((unsigned)Offset <= Mask * Scale) {
      // Replace the FrameIndex with fp/sp
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      if (isSub) {
        if (AddrMode == ARMII::AddrMode5)
          // AM5 encodes the sign as an extra bit above the offset field.
          // FIXME: Not consistent.
          ImmedOffset |= 1 << NumBits;
        else
          ImmedOffset = -ImmedOffset;
      }
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset = 0;
      return true;
    }

    // Otherwise, offset doesn't fit. Pull in what we can to simplify
    ImmedOffset = ImmedOffset & Mask;
    if (isSub) {
      if (AddrMode == ARMII::AddrMode5)
        // FIXME: Not consistent.
        ImmedOffset |= 1 << NumBits;
      else {
        ImmedOffset = -ImmedOffset;
        if (ImmedOffset == 0)
          // Change the opcode back if the encoded offset is zero.
          MI.setDesc(TII.get(positiveOffsetOpcode(NewOpc)));
      }
    }
    ImmOp.ChangeToImmediate(ImmedOffset);
    // The bits we folded into the instruction are no longer outstanding.
    Offset &= ~(Mask*Scale);
  }

  // Report the unfolded residue with its original sign.
  Offset = (isSub) ? -Offset : Offset;
  return Offset == 0;
}

/// getITInstrPredicate - Like getInstrPredicate, but treats conditional
/// branches (tBcc/t2Bcc) as unpredicated: they carry a condition operand
/// yet are not part of an IT block. Valid only in Thumb2 mode.
ARMCC::CondCodes llvm::getITInstrPredicate(const MachineInstr &MI,
                                           unsigned &PredReg) {
  unsigned Opc = MI.getOpcode();
  if (Opc == ARM::tBcc || Opc == ARM::t2Bcc)
    return ARMCC::AL;
  return getInstrPredicate(MI, PredReg);
}