//===- Thumb2InstrInfo.cpp - Thumb-2 Instruction Information --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Thumb-2 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "Thumb2InstrInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>

using namespace llvm;

static cl::opt<bool>
OldT2IfCvt("old-thumb2-ifcvt", cl::Hidden,
           cl::desc("Use old-style Thumb2 if-conversion heuristics"),
           cl::init(false));

Thumb2InstrInfo::Thumb2InstrInfo(const ARMSubtarget &STI)
    : ARMBaseInstrInfo(STI) {}

/// Return the noop instruction to use for a noop.
void Thumb2InstrInfo::getNoop(MCInst &NopInst) const {
  NopInst.setOpcode(ARM::tHINT);
  NopInst.addOperand(MCOperand::createImm(0));
  NopInst.addOperand(MCOperand::createImm(ARMCC::AL));
  NopInst.addOperand(MCOperand::createReg(0));
}

unsigned Thumb2InstrInfo::getUnindexedOpcode(unsigned Opc) const {
  // FIXME
  return 0;
}

void
Thumb2InstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  if (!AFI->hasITBlocks() || Tail->isBranch()) {
    TargetInstrInfo::ReplaceTailWithBranchTo(Tail, NewDest);
    return;
  }

  // If the first instruction of Tail is predicated, we may have to update
  // the IT instruction.
  unsigned PredReg = 0;
  ARMCC::CondCodes CC = getInstrPredicate(*Tail, PredReg);
  MachineBasicBlock::iterator MBBI = Tail;
  if (CC != ARMCC::AL)
    // Expecting at least the t2IT instruction before it.
    --MBBI;

  // Actually replace the tail.
  TargetInstrInfo::ReplaceTailWithBranchTo(Tail, NewDest);

  // Fix up IT.
  if (CC != ARMCC::AL) {
    MachineBasicBlock::iterator E = MBB->begin();
    unsigned Count = 4; // At most 4 instructions in an IT block.
    while (Count && MBBI != E) {
      if (MBBI->isDebugInstr()) {
        --MBBI;
        continue;
      }
      if (MBBI->getOpcode() == ARM::t2IT) {
        unsigned Mask = MBBI->getOperand(1).getImm();
        if (Count == 4)
          MBBI->eraseFromParent();
        else {
          unsigned MaskOn = 1 << Count;
          unsigned MaskOff = ~(MaskOn - 1);
          MBBI->getOperand(1).setImm((Mask & MaskOff) | MaskOn);
        }
        return;
      }
      --MBBI;
      --Count;
    }

    // Control flow can reach here if branch folding is run before the IT block
    // formation pass.
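    // If so, no IT instruction exists yet for this block, so there is nothing
    // left to update here.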
  }
}

bool
Thumb2InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI) const {
  while (MBBI->isDebugInstr()) {
    ++MBBI;
    if (MBBI == MBB.end())
      return false;
  }

  unsigned PredReg = 0;
  return getITInstrPredicate(*MBBI, PredReg) == ARMCC::AL;
}

void Thumb2InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I,
                                  const DebugLoc &DL, unsigned DestReg,
                                  unsigned SrcReg, bool KillSrc) const {
  // Handle SPR, DPR, and QPR copies.
  if (!ARM::GPRRegClass.contains(DestReg, SrcReg))
    return ARMBaseInstrInfo::copyPhysReg(MBB, I, DL, DestReg, SrcReg, KillSrc);

  BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc))
      .add(predOps(ARMCC::AL));
}

void Thumb2InstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC,
                    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();

  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
      MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));

  if (RC == &ARM::GPRRegClass || RC == &ARM::tGPRRegClass ||
      RC == &ARM::tcGPRRegClass || RC == &ARM::rGPRRegClass ||
      RC == &ARM::GPRnopcRegClass) {
    BuildMI(MBB, I, DL, get(ARM::t2STRi12))
        .addReg(SrcReg, getKillRegState(isKill))
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO)
        .add(predOps(ARMCC::AL));
    return;
  }

  if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
    // Thumb2 STRD expects its source registers to be in rGPR. Not a problem
    // for gsub_0, but needs an extra constraint for gsub_1 (which could be sp
    // otherwise).
    if (TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      MachineRegisterInfo *MRI = &MF.getRegInfo();
      MRI->constrainRegClass(SrcReg, &ARM::GPRPair_with_gsub_1_in_rGPRRegClass);
    }

    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2STRDi8));
    AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
    AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
    MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));
    return;
  }

  ARMBaseInstrInfo::storeRegToStackSlot(MBB, I, SrcReg, isKill, FI, RC, TRI);
}

void Thumb2InstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     unsigned DestReg, int FI,
                     const TargetRegisterClass *RC,
                     const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
      MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (RC == &ARM::GPRRegClass || RC == &ARM::tGPRRegClass ||
      RC == &ARM::tcGPRRegClass || RC == &ARM::rGPRRegClass ||
      RC == &ARM::GPRnopcRegClass) {
    BuildMI(MBB, I, DL, get(ARM::t2LDRi12), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO)
        .add(predOps(ARMCC::AL));
    return;
  }

  if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
    // Thumb2 LDRD expects its dest-registers to be in rGPR. Not a problem for
    // gsub_0, but needs an extra constraint for gsub_1 (which could be sp
    // otherwise).
    if (TargetRegisterInfo::isVirtualRegister(DestReg)) {
      MachineRegisterInfo *MRI = &MF.getRegInfo();
      MRI->constrainRegClass(DestReg,
                             &ARM::GPRPair_with_gsub_1_in_rGPRRegClass);
    }

    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2LDRDi8));
    AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
    AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
    MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));

    if (TargetRegisterInfo::isPhysicalRegister(DestReg))
      MIB.addReg(DestReg, RegState::ImplicitDefine);
    return;
  }

  ARMBaseInstrInfo::loadRegFromStackSlot(MBB, I, DestReg, FI, RC, TRI);
}

void Thumb2InstrInfo::expandLoadStackGuard(
    MachineBasicBlock::iterator MI) const {
  MachineFunction &MF = *MI->getParent()->getParent();
  if (MF.getTarget().isPositionIndependent())
    expandLoadStackGuardBase(MI, ARM::t2MOV_ga_pcrel, ARM::t2LDRi12);
  else
    expandLoadStackGuardBase(MI, ARM::t2MOVi32imm, ARM::t2LDRi12);
}

void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator &MBBI,
                                  const DebugLoc &dl, unsigned DestReg,
                                  unsigned BaseReg, int NumBytes,
                                  ARMCC::CondCodes Pred, unsigned PredReg,
                                  const ARMBaseInstrInfo &TII,
                                  unsigned MIFlags) {
  if (NumBytes == 0 && DestReg != BaseReg) {
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
        .addReg(BaseReg, RegState::Kill)
        .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
    return;
  }

  bool isSub = NumBytes < 0;
  if (isSub) NumBytes = -NumBytes;

  // If profitable, use a movw or movt to materialize the offset.
  // FIXME: Use the scavenger to grab a scratch register.
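  // Only worthwhile when DestReg can serve as a scratch register (it is
  // neither SP nor the base register) and the offset fits neither an imm12
  // nor a Thumb-2 modified immediate: movw covers any 16-bit value, and movt
  // handles values whose low 16 bits are zero.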
  if (DestReg != ARM::SP && DestReg != BaseReg &&
      NumBytes >= 4096 &&
      ARM_AM::getT2SOImmVal(NumBytes) == -1) {
    bool Fits = false;
    if (NumBytes < 65536) {
      // Use a movw to materialize the 16-bit constant.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi16), DestReg)
          .addImm(NumBytes)
          .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
      Fits = true;
    } else if ((NumBytes & 0xffff) == 0) {
      // Use a movt to materialize the 32-bit constant.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVTi16), DestReg)
          .addReg(DestReg)
          .addImm(NumBytes >> 16)
          .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
      Fits = true;
    }

    if (Fits) {
      if (isSub) {
        BuildMI(MBB, MBBI, dl, TII.get(ARM::t2SUBrr), DestReg)
            .addReg(BaseReg)
            .addReg(DestReg, RegState::Kill)
            .add(predOps(Pred, PredReg))
            .add(condCodeOp())
            .setMIFlags(MIFlags);
      } else {
        // Here we know that DestReg is not SP but we do not
        // know anything about BaseReg. t2ADDrr is an invalid
        // instruction if SP is used as the second argument, but
        // is fine if SP is the first argument. To be sure we
        // do not generate an invalid encoding, put BaseReg first.
        BuildMI(MBB, MBBI, dl, TII.get(ARM::t2ADDrr), DestReg)
            .addReg(BaseReg)
            .addReg(DestReg, RegState::Kill)
            .add(predOps(Pred, PredReg))
            .add(condCodeOp())
            .setMIFlags(MIFlags);
      }
      return;
    }
  }

  while (NumBytes) {
    unsigned ThisVal = NumBytes;
    unsigned Opc = 0;
    if (DestReg == ARM::SP && BaseReg != ARM::SP) {
      // mov sp, rn. Note t2MOVr cannot be used.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
          .addReg(BaseReg)
          .setMIFlags(MIFlags)
          .add(predOps(ARMCC::AL));
      BaseReg = ARM::SP;
      continue;
    }

    bool HasCCOut = true;
    if (BaseReg == ARM::SP) {
      // sub sp, sp, #imm7
      if (DestReg == ARM::SP && (ThisVal < ((1 << 7)-1) * 4)) {
        assert((ThisVal & 3) == 0 && "Stack update is not multiple of 4?");
        Opc = isSub ? ARM::tSUBspi : ARM::tADDspi;
        BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
            .addReg(BaseReg)
            .addImm(ThisVal / 4)
            .setMIFlags(MIFlags)
            .add(predOps(ARMCC::AL));
        NumBytes = 0;
        continue;
      }

      // sub rd, sp, so_imm
      Opc = isSub ? ARM::t2SUBri : ARM::t2ADDri;
      if (ARM_AM::getT2SOImmVal(NumBytes) != -1) {
        NumBytes = 0;
      } else {
        // FIXME: Move this to ARMAddressingModes.h?
        unsigned RotAmt = countLeadingZeros(ThisVal);
        ThisVal = ThisVal & ARM_AM::rotr32(0xff000000U, RotAmt);
        NumBytes &= ~ThisVal;
        assert(ARM_AM::getT2SOImmVal(ThisVal) != -1 &&
               "Bit extraction didn't work?");
      }
    } else {
      assert(DestReg != ARM::SP && BaseReg != ARM::SP);
      Opc = isSub ? ARM::t2SUBri : ARM::t2ADDri;
      if (ARM_AM::getT2SOImmVal(NumBytes) != -1) {
        NumBytes = 0;
      } else if (ThisVal < 4096) {
        Opc = isSub ? ARM::t2SUBri12 : ARM::t2ADDri12;
        HasCCOut = false;
        NumBytes = 0;
      } else {
        // FIXME: Move this to ARMAddressingModes.h?
        unsigned RotAmt = countLeadingZeros(ThisVal);
        ThisVal = ThisVal & ARM_AM::rotr32(0xff000000U, RotAmt);
        NumBytes &= ~ThisVal;
        assert(ARM_AM::getT2SOImmVal(ThisVal) != -1 &&
               "Bit extraction didn't work?");
      }
    }

    // Build the new ADD / SUB.
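    // At this point ThisVal is encodable by Opc: a Thumb-2 modified immediate
    // for t2ADDri/t2SUBri, or a plain imm12 for the ri12 forms. Whatever bits
    // remain in NumBytes are handled by the next loop iteration.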
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
                                  .addReg(BaseReg, RegState::Kill)
                                  .addImm(ThisVal)
                                  .add(predOps(ARMCC::AL))
                                  .setMIFlags(MIFlags);
    if (HasCCOut)
      MIB.add(condCodeOp());

    BaseReg = DestReg;
  }
}

static unsigned
negativeOffsetOpcode(unsigned opcode)
{
  switch (opcode) {
  case ARM::t2LDRi12: return ARM::t2LDRi8;
  case ARM::t2LDRHi12: return ARM::t2LDRHi8;
  case ARM::t2LDRBi12: return ARM::t2LDRBi8;
  case ARM::t2LDRSHi12: return ARM::t2LDRSHi8;
  case ARM::t2LDRSBi12: return ARM::t2LDRSBi8;
  case ARM::t2STRi12: return ARM::t2STRi8;
  case ARM::t2STRBi12: return ARM::t2STRBi8;
  case ARM::t2STRHi12: return ARM::t2STRHi8;
  case ARM::t2PLDi12: return ARM::t2PLDi8;

  case ARM::t2LDRi8:
  case ARM::t2LDRHi8:
  case ARM::t2LDRBi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRSBi8:
  case ARM::t2STRi8:
  case ARM::t2STRBi8:
  case ARM::t2STRHi8:
  case ARM::t2PLDi8:
    return opcode;

  default:
    break;
  }

  return 0;
}

static unsigned
positiveOffsetOpcode(unsigned opcode)
{
  switch (opcode) {
  case ARM::t2LDRi8: return ARM::t2LDRi12;
  case ARM::t2LDRHi8: return ARM::t2LDRHi12;
  case ARM::t2LDRBi8: return ARM::t2LDRBi12;
  case ARM::t2LDRSHi8: return ARM::t2LDRSHi12;
  case ARM::t2LDRSBi8: return ARM::t2LDRSBi12;
  case ARM::t2STRi8: return ARM::t2STRi12;
  case ARM::t2STRBi8: return ARM::t2STRBi12;
  case ARM::t2STRHi8: return ARM::t2STRHi12;
  case ARM::t2PLDi8: return ARM::t2PLDi12;

  case ARM::t2LDRi12:
  case ARM::t2LDRHi12:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:
  case ARM::t2LDRSBi12:
  case ARM::t2STRi12:
  case ARM::t2STRBi12:
  case ARM::t2STRHi12:
  case ARM::t2PLDi12:
    return opcode;

  default:
    break;
  }

  return 0;
}

static unsigned
immediateOffsetOpcode(unsigned opcode)
{
  switch (opcode) {
  case ARM::t2LDRs: return ARM::t2LDRi12;
  case ARM::t2LDRHs: return ARM::t2LDRHi12;
  case ARM::t2LDRBs: return ARM::t2LDRBi12;
  case ARM::t2LDRSHs: return ARM::t2LDRSHi12;
  case ARM::t2LDRSBs: return ARM::t2LDRSBi12;
  case ARM::t2STRs: return ARM::t2STRi12;
  case ARM::t2STRBs: return ARM::t2STRBi12;
  case ARM::t2STRHs: return ARM::t2STRHi12;
  case ARM::t2PLDs: return ARM::t2PLDi12;

  case ARM::t2LDRi12:
  case ARM::t2LDRHi12:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:
  case ARM::t2LDRSBi12:
  case ARM::t2STRi12:
  case ARM::t2STRBi12:
  case ARM::t2STRHi12:
  case ARM::t2PLDi12:
  case ARM::t2LDRi8:
  case ARM::t2LDRHi8:
  case ARM::t2LDRBi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRSBi8:
  case ARM::t2STRi8:
  case ARM::t2STRBi8:
  case ARM::t2STRHi8:
  case ARM::t2PLDi8:
    return opcode;

  default:
    break;
  }

  return 0;
}

bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                               unsigned FrameReg, int &Offset,
                               const ARMBaseInstrInfo &TII) {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  bool isSub = false;

  // Memory operands in inline assembly always use AddrModeT2_i12.
  if (Opcode == ARM::INLINEASM)
    AddrMode = ARMII::AddrModeT2_i12; // FIXME. mode for thumb2?
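
  // Register-immediate adds (t2ADDri / t2ADDri12) fold the frame offset
  // directly into the add/sub immediate below; everything else is treated as
  // a memory access and rewritten according to its addressing mode.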
  if (Opcode == ARM::t2ADDri || Opcode == ARM::t2ADDri12) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();

    unsigned PredReg;
    if (Offset == 0 && getInstrPredicate(MI, PredReg) == ARMCC::AL &&
        !MI.definesRegister(ARM::CPSR)) {
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::tMOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      // Remove offset and remaining explicit predicate operands.
      do MI.RemoveOperand(FrameRegIdx+1);
      while (MI.getNumOperands() > FrameRegIdx+1);
      MachineInstrBuilder MIB(*MI.getParent()->getParent(), &MI);
      MIB.add(predOps(ARMCC::AL));
      return true;
    }

    bool HasCCOut = Opcode != ARM::t2ADDri12;

    if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
      MI.setDesc(TII.get(ARM::t2SUBri));
    } else {
      MI.setDesc(TII.get(ARM::t2ADDri));
    }

    // Common case: small offset, fits into instruction.
    if (ARM_AM::getT2SOImmVal(Offset) != -1) {
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      // Add cc_out operand if the original instruction did not have one.
      if (!HasCCOut)
        MI.addOperand(MachineOperand::CreateReg(0, false));
      Offset = 0;
      return true;
    }
    // Another common case: imm12.
    if (Offset < 4096 &&
        (!HasCCOut || MI.getOperand(MI.getNumOperands()-1).getReg() == 0)) {
      unsigned NewOpc = isSub ? ARM::t2SUBri12 : ARM::t2ADDri12;
      MI.setDesc(TII.get(NewOpc));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      // Remove the cc_out operand.
      if (HasCCOut)
        MI.RemoveOperand(MI.getNumOperands()-1);
      Offset = 0;
      return true;
    }

    // Otherwise, extract 8 adjacent bits from the immediate into this
    // t2ADDri/t2SUBri.
    unsigned RotAmt = countLeadingZeros<unsigned>(Offset);
    unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xff000000U, RotAmt);

    // We will handle these bits from offset, clear them.
    Offset &= ~ThisImmVal;

    assert(ARM_AM::getT2SOImmVal(ThisImmVal) != -1 &&
           "Bit extraction didn't work?");
    MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
    // Add cc_out operand if the original instruction did not have one.
    if (!HasCCOut)
      MI.addOperand(MachineOperand::CreateReg(0, false));
  } else {
    // AddrMode4 and AddrMode6 cannot handle any offset.
    if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
      return false;

    // AddrModeT2_so cannot handle any offset. If there is no offset
    // register then we change to an immediate version.
    unsigned NewOpc = Opcode;
    if (AddrMode == ARMII::AddrModeT2_so) {
      unsigned OffsetReg = MI.getOperand(FrameRegIdx+1).getReg();
      if (OffsetReg != 0) {
        MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
        return Offset == 0;
      }

      MI.RemoveOperand(FrameRegIdx+1);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(0);
      NewOpc = immediateOffsetOpcode(Opcode);
      AddrMode = ARMII::AddrModeT2_i12;
    }

    unsigned NumBits = 0;
    unsigned Scale = 1;
    if (AddrMode == ARMII::AddrModeT2_i8 || AddrMode == ARMII::AddrModeT2_i12) {
      // i8 supports only negative, and i12 supports only positive, so
      // based on the sign of Offset convert the opcode to the appropriate
      // instruction.
      Offset += MI.getOperand(FrameRegIdx+1).getImm();
      if (Offset < 0) {
        NewOpc = negativeOffsetOpcode(Opcode);
        NumBits = 8;
        isSub = true;
        Offset = -Offset;
      } else {
        NewOpc = positiveOffsetOpcode(Opcode);
        NumBits = 12;
      }
    } else if (AddrMode == ARMII::AddrMode5) {
      // VFP address mode.
      const MachineOperand &OffOp = MI.getOperand(FrameRegIdx+1);
      int InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
      if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 4;
      Offset += InstrOffs * 4;
      assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
      if (Offset < 0) {
        Offset = -Offset;
        isSub = true;
      }
    } else if (AddrMode == ARMII::AddrMode5FP16) {
      // VFP address mode.
      const MachineOperand &OffOp = MI.getOperand(FrameRegIdx+1);
      int InstrOffs = ARM_AM::getAM5FP16Offset(OffOp.getImm());
      if (ARM_AM::getAM5FP16Op(OffOp.getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 2;
      Offset += InstrOffs * 2;
      assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
      if (Offset < 0) {
        Offset = -Offset;
        isSub = true;
      }
    } else if (AddrMode == ARMII::AddrModeT2_i8s4) {
      Offset += MI.getOperand(FrameRegIdx + 1).getImm() * 4;
      NumBits = 10; // 8 bits scaled by 4
      // MCInst operand expects already scaled value.
      Scale = 1;
      assert((Offset & 3) == 0 && "Can't encode this offset!");
    } else {
      llvm_unreachable("Unsupported addressing mode!");
    }

    if (NewOpc != Opcode)
      MI.setDesc(TII.get(NewOpc));

    MachineOperand &ImmOp = MI.getOperand(FrameRegIdx+1);

    // Attempt to fold the address computation.
    // Common case: small offset, fits into instruction.
    int ImmedOffset = Offset / Scale;
    unsigned Mask = (1 << NumBits) - 1;
    if ((unsigned)Offset <= Mask * Scale) {
      // Replace the FrameIndex with fp/sp.
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      if (isSub) {
        if (AddrMode == ARMII::AddrMode5)
          // FIXME: Not consistent.
          ImmedOffset |= 1 << NumBits;
        else
          ImmedOffset = -ImmedOffset;
      }
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset = 0;
      return true;
    }

    // Otherwise, the offset doesn't fit. Pull in what we can to simplify.
    ImmedOffset = ImmedOffset & Mask;
    if (isSub) {
      if (AddrMode == ARMII::AddrMode5)
        // FIXME: Not consistent.
        ImmedOffset |= 1 << NumBits;
      else {
        ImmedOffset = -ImmedOffset;
        if (ImmedOffset == 0)
          // Change the opcode back if the encoded offset is zero.
          MI.setDesc(TII.get(positiveOffsetOpcode(NewOpc)));
      }
    }
    ImmOp.ChangeToImmediate(ImmedOffset);
    Offset &= ~(Mask*Scale);
  }

  Offset = (isSub) ? -Offset : Offset;
  return Offset == 0;
}

ARMCC::CondCodes llvm::getITInstrPredicate(const MachineInstr &MI,
                                           unsigned &PredReg) {
  unsigned Opc = MI.getOpcode();
  if (Opc == ARM::tBcc || Opc == ARM::t2Bcc)
    return ARMCC::AL;
  return getInstrPredicate(MI, PredReg);
}