//===- Thumb2InstrInfo.cpp - Thumb-2 Instruction Information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the Thumb-2 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "Thumb2InstrInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>

using namespace llvm;

static cl::opt<bool>
OldT2IfCvt("old-thumb2-ifcvt", cl::Hidden,
           cl::desc("Use old-style Thumb2 if-conversion heuristics"),
           cl::init(false));

Thumb2InstrInfo::Thumb2InstrInfo(const ARMSubtarget &STI)
    : ARMBaseInstrInfo(STI) {}

/// Return the noop instruction to use for a noop.
void Thumb2InstrInfo::getNoop(MCInst &NopInst) const {
  NopInst.setOpcode(ARM::tHINT);
  NopInst.addOperand(MCOperand::createImm(0));
  NopInst.addOperand(MCOperand::createImm(ARMCC::AL));
  NopInst.addOperand(MCOperand::createReg(0));
}

unsigned Thumb2InstrInfo::getUnindexedOpcode(unsigned Opc) const {
  // FIXME
  return 0;
}

void
Thumb2InstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  if (!AFI->hasITBlocks() || Tail->isBranch()) {
    TargetInstrInfo::ReplaceTailWithBranchTo(Tail, NewDest);
    return;
  }

  // If the first instruction of Tail is predicated, we may have to update
  // the IT instruction.
  unsigned PredReg = 0;
  ARMCC::CondCodes CC = getInstrPredicate(*Tail, PredReg);
  MachineBasicBlock::iterator MBBI = Tail;
  if (CC != ARMCC::AL)
    // Expecting at least the t2IT instruction before it.
    --MBBI;

  // Actually replace the tail.
  TargetInstrInfo::ReplaceTailWithBranchTo(Tail, NewDest);

  // Fix up IT.
  if (CC != ARMCC::AL) {
    MachineBasicBlock::iterator E = MBB->begin();
    unsigned Count = 4; // At most 4 instructions in an IT block.
    while (Count && MBBI != E) {
      if (MBBI->isDebugInstr()) {
        --MBBI;
        continue;
      }
      if (MBBI->getOpcode() == ARM::t2IT) {
        unsigned Mask = MBBI->getOperand(1).getImm();
        if (Count == 4)
          MBBI->eraseFromParent();
        else {
          unsigned MaskOn = 1 << Count;
          unsigned MaskOff = ~(MaskOn - 1);
          MBBI->getOperand(1).setImm((Mask & MaskOff) | MaskOn);
        }
        return;
      }
      --MBBI;
      --Count;
    }

    // Ctrl flow can reach here if branch folding is run before the IT block
    // formation pass.
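    //
    // Note on the mask update above (an illustrative sketch, assuming the
    // usual IT mask encoding where the position of the least-significant set
    // bit gives the block length): if the replaced tail was the last of a
    // three-instruction IT block, the t2IT is reached with Count == 2, so the
    // bits below bit 2 are cleared and bit 2 becomes the new trailing bit,
    // shrinking the block to the two instructions that remain predicated.
    // When Count is still 4, the t2IT immediately preceded the tail, so the
    // block becomes empty and the t2IT is simply erased.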
  }
}

bool
Thumb2InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI) const {
  while (MBBI->isDebugInstr()) {
    ++MBBI;
    if (MBBI == MBB.end())
      return false;
  }

  unsigned PredReg = 0;
  return getITInstrPredicate(*MBBI, PredReg) == ARMCC::AL;
}

void Thumb2InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I,
                                  const DebugLoc &DL, unsigned DestReg,
                                  unsigned SrcReg, bool KillSrc) const {
  // Handle SPR, DPR, and QPR copies.
  if (!ARM::GPRRegClass.contains(DestReg, SrcReg))
    return ARMBaseInstrInfo::copyPhysReg(MBB, I, DL, DestReg, SrcReg, KillSrc);

  BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc))
      .add(predOps(ARMCC::AL));
}

void Thumb2InstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC,
                    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();

  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
      MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));

  if (ARM::GPRRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(ARM::t2STRi12))
        .addReg(SrcReg, getKillRegState(isKill))
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO)
        .add(predOps(ARMCC::AL));
    return;
  }

  if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
    // Thumb2 STRD expects its dest-registers to be in rGPR. Not a problem for
    // gsub_0, but needs an extra constraint for gsub_1 (which could be sp
    // otherwise).
    if (Register::isVirtualRegister(SrcReg)) {
      MachineRegisterInfo *MRI = &MF.getRegInfo();
      MRI->constrainRegClass(SrcReg, &ARM::GPRPairnospRegClass);
    }

    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2STRDi8));
    AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
    AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
    MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));
    return;
  }

  ARMBaseInstrInfo::storeRegToStackSlot(MBB, I, SrcReg, isKill, FI, RC, TRI);
}

void Thumb2InstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     unsigned DestReg, int FI,
                     const TargetRegisterClass *RC,
                     const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
      MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (ARM::GPRRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(ARM::t2LDRi12), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO)
        .add(predOps(ARMCC::AL));
    return;
  }

  if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
    // Thumb2 LDRD expects its dest-registers to be in rGPR. Not a problem for
    // gsub_0, but needs an extra constraint for gsub_1 (which could be sp
    // otherwise).
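    // (Illustrative aside: in the T32 LDRD/STRD encodings, SP is not a valid
    // transfer register, so a pair whose gsub_1 were allocated to SP would
    // not be encodable; constraining the virtual register to GPRPairnosp
    // rules that allocation out.)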
    if (Register::isVirtualRegister(DestReg)) {
      MachineRegisterInfo *MRI = &MF.getRegInfo();
      MRI->constrainRegClass(DestReg, &ARM::GPRPairnospRegClass);
    }

    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2LDRDi8));
    AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
    AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
    MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));

    if (Register::isPhysicalRegister(DestReg))
      MIB.addReg(DestReg, RegState::ImplicitDefine);
    return;
  }

  ARMBaseInstrInfo::loadRegFromStackSlot(MBB, I, DestReg, FI, RC, TRI);
}

void Thumb2InstrInfo::expandLoadStackGuard(
    MachineBasicBlock::iterator MI) const {
  MachineFunction &MF = *MI->getParent()->getParent();
  if (MF.getTarget().isPositionIndependent())
    expandLoadStackGuardBase(MI, ARM::t2MOV_ga_pcrel, ARM::t2LDRi12);
  else
    expandLoadStackGuardBase(MI, ARM::t2MOVi32imm, ARM::t2LDRi12);
}

void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator &MBBI,
                                  const DebugLoc &dl, unsigned DestReg,
                                  unsigned BaseReg, int NumBytes,
                                  ARMCC::CondCodes Pred, unsigned PredReg,
                                  const ARMBaseInstrInfo &TII,
                                  unsigned MIFlags) {
  if (NumBytes == 0 && DestReg != BaseReg) {
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
        .addReg(BaseReg, RegState::Kill)
        .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
    return;
  }

  bool isSub = NumBytes < 0;
  if (isSub) NumBytes = -NumBytes;

  // If profitable, use a movw or movt to materialize the offset.
  // FIXME: Use the scavenger to grab a scratch register.
  if (DestReg != ARM::SP && DestReg != BaseReg &&
      NumBytes >= 4096 &&
      ARM_AM::getT2SOImmVal(NumBytes) == -1) {
    bool Fits = false;
    if (NumBytes < 65536) {
      // Use a movw to materialize the 16-bit constant.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi16), DestReg)
          .addImm(NumBytes)
          .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
      Fits = true;
    } else if ((NumBytes & 0xffff) == 0) {
      // Use a movt to materialize the 32-bit constant.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVTi16), DestReg)
          .addReg(DestReg)
          .addImm(NumBytes >> 16)
          .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
      Fits = true;
    }

    if (Fits) {
      if (isSub) {
        BuildMI(MBB, MBBI, dl, TII.get(ARM::t2SUBrr), DestReg)
            .addReg(BaseReg)
            .addReg(DestReg, RegState::Kill)
            .add(predOps(Pred, PredReg))
            .add(condCodeOp())
            .setMIFlags(MIFlags);
      } else {
        // Here we know that DestReg is not SP but we do not
        // know anything about BaseReg. t2ADDrr is an invalid
        // instruction if SP is used as the second argument, but
        // is fine if SP is the first argument. To be sure we
        // do not generate an invalid encoding, put BaseReg first.
        BuildMI(MBB, MBBI, dl, TII.get(ARM::t2ADDrr), DestReg)
            .addReg(BaseReg)
            .addReg(DestReg, RegState::Kill)
            .add(predOps(Pred, PredReg))
            .add(condCodeOp())
            .setMIFlags(MIFlags);
      }
      return;
    }
  }

  while (NumBytes) {
    unsigned ThisVal = NumBytes;
    unsigned Opc = 0;
    if (DestReg == ARM::SP && BaseReg != ARM::SP) {
      // mov sp, rn. Note t2MOVr cannot be used.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
          .addReg(BaseReg)
          .setMIFlags(MIFlags)
          .add(predOps(ARMCC::AL));
      BaseReg = ARM::SP;
      continue;
    }

    bool HasCCOut = true;
    if (BaseReg == ARM::SP) {
      // sub sp, sp, #imm7
      if (DestReg == ARM::SP && (ThisVal < ((1 << 7) - 1) * 4)) {
        assert((ThisVal & 3) == 0 && "Stack update is not multiple of 4?");
        Opc = isSub ? ARM::tSUBspi : ARM::tADDspi;
        BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
            .addReg(BaseReg)
            .addImm(ThisVal / 4)
            .setMIFlags(MIFlags)
            .add(predOps(ARMCC::AL));
        NumBytes = 0;
        continue;
      }

      // sub rd, sp, so_imm
      Opc = isSub ? ARM::t2SUBri : ARM::t2ADDri;
      if (ARM_AM::getT2SOImmVal(NumBytes) != -1) {
        NumBytes = 0;
      } else {
        // FIXME: Move this to ARMAddressingModes.h?
        unsigned RotAmt = countLeadingZeros(ThisVal);
        ThisVal = ThisVal & ARM_AM::rotr32(0xff000000U, RotAmt);
        NumBytes &= ~ThisVal;
        assert(ARM_AM::getT2SOImmVal(ThisVal) != -1 &&
               "Bit extraction didn't work?");
      }
    } else {
      assert(DestReg != ARM::SP && BaseReg != ARM::SP);
      Opc = isSub ? ARM::t2SUBri : ARM::t2ADDri;
      if (ARM_AM::getT2SOImmVal(NumBytes) != -1) {
        NumBytes = 0;
      } else if (ThisVal < 4096) {
        Opc = isSub ? ARM::t2SUBri12 : ARM::t2ADDri12;
        HasCCOut = false;
        NumBytes = 0;
      } else {
        // FIXME: Move this to ARMAddressingModes.h?
        unsigned RotAmt = countLeadingZeros(ThisVal);
        ThisVal = ThisVal & ARM_AM::rotr32(0xff000000U, RotAmt);
        NumBytes &= ~ThisVal;
        assert(ARM_AM::getT2SOImmVal(ThisVal) != -1 &&
               "Bit extraction didn't work?");
      }
    }

    // Build the new ADD / SUB.
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
                                  .addReg(BaseReg, RegState::Kill)
                                  .addImm(ThisVal)
                                  .add(predOps(ARMCC::AL))
                                  .setMIFlags(MIFlags);
    if (HasCCOut)
      MIB.add(condCodeOp());

    BaseReg = DestReg;
  }
}

static unsigned
negativeOffsetOpcode(unsigned opcode)
{
  switch (opcode) {
  case ARM::t2LDRi12: return ARM::t2LDRi8;
  case ARM::t2LDRHi12: return ARM::t2LDRHi8;
  case ARM::t2LDRBi12: return ARM::t2LDRBi8;
  case ARM::t2LDRSHi12: return ARM::t2LDRSHi8;
  case ARM::t2LDRSBi12: return ARM::t2LDRSBi8;
  case ARM::t2STRi12: return ARM::t2STRi8;
  case ARM::t2STRBi12: return ARM::t2STRBi8;
  case ARM::t2STRHi12: return ARM::t2STRHi8;
  case ARM::t2PLDi12: return ARM::t2PLDi8;

  case ARM::t2LDRi8:
  case ARM::t2LDRHi8:
  case ARM::t2LDRBi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRSBi8:
  case ARM::t2STRi8:
  case ARM::t2STRBi8:
  case ARM::t2STRHi8:
  case ARM::t2PLDi8:
    return opcode;

  default:
    llvm_unreachable("unknown thumb2 opcode.");
  }
}

static unsigned
positiveOffsetOpcode(unsigned opcode)
{
  switch (opcode) {
  case ARM::t2LDRi8: return ARM::t2LDRi12;
  case ARM::t2LDRHi8: return ARM::t2LDRHi12;
  case ARM::t2LDRBi8: return ARM::t2LDRBi12;
  case ARM::t2LDRSHi8: return ARM::t2LDRSHi12;
  case ARM::t2LDRSBi8: return ARM::t2LDRSBi12;
  case ARM::t2STRi8: return ARM::t2STRi12;
  case ARM::t2STRBi8: return ARM::t2STRBi12;
  case ARM::t2STRHi8: return ARM::t2STRHi12;
  case ARM::t2PLDi8: return ARM::t2PLDi12;

  case ARM::t2LDRi12:
  case ARM::t2LDRHi12:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:
  case ARM::t2LDRSBi12:
  case ARM::t2STRi12:
  case ARM::t2STRBi12:
  case ARM::t2STRHi12:
  case ARM::t2PLDi12:
    return opcode;

  default:
    llvm_unreachable("unknown thumb2 opcode.");
  }
}

static unsigned
immediateOffsetOpcode(unsigned opcode)
{
  switch (opcode) {
  case ARM::t2LDRs: return ARM::t2LDRi12;
  case ARM::t2LDRHs: return ARM::t2LDRHi12;
  case ARM::t2LDRBs: return ARM::t2LDRBi12;
  case ARM::t2LDRSHs: return ARM::t2LDRSHi12;
  case ARM::t2LDRSBs: return ARM::t2LDRSBi12;
  case ARM::t2STRs: return ARM::t2STRi12;
  case ARM::t2STRBs: return ARM::t2STRBi12;
  case ARM::t2STRHs: return ARM::t2STRHi12;
  case ARM::t2PLDs: return ARM::t2PLDi12;

  case ARM::t2LDRi12:
  case ARM::t2LDRHi12:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:
  case ARM::t2LDRSBi12:
  case ARM::t2STRi12:
  case ARM::t2STRBi12:
  case ARM::t2STRHi12:
  case ARM::t2PLDi12:
  case ARM::t2LDRi8:
  case ARM::t2LDRHi8:
  case ARM::t2LDRBi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRSBi8:
  case ARM::t2STRi8:
  case ARM::t2STRBi8:
  case ARM::t2STRHi8:
  case ARM::t2PLDi8:
    return opcode;

  default:
    llvm_unreachable("unknown thumb2 opcode.");
  }
}

bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                               unsigned FrameReg, int &Offset,
                               const ARMBaseInstrInfo &TII,
                               const TargetRegisterInfo *TRI) {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  bool isSub = false;

  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetRegisterClass *RegClass =
      TII.getRegClass(Desc, FrameRegIdx, TRI, MF);

  // Memory operands in inline assembly always use AddrModeT2_i12.
  if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR)
    AddrMode = ARMII::AddrModeT2_i12; // FIXME. mode for thumb2?

  if (Opcode == ARM::t2ADDri || Opcode == ARM::t2ADDri12) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();

    unsigned PredReg;
    if (Offset == 0 && getInstrPredicate(MI, PredReg) == ARMCC::AL &&
        !MI.definesRegister(ARM::CPSR)) {
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::tMOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      // Remove offset and remaining explicit predicate operands.
      do MI.RemoveOperand(FrameRegIdx+1);
      while (MI.getNumOperands() > FrameRegIdx+1);
      MachineInstrBuilder MIB(*MI.getParent()->getParent(), &MI);
      MIB.add(predOps(ARMCC::AL));
      return true;
    }

    bool HasCCOut = Opcode != ARM::t2ADDri12;

    if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
      MI.setDesc(TII.get(ARM::t2SUBri));
    } else {
      MI.setDesc(TII.get(ARM::t2ADDri));
    }

    // Common case: small offset, fits into instruction.
    if (ARM_AM::getT2SOImmVal(Offset) != -1) {
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      // Add cc_out operand if the original instruction did not have one.
      if (!HasCCOut)
        MI.addOperand(MachineOperand::CreateReg(0, false));
      Offset = 0;
      return true;
    }
    // Another common case: imm12.
    if (Offset < 4096 &&
        (!HasCCOut || MI.getOperand(MI.getNumOperands()-1).getReg() == 0)) {
      unsigned NewOpc = isSub ? ARM::t2SUBri12 : ARM::t2ADDri12;
      MI.setDesc(TII.get(NewOpc));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      // Remove the cc_out operand.
      if (HasCCOut)
        MI.RemoveOperand(MI.getNumOperands()-1);
      Offset = 0;
      return true;
    }

    // Otherwise, extract 8 adjacent bits from the immediate into this
    // t2ADDri/t2SUBri.
    unsigned RotAmt = countLeadingZeros<unsigned>(Offset);
    unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xff000000U, RotAmt);

    // We will handle these bits from offset, clear them.
    Offset &= ~ThisImmVal;

    assert(ARM_AM::getT2SOImmVal(ThisImmVal) != -1 &&
           "Bit extraction didn't work?");
    MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
    // Add cc_out operand if the original instruction did not have one.
    if (!HasCCOut)
      MI.addOperand(MachineOperand::CreateReg(0, false));
  } else {
    // AddrMode4 and AddrMode6 cannot handle any offset.
    if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
      return false;

    // AddrModeT2_so cannot handle any offset. If there is no offset
    // register then we change to an immediate version.
    unsigned NewOpc = Opcode;
    if (AddrMode == ARMII::AddrModeT2_so) {
      Register OffsetReg = MI.getOperand(FrameRegIdx + 1).getReg();
      if (OffsetReg != 0) {
        MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
        return Offset == 0;
      }

      MI.RemoveOperand(FrameRegIdx+1);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(0);
      NewOpc = immediateOffsetOpcode(Opcode);
      AddrMode = ARMII::AddrModeT2_i12;
    }

    unsigned NumBits = 0;
    unsigned Scale = 1;
    if (AddrMode == ARMII::AddrModeT2_i8 || AddrMode == ARMII::AddrModeT2_i12) {
      // i8 supports only negative offsets, and i12 only positive ones, so
      // convert Opcode to the appropriate instruction based on the sign of
      // Offset.
      Offset += MI.getOperand(FrameRegIdx+1).getImm();
      if (Offset < 0) {
        NewOpc = negativeOffsetOpcode(Opcode);
        NumBits = 8;
        isSub = true;
        Offset = -Offset;
      } else {
        NewOpc = positiveOffsetOpcode(Opcode);
        NumBits = 12;
      }
    } else if (AddrMode == ARMII::AddrMode5) {
      // VFP address mode.
      const MachineOperand &OffOp = MI.getOperand(FrameRegIdx+1);
      int InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
      if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 4;
      Offset += InstrOffs * 4;
      assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
      if (Offset < 0) {
        Offset = -Offset;
        isSub = true;
      }
    } else if (AddrMode == ARMII::AddrMode5FP16) {
      // VFP address mode.
      const MachineOperand &OffOp = MI.getOperand(FrameRegIdx+1);
      int InstrOffs = ARM_AM::getAM5FP16Offset(OffOp.getImm());
      if (ARM_AM::getAM5FP16Op(OffOp.getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 2;
      Offset += InstrOffs * 2;
      assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
      if (Offset < 0) {
        Offset = -Offset;
        isSub = true;
      }
    } else if (AddrMode == ARMII::AddrModeT2_i7s4 ||
               AddrMode == ARMII::AddrModeT2_i7s2 ||
               AddrMode == ARMII::AddrModeT2_i7) {
      Offset += MI.getOperand(FrameRegIdx + 1).getImm();
      unsigned OffsetMask;
      switch (AddrMode) {
      case ARMII::AddrModeT2_i7s4: NumBits = 9; OffsetMask = 0x3; break;
      case ARMII::AddrModeT2_i7s2: NumBits = 8; OffsetMask = 0x1; break;
      default: NumBits = 7; OffsetMask = 0x0; break;
      }
      // MCInst operand expects already scaled value.
      Scale = 1;
      assert((Offset & OffsetMask) == 0 && "Can't encode this offset!");
      (void)OffsetMask; // squash unused-variable warning in NDEBUG builds
    } else if (AddrMode == ARMII::AddrModeT2_i8s4) {
      Offset += MI.getOperand(FrameRegIdx + 1).getImm() * 4;
      NumBits = 8 + 2;
      // MCInst operand expects already scaled value.
      Scale = 1;
      assert((Offset & 3) == 0 && "Can't encode this offset!");
    } else if (AddrMode == ARMII::AddrModeT2_ldrex) {
      Offset += MI.getOperand(FrameRegIdx + 1).getImm() * 4;
      NumBits = 8; // 8 bits scaled by 4
      Scale = 4;
      assert((Offset & 3) == 0 && "Can't encode this offset!");
    } else {
      llvm_unreachable("Unsupported addressing mode!");
    }

    if (NewOpc != Opcode)
      MI.setDesc(TII.get(NewOpc));

    MachineOperand &ImmOp = MI.getOperand(FrameRegIdx+1);

    // Attempt to fold the address computation.
    // Common case: small offset, fits into instruction. We need to make sure
    // the register class is correct too, for instructions like the MVE
    // VLDRH.32, which only accepts low tGPR registers.
    int ImmedOffset = Offset / Scale;
    unsigned Mask = (1 << NumBits) - 1;
    if ((unsigned)Offset <= Mask * Scale &&
        (Register::isVirtualRegister(FrameReg) ||
         RegClass->contains(FrameReg))) {
      if (Register::isVirtualRegister(FrameReg)) {
        // Make sure the register class for the virtual register is correct.
        MachineRegisterInfo *MRI = &MF.getRegInfo();
        if (!MRI->constrainRegClass(FrameReg, RegClass))
          llvm_unreachable("Unable to constrain virtual register class.");
      }

      // Replace the FrameIndex with fp/sp.
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      if (isSub) {
        if (AddrMode == ARMII::AddrMode5 || AddrMode == ARMII::AddrMode5FP16)
          // FIXME: Not consistent.
          ImmedOffset |= 1 << NumBits;
        else
          ImmedOffset = -ImmedOffset;
      }
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset = 0;
      return true;
    }

    // Otherwise, the offset doesn't fit. Pull in what we can to simplify.
    ImmedOffset = ImmedOffset & Mask;
    if (isSub) {
      if (AddrMode == ARMII::AddrMode5 || AddrMode == ARMII::AddrMode5FP16)
        // FIXME: Not consistent.
        ImmedOffset |= 1 << NumBits;
      else {
        ImmedOffset = -ImmedOffset;
        if (ImmedOffset == 0)
          // Change the opcode back if the encoded offset is zero.
          MI.setDesc(TII.get(positiveOffsetOpcode(NewOpc)));
      }
    }
    ImmOp.ChangeToImmediate(ImmedOffset);
    Offset &= ~(Mask*Scale);
  }

  Offset = (isSub) ? -Offset : Offset;
  return Offset == 0 && (Register::isVirtualRegister(FrameReg) ||
                         RegClass->contains(FrameReg));
}

ARMCC::CondCodes llvm::getITInstrPredicate(const MachineInstr &MI,
                                           unsigned &PredReg) {
  unsigned Opc = MI.getOpcode();
  if (Opc == ARM::tBcc || Opc == ARM::t2Bcc)
    return ARMCC::AL;
  return getInstrPredicate(MI, PredReg);
}

int llvm::findFirstVPTPredOperandIdx(const MachineInstr &MI) {
  const MCInstrDesc &MCID = MI.getDesc();

  if (!MCID.OpInfo)
    return -1;

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (ARM::isVpred(MCID.OpInfo[i].OperandType))
      return i;

  return -1;
}

ARMVCC::VPTCodes llvm::getVPTInstrPredicate(const MachineInstr &MI,
                                            unsigned &PredReg) {
  int PIdx = findFirstVPTPredOperandIdx(MI);
  if (PIdx == -1) {
    PredReg = 0;
    return ARMVCC::None;
  }

  PredReg = MI.getOperand(PIdx+1).getReg();
  return (ARMVCC::VPTCodes)MI.getOperand(PIdx).getImm();
}