//===-- ARMBaseInstrInfo.cpp - ARM Instruction Information ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the Base ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMConstantPoolValue.h"
#include "ARMFeatures.h"
#include "ARMHazardRecognizer.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <new>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "arm-instrinfo"

#define GET_INSTRINFO_CTOR_DTOR
#include "ARMGenInstrInfo.inc"

static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
               cl::desc("Enable ARM 2-addr to 3-addr conv"));

/// ARM_MLxEntry - Record information about MLA / MLS instructions.
struct ARM_MLxEntry {
  uint16_t MLxOpc;    // MLA / MLS opcode
  uint16_t MulOpc;    // Expanded multiplication opcode
  uint16_t AddSubOpc; // Expanded add / sub opcode
  bool NegAcc;        // True if the acc is negated before the add / sub.
  bool HasLane;       // True if instruction has an extra "lane" operand.
};

static const ARM_MLxEntry ARM_MLxTable[] = {
  // MLxOpc,          MulOpc,           AddSubOpc,       NegAcc, HasLane
  // fp scalar ops
  { ARM::VMLAS,       ARM::VMULS,       ARM::VADDS,      false,  false },
  { ARM::VMLSS,       ARM::VMULS,       ARM::VSUBS,      false,  false },
  { ARM::VMLAD,       ARM::VMULD,       ARM::VADDD,      false,  false },
  { ARM::VMLSD,       ARM::VMULD,       ARM::VSUBD,      false,  false },
  { ARM::VNMLAS,      ARM::VNMULS,      ARM::VSUBS,      true,   false },
  { ARM::VNMLSS,      ARM::VMULS,       ARM::VSUBS,      true,   false },
  { ARM::VNMLAD,      ARM::VNMULD,      ARM::VSUBD,      true,   false },
  { ARM::VNMLSD,      ARM::VMULD,       ARM::VSUBD,      true,   false },

  // fp SIMD ops
  { ARM::VMLAfd,      ARM::VMULfd,      ARM::VADDfd,     false,  false },
  { ARM::VMLSfd,      ARM::VMULfd,      ARM::VSUBfd,     false,  false },
  { ARM::VMLAfq,      ARM::VMULfq,      ARM::VADDfq,     false,  false },
  { ARM::VMLSfq,      ARM::VMULfq,      ARM::VSUBfq,     false,  false },
  { ARM::VMLAslfd,    ARM::VMULslfd,    ARM::VADDfd,     false,  true  },
  { ARM::VMLSslfd,    ARM::VMULslfd,    ARM::VSUBfd,     false,  true  },
  { ARM::VMLAslfq,    ARM::VMULslfq,    ARM::VADDfq,     false,  true  },
  { ARM::VMLSslfq,    ARM::VMULslfq,    ARM::VSUBfq,     false,  true  },
};

ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
  : ARMGenInstrInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
    Subtarget(STI) {
  for (unsigned i = 0, e = array_lengthof(ARM_MLxTable); i != e; ++i) {
    if (!MLxEntryMap.insert(std::make_pair(ARM_MLxTable[i].MLxOpc, i)).second)
      llvm_unreachable("Duplicated entries?");
    MLxHazardOpcodes.insert(ARM_MLxTable[i].AddSubOpc);
    MLxHazardOpcodes.insert(ARM_MLxTable[i].MulOpc);
  }
}

// Use a ScoreboardHazardRecognizer for prepass ARM scheduling. TargetInstrImpl
// currently defaults to no prepass hazard recognizer.
ScheduleHazardRecognizer *
ARMBaseInstrInfo::CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                                               const ScheduleDAG *DAG) const {
  if (usePreRAHazardRecognizer()) {
    const InstrItineraryData *II =
        static_cast<const ARMSubtarget *>(STI)->getInstrItineraryData();
    return new ScoreboardHazardRecognizer(II, DAG, "pre-RA-sched");
  }
  return TargetInstrInfo::CreateTargetHazardRecognizer(STI, DAG);
}

ScheduleHazardRecognizer *ARMBaseInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  if (Subtarget.isThumb2() || Subtarget.hasVFP2Base())
    return (ScheduleHazardRecognizer *)new ARMHazardRecognizer(II, DAG);
  return TargetInstrInfo::CreateTargetPostRAHazardRecognizer(II, DAG);
}

MachineInstr *ARMBaseInstrInfo::convertToThreeAddress(
    MachineFunction::iterator &MFI, MachineInstr &MI, LiveVariables *LV) const {
  // FIXME: Thumb2 support.

  if (!EnableARM3Addr)
    return nullptr;

  MachineFunction &MF = *MI.getParent()->getParent();
  uint64_t TSFlags = MI.getDesc().TSFlags;
  bool isPre = false;
  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
  default: return nullptr;
  case ARMII::IndexModePre:
    isPre = true;
    break;
  case ARMII::IndexModePost:
    break;
  }

  // Try splitting an indexed load/store to an un-indexed one plus an add/sub
  // operation.
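  // For example, a pre-indexed "ldr r0, [r1, #4]!" splits into "add r1, r1,
  // #4" plus "ldr r0, [r1]", while the post-indexed form keeps the load
  // first and performs the add/sub afterwards. (Illustrative sketch; the
  // exact split is selected by the addressing-mode cases below.)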
  unsigned MemOpc = getUnindexedOpcode(MI.getOpcode());
  if (MemOpc == 0)
    return nullptr;

  MachineInstr *UpdateMI = nullptr;
  MachineInstr *MemMI = nullptr;
  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
  const MCInstrDesc &MCID = MI.getDesc();
  unsigned NumOps = MCID.getNumOperands();
  bool isLoad = !MI.mayStore();
  const MachineOperand &WB = isLoad ? MI.getOperand(1) : MI.getOperand(0);
  const MachineOperand &Base = MI.getOperand(2);
  const MachineOperand &Offset = MI.getOperand(NumOps - 3);
  Register WBReg = WB.getReg();
  Register BaseReg = Base.getReg();
  Register OffReg = Offset.getReg();
  unsigned OffImm = MI.getOperand(NumOps - 2).getImm();
  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI.getOperand(NumOps - 1).getImm();
  switch (AddrMode) {
  default: llvm_unreachable("Unknown indexed op!");
  case ARMII::AddrMode2: {
    bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM2Offset(OffImm);
    if (OffReg == 0) {
      if (ARM_AM::getSOImmVal(Amt) == -1)
        // Can't encode it in a so_imm operand. This transformation will
        // add more than 1 instruction. Abandon!
        return nullptr;
      UpdateMI = BuildMI(MF, MI.getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
                     .addReg(BaseReg)
                     .addImm(Amt)
                     .add(predOps(Pred))
                     .add(condCodeOp());
    } else if (Amt != 0) {
      ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
      unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
      UpdateMI = BuildMI(MF, MI.getDebugLoc(),
                         get(isSub ? ARM::SUBrsi : ARM::ADDrsi), WBReg)
                     .addReg(BaseReg)
                     .addReg(OffReg)
                     .addReg(0)
                     .addImm(SOOpc)
                     .add(predOps(Pred))
                     .add(condCodeOp());
    } else
      UpdateMI = BuildMI(MF, MI.getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
                     .addReg(BaseReg)
                     .addReg(OffReg)
                     .add(predOps(Pred))
                     .add(condCodeOp());
    break;
  }
  case ARMII::AddrMode3: {
    bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM3Offset(OffImm);
    if (OffReg == 0)
      // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
      UpdateMI = BuildMI(MF, MI.getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
                     .addReg(BaseReg)
                     .addImm(Amt)
                     .add(predOps(Pred))
                     .add(condCodeOp());
    else
      UpdateMI = BuildMI(MF, MI.getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
                     .addReg(BaseReg)
                     .addReg(OffReg)
                     .add(predOps(Pred))
                     .add(condCodeOp());
    break;
  }
  }

  std::vector<MachineInstr*> NewMIs;
  if (isPre) {
    if (isLoad)
      MemMI =
          BuildMI(MF, MI.getDebugLoc(), get(MemOpc), MI.getOperand(0).getReg())
              .addReg(WBReg)
              .addImm(0)
              .addImm(Pred);
    else
      MemMI = BuildMI(MF, MI.getDebugLoc(), get(MemOpc))
                  .addReg(MI.getOperand(1).getReg())
                  .addReg(WBReg)
                  .addReg(0)
                  .addImm(0)
                  .addImm(Pred);
    NewMIs.push_back(MemMI);
    NewMIs.push_back(UpdateMI);
  } else {
    if (isLoad)
      MemMI =
          BuildMI(MF, MI.getDebugLoc(), get(MemOpc), MI.getOperand(0).getReg())
              .addReg(BaseReg)
              .addImm(0)
              .addImm(Pred);
    else
      MemMI = BuildMI(MF, MI.getDebugLoc(), get(MemOpc))
                  .addReg(MI.getOperand(1).getReg())
                  .addReg(BaseReg)
                  .addReg(0)
                  .addImm(0)
                  .addImm(Pred);
    if (WB.isDead())
      UpdateMI->getOperand(0).setIsDead();
    NewMIs.push_back(UpdateMI);
    NewMIs.push_back(MemMI);
  }

  // Transfer LiveVariables states, kill / dead info.
  if (LV) {
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg() && Register::isVirtualRegister(MO.getReg())) {
        Register Reg = MO.getReg();

        LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
        if (MO.isDef()) {
          MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
          if (MO.isDead())
            LV->addVirtualRegisterDead(Reg, *NewMI);
        }
        if (MO.isUse() && MO.isKill()) {
          for (unsigned j = 0; j < 2; ++j) {
            // Look at the two new MI's in reverse order.
            MachineInstr *NewMI = NewMIs[j];
            if (!NewMI->readsRegister(Reg))
              continue;
            LV->addVirtualRegisterKilled(Reg, *NewMI);
            if (VI.removeKill(MI))
              VI.Kills.push_back(NewMI);
            break;
          }
        }
      }
    }
  }

  MachineBasicBlock::iterator MBBI = MI.getIterator();
  MFI->insert(MBBI, NewMIs[1]);
  MFI->insert(MBBI, NewMIs[0]);
  return NewMIs[0];
}

// Branch analysis.
bool ARMBaseInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  TBB = nullptr;
  FBB = nullptr;

  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false; // Empty blocks are easy.
  --I;

  // Walk backwards from the end of the basic block until the branch is
  // analyzed or we give up.
  while (isPredicated(*I) || I->isTerminator() || I->isDebugValue()) {
    // Flag to be raised on unanalyzeable instructions. This is useful in cases
    // where we want to clean up on the end of the basic block before we bail
    // out.
    bool CantAnalyze = false;

    // Skip over DEBUG values and predicated nonterminators.
    while (I->isDebugInstr() || !I->isTerminator()) {
      if (I == MBB.begin())
        return false;
      --I;
    }

    if (isIndirectBranchOpcode(I->getOpcode()) ||
        isJumpTableBranchOpcode(I->getOpcode())) {
      // Indirect branches and jump tables can't be analyzed, but we still want
      // to clean up any instructions at the tail of the basic block.
      CantAnalyze = true;
    } else if (isUncondBranchOpcode(I->getOpcode())) {
      TBB = I->getOperand(0).getMBB();
    } else if (isCondBranchOpcode(I->getOpcode())) {
      // Bail out if we encounter multiple conditional branches.
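      // (Cond already holds the condition of a branch seen earlier in this
      // walk; a second conditional branch cannot be expressed in the single
      // (TBB, FBB, Cond) triple that this interface reports.)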
      if (!Cond.empty())
        return true;

      assert(!FBB && "FBB should have been null.");
      FBB = TBB;
      TBB = I->getOperand(0).getMBB();
      Cond.push_back(I->getOperand(1));
      Cond.push_back(I->getOperand(2));
    } else if (I->isReturn()) {
      // Returns can't be analyzed, but we should run cleanup.
      CantAnalyze = !isPredicated(*I);
    } else {
      // We encountered an unrecognized terminator. Bail out immediately.
      return true;
    }

    // Cleanup code - to be run for unpredicated unconditional branches and
    // returns.
    if (!isPredicated(*I) &&
        (isUncondBranchOpcode(I->getOpcode()) ||
         isIndirectBranchOpcode(I->getOpcode()) ||
         isJumpTableBranchOpcode(I->getOpcode()) ||
         I->isReturn())) {
      // Forget any previous conditional branch information - it no longer
      // applies.
      Cond.clear();
      FBB = nullptr;

      // If we can modify the function, delete everything below this
      // unconditional branch.
      if (AllowModify) {
        MachineBasicBlock::iterator DI = std::next(I);
        while (DI != MBB.end()) {
          MachineInstr &InstToDelete = *DI;
          ++DI;
          InstToDelete.eraseFromParent();
        }
      }
    }

    if (CantAnalyze)
      return true;

    if (I == MBB.begin())
      return false;

    --I;
  }

  // We made it past the terminators without bailing out - we must have
  // analyzed this branch successfully.
  return false;
}

unsigned ARMBaseInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                        int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

unsigned ARMBaseInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                        MachineBasicBlock *TBB,
                                        MachineBasicBlock *FBB,
                                        ArrayRef<MachineOperand> Cond,
                                        const DebugLoc &DL,
                                        int *BytesAdded) const {
  assert(!BytesAdded && "code size not handled");
  ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
  int BOpc   = !AFI->isThumbFunction()
    ? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
  int BccOpc = !AFI->isThumbFunction()
    ? ARM::Bcc : (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc);
  bool isThumb = AFI->isThumbFunction() || AFI->isThumb2Function();

  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "ARM branch conditions have two components!");

  // For conditional branches, we use addOperand to preserve CPSR flags.

  if (!FBB) {
    if (Cond.empty()) { // Unconditional branch?
      if (isThumb)
        BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB).add(predOps(ARMCC::AL));
      else
        BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
    } else
      BuildMI(&MBB, DL, get(BccOpc))
          .addMBB(TBB)
          .addImm(Cond[0].getImm())
          .add(Cond[1]);
    return 1;
  }

  // Two-way conditional branch.
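  // The conditional branch to TBB is emitted first, followed by the
  // unconditional branch to FBB; on Thumb targets the latter still carries
  // an explicit AL predicate operand.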
  BuildMI(&MBB, DL, get(BccOpc))
      .addMBB(TBB)
      .addImm(Cond[0].getImm())
      .add(Cond[1]);
  if (isThumb)
    BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB).add(predOps(ARMCC::AL));
  else
    BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
  return 2;
}

bool ARMBaseInstrInfo::
reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
  Cond[0].setImm(ARMCC::getOppositeCondition(CC));
  return false;
}

bool ARMBaseInstrInfo::isPredicated(const MachineInstr &MI) const {
  if (MI.isBundle()) {
    MachineBasicBlock::const_instr_iterator I = MI.getIterator();
    MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
    while (++I != E && I->isInsideBundle()) {
      int PIdx = I->findFirstPredOperandIdx();
      if (PIdx != -1 && I->getOperand(PIdx).getImm() != ARMCC::AL)
        return true;
    }
    return false;
  }

  int PIdx = MI.findFirstPredOperandIdx();
  return PIdx != -1 && MI.getOperand(PIdx).getImm() != ARMCC::AL;
}

bool ARMBaseInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  unsigned Opc = MI.getOpcode();
  if (isUncondBranchOpcode(Opc)) {
    MI.setDesc(get(getMatchingCondBranchOpcode(Opc)));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
        .addImm(Pred[0].getImm())
        .addReg(Pred[1].getReg());
    return true;
  }

  int PIdx = MI.findFirstPredOperandIdx();
  if (PIdx != -1) {
    MachineOperand &PMO = MI.getOperand(PIdx);
    PMO.setImm(Pred[0].getImm());
    MI.getOperand(PIdx+1).setReg(Pred[1].getReg());
    return true;
  }
  return false;
}

bool ARMBaseInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
                                         ArrayRef<MachineOperand> Pred2) const {
  if (Pred1.size() > 2 || Pred2.size() > 2)
    return false;

  ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
  ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
  if (CC1 == CC2)
    return true;

  switch (CC1) {
  default:
    return false;
  case ARMCC::AL:
    return true;
  case ARMCC::HS:
    return CC2 == ARMCC::HI;
  case ARMCC::LS:
    return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
  case ARMCC::GE:
    return CC2 == ARMCC::GT;
  case ARMCC::LE:
    return CC2 == ARMCC::LT;
  }
}

bool ARMBaseInstrInfo::DefinesPredicate(
    MachineInstr &MI, std::vector<MachineOperand> &Pred) const {
  bool Found = false;
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if ((MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) ||
        (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR)) {
      Pred.push_back(MO);
      Found = true;
    }
  }

  return Found;
}

bool ARMBaseInstrInfo::isCPSRDefined(const MachineInstr &MI) {
  for (const auto &MO : MI.operands())
    if (MO.isReg() && MO.getReg() == ARM::CPSR && MO.isDef() && !MO.isDead())
      return true;
  return false;
}

bool ARMBaseInstrInfo::isAddrMode3OpImm(const MachineInstr &MI,
                                        unsigned Op) const {
  const MachineOperand &Offset = MI.getOperand(Op + 1);
  return Offset.getReg() != 0;
}

// A load with a negative register offset requires an additional cycle and
// the +I unit on Cortex-A57.
bool ARMBaseInstrInfo::isAddrMode3OpMinusReg(const MachineInstr &MI,
                                             unsigned Op) const {
  const MachineOperand &Offset = MI.getOperand(Op + 1);
  const MachineOperand &Opc = MI.getOperand(Op + 2);
  assert(Opc.isImm());
  assert(Offset.isReg());
  int64_t OpcImm = Opc.getImm();

  bool isSub = ARM_AM::getAM3Op(OpcImm) == ARM_AM::sub;
  return (isSub && Offset.getReg() != 0);
}

bool ARMBaseInstrInfo::isLdstScaledReg(const MachineInstr &MI,
                                       unsigned Op) const {
  const MachineOperand &Opc = MI.getOperand(Op + 2);
  unsigned OffImm = Opc.getImm();
  return ARM_AM::getAM2ShiftOpc(OffImm) != ARM_AM::no_shift;
}

// Load, scaled register offset, not plus LSL2
bool ARMBaseInstrInfo::isLdstScaledRegNotPlusLsl2(const MachineInstr &MI,
                                                  unsigned Op) const {
  const MachineOperand &Opc = MI.getOperand(Op + 2);
  unsigned OffImm = Opc.getImm();

  bool isAdd = ARM_AM::getAM2Op(OffImm) == ARM_AM::add;
  unsigned Amt = ARM_AM::getAM2Offset(OffImm);
  ARM_AM::ShiftOpc ShiftOpc = ARM_AM::getAM2ShiftOpc(OffImm);
  if (ShiftOpc == ARM_AM::no_shift) return false; // not scaled
  bool SimpleScaled = (isAdd && ShiftOpc == ARM_AM::lsl && Amt == 2);
  return !SimpleScaled;
}

// Minus reg for ldstso addr mode
bool ARMBaseInstrInfo::isLdstSoMinusReg(const MachineInstr &MI,
                                        unsigned Op) const {
  unsigned OffImm = MI.getOperand(Op + 2).getImm();
  return ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
}

// Load, scaled register offset
bool ARMBaseInstrInfo::isAm2ScaledReg(const MachineInstr &MI,
                                      unsigned Op) const {
  unsigned OffImm = MI.getOperand(Op + 2).getImm();
  return ARM_AM::getAM2ShiftOpc(OffImm) != ARM_AM::no_shift;
}

static bool isEligibleForITBlock(const MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default: return true;
  case ARM::tADC:   // ADC (register) T1
  case ARM::tADDi3: // ADD (immediate) T1
  case ARM::tADDi8: // ADD (immediate) T2
  case ARM::tADDrr: // ADD (register) T1
  case ARM::tAND:   // AND (register) T1
  case ARM::tASRri: // ASR (immediate) T1
  case ARM::tASRrr: // ASR (register) T1
  case ARM::tBIC:   // BIC (register) T1
  case ARM::tEOR:   // EOR (register) T1
  case ARM::tLSLri: // LSL (immediate) T1
  case ARM::tLSLrr: // LSL (register) T1
  case ARM::tLSRri: // LSR (immediate) T1
  case ARM::tLSRrr: // LSR (register) T1
  case ARM::tMUL:   // MUL T1
  case ARM::tMVN:   // MVN (register) T1
  case ARM::tORR:   // ORR (register) T1
  case ARM::tROR:   // ROR (register) T1
  case ARM::tRSB:   // RSB (immediate) T1
  case ARM::tSBC:   // SBC (register) T1
  case ARM::tSUBi3: // SUB (immediate) T1
  case ARM::tSUBi8: // SUB (immediate) T2
  case ARM::tSUBrr: // SUB (register) T1
    return !ARMBaseInstrInfo::isCPSRDefined(*MI);
  }
}

/// isPredicable - Return true if the specified instruction can be predicated.
/// By default, this returns true for every instruction with a
/// PredicateOperand.
bool ARMBaseInstrInfo::isPredicable(const MachineInstr &MI) const {
  if (!MI.isPredicable())
    return false;

  if (MI.isBundle())
    return false;

  if (!isEligibleForITBlock(&MI))
    return false;

  const ARMFunctionInfo *AFI =
      MI.getParent()->getParent()->getInfo<ARMFunctionInfo>();

  // Neon instructions in Thumb2 IT blocks are deprecated, see ARMARM.
  // In their ARM encoding, they can't be encoded in a conditional form.
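  // Conservatively report such instructions as not predicable so that
  // if-conversion and the Thumb2 IT-block pass leave them alone.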
  if ((MI.getDesc().TSFlags & ARMII::DomainMask) == ARMII::DomainNEON)
    return false;

  if (AFI->isThumb2Function()) {
    if (getSubtarget().restrictIT())
      return isV8EligibleForIT(&MI);
  }

  return true;
}

namespace llvm {

template <> bool IsCPSRDead<MachineInstr>(const MachineInstr *MI) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isUndef() || MO.isUse())
      continue;
    if (MO.getReg() != ARM::CPSR)
      continue;
    if (!MO.isDead())
      return false;
  }
  // all definitions of CPSR are dead
  return true;
}

} // end namespace llvm

/// GetInstSize - Return the size of the specified MachineInstr.
///
unsigned ARMBaseInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  const MachineBasicBlock &MBB = *MI.getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  const MCInstrDesc &MCID = MI.getDesc();
  if (MCID.getSize())
    return MCID.getSize();

  switch (MI.getOpcode()) {
  default:
    // pseudo-instruction sizes are zero.
    return 0;
  case TargetOpcode::BUNDLE:
    return getInstBundleLength(MI);
  case ARM::MOVi16_ga_pcrel:
  case ARM::MOVTi16_ga_pcrel:
  case ARM::t2MOVi16_ga_pcrel:
  case ARM::t2MOVTi16_ga_pcrel:
    return 4;
  case ARM::MOVi32imm:
  case ARM::t2MOVi32imm:
    return 8;
  case ARM::CONSTPOOL_ENTRY:
  case ARM::JUMPTABLE_INSTS:
  case ARM::JUMPTABLE_ADDRS:
  case ARM::JUMPTABLE_TBB:
  case ARM::JUMPTABLE_TBH:
    // If this machine instr is a constant pool entry, its size is recorded as
    // operand #2.
    return MI.getOperand(2).getImm();
  case ARM::Int_eh_sjlj_longjmp:
    return 16;
  case ARM::tInt_eh_sjlj_longjmp:
    return 10;
  case ARM::tInt_WIN_eh_sjlj_longjmp:
    return 12;
  case ARM::Int_eh_sjlj_setjmp:
  case ARM::Int_eh_sjlj_setjmp_nofp:
    return 20;
  case ARM::tInt_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp_nofp:
    return 12;
  case ARM::SPACE:
    return MI.getOperand(1).getImm();
  case ARM::INLINEASM:
  case ARM::INLINEASM_BR: {
    // If this machine instr is an inline asm, measure it.
    unsigned Size = getInlineAsmLength(MI.getOperand(0).getSymbolName(), *MAI);
    if (!MF->getInfo<ARMFunctionInfo>()->isThumbFunction())
      Size = alignTo(Size, 4);
    return Size;
  }
  }
}

unsigned ARMBaseInstrInfo::getInstBundleLength(const MachineInstr &MI) const {
  unsigned Size = 0;
  MachineBasicBlock::const_instr_iterator I = MI.getIterator();
  MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
    Size += getInstSizeInBytes(*I);
  }
  return Size;
}

void ARMBaseInstrInfo::copyFromCPSR(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg, bool KillSrc,
                                    const ARMSubtarget &Subtarget) const {
  unsigned Opc = Subtarget.isThumb()
                     ? (Subtarget.isMClass() ? ARM::t2MRS_M : ARM::t2MRS_AR)
                     : ARM::MRS;

  MachineInstrBuilder MIB =
      BuildMI(MBB, I, I->getDebugLoc(), get(Opc), DestReg);

  // There is only 1 A/R class MRS instruction, and it always refers to
  // APSR. However, there are lots of other possibilities on M-class cores.
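  // (The 0x800 immediate is the mask/SYSm operand of the M-class MRS; it
  // should select APSR_nzcvq, i.e. just the flag bits, mirroring the MSR
  // case below.)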
  if (Subtarget.isMClass())
    MIB.addImm(0x800);

  MIB.add(predOps(ARMCC::AL))
      .addReg(ARM::CPSR, RegState::Implicit | getKillRegState(KillSrc));
}

void ARMBaseInstrInfo::copyToCPSR(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I,
                                  unsigned SrcReg, bool KillSrc,
                                  const ARMSubtarget &Subtarget) const {
  unsigned Opc = Subtarget.isThumb()
                     ? (Subtarget.isMClass() ? ARM::t2MSR_M : ARM::t2MSR_AR)
                     : ARM::MSR;

  MachineInstrBuilder MIB = BuildMI(MBB, I, I->getDebugLoc(), get(Opc));

  if (Subtarget.isMClass())
    MIB.addImm(0x800);
  else
    MIB.addImm(8);

  MIB.addReg(SrcReg, getKillRegState(KillSrc))
      .add(predOps(ARMCC::AL))
      .addReg(ARM::CPSR, RegState::Implicit | RegState::Define);
}

void llvm::addUnpredicatedMveVpredNOp(MachineInstrBuilder &MIB) {
  MIB.addImm(ARMVCC::None);
  MIB.addReg(0);
}

void llvm::addUnpredicatedMveVpredROp(MachineInstrBuilder &MIB,
                                      unsigned DestReg) {
  addUnpredicatedMveVpredNOp(MIB);
  MIB.addReg(DestReg, RegState::Undef);
}

void llvm::addPredicatedMveVpredNOp(MachineInstrBuilder &MIB, unsigned Cond) {
  MIB.addImm(Cond);
  MIB.addReg(ARM::VPR, RegState::Implicit);
}

void llvm::addPredicatedMveVpredROp(MachineInstrBuilder &MIB,
                                    unsigned Cond, unsigned Inactive) {
  addPredicatedMveVpredNOp(MIB, Cond);
  MIB.addReg(Inactive);
}

void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I,
                                   const DebugLoc &DL, unsigned DestReg,
                                   unsigned SrcReg, bool KillSrc) const {
  bool GPRDest = ARM::GPRRegClass.contains(DestReg);
  bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);

  if (GPRDest && GPRSrc) {
    BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());
    return;
  }

  bool SPRDest = ARM::SPRRegClass.contains(DestReg);
  bool SPRSrc = ARM::SPRRegClass.contains(SrcReg);

  unsigned Opc = 0;
  if (SPRDest && SPRSrc)
    Opc = ARM::VMOVS;
  else if (GPRDest && SPRSrc)
    Opc = ARM::VMOVRS;
  else if (SPRDest && GPRSrc)
    Opc = ARM::VMOVSR;
  else if (ARM::DPRRegClass.contains(DestReg, SrcReg) && Subtarget.hasFP64())
    Opc = ARM::VMOVD;
  else if (ARM::QPRRegClass.contains(DestReg, SrcReg))
    Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;

  if (Opc) {
    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc), DestReg);
    MIB.addReg(SrcReg, getKillRegState(KillSrc));
    if (Opc == ARM::VORRq || Opc == ARM::MVE_VORR)
      MIB.addReg(SrcReg, getKillRegState(KillSrc));
    if (Opc == ARM::MVE_VORR)
      addUnpredicatedMveVpredROp(MIB, DestReg);
    else
      MIB.add(predOps(ARMCC::AL));
    return;
  }

  // Handle register classes that require multiple instructions.
  unsigned BeginIdx = 0;
  unsigned SubRegs = 0;
  int Spacing = 1;

  // Use VORRq when possible.
  if (ARM::QQPRRegClass.contains(DestReg, SrcReg)) {
    Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
    BeginIdx = ARM::qsub_0;
    SubRegs = 2;
  } else if (ARM::QQQQPRRegClass.contains(DestReg, SrcReg)) {
    Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
    BeginIdx = ARM::qsub_0;
    SubRegs = 4;
  // Fall back to VMOVD.
  } else if (ARM::DPairRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 2;
  } else if (ARM::DTripleRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 3;
  } else if (ARM::DQuadRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 4;
  } else if (ARM::GPRPairRegClass.contains(DestReg, SrcReg)) {
    Opc = Subtarget.isThumb2() ? ARM::tMOVr : ARM::MOVr;
    BeginIdx = ARM::gsub_0;
    SubRegs = 2;
  } else if (ARM::DPairSpcRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 2;
    Spacing = 2;
  } else if (ARM::DTripleSpcRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 3;
    Spacing = 2;
  } else if (ARM::DQuadSpcRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 4;
    Spacing = 2;
  } else if (ARM::DPRRegClass.contains(DestReg, SrcReg) &&
             !Subtarget.hasFP64()) {
    Opc = ARM::VMOVS;
    BeginIdx = ARM::ssub_0;
    SubRegs = 2;
  } else if (SrcReg == ARM::CPSR) {
    copyFromCPSR(MBB, I, DestReg, KillSrc, Subtarget);
    return;
  } else if (DestReg == ARM::CPSR) {
    copyToCPSR(MBB, I, SrcReg, KillSrc, Subtarget);
    return;
  } else if (DestReg == ARM::VPR) {
    assert(ARM::GPRRegClass.contains(SrcReg));
    BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMSR_P0), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .add(predOps(ARMCC::AL));
    return;
  } else if (SrcReg == ARM::VPR) {
    assert(ARM::GPRRegClass.contains(DestReg));
    BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMRS_P0), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .add(predOps(ARMCC::AL));
    return;
  } else if (DestReg == ARM::FPSCR_NZCV) {
    assert(ARM::GPRRegClass.contains(SrcReg));
    BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMSR_FPSCR_NZCVQC), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .add(predOps(ARMCC::AL));
    return;
  } else if (SrcReg == ARM::FPSCR_NZCV) {
    assert(ARM::GPRRegClass.contains(DestReg));
    BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMRS_FPSCR_NZCVQC), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .add(predOps(ARMCC::AL));
    return;
  }

  assert(Opc && "Impossible reg-to-reg copy");

  const TargetRegisterInfo *TRI = &getRegisterInfo();
  MachineInstrBuilder Mov;

  // Copy register tuples backward when the first Dest reg overlaps with
  // SrcReg.
  if (TRI->regsOverlap(SrcReg, TRI->getSubReg(DestReg, BeginIdx))) {
    BeginIdx = BeginIdx + ((SubRegs - 1) * Spacing);
    Spacing = -Spacing;
  }

#ifndef NDEBUG
  SmallSet<unsigned, 4> DstRegs;
#endif
  for (unsigned i = 0; i != SubRegs; ++i) {
    Register Dst = TRI->getSubReg(DestReg, BeginIdx + i * Spacing);
    Register Src = TRI->getSubReg(SrcReg, BeginIdx + i * Spacing);
    assert(Dst && Src && "Bad sub-register");
#ifndef NDEBUG
    assert(!DstRegs.count(Src) && "destructive vector copy");
    DstRegs.insert(Dst);
#endif
    Mov = BuildMI(MBB, I, I->getDebugLoc(), get(Opc), Dst).addReg(Src);
    // VORR (NEON or MVE) takes two source operands.
    if (Opc == ARM::VORRq || Opc == ARM::MVE_VORR) {
      Mov.addReg(Src);
    }
    // MVE VORR takes predicate operands in place of an ordinary condition.
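    // (addUnpredicatedMveVpredROp appends the vpred_r operands defined by
    // the helpers above: no predication, with Dst supplying the
    // inactive-lane value.)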
    if (Opc == ARM::MVE_VORR)
      addUnpredicatedMveVpredROp(Mov, Dst);
    else
      Mov = Mov.add(predOps(ARMCC::AL));
    // MOVr can set CC.
    if (Opc == ARM::MOVr)
      Mov = Mov.add(condCodeOp());
  }
  // Add implicit super-register defs and kills to the last instruction.
  Mov->addRegisterDefined(DestReg, TRI);
  if (KillSrc)
    Mov->addRegisterKilled(SrcReg, TRI);
}

bool ARMBaseInstrInfo::isCopyInstrImpl(const MachineInstr &MI,
                                       const MachineOperand *&Src,
                                       const MachineOperand *&Dest) const {
  // VMOVRRD is also a copy instruction, but it requires special handling:
  // it is a more complex form of copy, so we do not consider it here. The
  // isExtractSubregLike MI interface function could be used to recognize
  // such instructions.
  // VORRq is considered a move only if its two inputs are the same register.
  if (!MI.isMoveReg() ||
      (MI.getOpcode() == ARM::VORRq &&
       MI.getOperand(1).getReg() != MI.getOperand(2).getReg()))
    return false;
  Dest = &MI.getOperand(0);
  Src = &MI.getOperand(1);
  return true;
}

const MachineInstrBuilder &
ARMBaseInstrInfo::AddDReg(MachineInstrBuilder &MIB, unsigned Reg,
                          unsigned SubIdx, unsigned State,
                          const TargetRegisterInfo *TRI) const {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (Register::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}

void ARMBaseInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC,
                    const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
      MFI.getObjectSize(FI), Align);

  switch (TRI->getSpillSize(*RC)) {
  case 2:
    if (ARM::HPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DebugLoc(), get(ARM::VSTRH))
          .addReg(SrcReg, getKillRegState(isKill))
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 4:
    if (ARM::GPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DebugLoc(), get(ARM::STRi12))
          .addReg(SrcReg, getKillRegState(isKill))
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DebugLoc(), get(ARM::VSTRS))
          .addReg(SrcReg, getKillRegState(isKill))
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DebugLoc(), get(ARM::VSTR_P0_off))
          .addReg(SrcReg, getKillRegState(isKill))
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 8:
    if (ARM::DPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DebugLoc(), get(ARM::VSTRD))
          .addReg(SrcReg, getKillRegState(isKill))
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
      if (Subtarget.hasV5TEOps()) {
        MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::STRD));
        AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
        AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
        MIB.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO)
           .add(predOps(ARMCC::AL));
      } else {
        // Fallback to STM instruction, which has existed since the dawn of
        // time.
        MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::STMIA))
                                      .addFrameIndex(FI)
                                      .addMemOperand(MMO)
                                      .add(predOps(ARMCC::AL));
        AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
        AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 16:
    if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
      // Use aligned spills if the stack can be realigned.
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        BuildMI(MBB, I, DebugLoc(), get(ARM::VST1q64))
            .addFrameIndex(FI)
            .addImm(16)
            .addReg(SrcReg, getKillRegState(isKill))
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      } else {
        BuildMI(MBB, I, DebugLoc(), get(ARM::VSTMQIA))
            .addReg(SrcReg, getKillRegState(isKill))
            .addFrameIndex(FI)
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      }
    } else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
               Subtarget.hasMVEIntegerOps()) {
      auto MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::MVE_VSTRWU32));
      MIB.addReg(SrcReg, getKillRegState(isKill))
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO);
      addUnpredicatedMveVpredNOp(MIB);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 24:
    if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
      // Use aligned spills if the stack can be realigned.
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF) &&
          Subtarget.hasNEON()) {
        BuildMI(MBB, I, DebugLoc(), get(ARM::VST1d64TPseudo))
            .addFrameIndex(FI)
            .addImm(16)
            .addReg(SrcReg, getKillRegState(isKill))
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      } else {
        MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(),
                                          get(ARM::VSTMDIA))
                                      .addFrameIndex(FI)
                                      .add(predOps(ARMCC::AL))
                                      .addMemOperand(MMO);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
        AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 32:
    if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
        ARM::DQuadRegClass.hasSubClassEq(RC)) {
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF) &&
          Subtarget.hasNEON()) {
        // FIXME: It's possible to only store part of the QQ register if the
        // spilled def has a sub-register index.
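        // A single aligned VST1 pseudo stores the whole 32-byte tuple when
        // the slot is 16-byte aligned; otherwise we fall through to the
        // VSTMDIA form below.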
        BuildMI(MBB, I, DebugLoc(), get(ARM::VST1d64QPseudo))
            .addFrameIndex(FI)
            .addImm(16)
            .addReg(SrcReg, getKillRegState(isKill))
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      } else {
        MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(),
                                          get(ARM::VSTMDIA))
                                      .addFrameIndex(FI)
                                      .add(predOps(ARMCC::AL))
                                      .addMemOperand(MMO);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
        AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 64:
    if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
      MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::VSTMDIA))
                                    .addFrameIndex(FI)
                                    .add(predOps(ARMCC::AL))
                                    .addMemOperand(MMO);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_4, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_5, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_6, 0, TRI);
      AddDReg(MIB, SrcReg, ARM::dsub_7, 0, TRI);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  default:
    llvm_unreachable("Unknown reg class!");
  }
}

unsigned ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                              int &FrameIndex) const {
  switch (MI.getOpcode()) {
  default: break;
  case ARM::STRrs:
  case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isReg() &&
        MI.getOperand(3).isImm() && MI.getOperand(2).getReg() == 0 &&
        MI.getOperand(3).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  case ARM::STRi12:
  case ARM::t2STRi12:
  case ARM::tSTRspi:
  case ARM::VSTRD:
  case ARM::VSTRS:
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  case ARM::VSTR_P0_off:
    if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
        MI.getOperand(1).getImm() == 0) {
      FrameIndex = MI.getOperand(0).getIndex();
      return ARM::P0;
    }
    break;
  case ARM::VST1q64:
  case ARM::VST1d64TPseudo:
  case ARM::VST1d64QPseudo:
    if (MI.getOperand(0).isFI() && MI.getOperand(2).getSubReg() == 0) {
      FrameIndex = MI.getOperand(0).getIndex();
      return MI.getOperand(2).getReg();
    }
    break;
  case ARM::VSTMQIA:
    if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned ARMBaseInstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
                                                    int &FrameIndex) const {
  SmallVector<const MachineMemOperand *, 1> Accesses;
  if (MI.mayStore() && hasStoreToStackSlot(MI, Accesses) &&
      Accesses.size() == 1) {
    FrameIndex =
        cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
            ->getFrameIndex();
    return true;
  }
  return false;
}

void ARMBaseInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     unsigned DestReg, int FI,
                     const TargetRegisterClass *RC,
                     const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
      MFI.getObjectSize(FI), Align);

  switch (TRI->getSpillSize(*RC)) {
  case 2:
    if (ARM::HPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DL, get(ARM::VLDRH), DestReg)
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 4:
    if (ARM::GPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DL, get(ARM::LDRi12), DestReg)
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg)
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DL, get(ARM::VLDR_P0_off), DestReg)
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 8:
    if (ARM::DPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg)
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
      MachineInstrBuilder MIB;

      if (Subtarget.hasV5TEOps()) {
        MIB = BuildMI(MBB, I, DL, get(ARM::LDRD));
        AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
        AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
        MIB.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO)
           .add(predOps(ARMCC::AL));
      } else {
        // Fallback to LDM instruction, which has existed since the dawn of
        // time.
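        // (LDMIA with the gsub_0/gsub_1 halves reloads the pair on
        // pre-v5TE cores, which lack LDRD.)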
        MIB = BuildMI(MBB, I, DL, get(ARM::LDMIA))
                  .addFrameIndex(FI)
                  .addMemOperand(MMO)
                  .add(predOps(ARMCC::AL));
        MIB = AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
      }

      if (Register::isPhysicalRegister(DestReg))
        MIB.addReg(DestReg, RegState::ImplicitDefine);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 16:
    if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        BuildMI(MBB, I, DL, get(ARM::VLD1q64), DestReg)
            .addFrameIndex(FI)
            .addImm(16)
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      } else {
        BuildMI(MBB, I, DL, get(ARM::VLDMQIA), DestReg)
            .addFrameIndex(FI)
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      }
    } else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
               Subtarget.hasMVEIntegerOps()) {
      auto MIB = BuildMI(MBB, I, DL, get(ARM::MVE_VLDRWU32), DestReg);
      MIB.addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO);
      addUnpredicatedMveVpredNOp(MIB);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 24:
    if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF) &&
          Subtarget.hasNEON()) {
        BuildMI(MBB, I, DL, get(ARM::VLD1d64TPseudo), DestReg)
            .addFrameIndex(FI)
            .addImm(16)
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      } else {
        MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                                      .addFrameIndex(FI)
                                      .addMemOperand(MMO)
                                      .add(predOps(ARMCC::AL));
        MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
        if (Register::isPhysicalRegister(DestReg))
          MIB.addReg(DestReg, RegState::ImplicitDefine);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 32:
    if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
        ARM::DQuadRegClass.hasSubClassEq(RC)) {
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF) &&
          Subtarget.hasNEON()) {
        BuildMI(MBB, I, DL, get(ARM::VLD1d64QPseudo), DestReg)
            .addFrameIndex(FI)
            .addImm(16)
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      } else {
        MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                                      .addFrameIndex(FI)
                                      .add(predOps(ARMCC::AL))
                                      .addMemOperand(MMO);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
        if (Register::isPhysicalRegister(DestReg))
          MIB.addReg(DestReg, RegState::ImplicitDefine);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 64:
    if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
      MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                                    .addFrameIndex(FI)
                                    .add(predOps(ARMCC::AL))
                                    .addMemOperand(MMO);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_7, RegState::DefineNoRead, TRI);
      if (Register::isPhysicalRegister(DestReg))
        MIB.addReg(DestReg, RegState::ImplicitDefine);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  default:
    llvm_unreachable("Unknown regclass!");
  }
}

unsigned ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                               int &FrameIndex) const {
  switch (MI.getOpcode()) {
  default: break;
  case ARM::LDRrs:
  case ARM::t2LDRs: // FIXME: don't use t2LDRs to access frame.
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isReg() &&
        MI.getOperand(3).isImm() && MI.getOperand(2).getReg() == 0 &&
        MI.getOperand(3).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  case ARM::LDRi12:
  case ARM::t2LDRi12:
  case ARM::tLDRspi:
  case ARM::VLDRD:
  case ARM::VLDRS:
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  case ARM::VLDR_P0_off:
    if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
        MI.getOperand(1).getImm() == 0) {
      FrameIndex = MI.getOperand(0).getIndex();
      return ARM::P0;
    }
    break;
  case ARM::VLD1q64:
  case ARM::VLD1d8TPseudo:
  case ARM::VLD1d16TPseudo:
  case ARM::VLD1d32TPseudo:
  case ARM::VLD1d64TPseudo:
  case ARM::VLD1d8QPseudo:
  case ARM::VLD1d16QPseudo:
  case ARM::VLD1d32QPseudo:
  case ARM::VLD1d64QPseudo:
    if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  case ARM::VLDMQIA:
    if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned ARMBaseInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
                                                     int &FrameIndex) const {
  SmallVector<const MachineMemOperand *, 1> Accesses;
  if (MI.mayLoad() && hasLoadFromStackSlot(MI, Accesses) &&
      Accesses.size() == 1) {
    FrameIndex =
        cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
            ->getFrameIndex();
    return true;
  }
  return false;
}

/// Expands MEMCPY to either LDMIA/STMIA or LDMIA_UPD/STMIA_UPD
/// depending on whether the result is used.
void ARMBaseInstrInfo::expandMEMCPY(MachineBasicBlock::iterator MI) const {
  bool isThumb1 = Subtarget.isThumb1Only();
  bool isThumb2 = Subtarget.isThumb2();
  const ARMBaseInstrInfo *TII = Subtarget.getInstrInfo();

  DebugLoc dl = MI->getDebugLoc();
  MachineBasicBlock *BB = MI->getParent();

  MachineInstrBuilder LDM, STM;
  if (isThumb1 || !MI->getOperand(1).isDead()) {
    MachineOperand LDWb(MI->getOperand(1));
    LDM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2LDMIA_UPD
                                        : isThumb1 ? ARM::tLDMIA_UPD
                                                   : ARM::LDMIA_UPD))
              .add(LDWb);
  } else {
    LDM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2LDMIA : ARM::LDMIA));
  }

  if (isThumb1 || !MI->getOperand(0).isDead()) {
    MachineOperand STWb(MI->getOperand(0));
    STM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2STMIA_UPD
                                        : isThumb1 ? ARM::tSTMIA_UPD
                                                   : ARM::STMIA_UPD))
              .add(STWb);
  } else {
    STM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2STMIA : ARM::STMIA));
  }

  MachineOperand LDBase(MI->getOperand(3));
  LDM.add(LDBase).add(predOps(ARMCC::AL));

  MachineOperand STBase(MI->getOperand(2));
  STM.add(STBase).add(predOps(ARMCC::AL));

  // Sort the scratch registers into ascending order.
  const TargetRegisterInfo &TRI = getRegisterInfo();
  SmallVector<unsigned, 6> ScratchRegs;
  for (unsigned I = 5; I < MI->getNumOperands(); ++I)
    ScratchRegs.push_back(MI->getOperand(I).getReg());
  llvm::sort(ScratchRegs,
             [&TRI](const unsigned &Reg1, const unsigned &Reg2) -> bool {
               return TRI.getEncodingValue(Reg1) <
                      TRI.getEncodingValue(Reg2);
             });

  for (const auto &Reg : ScratchRegs) {
    LDM.addReg(Reg, RegState::Define);
    STM.addReg(Reg, RegState::Kill);
  }

  BB->erase(MI);
}

bool ARMBaseInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  if (MI.getOpcode() == TargetOpcode::LOAD_STACK_GUARD) {
    assert(getSubtarget().getTargetTriple().isOSBinFormatMachO() &&
           "LOAD_STACK_GUARD currently supported only for MachO.");
    expandLoadStackGuard(MI);
    MI.getParent()->erase(MI);
    return true;
  }

  if (MI.getOpcode() == ARM::MEMCPY) {
    expandMEMCPY(MI);
    return true;
  }

  // This hook gets to expand COPY instructions before they become
  // copyPhysReg() calls. Look for VMOVS instructions that can legally be
  // widened to VMOVD. We prefer the VMOVD when possible because it may be
  // changed into a VORR that can go down the NEON pipeline.
  if (!MI.isCopy() || Subtarget.dontWidenVMOVS() || !Subtarget.hasFP64())
    return false;

  // Look for a copy between even S-registers. That is where we keep floats
  // when using NEON v2f32 instructions for f32 arithmetic.
  Register DstRegS = MI.getOperand(0).getReg();
  Register SrcRegS = MI.getOperand(1).getReg();
  if (!ARM::SPRRegClass.contains(DstRegS, SrcRegS))
    return false;

  const TargetRegisterInfo *TRI = &getRegisterInfo();
  unsigned DstRegD = TRI->getMatchingSuperReg(DstRegS, ARM::ssub_0,
                                              &ARM::DPRRegClass);
  unsigned SrcRegD = TRI->getMatchingSuperReg(SrcRegS, ARM::ssub_0,
                                              &ARM::DPRRegClass);
  if (!DstRegD || !SrcRegD)
    return false;

  // We want to widen this into a DstRegD = VMOVD SrcRegD copy. This is only
  // legal if the COPY already defines the full DstRegD, and it isn't a
  // sub-register insertion.
  if (!MI.definesRegister(DstRegD, TRI) || MI.readsRegister(DstRegD, TRI))
    return false;

  // A dead copy shouldn't show up here, but reject it just in case.
  if (MI.getOperand(0).isDead())
    return false;

  // All clear, widen the COPY.
  LLVM_DEBUG(dbgs() << "widening: " << MI);
  MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);

  // Get rid of the old implicit-def of DstRegD. Leave it if it defines a Q-reg
  // or some other super-register.
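  // (After widening, the VMOVD explicitly defines all of DstRegD, so a
  // leftover implicit-def of that same register would be redundant.)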
  int ImpDefIdx = MI.findRegisterDefOperandIdx(DstRegD);
  if (ImpDefIdx != -1)
    MI.RemoveOperand(ImpDefIdx);

  // Change the opcode and operands.
  MI.setDesc(get(ARM::VMOVD));
  MI.getOperand(0).setReg(DstRegD);
  MI.getOperand(1).setReg(SrcRegD);
  MIB.add(predOps(ARMCC::AL));

  // We are now reading SrcRegD instead of SrcRegS. This may upset the
  // register scavenger and machine verifier, so we need to indicate that we
  // are reading an undefined value from SrcRegD, but a proper value from
  // SrcRegS.
  MI.getOperand(1).setIsUndef();
  MIB.addReg(SrcRegS, RegState::Implicit);

  // SrcRegD may actually contain an unrelated value in the ssub_1
  // sub-register. Don't kill it. Only kill the ssub_0 sub-register.
  if (MI.getOperand(1).isKill()) {
    MI.getOperand(1).setIsKill(false);
    MI.addRegisterKilled(SrcRegS, TRI, true);
  }

  LLVM_DEBUG(dbgs() << "replaced by: " << MI);
  return true;
}

/// Create a copy of a const pool value. Update CPI to the new index and return
/// the label UID.
static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
  MachineConstantPool *MCP = MF.getConstantPool();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPI];
  assert(MCPE.isMachineConstantPoolEntry() &&
         "Expecting a machine constantpool entry!");
  ARMConstantPoolValue *ACPV =
      static_cast<ARMConstantPoolValue*>(MCPE.Val.MachineCPVal);

  unsigned PCLabelId = AFI->createPICLabelUId();
  ARMConstantPoolValue *NewCPV = nullptr;

  // FIXME: The below assumes PIC relocation model and that the function
  // is Thumb mode (t1 or t2). PCAdjustment would be 8 for ARM mode PIC, and
  // zero for non-PIC in ARM or Thumb. The callers are all of thumb LDR
  // instructions, so that's probably OK, but is PIC always correct when
  // we get here?
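  // Clone the pool value under the fresh PC label, dispatching on its kind
  // (global value, external symbol, block address, LSDA, or basic block).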
1667 if (ACPV->isGlobalValue()) 1668 NewCPV = ARMConstantPoolConstant::Create( 1669 cast<ARMConstantPoolConstant>(ACPV)->getGV(), PCLabelId, ARMCP::CPValue, 1670 4, ACPV->getModifier(), ACPV->mustAddCurrentAddress()); 1671 else if (ACPV->isExtSymbol()) 1672 NewCPV = ARMConstantPoolSymbol:: 1673 Create(MF.getFunction().getContext(), 1674 cast<ARMConstantPoolSymbol>(ACPV)->getSymbol(), PCLabelId, 4); 1675 else if (ACPV->isBlockAddress()) 1676 NewCPV = ARMConstantPoolConstant:: 1677 Create(cast<ARMConstantPoolConstant>(ACPV)->getBlockAddress(), PCLabelId, 1678 ARMCP::CPBlockAddress, 4); 1679 else if (ACPV->isLSDA()) 1680 NewCPV = ARMConstantPoolConstant::Create(&MF.getFunction(), PCLabelId, 1681 ARMCP::CPLSDA, 4); 1682 else if (ACPV->isMachineBasicBlock()) 1683 NewCPV = ARMConstantPoolMBB:: 1684 Create(MF.getFunction().getContext(), 1685 cast<ARMConstantPoolMBB>(ACPV)->getMBB(), PCLabelId, 4); 1686 else 1687 llvm_unreachable("Unexpected ARM constantpool value type!!"); 1688 CPI = MCP->getConstantPoolIndex(NewCPV, MCPE.getAlignment()); 1689 return PCLabelId; 1690 } 1691 1692 void ARMBaseInstrInfo::reMaterialize(MachineBasicBlock &MBB, 1693 MachineBasicBlock::iterator I, 1694 unsigned DestReg, unsigned SubIdx, 1695 const MachineInstr &Orig, 1696 const TargetRegisterInfo &TRI) const { 1697 unsigned Opcode = Orig.getOpcode(); 1698 switch (Opcode) { 1699 default: { 1700 MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig); 1701 MI->substituteRegister(Orig.getOperand(0).getReg(), DestReg, SubIdx, TRI); 1702 MBB.insert(I, MI); 1703 break; 1704 } 1705 case ARM::tLDRpci_pic: 1706 case ARM::t2LDRpci_pic: { 1707 MachineFunction &MF = *MBB.getParent(); 1708 unsigned CPI = Orig.getOperand(1).getIndex(); 1709 unsigned PCLabelId = duplicateCPV(MF, CPI); 1710 BuildMI(MBB, I, Orig.getDebugLoc(), get(Opcode), DestReg) 1711 .addConstantPoolIndex(CPI) 1712 .addImm(PCLabelId) 1713 .cloneMemRefs(Orig); 1714 break; 1715 } 1716 } 1717 } 1718 1719 MachineInstr & 1720 ARMBaseInstrInfo::duplicate(MachineBasicBlock &MBB, 1721 MachineBasicBlock::iterator InsertBefore, 1722 const MachineInstr &Orig) const { 1723 MachineInstr &Cloned = TargetInstrInfo::duplicate(MBB, InsertBefore, Orig); 1724 MachineBasicBlock::instr_iterator I = Cloned.getIterator(); 1725 for (;;) { 1726 switch (I->getOpcode()) { 1727 case ARM::tLDRpci_pic: 1728 case ARM::t2LDRpci_pic: { 1729 MachineFunction &MF = *MBB.getParent(); 1730 unsigned CPI = I->getOperand(1).getIndex(); 1731 unsigned PCLabelId = duplicateCPV(MF, CPI); 1732 I->getOperand(1).setIndex(CPI); 1733 I->getOperand(2).setImm(PCLabelId); 1734 break; 1735 } 1736 } 1737 if (!I->isBundledWithSucc()) 1738 break; 1739 ++I; 1740 } 1741 return Cloned; 1742 } 1743 1744 bool ARMBaseInstrInfo::produceSameValue(const MachineInstr &MI0, 1745 const MachineInstr &MI1, 1746 const MachineRegisterInfo *MRI) const { 1747 unsigned Opcode = MI0.getOpcode(); 1748 if (Opcode == ARM::t2LDRpci || 1749 Opcode == ARM::t2LDRpci_pic || 1750 Opcode == ARM::tLDRpci || 1751 Opcode == ARM::tLDRpci_pic || 1752 Opcode == ARM::LDRLIT_ga_pcrel || 1753 Opcode == ARM::LDRLIT_ga_pcrel_ldr || 1754 Opcode == ARM::tLDRLIT_ga_pcrel || 1755 Opcode == ARM::MOV_ga_pcrel || 1756 Opcode == ARM::MOV_ga_pcrel_ldr || 1757 Opcode == ARM::t2MOV_ga_pcrel) { 1758 if (MI1.getOpcode() != Opcode) 1759 return false; 1760 if (MI0.getNumOperands() != MI1.getNumOperands()) 1761 return false; 1762 1763 const MachineOperand &MO0 = MI0.getOperand(1); 1764 const MachineOperand &MO1 = MI1.getOperand(1); 1765 if (MO0.getOffset() != 
MO1.getOffset()) 1766 return false; 1767 1768 if (Opcode == ARM::LDRLIT_ga_pcrel || 1769 Opcode == ARM::LDRLIT_ga_pcrel_ldr || 1770 Opcode == ARM::tLDRLIT_ga_pcrel || 1771 Opcode == ARM::MOV_ga_pcrel || 1772 Opcode == ARM::MOV_ga_pcrel_ldr || 1773 Opcode == ARM::t2MOV_ga_pcrel) 1774 // Ignore the PC labels. 1775 return MO0.getGlobal() == MO1.getGlobal(); 1776 1777 const MachineFunction *MF = MI0.getParent()->getParent(); 1778 const MachineConstantPool *MCP = MF->getConstantPool(); 1779 int CPI0 = MO0.getIndex(); 1780 int CPI1 = MO1.getIndex(); 1781 const MachineConstantPoolEntry &MCPE0 = MCP->getConstants()[CPI0]; 1782 const MachineConstantPoolEntry &MCPE1 = MCP->getConstants()[CPI1]; 1783 bool isARMCP0 = MCPE0.isMachineConstantPoolEntry(); 1784 bool isARMCP1 = MCPE1.isMachineConstantPoolEntry(); 1785 if (isARMCP0 && isARMCP1) { 1786 ARMConstantPoolValue *ACPV0 = 1787 static_cast<ARMConstantPoolValue*>(MCPE0.Val.MachineCPVal); 1788 ARMConstantPoolValue *ACPV1 = 1789 static_cast<ARMConstantPoolValue*>(MCPE1.Val.MachineCPVal); 1790 return ACPV0->hasSameValue(ACPV1); 1791 } else if (!isARMCP0 && !isARMCP1) { 1792 return MCPE0.Val.ConstVal == MCPE1.Val.ConstVal; 1793 } 1794 return false; 1795 } else if (Opcode == ARM::PICLDR) { 1796 if (MI1.getOpcode() != Opcode) 1797 return false; 1798 if (MI0.getNumOperands() != MI1.getNumOperands()) 1799 return false; 1800 1801 Register Addr0 = MI0.getOperand(1).getReg(); 1802 Register Addr1 = MI1.getOperand(1).getReg(); 1803 if (Addr0 != Addr1) { 1804 if (!MRI || !Register::isVirtualRegister(Addr0) || 1805 !Register::isVirtualRegister(Addr1)) 1806 return false; 1807 1808 // This assumes SSA form. 1809 MachineInstr *Def0 = MRI->getVRegDef(Addr0); 1810 MachineInstr *Def1 = MRI->getVRegDef(Addr1); 1811 // Check if the loaded values, e.g. a constantpool of a global address, are 1812 // the same. 1813 if (!produceSameValue(*Def0, *Def1, MRI)) 1814 return false; 1815 } 1816 1817 for (unsigned i = 3, e = MI0.getNumOperands(); i != e; ++i) { 1818 // %12 = PICLDR %11, 0, 14, %noreg 1819 const MachineOperand &MO0 = MI0.getOperand(i); 1820 const MachineOperand &MO1 = MI1.getOperand(i); 1821 if (!MO0.isIdenticalTo(MO1)) 1822 return false; 1823 } 1824 return true; 1825 } 1826 1827 return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs); 1828 } 1829 1830 /// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to 1831 /// determine if two loads are loading from the same base address. It should 1832 /// only return true if the base pointers are the same and the only difference 1833 /// between the two addresses is the offset. It also returns the offsets by 1834 /// reference. 1835 /// 1836 /// FIXME: remove this in favor of the MachineInstr interface once pre-RA-sched 1837 /// is permanently disabled. 1838 bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, 1839 int64_t &Offset1, 1840 int64_t &Offset2) const { 1841 // Don't worry about Thumb: just ARM and Thumb2.
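// Illustrative pattern (ARM syntax, registers arbitrary) that this accepts:
//   ldr r0, [r2, #4]
//   ldr r1, [r2, #8]
// Same base register and chain; Offset1/Offset2 come back as 4 and 8.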
1842 if (Subtarget.isThumb1Only()) return false; 1843 1844 if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode()) 1845 return false; 1846 1847 switch (Load1->getMachineOpcode()) { 1848 default: 1849 return false; 1850 case ARM::LDRi12: 1851 case ARM::LDRBi12: 1852 case ARM::LDRD: 1853 case ARM::LDRH: 1854 case ARM::LDRSB: 1855 case ARM::LDRSH: 1856 case ARM::VLDRD: 1857 case ARM::VLDRS: 1858 case ARM::t2LDRi8: 1859 case ARM::t2LDRBi8: 1860 case ARM::t2LDRDi8: 1861 case ARM::t2LDRSHi8: 1862 case ARM::t2LDRi12: 1863 case ARM::t2LDRBi12: 1864 case ARM::t2LDRSHi12: 1865 break; 1866 } 1867 1868 switch (Load2->getMachineOpcode()) { 1869 default: 1870 return false; 1871 case ARM::LDRi12: 1872 case ARM::LDRBi12: 1873 case ARM::LDRD: 1874 case ARM::LDRH: 1875 case ARM::LDRSB: 1876 case ARM::LDRSH: 1877 case ARM::VLDRD: 1878 case ARM::VLDRS: 1879 case ARM::t2LDRi8: 1880 case ARM::t2LDRBi8: 1881 case ARM::t2LDRSHi8: 1882 case ARM::t2LDRi12: 1883 case ARM::t2LDRBi12: 1884 case ARM::t2LDRSHi12: 1885 break; 1886 } 1887 1888 // Check if base addresses and chain operands match. 1889 if (Load1->getOperand(0) != Load2->getOperand(0) || 1890 Load1->getOperand(4) != Load2->getOperand(4)) 1891 return false; 1892 1893 // Index should be Reg0. 1894 if (Load1->getOperand(3) != Load2->getOperand(3)) 1895 return false; 1896 1897 // Determine the offsets. 1898 if (isa<ConstantSDNode>(Load1->getOperand(1)) && 1899 isa<ConstantSDNode>(Load2->getOperand(1))) { 1900 Offset1 = cast<ConstantSDNode>(Load1->getOperand(1))->getSExtValue(); 1901 Offset2 = cast<ConstantSDNode>(Load2->getOperand(1))->getSExtValue(); 1902 return true; 1903 } 1904 1905 return false; 1906 } 1907 1908 /// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to 1909 /// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should 1910 /// be scheduled together. On some targets if two loads are loading from 1911 /// addresses in the same cache line, it's better if they are scheduled 1912 /// together. This function takes two integers that represent the load offsets 1913 /// from the common base address. It returns true if it decides it's desirable 1914 /// to schedule the two loads together. "NumLoads" is the number of loads that 1915 /// have already been scheduled after Load1. 1916 /// 1917 /// FIXME: remove this in favor of the MachineInstr interface once pre-RA-sched 1918 /// is permanently disabled. 1919 bool ARMBaseInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, 1920 int64_t Offset1, int64_t Offset2, 1921 unsigned NumLoads) const { 1922 // Don't worry about Thumb: just ARM and Thumb2. 1923 if (Subtarget.isThumb1Only()) return false; 1924 1925 assert(Offset2 > Offset1); 1926 1927 if ((Offset2 - Offset1) / 8 > 64) 1928 return false; 1929 1930 // Check if the machine opcodes are different. If they are different 1931 // then we consider them to not be of the same base address, 1932 // EXCEPT in the case of Thumb2 byte loads where one is t2LDRBi8 and the other t2LDRBi12. 1933 // In this case, they are considered to be the same because they are different 1934 // encoding forms of the same basic instruction. 1935 if ((Load1->getMachineOpcode() != Load2->getMachineOpcode()) && 1936 !((Load1->getMachineOpcode() == ARM::t2LDRBi8 && 1937 Load2->getMachineOpcode() == ARM::t2LDRBi12) || 1938 (Load1->getMachineOpcode() == ARM::t2LDRBi12 && 1939 Load2->getMachineOpcode() == ARM::t2LDRBi8))) 1940 return false; // FIXME: overly conservative? 1941 1942 // Four loads in a row should be sufficient.
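// Note: NumLoads counts loads already scheduled after Load1, so allowing
// NumLoads up to 2 clusters at most four loads: Load1, the two already
// scheduled after it, and Load2.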
1943 if (NumLoads >= 3) 1944 return false; 1945 1946 return true; 1947 } 1948 1949 bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr &MI, 1950 const MachineBasicBlock *MBB, 1951 const MachineFunction &MF) const { 1952 // Debug info is never a scheduling boundary. It's necessary to be explicit 1953 // due to the special treatment of IT instructions below, otherwise a 1954 // dbg_value followed by an IT will result in the IT instruction being 1955 // considered a scheduling hazard, which is wrong. It should be the actual 1956 // instruction preceding the dbg_value instruction(s), just like it is 1957 // when debug info is not present. 1958 if (MI.isDebugInstr()) 1959 return false; 1960 1961 // Terminators and labels can't be scheduled around. 1962 if (MI.isTerminator() || MI.isPosition()) 1963 return true; 1964 1965 // Treat the start of the IT block as a scheduling boundary, but schedule 1966 // t2IT along with all instructions following it. 1967 // FIXME: This is a big hammer. But the alternative is to add all potential 1968 // true and anti dependencies to IT block instructions as implicit operands 1969 // to the t2IT instruction. The added compile time and complexity does not 1970 // seem worth it. 1971 MachineBasicBlock::const_iterator I = MI; 1972 // Make sure to skip any debug instructions. 1973 while (++I != MBB->end() && I->isDebugInstr()) 1974 ; 1975 if (I != MBB->end() && I->getOpcode() == ARM::t2IT) 1976 return true; 1977 1978 // Don't attempt to schedule around any instruction that defines 1979 // a stack-oriented pointer, as it's unlikely to be profitable. This 1980 // saves compile time, because it doesn't require every single 1981 // stack slot reference to depend on the instruction that does the 1982 // modification. 1983 // Calls don't actually change the stack pointer, even if they have imp-defs. 1984 // No ARM calling conventions change the stack pointer. (X86 calling 1985 // conventions sometimes do). 1986 if (!MI.isCall() && MI.definesRegister(ARM::SP)) 1987 return true; 1988 1989 return false; 1990 } 1991 1992 bool ARMBaseInstrInfo:: 1993 isProfitableToIfCvt(MachineBasicBlock &MBB, 1994 unsigned NumCycles, unsigned ExtraPredCycles, 1995 BranchProbability Probability) const { 1996 if (!NumCycles) 1997 return false; 1998 1999 // If we are optimizing for size, see if the branch in the predecessor can be 2000 // lowered to cbn?z by the constant island lowering pass, and return false if 2001 // so. This results in a shorter instruction sequence. 2002 if (MBB.getParent()->getFunction().hasOptSize()) { 2003 MachineBasicBlock *Pred = *MBB.pred_begin(); 2004 if (!Pred->empty()) { 2005 MachineInstr *LastMI = &*Pred->rbegin(); 2006 if (LastMI->getOpcode() == ARM::t2Bcc) { 2007 const TargetRegisterInfo *TRI = &getRegisterInfo(); 2008 MachineInstr *CmpMI = findCMPToFoldIntoCBZ(LastMI, TRI); 2009 if (CmpMI) 2010 return false; 2011 } 2012 } 2013 } 2014 return isProfitableToIfCvt(MBB, NumCycles, ExtraPredCycles, 2015 MBB, 0, 0, Probability); 2016 } 2017 2018 bool ARMBaseInstrInfo:: 2019 isProfitableToIfCvt(MachineBasicBlock &TBB, 2020 unsigned TCycles, unsigned TExtra, 2021 MachineBasicBlock &FBB, 2022 unsigned FCycles, unsigned FExtra, 2023 BranchProbability Probability) const { 2024 if (!TCycles) 2025 return false; 2026 2027 // In Thumb code we often end up trading one branch for an IT block, and 2028 // if we end up cloning instructions this can increase code size. Prevent 2029 // blocks with multiple predecessors from being ifcvted to prevent this 2030 // cloning.
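// Schematic example of the trade-off (Thumb2, registers and labels arbitrary):
//   cmp r0, #0 ; bne .Lelse ; <TBB> ; .Lelse: <FBB>
// becomes a branchless IT block after if-conversion:
//   cmp r0, #0 ; ite eq ; <TBB, predicated eq> ; <FBB, predicated ne>
// If TBB/FBB had several predecessors, their bodies would be duplicated.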
2031 if (Subtarget.isThumb2() && TBB.getParent()->getFunction().hasMinSize()) { 2032 if (TBB.pred_size() != 1 || FBB.pred_size() != 1) 2033 return false; 2034 } 2035 2036 // Attempt to estimate the relative costs of predication versus branching. 2037 // Here we scale up each component of UnpredCost to avoid precision issues when 2038 // scaling TCycles/FCycles by Probability. 2039 const unsigned ScalingUpFactor = 1024; 2040 2041 unsigned PredCost = (TCycles + FCycles + TExtra + FExtra) * ScalingUpFactor; 2042 unsigned UnpredCost; 2043 if (!Subtarget.hasBranchPredictor()) { 2044 // When we don't have a branch predictor it's always cheaper to not take a 2045 // branch than take it, so we have to take that into account. 2046 unsigned NotTakenBranchCost = 1; 2047 unsigned TakenBranchCost = Subtarget.getMispredictionPenalty(); 2048 unsigned TUnpredCycles, FUnpredCycles; 2049 if (!FCycles) { 2050 // Triangle: TBB is the fallthrough 2051 TUnpredCycles = TCycles + NotTakenBranchCost; 2052 FUnpredCycles = TakenBranchCost; 2053 } else { 2054 // Diamond: TBB is the block that is branched to, FBB is the fallthrough 2055 TUnpredCycles = TCycles + TakenBranchCost; 2056 FUnpredCycles = FCycles + NotTakenBranchCost; 2057 // The branch at the end of FBB will disappear when it's predicated, so 2058 // discount it from PredCost. 2059 PredCost -= 1 * ScalingUpFactor; 2060 } 2061 // The total cost is the cost of each path scaled by their probabilities. 2062 unsigned TUnpredCost = Probability.scale(TUnpredCycles * ScalingUpFactor); 2063 unsigned FUnpredCost = Probability.getCompl().scale(FUnpredCycles * ScalingUpFactor); 2064 UnpredCost = TUnpredCost + FUnpredCost; 2065 // When predicating, assume that the first IT can be folded away but later 2066 // ones cost one cycle each. 2067 if (Subtarget.isThumb2() && TCycles + FCycles > 4) { 2068 PredCost += ((TCycles + FCycles - 4) / 4) * ScalingUpFactor; 2069 } 2070 } else { 2071 unsigned TUnpredCost = Probability.scale(TCycles * ScalingUpFactor); 2072 unsigned FUnpredCost = 2073 Probability.getCompl().scale(FCycles * ScalingUpFactor); 2074 UnpredCost = TUnpredCost + FUnpredCost; 2075 UnpredCost += 1 * ScalingUpFactor; // The branch itself 2076 UnpredCost += Subtarget.getMispredictionPenalty() * ScalingUpFactor / 10; 2077 } 2078 2079 return PredCost <= UnpredCost; 2080 } 2081 2082 bool 2083 ARMBaseInstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB, 2084 MachineBasicBlock &FMBB) const { 2085 // Reduce false anti-dependencies to let the target's out-of-order execution 2086 // engine do its thing. 2087 return Subtarget.isProfitableToUnpredicate(); 2088 } 2089 2090 /// getInstrPredicate - If the instruction is predicated, returns its predicate 2091 /// condition, otherwise returns AL. It also returns the condition code 2092 /// register by reference.
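/// For example, a predicated "MOVr %0, %1, 11, $cpsr" (11 being ARMCC::LT)
/// yields ARMCC::LT with PredReg = $cpsr, while an unpredicated instruction
/// yields ARMCC::AL with PredReg = 0. (Operand layout is illustrative.)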
2093 ARMCC::CondCodes llvm::getInstrPredicate(const MachineInstr &MI, 2094 unsigned &PredReg) { 2095 int PIdx = MI.findFirstPredOperandIdx(); 2096 if (PIdx == -1) { 2097 PredReg = 0; 2098 return ARMCC::AL; 2099 } 2100 2101 PredReg = MI.getOperand(PIdx+1).getReg(); 2102 return (ARMCC::CondCodes)MI.getOperand(PIdx).getImm(); 2103 } 2104 2105 unsigned llvm::getMatchingCondBranchOpcode(unsigned Opc) { 2106 if (Opc == ARM::B) 2107 return ARM::Bcc; 2108 if (Opc == ARM::tB) 2109 return ARM::tBcc; 2110 if (Opc == ARM::t2B) 2111 return ARM::t2Bcc; 2112 2113 llvm_unreachable("Unknown unconditional branch opcode!"); 2114 } 2115 2116 MachineInstr *ARMBaseInstrInfo::commuteInstructionImpl(MachineInstr &MI, 2117 bool NewMI, 2118 unsigned OpIdx1, 2119 unsigned OpIdx2) const { 2120 switch (MI.getOpcode()) { 2121 case ARM::MOVCCr: 2122 case ARM::t2MOVCCr: { 2123 // MOVCC can be commuted by inverting the condition. 2124 unsigned PredReg = 0; 2125 ARMCC::CondCodes CC = getInstrPredicate(MI, PredReg); 2126 // MOVCC AL can't be inverted. Shouldn't happen. 2127 if (CC == ARMCC::AL || PredReg != ARM::CPSR) 2128 return nullptr; 2129 MachineInstr *CommutedMI = 2130 TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2); 2131 if (!CommutedMI) 2132 return nullptr; 2133 // After swapping the MOVCC operands, also invert the condition. 2134 CommutedMI->getOperand(CommutedMI->findFirstPredOperandIdx()) 2135 .setImm(ARMCC::getOppositeCondition(CC)); 2136 return CommutedMI; 2137 } 2138 } 2139 return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2); 2140 } 2141 2142 /// Identify instructions that can be folded into a MOVCC instruction, and 2143 /// return the defining instruction. 2144 MachineInstr * 2145 ARMBaseInstrInfo::canFoldIntoMOVCC(unsigned Reg, const MachineRegisterInfo &MRI, 2146 const TargetInstrInfo *TII) const { 2147 if (!Register::isVirtualRegister(Reg)) 2148 return nullptr; 2149 if (!MRI.hasOneNonDBGUse(Reg)) 2150 return nullptr; 2151 MachineInstr *MI = MRI.getVRegDef(Reg); 2152 if (!MI) 2153 return nullptr; 2154 // Check if MI can be predicated and folded into the MOVCC. 2155 if (!isPredicable(*MI)) 2156 return nullptr; 2157 // Check if MI has any non-dead defs or physreg uses. This also detects 2158 // predicated instructions which will be reading CPSR. 2159 for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) { 2160 const MachineOperand &MO = MI->getOperand(i); 2161 // Reject frame index operands, PEI can't handle the predicated pseudos. 2162 if (MO.isFI() || MO.isCPI() || MO.isJTI()) 2163 return nullptr; 2164 if (!MO.isReg()) 2165 continue; 2166 // MI can't have any tied operands, that would conflict with predication. 2167 if (MO.isTied()) 2168 return nullptr; 2169 if (Register::isPhysicalRegister(MO.getReg())) 2170 return nullptr; 2171 if (MO.isDef() && !MO.isDead()) 2172 return nullptr; 2173 } 2174 bool DontMoveAcrossStores = true; 2175 if (!MI->isSafeToMove(/* AliasAnalysis = */ nullptr, DontMoveAcrossStores)) 2176 return nullptr; 2177 return MI; 2178 } 2179 2180 bool ARMBaseInstrInfo::analyzeSelect(const MachineInstr &MI, 2181 SmallVectorImpl<MachineOperand> &Cond, 2182 unsigned &TrueOp, unsigned &FalseOp, 2183 bool &Optimizable) const { 2184 assert((MI.getOpcode() == ARM::MOVCCr || MI.getOpcode() == ARM::t2MOVCCr) && 2185 "Unknown select instruction"); 2186 // MOVCC operands: 2187 // 0: Def. 2188 // 1: True use. 2189 // 2: False use. 2190 // 3: Condition code. 2191 // 4: CPSR use. 
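// e.g. for "%d = MOVCCr %t, %f, 0, $cpsr" (0 being ARMCC::EQ), TrueOp/FalseOp
// pick out %t and %f, and Cond receives the condition-code and CPSR operands.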
2192 TrueOp = 1; 2193 FalseOp = 2; 2194 Cond.push_back(MI.getOperand(3)); 2195 Cond.push_back(MI.getOperand(4)); 2196 // We can always fold a def. 2197 Optimizable = true; 2198 return false; 2199 } 2200 2201 MachineInstr * 2202 ARMBaseInstrInfo::optimizeSelect(MachineInstr &MI, 2203 SmallPtrSetImpl<MachineInstr *> &SeenMIs, 2204 bool PreferFalse) const { 2205 assert((MI.getOpcode() == ARM::MOVCCr || MI.getOpcode() == ARM::t2MOVCCr) && 2206 "Unknown select instruction"); 2207 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 2208 MachineInstr *DefMI = canFoldIntoMOVCC(MI.getOperand(2).getReg(), MRI, this); 2209 bool Invert = !DefMI; 2210 if (!DefMI) 2211 DefMI = canFoldIntoMOVCC(MI.getOperand(1).getReg(), MRI, this); 2212 if (!DefMI) 2213 return nullptr; 2214 2215 // Find new register class to use. 2216 MachineOperand FalseReg = MI.getOperand(Invert ? 2 : 1); 2217 Register DestReg = MI.getOperand(0).getReg(); 2218 const TargetRegisterClass *PreviousClass = MRI.getRegClass(FalseReg.getReg()); 2219 if (!MRI.constrainRegClass(DestReg, PreviousClass)) 2220 return nullptr; 2221 2222 // Create a new predicated version of DefMI. 2223 // Rfalse is the first use. 2224 MachineInstrBuilder NewMI = 2225 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), DefMI->getDesc(), DestReg); 2226 2227 // Copy all the DefMI operands, excluding its (null) predicate. 2228 const MCInstrDesc &DefDesc = DefMI->getDesc(); 2229 for (unsigned i = 1, e = DefDesc.getNumOperands(); 2230 i != e && !DefDesc.OpInfo[i].isPredicate(); ++i) 2231 NewMI.add(DefMI->getOperand(i)); 2232 2233 unsigned CondCode = MI.getOperand(3).getImm(); 2234 if (Invert) 2235 NewMI.addImm(ARMCC::getOppositeCondition(ARMCC::CondCodes(CondCode))); 2236 else 2237 NewMI.addImm(CondCode); 2238 NewMI.add(MI.getOperand(4)); 2239 2240 // DefMI is not the -S version that sets CPSR, so add an optional %noreg. 2241 if (NewMI->hasOptionalDef()) 2242 NewMI.add(condCodeOp()); 2243 2244 // The output register value when the predicate is false is an implicit 2245 // register operand tied to the first def. 2246 // The tie makes the register allocator ensure the FalseReg is allocated the 2247 // same register as operand 0. 2248 FalseReg.setImplicit(); 2249 NewMI.add(FalseReg); 2250 NewMI->tieOperands(0, NewMI->getNumOperands() - 1); 2251 2252 // Update SeenMIs set: register newly created MI and erase removed DefMI. 2253 SeenMIs.insert(NewMI); 2254 SeenMIs.erase(DefMI); 2255 2256 // If MI is inside a loop, and DefMI is outside the loop, then kill flags on 2257 // DefMI would be invalid when transferred inside the loop. Checking for a 2258 // loop is expensive, but at least remove kill flags if they are in different 2259 // BBs. 2260 if (DefMI->getParent() != MI.getParent()) 2261 NewMI->clearKillInfo(); 2262 2263 // The caller will erase MI, but not DefMI. 2264 DefMI->eraseFromParent(); 2265 return NewMI; 2266 } 2267 2268 /// Map pseudo instructions that imply an 'S' bit onto real opcodes. Whether the 2269 /// instruction is encoded with an 'S' bit is determined by the optional CPSR 2270 /// def operand. 2271 /// 2272 /// This will go away once we can teach tblgen how to set the optional CPSR def 2273 /// operand itself.
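/// For example, the table below maps the pseudo ADDSri onto ADDri; the
/// expansion then places CPSR in ADDri's optional cc_out operand to recover
/// the 'S' (flag-setting) behaviour.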
2274 struct AddSubFlagsOpcodePair { 2275 uint16_t PseudoOpc; 2276 uint16_t MachineOpc; 2277 }; 2278 2279 static const AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[] = { 2280 {ARM::ADDSri, ARM::ADDri}, 2281 {ARM::ADDSrr, ARM::ADDrr}, 2282 {ARM::ADDSrsi, ARM::ADDrsi}, 2283 {ARM::ADDSrsr, ARM::ADDrsr}, 2284 2285 {ARM::SUBSri, ARM::SUBri}, 2286 {ARM::SUBSrr, ARM::SUBrr}, 2287 {ARM::SUBSrsi, ARM::SUBrsi}, 2288 {ARM::SUBSrsr, ARM::SUBrsr}, 2289 2290 {ARM::RSBSri, ARM::RSBri}, 2291 {ARM::RSBSrsi, ARM::RSBrsi}, 2292 {ARM::RSBSrsr, ARM::RSBrsr}, 2293 2294 {ARM::tADDSi3, ARM::tADDi3}, 2295 {ARM::tADDSi8, ARM::tADDi8}, 2296 {ARM::tADDSrr, ARM::tADDrr}, 2297 {ARM::tADCS, ARM::tADC}, 2298 2299 {ARM::tSUBSi3, ARM::tSUBi3}, 2300 {ARM::tSUBSi8, ARM::tSUBi8}, 2301 {ARM::tSUBSrr, ARM::tSUBrr}, 2302 {ARM::tSBCS, ARM::tSBC}, 2303 {ARM::tRSBS, ARM::tRSB}, 2304 {ARM::tLSLSri, ARM::tLSLri}, 2305 2306 {ARM::t2ADDSri, ARM::t2ADDri}, 2307 {ARM::t2ADDSrr, ARM::t2ADDrr}, 2308 {ARM::t2ADDSrs, ARM::t2ADDrs}, 2309 2310 {ARM::t2SUBSri, ARM::t2SUBri}, 2311 {ARM::t2SUBSrr, ARM::t2SUBrr}, 2312 {ARM::t2SUBSrs, ARM::t2SUBrs}, 2313 2314 {ARM::t2RSBSri, ARM::t2RSBri}, 2315 {ARM::t2RSBSrs, ARM::t2RSBrs}, 2316 }; 2317 2318 unsigned llvm::convertAddSubFlagsOpcode(unsigned OldOpc) { 2319 for (unsigned i = 0, e = array_lengthof(AddSubFlagsOpcodeMap); i != e; ++i) 2320 if (OldOpc == AddSubFlagsOpcodeMap[i].PseudoOpc) 2321 return AddSubFlagsOpcodeMap[i].MachineOpc; 2322 return 0; 2323 } 2324 2325 void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB, 2326 MachineBasicBlock::iterator &MBBI, 2327 const DebugLoc &dl, unsigned DestReg, 2328 unsigned BaseReg, int NumBytes, 2329 ARMCC::CondCodes Pred, unsigned PredReg, 2330 const ARMBaseInstrInfo &TII, 2331 unsigned MIFlags) { 2332 if (NumBytes == 0 && DestReg != BaseReg) { 2333 BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), DestReg) 2334 .addReg(BaseReg, RegState::Kill) 2335 .add(predOps(Pred, PredReg)) 2336 .add(condCodeOp()) 2337 .setMIFlags(MIFlags); 2338 return; 2339 } 2340 2341 bool isSub = NumBytes < 0; 2342 if (isSub) NumBytes = -NumBytes; 2343 2344 while (NumBytes) { 2345 unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes); 2346 unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt); 2347 assert(ThisVal && "Didn't extract field correctly"); 2348 2349 // We will handle these bits from offset, clear them. 2350 NumBytes &= ~ThisVal; 2351 2352 assert(ARM_AM::getSOImmVal(ThisVal) != -1 && "Bit extraction didn't work?"); 2353 2354 // Build the new ADD / SUB. 2355 unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri; 2356 BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg) 2357 .addReg(BaseReg, RegState::Kill) 2358 .addImm(ThisVal) 2359 .add(predOps(Pred, PredReg)) 2360 .add(condCodeOp()) 2361 .setMIFlags(MIFlags); 2362 BaseReg = DestReg; 2363 } 2364 } 2365 2366 bool llvm::tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget, 2367 MachineFunction &MF, MachineInstr *MI, 2368 unsigned NumBytes) { 2369 // This optimisation potentially adds lots of load and store 2370 // micro-operations, it's only really a great benefit to code-size. 2371 if (!Subtarget.hasMinSize()) 2372 return false; 2373 2374 // If only one register is pushed/popped, LLVM can use an LDR/STR 2375 // instead. We can't modify those so make sure we're dealing with an 2376 // instruction we understand. 
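// Rough sketch of the transformation under minsize (registers arbitrary):
//   push {r4, r5}   together with   sub sp, sp, #8
// becomes
//   push {r2, r3, r4, r5}   @ r2/r3 added as undef filler
// folding the 8-byte adjustment into two extra pushed registers.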
2377 bool IsPop = isPopOpcode(MI->getOpcode()); 2378 bool IsPush = isPushOpcode(MI->getOpcode()); 2379 if (!IsPush && !IsPop) 2380 return false; 2381 2382 bool IsVFPPushPop = MI->getOpcode() == ARM::VSTMDDB_UPD || 2383 MI->getOpcode() == ARM::VLDMDIA_UPD; 2384 bool IsT1PushPop = MI->getOpcode() == ARM::tPUSH || 2385 MI->getOpcode() == ARM::tPOP || 2386 MI->getOpcode() == ARM::tPOP_RET; 2387 2388 assert((IsT1PushPop || (MI->getOperand(0).getReg() == ARM::SP && 2389 MI->getOperand(1).getReg() == ARM::SP)) && 2390 "trying to fold sp update into non-sp-updating push/pop"); 2391 2392 // The VFP push & pop act on D-registers, so we can only correctly fold in 2393 // an adjustment that is a multiple of 8 bytes. Similarly, each GPR is 4 bytes. 2394 // Don't try if this is violated. 2395 if (NumBytes % (IsVFPPushPop ? 8 : 4) != 0) 2396 return false; 2397 2398 // ARM and Thumb2 push/pop insts have explicit "sp, sp" operands (+ 2399 // pred) so the list starts at 4. Thumb1 starts after the predicate. 2400 int RegListIdx = IsT1PushPop ? 2 : 4; 2401 2402 // Calculate the space we'll need in terms of registers. 2403 unsigned RegsNeeded; 2404 const TargetRegisterClass *RegClass; 2405 if (IsVFPPushPop) { 2406 RegsNeeded = NumBytes / 8; 2407 RegClass = &ARM::DPRRegClass; 2408 } else { 2409 RegsNeeded = NumBytes / 4; 2410 RegClass = &ARM::GPRRegClass; 2411 } 2412 2413 // We're going to have to strip all list operands off before 2414 // re-adding them since the order matters, so save the existing ones 2415 // for later. 2416 SmallVector<MachineOperand, 4> RegList; 2417 2418 // We're also going to need the first register transferred by this 2419 // instruction, which won't necessarily be the first register in the list. 2420 unsigned FirstRegEnc = -1; 2421 2422 const TargetRegisterInfo *TRI = MF.getRegInfo().getTargetRegisterInfo(); 2423 for (int i = MI->getNumOperands() - 1; i >= RegListIdx; --i) { 2424 MachineOperand &MO = MI->getOperand(i); 2425 RegList.push_back(MO); 2426 2427 if (MO.isReg() && !MO.isImplicit() && 2428 TRI->getEncodingValue(MO.getReg()) < FirstRegEnc) 2429 FirstRegEnc = TRI->getEncodingValue(MO.getReg()); 2430 } 2431 2432 const MCPhysReg *CSRegs = TRI->getCalleeSavedRegs(&MF); 2433 2434 // Now try to find enough space in the reglist to allocate NumBytes. 2435 for (int CurRegEnc = FirstRegEnc - 1; CurRegEnc >= 0 && RegsNeeded; 2436 --CurRegEnc) { 2437 unsigned CurReg = RegClass->getRegister(CurRegEnc); 2438 if (IsT1PushPop && CurRegEnc > TRI->getEncodingValue(ARM::R7)) 2439 continue; 2440 if (!IsPop) { 2441 // Pushing any register is completely harmless; mark the register involved 2442 // as undef since we don't care about its value and must not restore it 2443 // during stack unwinding. 2444 RegList.push_back(MachineOperand::CreateReg(CurReg, false, false, 2445 false, false, true)); 2446 --RegsNeeded; 2447 continue; 2448 } 2449 2450 // However, we can only pop an extra register if it's not live. For 2451 // registers live within the function we might clobber a return value 2452 // register; the other way a register can be live here is if it's 2453 // callee-saved. 2454 if (isCalleeSavedRegister(CurReg, CSRegs) || 2455 MI->getParent()->computeRegisterLiveness(TRI, CurReg, MI) != 2456 MachineBasicBlock::LQR_Dead) { 2457 // VFP pops don't allow holes in the register list, so any skip is fatal 2458 // for our transformation. GPR pops do, so we should just keep looking.
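// e.g. pop {r4, r6} is encodable (GPR lists may skip registers), but
// vpop {d8, d10} is not, so a live d9 defeats the whole VFP case.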
2459 if (IsVFPPushPop) 2460 return false; 2461 else 2462 continue; 2463 } 2464 2465 // Mark the unimportant registers as <def,dead> in the POP. 2466 RegList.push_back(MachineOperand::CreateReg(CurReg, true, false, false, 2467 true)); 2468 --RegsNeeded; 2469 } 2470 2471 if (RegsNeeded > 0) 2472 return false; 2473 2474 // Finally we know we can profitably perform the optimisation so go 2475 // ahead: strip all existing registers off and add them back again 2476 // in the right order. 2477 for (int i = MI->getNumOperands() - 1; i >= RegListIdx; --i) 2478 MI->RemoveOperand(i); 2479 2480 // Add the complete list back in. 2481 MachineInstrBuilder MIB(MF, &*MI); 2482 for (int i = RegList.size() - 1; i >= 0; --i) 2483 MIB.add(RegList[i]); 2484 2485 return true; 2486 } 2487 2488 bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx, 2489 unsigned FrameReg, int &Offset, 2490 const ARMBaseInstrInfo &TII) { 2491 unsigned Opcode = MI.getOpcode(); 2492 const MCInstrDesc &Desc = MI.getDesc(); 2493 unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask); 2494 bool isSub = false; 2495 2496 // Memory operands in inline assembly always use AddrMode2. 2497 if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR) 2498 AddrMode = ARMII::AddrMode2; 2499 2500 if (Opcode == ARM::ADDri) { 2501 Offset += MI.getOperand(FrameRegIdx+1).getImm(); 2502 if (Offset == 0) { 2503 // Turn it into a move. 2504 MI.setDesc(TII.get(ARM::MOVr)); 2505 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false); 2506 MI.RemoveOperand(FrameRegIdx+1); 2507 Offset = 0; 2508 return true; 2509 } else if (Offset < 0) { 2510 Offset = -Offset; 2511 isSub = true; 2512 MI.setDesc(TII.get(ARM::SUBri)); 2513 } 2514 2515 // Common case: small offset, fits into instruction. 2516 if (ARM_AM::getSOImmVal(Offset) != -1) { 2517 // Replace the FrameIndex with sp / fp 2518 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false); 2519 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset); 2520 Offset = 0; 2521 return true; 2522 } 2523 2524 // Otherwise, pull as much of the immediate into this ADDri/SUBri 2525 // as possible. 2526 unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset); 2527 unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt); 2528 2529 // We will handle these bits from offset, clear them. 2530 Offset &= ~ThisImmVal; 2531 2532 // Get the properly encoded SOImmVal field. 2533 assert(ARM_AM::getSOImmVal(ThisImmVal) != -1 && 2534 "Bit extraction didn't work?"); 2535 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal); 2536 } else { 2537 unsigned ImmIdx = 0; 2538 int InstrOffs = 0; 2539 unsigned NumBits = 0; 2540 unsigned Scale = 1; 2541 switch (AddrMode) { 2542 case ARMII::AddrMode_i12: 2543 ImmIdx = FrameRegIdx + 1; 2544 InstrOffs = MI.getOperand(ImmIdx).getImm(); 2545 NumBits = 12; 2546 break; 2547 case ARMII::AddrMode2: 2548 ImmIdx = FrameRegIdx+2; 2549 InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm()); 2550 if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub) 2551 InstrOffs *= -1; 2552 NumBits = 12; 2553 break; 2554 case ARMII::AddrMode3: 2555 ImmIdx = FrameRegIdx+2; 2556 InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm()); 2557 if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub) 2558 InstrOffs *= -1; 2559 NumBits = 8; 2560 break; 2561 case ARMII::AddrMode4: 2562 case ARMII::AddrMode6: 2563 // Can't fold any offset even if it's zero.
2564 return false; 2565 case ARMII::AddrMode5: 2566 ImmIdx = FrameRegIdx+1; 2567 InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm()); 2568 if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub) 2569 InstrOffs *= -1; 2570 NumBits = 8; 2571 Scale = 4; 2572 break; 2573 case ARMII::AddrMode5FP16: 2574 ImmIdx = FrameRegIdx+1; 2575 InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm()); 2576 if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub) 2577 InstrOffs *= -1; 2578 NumBits = 8; 2579 Scale = 2; 2580 break; 2581 case ARMII::AddrModeT2_i7: 2582 case ARMII::AddrModeT2_i7s2: 2583 case ARMII::AddrModeT2_i7s4: 2584 ImmIdx = FrameRegIdx+1; 2585 InstrOffs = MI.getOperand(ImmIdx).getImm(); 2586 NumBits = 7; 2587 Scale = (AddrMode == ARMII::AddrModeT2_i7s2 ? 2 : 2588 AddrMode == ARMII::AddrModeT2_i7s4 ? 4 : 1); 2589 break; 2590 default: 2591 llvm_unreachable("Unsupported addressing mode!"); 2592 } 2593 2594 Offset += InstrOffs * Scale; 2595 assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!"); 2596 if (Offset < 0) { 2597 Offset = -Offset; 2598 isSub = true; 2599 } 2600 2601 // Attempt to fold the address computation if the opcode has offset bits. 2602 if (NumBits > 0) { 2603 // Common case: small offset, fits into instruction. 2604 MachineOperand &ImmOp = MI.getOperand(ImmIdx); 2605 int ImmedOffset = Offset / Scale; 2606 unsigned Mask = (1 << NumBits) - 1; 2607 if ((unsigned)Offset <= Mask * Scale) { 2608 // Replace the FrameIndex with sp 2609 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false); 2610 // FIXME: When addrmode2 goes away, this will simplify (like the 2611 // T2 version), as the LDR.i12 versions don't need the encoding 2612 // tricks for the offset value. 2613 if (isSub) { 2614 if (AddrMode == ARMII::AddrMode_i12) 2615 ImmedOffset = -ImmedOffset; 2616 else 2617 ImmedOffset |= 1 << NumBits; 2618 } 2619 ImmOp.ChangeToImmediate(ImmedOffset); 2620 Offset = 0; 2621 return true; 2622 } 2623 2624 // Otherwise, it didn't fit. Pull in what we can to simplify the immediate. 2625 ImmedOffset = ImmedOffset & Mask; 2626 if (isSub) { 2627 if (AddrMode == ARMII::AddrMode_i12) 2628 ImmedOffset = -ImmedOffset; 2629 else 2630 ImmedOffset |= 1 << NumBits; 2631 } 2632 ImmOp.ChangeToImmediate(ImmedOffset); 2633 Offset &= ~(Mask*Scale); 2634 } 2635 } 2636 2637 Offset = (isSub) ? -Offset : Offset; 2638 return Offset == 0; 2639 } 2640 2641 /// analyzeCompare - For a comparison instruction, return the source registers 2642 /// in SrcReg and SrcReg2 if it has two register operands, and the value it 2643 /// compares against in CmpValue. Return true if the comparison instruction 2644 /// can be analyzed.
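/// For example (see the switch below), "tCMPi8 %r0, 42" yields SrcReg = %r0,
/// SrcReg2 = 0 and CmpValue = 42, while "CMPrr %r0, %r1" yields both source
/// registers with CmpValue = 0.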
2645 bool ARMBaseInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg, 2646 unsigned &SrcReg2, int &CmpMask, 2647 int &CmpValue) const { 2648 switch (MI.getOpcode()) { 2649 default: break; 2650 case ARM::CMPri: 2651 case ARM::t2CMPri: 2652 case ARM::tCMPi8: 2653 SrcReg = MI.getOperand(0).getReg(); 2654 SrcReg2 = 0; 2655 CmpMask = ~0; 2656 CmpValue = MI.getOperand(1).getImm(); 2657 return true; 2658 case ARM::CMPrr: 2659 case ARM::t2CMPrr: 2660 case ARM::tCMPr: 2661 SrcReg = MI.getOperand(0).getReg(); 2662 SrcReg2 = MI.getOperand(1).getReg(); 2663 CmpMask = ~0; 2664 CmpValue = 0; 2665 return true; 2666 case ARM::TSTri: 2667 case ARM::t2TSTri: 2668 SrcReg = MI.getOperand(0).getReg(); 2669 SrcReg2 = 0; 2670 CmpMask = MI.getOperand(1).getImm(); 2671 CmpValue = 0; 2672 return true; 2673 } 2674 2675 return false; 2676 } 2677 2678 /// isSuitableForMask - Identify a suitable 'and' instruction that 2679 /// operates on the given source register and applies the same mask 2680 /// as a 'tst' instruction. Provide a limited look-through for copies. 2681 /// When successful, MI will hold the found instruction. 2682 static bool isSuitableForMask(MachineInstr *&MI, unsigned SrcReg, 2683 int CmpMask, bool CommonUse) { 2684 switch (MI->getOpcode()) { 2685 case ARM::ANDri: 2686 case ARM::t2ANDri: 2687 if (CmpMask != MI->getOperand(2).getImm()) 2688 return false; 2689 if (SrcReg == MI->getOperand(CommonUse ? 1 : 0).getReg()) 2690 return true; 2691 break; 2692 } 2693 2694 return false; 2695 } 2696 2697 /// getSwappedCondition - assume the flags are set by MI(a,b), return 2698 /// the condition code if we modify the instructions such that flags are 2699 /// set by MI(b,a). 2700 inline static ARMCC::CondCodes getSwappedCondition(ARMCC::CondCodes CC) { 2701 switch (CC) { 2702 default: return ARMCC::AL; 2703 case ARMCC::EQ: return ARMCC::EQ; 2704 case ARMCC::NE: return ARMCC::NE; 2705 case ARMCC::HS: return ARMCC::LS; 2706 case ARMCC::LO: return ARMCC::HI; 2707 case ARMCC::HI: return ARMCC::LO; 2708 case ARMCC::LS: return ARMCC::HS; 2709 case ARMCC::GE: return ARMCC::LE; 2710 case ARMCC::LT: return ARMCC::GT; 2711 case ARMCC::GT: return ARMCC::LT; 2712 case ARMCC::LE: return ARMCC::GE; 2713 } 2714 } 2715 2716 /// getCmpToAddCondition - assume the flags are set by CMP(a,b), return 2717 /// the condition code if we modify the instructions such that flags are 2718 /// set by ADD(a,b,X). 2719 inline static ARMCC::CondCodes getCmpToAddCondition(ARMCC::CondCodes CC) { 2720 switch (CC) { 2721 default: return ARMCC::AL; 2722 case ARMCC::HS: return ARMCC::LO; 2723 case ARMCC::LO: return ARMCC::HS; 2724 case ARMCC::VS: return ARMCC::VS; 2725 case ARMCC::VC: return ARMCC::VC; 2726 } 2727 } 2728 2729 /// isRedundantFlagInstr - check whether the first instruction, whose only 2730 /// purpose is to update flags, can be made redundant. 2731 /// CMPrr can be made redundant by SUBrr if the operands are the same. 2732 /// CMPri can be made redundant by SUBri if the operands are the same. 2733 /// CMPrr(r0, r1) can be made redundant by ADDr[ri](r0, r1, X). 2734 /// This function can be extended later on. 
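/// For example, in the sequence
///   %2 = SUBrr %0, %1
///   CMPrr %0, %1
/// the CMP is redundant once the SUB is converted to set CPSR itself.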
2735 inline static bool isRedundantFlagInstr(const MachineInstr *CmpI, 2736 unsigned SrcReg, unsigned SrcReg2, 2737 int ImmValue, const MachineInstr *OI, 2738 bool &IsThumb1) { 2739 if ((CmpI->getOpcode() == ARM::CMPrr || CmpI->getOpcode() == ARM::t2CMPrr) && 2740 (OI->getOpcode() == ARM::SUBrr || OI->getOpcode() == ARM::t2SUBrr) && 2741 ((OI->getOperand(1).getReg() == SrcReg && 2742 OI->getOperand(2).getReg() == SrcReg2) || 2743 (OI->getOperand(1).getReg() == SrcReg2 && 2744 OI->getOperand(2).getReg() == SrcReg))) { 2745 IsThumb1 = false; 2746 return true; 2747 } 2748 2749 if (CmpI->getOpcode() == ARM::tCMPr && OI->getOpcode() == ARM::tSUBrr && 2750 ((OI->getOperand(2).getReg() == SrcReg && 2751 OI->getOperand(3).getReg() == SrcReg2) || 2752 (OI->getOperand(2).getReg() == SrcReg2 && 2753 OI->getOperand(3).getReg() == SrcReg))) { 2754 IsThumb1 = true; 2755 return true; 2756 } 2757 2758 if ((CmpI->getOpcode() == ARM::CMPri || CmpI->getOpcode() == ARM::t2CMPri) && 2759 (OI->getOpcode() == ARM::SUBri || OI->getOpcode() == ARM::t2SUBri) && 2760 OI->getOperand(1).getReg() == SrcReg && 2761 OI->getOperand(2).getImm() == ImmValue) { 2762 IsThumb1 = false; 2763 return true; 2764 } 2765 2766 if (CmpI->getOpcode() == ARM::tCMPi8 && 2767 (OI->getOpcode() == ARM::tSUBi8 || OI->getOpcode() == ARM::tSUBi3) && 2768 OI->getOperand(2).getReg() == SrcReg && 2769 OI->getOperand(3).getImm() == ImmValue) { 2770 IsThumb1 = true; 2771 return true; 2772 } 2773 2774 if ((CmpI->getOpcode() == ARM::CMPrr || CmpI->getOpcode() == ARM::t2CMPrr) && 2775 (OI->getOpcode() == ARM::ADDrr || OI->getOpcode() == ARM::t2ADDrr || 2776 OI->getOpcode() == ARM::ADDri || OI->getOpcode() == ARM::t2ADDri) && 2777 OI->getOperand(0).isReg() && OI->getOperand(1).isReg() && 2778 OI->getOperand(0).getReg() == SrcReg && 2779 OI->getOperand(1).getReg() == SrcReg2) { 2780 IsThumb1 = false; 2781 return true; 2782 } 2783 2784 if (CmpI->getOpcode() == ARM::tCMPr && 2785 (OI->getOpcode() == ARM::tADDi3 || OI->getOpcode() == ARM::tADDi8 || 2786 OI->getOpcode() == ARM::tADDrr) && 2787 OI->getOperand(0).getReg() == SrcReg && 2788 OI->getOperand(2).getReg() == SrcReg2) { 2789 IsThumb1 = true; 2790 return true; 2791 } 2792 2793 return false; 2794 } 2795 2796 static bool isOptimizeCompareCandidate(MachineInstr *MI, bool &IsThumb1) { 2797 switch (MI->getOpcode()) { 2798 default: return false; 2799 case ARM::tLSLri: 2800 case ARM::tLSRri: 2801 case ARM::tLSLrr: 2802 case ARM::tLSRrr: 2803 case ARM::tSUBrr: 2804 case ARM::tADDrr: 2805 case ARM::tADDi3: 2806 case ARM::tADDi8: 2807 case ARM::tSUBi3: 2808 case ARM::tSUBi8: 2809 case ARM::tMUL: 2810 case ARM::tADC: 2811 case ARM::tSBC: 2812 case ARM::tRSB: 2813 case ARM::tAND: 2814 case ARM::tORR: 2815 case ARM::tEOR: 2816 case ARM::tBIC: 2817 case ARM::tMVN: 2818 case ARM::tASRri: 2819 case ARM::tASRrr: 2820 case ARM::tROR: 2821 IsThumb1 = true; 2822 LLVM_FALLTHROUGH; 2823 case ARM::RSBrr: 2824 case ARM::RSBri: 2825 case ARM::RSCrr: 2826 case ARM::RSCri: 2827 case ARM::ADDrr: 2828 case ARM::ADDri: 2829 case ARM::ADCrr: 2830 case ARM::ADCri: 2831 case ARM::SUBrr: 2832 case ARM::SUBri: 2833 case ARM::SBCrr: 2834 case ARM::SBCri: 2835 case ARM::t2RSBri: 2836 case ARM::t2ADDrr: 2837 case ARM::t2ADDri: 2838 case ARM::t2ADCrr: 2839 case ARM::t2ADCri: 2840 case ARM::t2SUBrr: 2841 case ARM::t2SUBri: 2842 case ARM::t2SBCrr: 2843 case ARM::t2SBCri: 2844 case ARM::ANDrr: 2845 case ARM::ANDri: 2846 case ARM::t2ANDrr: 2847 case ARM::t2ANDri: 2848 case ARM::ORRrr: 2849 case ARM::ORRri: 2850 case ARM::t2ORRrr: 2851 case 
ARM::t2ORRri: 2852 case ARM::EORrr: 2853 case ARM::EORri: 2854 case ARM::t2EORrr: 2855 case ARM::t2EORri: 2856 case ARM::t2LSRri: 2857 case ARM::t2LSRrr: 2858 case ARM::t2LSLri: 2859 case ARM::t2LSLrr: 2860 return true; 2861 } 2862 } 2863 2864 /// optimizeCompareInstr - Convert the instruction supplying the argument to the 2865 /// comparison into one that sets the zero bit in the flags register; 2866 /// Remove a redundant Compare instruction if an earlier instruction can set the 2867 /// flags in the same way as Compare. 2868 /// E.g. SUBrr(r1,r2) and CMPrr(r1,r2). We also handle the case where two 2869 /// operands are swapped: SUBrr(r1,r2) and CMPrr(r2,r1), by updating the 2870 /// condition code of instructions which use the flags. 2871 bool ARMBaseInstrInfo::optimizeCompareInstr( 2872 MachineInstr &CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask, 2873 int CmpValue, const MachineRegisterInfo *MRI) const { 2874 // Get the unique definition of SrcReg. 2875 MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg); 2876 if (!MI) return false; 2877 2878 // Masked compares sometimes use the same register as the corresponding 'and'. 2879 if (CmpMask != ~0) { 2880 if (!isSuitableForMask(MI, SrcReg, CmpMask, false) || isPredicated(*MI)) { 2881 MI = nullptr; 2882 for (MachineRegisterInfo::use_instr_iterator 2883 UI = MRI->use_instr_begin(SrcReg), UE = MRI->use_instr_end(); 2884 UI != UE; ++UI) { 2885 if (UI->getParent() != CmpInstr.getParent()) 2886 continue; 2887 MachineInstr *PotentialAND = &*UI; 2888 if (!isSuitableForMask(PotentialAND, SrcReg, CmpMask, true) || 2889 isPredicated(*PotentialAND)) 2890 continue; 2891 MI = PotentialAND; 2892 break; 2893 } 2894 if (!MI) return false; 2895 } 2896 } 2897 2898 // Get ready to iterate backward from CmpInstr. 2899 MachineBasicBlock::iterator I = CmpInstr, E = MI, 2900 B = CmpInstr.getParent()->begin(); 2901 2902 // Early exit if CmpInstr is at the beginning of the BB. 2903 if (I == B) return false; 2904 2905 // There are two possible candidates which can be changed to set CPSR: 2906 // One is MI, the other is a SUB or ADD instruction. 2907 // For CMPrr(r1,r2), we are looking for SUB(r1,r2), SUB(r2,r1), or 2908 // ADDr[ri](r1, r2, X). 2909 // For CMPri(r1, CmpValue), we are looking for SUBri(r1, CmpValue). 2910 MachineInstr *SubAdd = nullptr; 2911 if (SrcReg2 != 0) 2912 // MI is not a candidate for CMPrr. 2913 MI = nullptr; 2914 else if (MI->getParent() != CmpInstr.getParent() || CmpValue != 0) { 2915 // Conservatively refuse to convert an instruction which isn't in the same 2916 // BB as the comparison. 2917 // For CMPri w/ CmpValue != 0, a SubAdd may still be a candidate. 2918 // Thus we cannot return here. 2919 if (CmpInstr.getOpcode() == ARM::CMPri || 2920 CmpInstr.getOpcode() == ARM::t2CMPri || 2921 CmpInstr.getOpcode() == ARM::tCMPi8) 2922 MI = nullptr; 2923 else 2924 return false; 2925 } 2926 2927 bool IsThumb1 = false; 2928 if (MI && !isOptimizeCompareCandidate(MI, IsThumb1)) 2929 return false; 2930 2931 // We also want to do this peephole for cases like this: if (a*b == 0), 2932 // and optimise away the CMP instruction from the generated code sequence: 2933 // MULS, MOVS, MOVS, CMP. Here the MOVS instructions load the boolean values 2934 // resulting from the select instruction, but these MOVS instructions for 2935 // Thumb1 (V6M) are flag setting and are thus preventing this optimisation. 
2936 // However, if we only have MOVS instructions in between the CMP and the 2937 // other instruction (the MULS in this example), then the CPSR is dead so we 2938 // can safely reorder the sequence into: MOVS, MOVS, MULS, CMP. We do this 2939 // reordering and then continue the analysis hoping we can eliminate the 2940 // CMP. This peephole works on the vregs, so is still in SSA form. As a 2941 // consequence, the movs won't redefine/kill the MUL operands which would 2942 // make this reordering illegal. 2943 const TargetRegisterInfo *TRI = &getRegisterInfo(); 2944 if (MI && IsThumb1) { 2945 --I; 2946 if (I != E && !MI->readsRegister(ARM::CPSR, TRI)) { 2947 bool CanReorder = true; 2948 for (; I != E; --I) { 2949 if (I->getOpcode() != ARM::tMOVi8) { 2950 CanReorder = false; 2951 break; 2952 } 2953 } 2954 if (CanReorder) { 2955 MI = MI->removeFromParent(); 2956 E = CmpInstr; 2957 CmpInstr.getParent()->insert(E, MI); 2958 } 2959 } 2960 I = CmpInstr; 2961 E = MI; 2962 } 2963 2964 // Check that CPSR isn't set between the comparison instruction and the one we 2965 // want to change. At the same time, search for SubAdd. 2966 bool SubAddIsThumb1 = false; 2967 do { 2968 const MachineInstr &Instr = *--I; 2969 2970 // Check whether CmpInstr can be made redundant by the current instruction. 2971 if (isRedundantFlagInstr(&CmpInstr, SrcReg, SrcReg2, CmpValue, &Instr, 2972 SubAddIsThumb1)) { 2973 SubAdd = &*I; 2974 break; 2975 } 2976 2977 // Allow E (which was initially MI) to be SubAdd but do not search before E. 2978 if (I == E) 2979 break; 2980 2981 if (Instr.modifiesRegister(ARM::CPSR, TRI) || 2982 Instr.readsRegister(ARM::CPSR, TRI)) 2983 // This instruction modifies or uses CPSR after the one we want to 2984 // change. We can't do this transformation. 2985 return false; 2986 2987 if (I == B) { 2988 // In some cases, we scan the use-list of an instruction for an AND; 2989 // that AND is in the same BB, but may not be scheduled before the 2990 // corresponding TST. In that case, bail out. 2991 // 2992 // FIXME: We could try to reschedule the AND. 2993 return false; 2994 } 2995 } while (true); 2996 2997 // Return false if no candidates exist. 2998 if (!MI && !SubAdd) 2999 return false; 3000 3001 // If we found a SubAdd, use it as it will be closer to the CMP 3002 if (SubAdd) { 3003 MI = SubAdd; 3004 IsThumb1 = SubAddIsThumb1; 3005 } 3006 3007 // We can't use a predicated instruction - it doesn't always write the flags. 3008 if (isPredicated(*MI)) 3009 return false; 3010 3011 // Scan forward for the use of CPSR 3012 // When checking against MI: if it's a conditional code that requires 3013 // checking of the V bit or C bit, then this is not safe to do. 3014 // It is safe to remove CmpInstr if CPSR is redefined or killed. 3015 // If we are done with the basic block, we need to check whether CPSR is 3016 // live-out. 3017 SmallVector<std::pair<MachineOperand*, ARMCC::CondCodes>, 4> 3018 OperandsToUpdate; 3019 bool isSafe = false; 3020 I = CmpInstr; 3021 E = CmpInstr.getParent()->end(); 3022 while (!isSafe && ++I != E) { 3023 const MachineInstr &Instr = *I; 3024 for (unsigned IO = 0, EO = Instr.getNumOperands(); 3025 !isSafe && IO != EO; ++IO) { 3026 const MachineOperand &MO = Instr.getOperand(IO); 3027 if (MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) { 3028 isSafe = true; 3029 break; 3030 } 3031 if (!MO.isReg() || MO.getReg() != ARM::CPSR) 3032 continue; 3033 if (MO.isDef()) { 3034 isSafe = true; 3035 break; 3036 } 3037 // Condition code is after the operand before CPSR except for VSELs. 
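// e.g. in "%d = MOVCCr %t, %f, %cc, $cpsr" the condition-code immediate %cc
// sits at index IO - 1, immediately before the CPSR use found at index IO;
// VSEL variants hard-code the condition in the opcode instead, handled below.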
3038 ARMCC::CondCodes CC; 3039 bool IsInstrVSel = true; 3040 switch (Instr.getOpcode()) { 3041 default: 3042 IsInstrVSel = false; 3043 CC = (ARMCC::CondCodes)Instr.getOperand(IO - 1).getImm(); 3044 break; 3045 case ARM::VSELEQD: 3046 case ARM::VSELEQS: 3047 case ARM::VSELEQH: 3048 CC = ARMCC::EQ; 3049 break; 3050 case ARM::VSELGTD: 3051 case ARM::VSELGTS: 3052 case ARM::VSELGTH: 3053 CC = ARMCC::GT; 3054 break; 3055 case ARM::VSELGED: 3056 case ARM::VSELGES: 3057 case ARM::VSELGEH: 3058 CC = ARMCC::GE; 3059 break; 3060 case ARM::VSELVSD: 3061 case ARM::VSELVSS: 3062 case ARM::VSELVSH: 3063 CC = ARMCC::VS; 3064 break; 3065 } 3066 3067 if (SubAdd) { 3068 // If we have SUB(r1, r2) and CMP(r2, r1), the condition code based 3069 // on CMP needs to be updated to be based on SUB. 3070 // If we have ADD(r1, r2, X) and CMP(r1, r2), the condition code also 3071 // needs to be modified. 3072 // Push the condition code operands to OperandsToUpdate. 3073 // If it is safe to remove CmpInstr, the condition code of these 3074 // operands will be modified. 3075 unsigned Opc = SubAdd->getOpcode(); 3076 bool IsSub = Opc == ARM::SUBrr || Opc == ARM::t2SUBrr || 3077 Opc == ARM::SUBri || Opc == ARM::t2SUBri || 3078 Opc == ARM::tSUBrr || Opc == ARM::tSUBi3 || 3079 Opc == ARM::tSUBi8; 3080 unsigned OpI = Opc != ARM::tSUBrr ? 1 : 2; 3081 if (!IsSub || 3082 (SrcReg2 != 0 && SubAdd->getOperand(OpI).getReg() == SrcReg2 && 3083 SubAdd->getOperand(OpI + 1).getReg() == SrcReg)) { 3084 // VSel doesn't support condition code update. 3085 if (IsInstrVSel) 3086 return false; 3087 // Ensure we can swap the condition. 3088 ARMCC::CondCodes NewCC = (IsSub ? getSwappedCondition(CC) : getCmpToAddCondition(CC)); 3089 if (NewCC == ARMCC::AL) 3090 return false; 3091 OperandsToUpdate.push_back( 3092 std::make_pair(&((*I).getOperand(IO - 1)), NewCC)); 3093 } 3094 } else { 3095 // No SubAdd, so this is x = <op> y, z; cmp x, 0. 3096 switch (CC) { 3097 case ARMCC::EQ: // Z 3098 case ARMCC::NE: // Z 3099 case ARMCC::MI: // N 3100 case ARMCC::PL: // N 3101 case ARMCC::AL: // none 3102 // CPSR can be used multiple times, we should continue. 3103 break; 3104 case ARMCC::HS: // C 3105 case ARMCC::LO: // C 3106 case ARMCC::VS: // V 3107 case ARMCC::VC: // V 3108 case ARMCC::HI: // C Z 3109 case ARMCC::LS: // C Z 3110 case ARMCC::GE: // N V 3111 case ARMCC::LT: // N V 3112 case ARMCC::GT: // Z N V 3113 case ARMCC::LE: // Z N V 3114 // The instruction uses the V bit or C bit which is not safe. 3115 return false; 3116 } 3117 } 3118 } 3119 } 3120 3121 // If CPSR is not killed nor re-defined, we should check whether it is 3122 // live-out. If it is live-out, do not optimize. 3123 if (!isSafe) { 3124 MachineBasicBlock *MBB = CmpInstr.getParent(); 3125 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(), 3126 SE = MBB->succ_end(); SI != SE; ++SI) 3127 if ((*SI)->isLiveIn(ARM::CPSR)) 3128 return false; 3129 } 3130 3131 // Toggle the optional operand to CPSR (if it exists - in Thumb1 we always 3132 // set CPSR so this is represented as an explicit output) 3133 if (!IsThumb1) { 3134 MI->getOperand(5).setReg(ARM::CPSR); 3135 MI->getOperand(5).setIsDef(true); 3136 } 3137 assert(!isPredicated(*MI) && "Can't use flags from predicated instruction"); 3138 CmpInstr.eraseFromParent(); 3139 3140 // Modify the condition code of operands in OperandsToUpdate. 3141 // Since we have SUB(r1, r2) and CMP(r2, r1), the condition code needs to 3142 // be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc. 
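// e.g. a user of the flags under GT for "CMP r2, r1" becomes a user under LT
// once the flags come from "SUBS r1, r2" (see getSwappedCondition above).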
3143 for (unsigned i = 0, e = OperandsToUpdate.size(); i < e; i++) 3144 OperandsToUpdate[i].first->setImm(OperandsToUpdate[i].second); 3145 3146 MI->clearRegisterDeads(ARM::CPSR); 3147 3148 return true; 3149 } 3150 3151 bool ARMBaseInstrInfo::shouldSink(const MachineInstr &MI) const { 3152 // Do not sink MI if it might be used to optimize a redundant compare. 3153 // We heuristically only look at the instruction immediately following MI to 3154 // avoid potentially searching the entire basic block. 3155 if (isPredicated(MI)) 3156 return true; 3157 MachineBasicBlock::const_iterator Next = &MI; 3158 ++Next; 3159 unsigned SrcReg, SrcReg2; 3160 int CmpMask, CmpValue; 3161 bool IsThumb1; 3162 if (Next != MI.getParent()->end() && 3163 analyzeCompare(*Next, SrcReg, SrcReg2, CmpMask, CmpValue) && 3164 isRedundantFlagInstr(&*Next, SrcReg, SrcReg2, CmpValue, &MI, IsThumb1)) 3165 return false; 3166 return true; 3167 } 3168 3169 bool ARMBaseInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, 3170 unsigned Reg, 3171 MachineRegisterInfo *MRI) const { 3172 // Fold large immediates into add, sub, or, xor. 3173 unsigned DefOpc = DefMI.getOpcode(); 3174 if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm) 3175 return false; 3176 if (!DefMI.getOperand(1).isImm()) 3177 // Could be t2MOVi32imm @xx 3178 return false; 3179 3180 if (!MRI->hasOneNonDBGUse(Reg)) 3181 return false; 3182 3183 const MCInstrDesc &DefMCID = DefMI.getDesc(); 3184 if (DefMCID.hasOptionalDef()) { 3185 unsigned NumOps = DefMCID.getNumOperands(); 3186 const MachineOperand &MO = DefMI.getOperand(NumOps - 1); 3187 if (MO.getReg() == ARM::CPSR && !MO.isDead()) 3188 // If DefMI defines CPSR and it is not dead, it's obviously not safe 3189 // to delete DefMI. 3190 return false; 3191 } 3192 3193 const MCInstrDesc &UseMCID = UseMI.getDesc(); 3194 if (UseMCID.hasOptionalDef()) { 3195 unsigned NumOps = UseMCID.getNumOperands(); 3196 if (UseMI.getOperand(NumOps - 1).getReg() == ARM::CPSR) 3197 // If the instruction sets the flag, do not attempt this optimization 3198 // since it may change the semantics of the code. 3199 return false; 3200 } 3201 3202 unsigned UseOpc = UseMI.getOpcode(); 3203 unsigned NewUseOpc = 0; 3204 uint32_t ImmVal = (uint32_t)DefMI.getOperand(1).getImm(); 3205 uint32_t SOImmValV1 = 0, SOImmValV2 = 0; 3206 bool Commute = false; 3207 switch (UseOpc) { 3208 default: return false; 3209 case ARM::SUBrr: 3210 case ARM::ADDrr: 3211 case ARM::ORRrr: 3212 case ARM::EORrr: 3213 case ARM::t2SUBrr: 3214 case ARM::t2ADDrr: 3215 case ARM::t2ORRrr: 3216 case ARM::t2EORrr: { 3217 Commute = UseMI.getOperand(2).getReg() != Reg; 3218 switch (UseOpc) { 3219 default: break; 3220 case ARM::ADDrr: 3221 case ARM::SUBrr: 3222 if (UseOpc == ARM::SUBrr && Commute) 3223 return false; 3224 3225 // ADD/SUB are special because they're essentially the same operation, so 3226 // we can handle a larger range of immediates. 3227 if (ARM_AM::isSOImmTwoPartVal(ImmVal)) 3228 NewUseOpc = UseOpc == ARM::ADDrr ? ARM::ADDri : ARM::SUBri; 3229 else if (ARM_AM::isSOImmTwoPartVal(-ImmVal)) { 3230 ImmVal = -ImmVal; 3231 NewUseOpc = UseOpc == ARM::ADDrr ? 
ARM::SUBri : ARM::ADDri; 3232 } else 3233 return false; 3234 SOImmValV1 = (uint32_t)ARM_AM::getSOImmTwoPartFirst(ImmVal); 3235 SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal); 3236 break; 3237 case ARM::ORRrr: 3238 case ARM::EORrr: 3239 if (!ARM_AM::isSOImmTwoPartVal(ImmVal)) 3240 return false; 3241 SOImmValV1 = (uint32_t)ARM_AM::getSOImmTwoPartFirst(ImmVal); 3242 SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal); 3243 switch (UseOpc) { 3244 default: break; 3245 case ARM::ORRrr: NewUseOpc = ARM::ORRri; break; 3246 case ARM::EORrr: NewUseOpc = ARM::EORri; break; 3247 } 3248 break; 3249 case ARM::t2ADDrr: 3250 case ARM::t2SUBrr: 3251 if (UseOpc == ARM::t2SUBrr && Commute) 3252 return false; 3253 3254 // ADD/SUB are special because they're essentially the same operation, so 3255 // we can handle a larger range of immediates. 3256 if (ARM_AM::isT2SOImmTwoPartVal(ImmVal)) 3257 NewUseOpc = UseOpc == ARM::t2ADDrr ? ARM::t2ADDri : ARM::t2SUBri; 3258 else if (ARM_AM::isT2SOImmTwoPartVal(-ImmVal)) { 3259 ImmVal = -ImmVal; 3260 NewUseOpc = UseOpc == ARM::t2ADDrr ? ARM::t2SUBri : ARM::t2ADDri; 3261 } else 3262 return false; 3263 SOImmValV1 = (uint32_t)ARM_AM::getT2SOImmTwoPartFirst(ImmVal); 3264 SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal); 3265 break; 3266 case ARM::t2ORRrr: 3267 case ARM::t2EORrr: 3268 if (!ARM_AM::isT2SOImmTwoPartVal(ImmVal)) 3269 return false; 3270 SOImmValV1 = (uint32_t)ARM_AM::getT2SOImmTwoPartFirst(ImmVal); 3271 SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal); 3272 switch (UseOpc) { 3273 default: break; 3274 case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri; break; 3275 case ARM::t2EORrr: NewUseOpc = ARM::t2EORri; break; 3276 } 3277 break; 3278 } 3279 } 3280 } 3281 3282 unsigned OpIdx = Commute ? 
2 : 1; 3283 Register Reg1 = UseMI.getOperand(OpIdx).getReg(); 3284 bool isKill = UseMI.getOperand(OpIdx).isKill(); 3285 Register NewReg = MRI->createVirtualRegister(MRI->getRegClass(Reg)); 3286 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), get(NewUseOpc), 3287 NewReg) 3288 .addReg(Reg1, getKillRegState(isKill)) 3289 .addImm(SOImmValV1) 3290 .add(predOps(ARMCC::AL)) 3291 .add(condCodeOp()); 3292 UseMI.setDesc(get(NewUseOpc)); 3293 UseMI.getOperand(1).setReg(NewReg); 3294 UseMI.getOperand(1).setIsKill(); 3295 UseMI.getOperand(2).ChangeToImmediate(SOImmValV2); 3296 DefMI.eraseFromParent(); 3297 return true; 3298 } 3299 3300 static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData, 3301 const MachineInstr &MI) { 3302 switch (MI.getOpcode()) { 3303 default: { 3304 const MCInstrDesc &Desc = MI.getDesc(); 3305 int UOps = ItinData->getNumMicroOps(Desc.getSchedClass()); 3306 assert(UOps >= 0 && "bad # UOps"); 3307 return UOps; 3308 } 3309 3310 case ARM::LDRrs: 3311 case ARM::LDRBrs: 3312 case ARM::STRrs: 3313 case ARM::STRBrs: { 3314 unsigned ShOpVal = MI.getOperand(3).getImm(); 3315 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 3316 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 3317 if (!isSub && 3318 (ShImm == 0 || 3319 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 3320 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 3321 return 1; 3322 return 2; 3323 } 3324 3325 case ARM::LDRH: 3326 case ARM::STRH: { 3327 if (!MI.getOperand(2).getReg()) 3328 return 1; 3329 3330 unsigned ShOpVal = MI.getOperand(3).getImm(); 3331 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 3332 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 3333 if (!isSub && 3334 (ShImm == 0 || 3335 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 3336 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 3337 return 1; 3338 return 2; 3339 } 3340 3341 case ARM::LDRSB: 3342 case ARM::LDRSH: 3343 return (ARM_AM::getAM3Op(MI.getOperand(3).getImm()) == ARM_AM::sub) ? 3 : 2; 3344 3345 case ARM::LDRSB_POST: 3346 case ARM::LDRSH_POST: { 3347 Register Rt = MI.getOperand(0).getReg(); 3348 Register Rm = MI.getOperand(3).getReg(); 3349 return (Rt == Rm) ? 4 : 3; 3350 } 3351 3352 case ARM::LDR_PRE_REG: 3353 case ARM::LDRB_PRE_REG: { 3354 Register Rt = MI.getOperand(0).getReg(); 3355 Register Rm = MI.getOperand(3).getReg(); 3356 if (Rt == Rm) 3357 return 3; 3358 unsigned ShOpVal = MI.getOperand(4).getImm(); 3359 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 3360 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 3361 if (!isSub && 3362 (ShImm == 0 || 3363 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 3364 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 3365 return 2; 3366 return 3; 3367 } 3368 3369 case ARM::STR_PRE_REG: 3370 case ARM::STRB_PRE_REG: { 3371 unsigned ShOpVal = MI.getOperand(4).getImm(); 3372 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 3373 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 3374 if (!isSub && 3375 (ShImm == 0 || 3376 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 3377 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 3378 return 2; 3379 return 3; 3380 } 3381 3382 case ARM::LDRH_PRE: 3383 case ARM::STRH_PRE: { 3384 Register Rt = MI.getOperand(0).getReg(); 3385 Register Rm = MI.getOperand(3).getReg(); 3386 if (!Rm) 3387 return 2; 3388 if (Rt == Rm) 3389 return 3; 3390 return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 
3 : 2; 3391 } 3392 3393 case ARM::LDR_POST_REG: 3394 case ARM::LDRB_POST_REG: 3395 case ARM::LDRH_POST: { 3396 Register Rt = MI.getOperand(0).getReg(); 3397 Register Rm = MI.getOperand(3).getReg(); 3398 return (Rt == Rm) ? 3 : 2; 3399 } 3400 3401 case ARM::LDR_PRE_IMM: 3402 case ARM::LDRB_PRE_IMM: 3403 case ARM::LDR_POST_IMM: 3404 case ARM::LDRB_POST_IMM: 3405 case ARM::STRB_POST_IMM: 3406 case ARM::STRB_POST_REG: 3407 case ARM::STRB_PRE_IMM: 3408 case ARM::STRH_POST: 3409 case ARM::STR_POST_IMM: 3410 case ARM::STR_POST_REG: 3411 case ARM::STR_PRE_IMM: 3412 return 2; 3413 3414 case ARM::LDRSB_PRE: 3415 case ARM::LDRSH_PRE: { 3416 Register Rm = MI.getOperand(3).getReg(); 3417 if (Rm == 0) 3418 return 3; 3419 Register Rt = MI.getOperand(0).getReg(); 3420 if (Rt == Rm) 3421 return 4; 3422 unsigned ShOpVal = MI.getOperand(4).getImm(); 3423 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 3424 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 3425 if (!isSub && 3426 (ShImm == 0 || 3427 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 3428 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 3429 return 3; 3430 return 4; 3431 } 3432 3433 case ARM::LDRD: { 3434 Register Rt = MI.getOperand(0).getReg(); 3435 Register Rn = MI.getOperand(2).getReg(); 3436 Register Rm = MI.getOperand(3).getReg(); 3437 if (Rm) 3438 return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 4 3439 : 3; 3440 return (Rt == Rn) ? 3 : 2; 3441 } 3442 3443 case ARM::STRD: { 3444 Register Rm = MI.getOperand(3).getReg(); 3445 if (Rm) 3446 return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 4 3447 : 3; 3448 return 2; 3449 } 3450 3451 case ARM::LDRD_POST: 3452 case ARM::t2LDRD_POST: 3453 return 3; 3454 3455 case ARM::STRD_POST: 3456 case ARM::t2STRD_POST: 3457 return 4; 3458 3459 case ARM::LDRD_PRE: { 3460 Register Rt = MI.getOperand(0).getReg(); 3461 Register Rn = MI.getOperand(3).getReg(); 3462 Register Rm = MI.getOperand(4).getReg(); 3463 if (Rm) 3464 return (ARM_AM::getAM3Op(MI.getOperand(5).getImm()) == ARM_AM::sub) ? 5 3465 : 4; 3466 return (Rt == Rn) ? 4 : 3; 3467 } 3468 3469 case ARM::t2LDRD_PRE: { 3470 Register Rt = MI.getOperand(0).getReg(); 3471 Register Rn = MI.getOperand(3).getReg(); 3472 return (Rt == Rn) ? 4 : 3; 3473 } 3474 3475 case ARM::STRD_PRE: { 3476 Register Rm = MI.getOperand(4).getReg(); 3477 if (Rm) 3478 return (ARM_AM::getAM3Op(MI.getOperand(5).getImm()) == ARM_AM::sub) ? 5 3479 : 4; 3480 return 3; 3481 } 3482 3483 case ARM::t2STRD_PRE: 3484 return 3; 3485 3486 case ARM::t2LDR_POST: 3487 case ARM::t2LDRB_POST: 3488 case ARM::t2LDRB_PRE: 3489 case ARM::t2LDRSBi12: 3490 case ARM::t2LDRSBi8: 3491 case ARM::t2LDRSBpci: 3492 case ARM::t2LDRSBs: 3493 case ARM::t2LDRH_POST: 3494 case ARM::t2LDRH_PRE: 3495 case ARM::t2LDRSBT: 3496 case ARM::t2LDRSB_POST: 3497 case ARM::t2LDRSB_PRE: 3498 case ARM::t2LDRSH_POST: 3499 case ARM::t2LDRSH_PRE: 3500 case ARM::t2LDRSHi12: 3501 case ARM::t2LDRSHi8: 3502 case ARM::t2LDRSHpci: 3503 case ARM::t2LDRSHs: 3504 return 2; 3505 3506 case ARM::t2LDRDi8: { 3507 Register Rt = MI.getOperand(0).getReg(); 3508 Register Rn = MI.getOperand(2).getReg(); 3509 return (Rt == Rn) ? 3 : 2; 3510 } 3511 3512 case ARM::t2STRB_POST: 3513 case ARM::t2STRB_PRE: 3514 case ARM::t2STRBs: 3515 case ARM::t2STRDi8: 3516 case ARM::t2STRH_POST: 3517 case ARM::t2STRH_PRE: 3518 case ARM::t2STRHs: 3519 case ARM::t2STR_POST: 3520 case ARM::t2STR_PRE: 3521 case ARM::t2STRs: 3522 return 2; 3523 } 3524 } 3525 3526 // Return the number of 32-bit words loaded by LDM or stored by STM. 
If this 3527 // can't be easily determined return 0 (missing MachineMemOperand). 3528 // 3529 // FIXME: The current MachineInstr design does not support relying on machine 3530 // mem operands to determine the width of a memory access. Instead, we expect 3531 // the target to provide this information based on the instruction opcode and 3532 // operands. However, using MachineMemOperand is the best solution now for 3533 // two reasons: 3534 // 3535 // 1) getNumMicroOps tries to infer LDM memory width from the total number of MI 3536 // operands. This is much more dangerous than using the MachineMemOperand 3537 // sizes because CodeGen passes can insert/remove optional machine operands. In 3538 // fact, it's totally incorrect for preRA passes and appears to be wrong for 3539 // postRA passes as well. 3540 // 3541 // 2) getNumLDMAddresses is only used by the scheduling machine model and any 3542 // machine model that calls this should handle the unknown (zero size) case. 3543 // 3544 // Long term, we should require a target hook that verifies MachineMemOperand 3545 // sizes during MC lowering. That target hook should be local to MC lowering 3546 // because we can't ensure that it is aware of other MI forms. Doing this will 3547 // ensure that MachineMemOperands are correctly propagated through all passes. 3548 unsigned ARMBaseInstrInfo::getNumLDMAddresses(const MachineInstr &MI) const { 3549 unsigned Size = 0; 3550 for (MachineInstr::mmo_iterator I = MI.memoperands_begin(), 3551 E = MI.memoperands_end(); 3552 I != E; ++I) { 3553 Size += (*I)->getSize(); 3554 } 3555 // FIXME: The scheduler currently can't handle values larger than 16. But 3556 // the values can actually go up to 32 for floating-point load/store 3557 // multiple (VLDMIA etc.). Also, the way this code is reasoning about memory 3558 // operations isn't right; we could end up with "extra" memory operands for 3559 // various reasons, like tail merge merging two memory operations. 3560 return std::min(Size / 4, 16U); 3561 } 3562 3563 static unsigned getNumMicroOpsSingleIssuePlusExtras(unsigned Opc, 3564 unsigned NumRegs) { 3565 unsigned UOps = 1 + NumRegs; // 1 for address computation. 3566 switch (Opc) { 3567 default: 3568 break; 3569 case ARM::VLDMDIA_UPD: 3570 case ARM::VLDMDDB_UPD: 3571 case ARM::VLDMSIA_UPD: 3572 case ARM::VLDMSDB_UPD: 3573 case ARM::VSTMDIA_UPD: 3574 case ARM::VSTMDDB_UPD: 3575 case ARM::VSTMSIA_UPD: 3576 case ARM::VSTMSDB_UPD: 3577 case ARM::LDMIA_UPD: 3578 case ARM::LDMDA_UPD: 3579 case ARM::LDMDB_UPD: 3580 case ARM::LDMIB_UPD: 3581 case ARM::STMIA_UPD: 3582 case ARM::STMDA_UPD: 3583 case ARM::STMDB_UPD: 3584 case ARM::STMIB_UPD: 3585 case ARM::tLDMIA_UPD: 3586 case ARM::tSTMIA_UPD: 3587 case ARM::t2LDMIA_UPD: 3588 case ARM::t2LDMDB_UPD: 3589 case ARM::t2STMIA_UPD: 3590 case ARM::t2STMDB_UPD: 3591 ++UOps; // One for base register writeback. 3592 break; 3593 case ARM::LDMIA_RET: 3594 case ARM::tPOP_RET: 3595 case ARM::t2LDMIA_RET: 3596 UOps += 2; // One for base reg wb, one for write to pc. 
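    // For example (illustrative): with three registers in the list, tPOP_RET
    // costs 1 (address) + 3 (register writes) + 2 (base writeback and the
    // write to pc) = 6 uops under this model.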
      break;
    }
    return UOps;
}

unsigned ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                          const MachineInstr &MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  const MCInstrDesc &Desc = MI.getDesc();
  unsigned Class = Desc.getSchedClass();
  int ItinUOps = ItinData->getNumMicroOps(Class);
  if (ItinUOps >= 0) {
    if (Subtarget.isSwift() && (Desc.mayLoad() || Desc.mayStore()))
      return getNumMicroOpsSwiftLdSt(ItinData, MI);

    return ItinUOps;
  }

  unsigned Opc = MI.getOpcode();
  switch (Opc) {
  default:
    llvm_unreachable("Unexpected multi-uops instruction!");
  case ARM::VLDMQIA:
  case ARM::VSTMQIA:
    return 2;

  // The number of uOps for load / store multiple is determined by the number
  // of registers.
  //
  // On Cortex-A8, each pair of register loads / stores can be scheduled on the
  // same cycle. The scheduling for the first load / store must be done
  // separately by assuming the address is not 64-bit aligned.
  //
  // On Cortex-A9, the formula is simply (#reg / 2) + (#reg % 2). If the
  // address is not 64-bit aligned, then the AGU takes an extra cycle. For
  // VFP / NEON load / store multiple, the formula is (#reg / 2) + (#reg % 2) + 1.
  case ARM::VLDMDIA:
  case ARM::VLDMDIA_UPD:
  case ARM::VLDMDDB_UPD:
  case ARM::VLDMSIA:
  case ARM::VLDMSIA_UPD:
  case ARM::VLDMSDB_UPD:
  case ARM::VSTMDIA:
  case ARM::VSTMDIA_UPD:
  case ARM::VSTMDDB_UPD:
  case ARM::VSTMSIA:
  case ARM::VSTMSIA_UPD:
  case ARM::VSTMSDB_UPD: {
    unsigned NumRegs = MI.getNumOperands() - Desc.getNumOperands();
    return (NumRegs / 2) + (NumRegs % 2) + 1;
  }

  case ARM::LDMIA_RET:
  case ARM::LDMIA:
  case ARM::LDMDA:
  case ARM::LDMDB:
  case ARM::LDMIB:
  case ARM::LDMIA_UPD:
  case ARM::LDMDA_UPD:
  case ARM::LDMDB_UPD:
  case ARM::LDMIB_UPD:
  case ARM::STMIA:
  case ARM::STMDA:
  case ARM::STMDB:
  case ARM::STMIB:
  case ARM::STMIA_UPD:
  case ARM::STMDA_UPD:
  case ARM::STMDB_UPD:
  case ARM::STMIB_UPD:
  case ARM::tLDMIA:
  case ARM::tLDMIA_UPD:
  case ARM::tSTMIA_UPD:
  case ARM::tPOP_RET:
  case ARM::tPOP:
  case ARM::tPUSH:
  case ARM::t2LDMIA_RET:
  case ARM::t2LDMIA:
  case ARM::t2LDMDB:
  case ARM::t2LDMIA_UPD:
  case ARM::t2LDMDB_UPD:
  case ARM::t2STMIA:
  case ARM::t2STMDB:
  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD: {
    unsigned NumRegs = MI.getNumOperands() - Desc.getNumOperands() + 1;
    switch (Subtarget.getLdStMultipleTiming()) {
    case ARMSubtarget::SingleIssuePlusExtras:
      return getNumMicroOpsSingleIssuePlusExtras(Opc, NumRegs);
    case ARMSubtarget::SingleIssue:
      // Assume the worst.
      return NumRegs;
    case ARMSubtarget::DoubleIssue: {
      if (NumRegs < 4)
        return 2;
      // 4 registers would be issued: 2, 2.
      // 5 registers would be issued: 2, 2, 1.
      unsigned UOps = (NumRegs / 2);
      if (NumRegs % 2)
        ++UOps;
      return UOps;
    }
    case ARMSubtarget::DoubleIssueCheckUnalignedAccess: {
      unsigned UOps = (NumRegs / 2);
      // If there is an odd number of registers, or the address is not 64-bit
      // aligned, it takes an extra AGU (Address Generation Unit) cycle.
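      // For example (illustrative): an LDMIA of four registers from a 64-bit
      // aligned address issues as 2 uops, while five registers, or four
      // registers at a merely 32-bit aligned address, take 3.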
      if ((NumRegs % 2) || !MI.hasOneMemOperand() ||
          (*MI.memoperands_begin())->getAlignment() < 8)
        ++UOps;
      return UOps;
    }
    }
  }
  }
  llvm_unreachable("Didn't find the number of microops");
}

int
ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData,
                                  const MCInstrDesc &DefMCID,
                                  unsigned DefClass,
                                  unsigned DefIdx, unsigned DefAlign) const {
  int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
  if (RegNo <= 0)
    // Def is the address writeback.
    return ItinData->getOperandCycle(DefClass, DefIdx);

  int DefCycle;
  if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
    // (regno / 2) + (regno % 2) + 1
    DefCycle = RegNo / 2 + 1;
    if (RegNo % 2)
      ++DefCycle;
  } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
    DefCycle = RegNo;
    bool isSLoad = false;

    switch (DefMCID.getOpcode()) {
    default: break;
    case ARM::VLDMSIA:
    case ARM::VLDMSIA_UPD:
    case ARM::VLDMSDB_UPD:
      isSLoad = true;
      break;
    }

    // If there is an odd number of 'S' registers, or the address is not
    // 64-bit aligned, it takes an extra cycle.
    if ((isSLoad && (RegNo % 2)) || DefAlign < 8)
      ++DefCycle;
  } else {
    // Assume the worst.
    DefCycle = RegNo + 2;
  }

  return DefCycle;
}

bool ARMBaseInstrInfo::isLDMBaseRegInList(const MachineInstr &MI) const {
  Register BaseReg = MI.getOperand(0).getReg();
  for (unsigned i = 1, sz = MI.getNumOperands(); i < sz; ++i) {
    const auto &Op = MI.getOperand(i);
    if (Op.isReg() && Op.getReg() == BaseReg)
      return true;
  }
  return false;
}
unsigned
ARMBaseInstrInfo::getLDMVariableDefsSize(const MachineInstr &MI) const {
  // ins GPR:$Rn, $p (2xOp), reglist:$regs, variable_ops
  // (outs GPR:$wb), (ins GPR:$Rn, $p (2xOp), reglist:$regs, variable_ops)
  return MI.getNumOperands() + 1 - MI.getDesc().getNumOperands();
}

int
ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData,
                                 const MCInstrDesc &DefMCID,
                                 unsigned DefClass,
                                 unsigned DefIdx, unsigned DefAlign) const {
  int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
  if (RegNo <= 0)
    // Def is the address writeback.
    return ItinData->getOperandCycle(DefClass, DefIdx);

  int DefCycle;
  if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
    // 4 registers would be issued: 1, 2, 1.
    // 5 registers would be issued: 1, 2, 2.
    DefCycle = RegNo / 2;
    if (DefCycle < 1)
      DefCycle = 1;
    // Result latency is issue cycle + 2: E2.
    DefCycle += 2;
  } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
    DefCycle = (RegNo / 2);
    // If there is an odd number of registers, or the address is not 64-bit
    // aligned, it takes an extra AGU (Address Generation Unit) cycle.
    if ((RegNo % 2) || DefAlign < 8)
      ++DefCycle;
    // Result latency is AGU cycles + 2.
    DefCycle += 2;
  } else {
    // Assume the worst.
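    // (Illustrative: with RegNo == 3 the result is not assumed available
    // until cycle 3 + 2 = 5.)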
    DefCycle = RegNo + 2;
  }

  return DefCycle;
}

int
ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData,
                                  const MCInstrDesc &UseMCID,
                                  unsigned UseClass,
                                  unsigned UseIdx, unsigned UseAlign) const {
  int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
  if (RegNo <= 0)
    return ItinData->getOperandCycle(UseClass, UseIdx);

  int UseCycle;
  if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
    // (regno / 2) + (regno % 2) + 1
    UseCycle = RegNo / 2 + 1;
    if (RegNo % 2)
      ++UseCycle;
  } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
    UseCycle = RegNo;
    bool isSStore = false;

    switch (UseMCID.getOpcode()) {
    default: break;
    case ARM::VSTMSIA:
    case ARM::VSTMSIA_UPD:
    case ARM::VSTMSDB_UPD:
      isSStore = true;
      break;
    }

    // If there is an odd number of 'S' registers, or the address is not
    // 64-bit aligned, it takes an extra cycle.
    if ((isSStore && (RegNo % 2)) || UseAlign < 8)
      ++UseCycle;
  } else {
    // Assume the worst.
    UseCycle = RegNo + 2;
  }

  return UseCycle;
}

int
ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData,
                                 const MCInstrDesc &UseMCID,
                                 unsigned UseClass,
                                 unsigned UseIdx, unsigned UseAlign) const {
  int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
  if (RegNo <= 0)
    return ItinData->getOperandCycle(UseClass, UseIdx);

  int UseCycle;
  if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
    UseCycle = RegNo / 2;
    if (UseCycle < 2)
      UseCycle = 2;
    // Read in E3.
    UseCycle += 2;
  } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
    UseCycle = (RegNo / 2);
    // If there is an odd number of registers, or the address is not 64-bit
    // aligned, it takes an extra AGU (Address Generation Unit) cycle.
    if ((RegNo % 2) || UseAlign < 8)
      ++UseCycle;
  } else {
    // Assume the worst.
    UseCycle = 1;
  }
  return UseCycle;
}

int
ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                    const MCInstrDesc &DefMCID,
                                    unsigned DefIdx, unsigned DefAlign,
                                    const MCInstrDesc &UseMCID,
                                    unsigned UseIdx, unsigned UseAlign) const {
  unsigned DefClass = DefMCID.getSchedClass();
  unsigned UseClass = UseMCID.getSchedClass();

  if (DefIdx < DefMCID.getNumDefs() && UseIdx < UseMCID.getNumOperands())
    return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);

  // This may be a def / use of a variable_ops instruction; the operand
  // latency might be determinable dynamically. Let the target try to
  // figure it out.
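  // For example (illustrative): on Cortex-A8 the fifth register loaded by an
  // LDMIA (RegNo == 5) gets DefCycle = 5 / 2 + 2 = 4 via getLDMDefCycle, and
  // that def cycle is then combined with the matching use cycle to produce
  // the operand latency.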
3891 int DefCycle = -1; 3892 bool LdmBypass = false; 3893 switch (DefMCID.getOpcode()) { 3894 default: 3895 DefCycle = ItinData->getOperandCycle(DefClass, DefIdx); 3896 break; 3897 3898 case ARM::VLDMDIA: 3899 case ARM::VLDMDIA_UPD: 3900 case ARM::VLDMDDB_UPD: 3901 case ARM::VLDMSIA: 3902 case ARM::VLDMSIA_UPD: 3903 case ARM::VLDMSDB_UPD: 3904 DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign); 3905 break; 3906 3907 case ARM::LDMIA_RET: 3908 case ARM::LDMIA: 3909 case ARM::LDMDA: 3910 case ARM::LDMDB: 3911 case ARM::LDMIB: 3912 case ARM::LDMIA_UPD: 3913 case ARM::LDMDA_UPD: 3914 case ARM::LDMDB_UPD: 3915 case ARM::LDMIB_UPD: 3916 case ARM::tLDMIA: 3917 case ARM::tLDMIA_UPD: 3918 case ARM::tPUSH: 3919 case ARM::t2LDMIA_RET: 3920 case ARM::t2LDMIA: 3921 case ARM::t2LDMDB: 3922 case ARM::t2LDMIA_UPD: 3923 case ARM::t2LDMDB_UPD: 3924 LdmBypass = true; 3925 DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign); 3926 break; 3927 } 3928 3929 if (DefCycle == -1) 3930 // We can't seem to determine the result latency of the def, assume it's 2. 3931 DefCycle = 2; 3932 3933 int UseCycle = -1; 3934 switch (UseMCID.getOpcode()) { 3935 default: 3936 UseCycle = ItinData->getOperandCycle(UseClass, UseIdx); 3937 break; 3938 3939 case ARM::VSTMDIA: 3940 case ARM::VSTMDIA_UPD: 3941 case ARM::VSTMDDB_UPD: 3942 case ARM::VSTMSIA: 3943 case ARM::VSTMSIA_UPD: 3944 case ARM::VSTMSDB_UPD: 3945 UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign); 3946 break; 3947 3948 case ARM::STMIA: 3949 case ARM::STMDA: 3950 case ARM::STMDB: 3951 case ARM::STMIB: 3952 case ARM::STMIA_UPD: 3953 case ARM::STMDA_UPD: 3954 case ARM::STMDB_UPD: 3955 case ARM::STMIB_UPD: 3956 case ARM::tSTMIA_UPD: 3957 case ARM::tPOP_RET: 3958 case ARM::tPOP: 3959 case ARM::t2STMIA: 3960 case ARM::t2STMDB: 3961 case ARM::t2STMIA_UPD: 3962 case ARM::t2STMDB_UPD: 3963 UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign); 3964 break; 3965 } 3966 3967 if (UseCycle == -1) 3968 // Assume it's read in the first stage. 3969 UseCycle = 1; 3970 3971 UseCycle = DefCycle - UseCycle + 1; 3972 if (UseCycle > 0) { 3973 if (LdmBypass) { 3974 // It's a variable_ops instruction so we can't use DefIdx here. Just use 3975 // first def operand. 
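      // (Illustrative: an LDM def with DefCycle == 4 and a use read in cycle 1
      // yields UseCycle = 4 - 1 + 1 = 4 above; when the itinerary models an
      // LDM bypass into this use, one cycle is given back here.)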
3976 if (ItinData->hasPipelineForwarding(DefClass, DefMCID.getNumOperands()-1, 3977 UseClass, UseIdx)) 3978 --UseCycle; 3979 } else if (ItinData->hasPipelineForwarding(DefClass, DefIdx, 3980 UseClass, UseIdx)) { 3981 --UseCycle; 3982 } 3983 } 3984 3985 return UseCycle; 3986 } 3987 3988 static const MachineInstr *getBundledDefMI(const TargetRegisterInfo *TRI, 3989 const MachineInstr *MI, unsigned Reg, 3990 unsigned &DefIdx, unsigned &Dist) { 3991 Dist = 0; 3992 3993 MachineBasicBlock::const_iterator I = MI; ++I; 3994 MachineBasicBlock::const_instr_iterator II = std::prev(I.getInstrIterator()); 3995 assert(II->isInsideBundle() && "Empty bundle?"); 3996 3997 int Idx = -1; 3998 while (II->isInsideBundle()) { 3999 Idx = II->findRegisterDefOperandIdx(Reg, false, true, TRI); 4000 if (Idx != -1) 4001 break; 4002 --II; 4003 ++Dist; 4004 } 4005 4006 assert(Idx != -1 && "Cannot find bundled definition!"); 4007 DefIdx = Idx; 4008 return &*II; 4009 } 4010 4011 static const MachineInstr *getBundledUseMI(const TargetRegisterInfo *TRI, 4012 const MachineInstr &MI, unsigned Reg, 4013 unsigned &UseIdx, unsigned &Dist) { 4014 Dist = 0; 4015 4016 MachineBasicBlock::const_instr_iterator II = ++MI.getIterator(); 4017 assert(II->isInsideBundle() && "Empty bundle?"); 4018 MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end(); 4019 4020 // FIXME: This doesn't properly handle multiple uses. 4021 int Idx = -1; 4022 while (II != E && II->isInsideBundle()) { 4023 Idx = II->findRegisterUseOperandIdx(Reg, false, TRI); 4024 if (Idx != -1) 4025 break; 4026 if (II->getOpcode() != ARM::t2IT) 4027 ++Dist; 4028 ++II; 4029 } 4030 4031 if (Idx == -1) { 4032 Dist = 0; 4033 return nullptr; 4034 } 4035 4036 UseIdx = Idx; 4037 return &*II; 4038 } 4039 4040 /// Return the number of cycles to add to (or subtract from) the static 4041 /// itinerary based on the def opcode and alignment. The caller will ensure that 4042 /// adjusted latency is at least one cycle. 4043 static int adjustDefLatency(const ARMSubtarget &Subtarget, 4044 const MachineInstr &DefMI, 4045 const MCInstrDesc &DefMCID, unsigned DefAlign) { 4046 int Adjust = 0; 4047 if (Subtarget.isCortexA8() || Subtarget.isLikeA9() || Subtarget.isCortexA7()) { 4048 // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2] 4049 // variants are one cycle cheaper. 4050 switch (DefMCID.getOpcode()) { 4051 default: break; 4052 case ARM::LDRrs: 4053 case ARM::LDRBrs: { 4054 unsigned ShOpVal = DefMI.getOperand(3).getImm(); 4055 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 4056 if (ShImm == 0 || 4057 (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)) 4058 --Adjust; 4059 break; 4060 } 4061 case ARM::t2LDRs: 4062 case ARM::t2LDRBs: 4063 case ARM::t2LDRHs: 4064 case ARM::t2LDRSHs: { 4065 // Thumb2 mode: lsl only. 4066 unsigned ShAmt = DefMI.getOperand(3).getImm(); 4067 if (ShAmt == 0 || ShAmt == 2) 4068 --Adjust; 4069 break; 4070 } 4071 } 4072 } else if (Subtarget.isSwift()) { 4073 // FIXME: Properly handle all of the latency adjustments for address 4074 // writeback. 
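    // For example (illustrative): an LDRrs whose offset is a plain register,
    // or a register shifted left by 1, 2 or 3, is treated as two cycles
    // cheaper below; a register shifted right by 1 is one cycle cheaper.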
4075 switch (DefMCID.getOpcode()) { 4076 default: break; 4077 case ARM::LDRrs: 4078 case ARM::LDRBrs: { 4079 unsigned ShOpVal = DefMI.getOperand(3).getImm(); 4080 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 4081 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 4082 if (!isSub && 4083 (ShImm == 0 || 4084 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 4085 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 4086 Adjust -= 2; 4087 else if (!isSub && 4088 ShImm == 1 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsr) 4089 --Adjust; 4090 break; 4091 } 4092 case ARM::t2LDRs: 4093 case ARM::t2LDRBs: 4094 case ARM::t2LDRHs: 4095 case ARM::t2LDRSHs: { 4096 // Thumb2 mode: lsl only. 4097 unsigned ShAmt = DefMI.getOperand(3).getImm(); 4098 if (ShAmt == 0 || ShAmt == 1 || ShAmt == 2 || ShAmt == 3) 4099 Adjust -= 2; 4100 break; 4101 } 4102 } 4103 } 4104 4105 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment()) { 4106 switch (DefMCID.getOpcode()) { 4107 default: break; 4108 case ARM::VLD1q8: 4109 case ARM::VLD1q16: 4110 case ARM::VLD1q32: 4111 case ARM::VLD1q64: 4112 case ARM::VLD1q8wb_fixed: 4113 case ARM::VLD1q16wb_fixed: 4114 case ARM::VLD1q32wb_fixed: 4115 case ARM::VLD1q64wb_fixed: 4116 case ARM::VLD1q8wb_register: 4117 case ARM::VLD1q16wb_register: 4118 case ARM::VLD1q32wb_register: 4119 case ARM::VLD1q64wb_register: 4120 case ARM::VLD2d8: 4121 case ARM::VLD2d16: 4122 case ARM::VLD2d32: 4123 case ARM::VLD2q8: 4124 case ARM::VLD2q16: 4125 case ARM::VLD2q32: 4126 case ARM::VLD2d8wb_fixed: 4127 case ARM::VLD2d16wb_fixed: 4128 case ARM::VLD2d32wb_fixed: 4129 case ARM::VLD2q8wb_fixed: 4130 case ARM::VLD2q16wb_fixed: 4131 case ARM::VLD2q32wb_fixed: 4132 case ARM::VLD2d8wb_register: 4133 case ARM::VLD2d16wb_register: 4134 case ARM::VLD2d32wb_register: 4135 case ARM::VLD2q8wb_register: 4136 case ARM::VLD2q16wb_register: 4137 case ARM::VLD2q32wb_register: 4138 case ARM::VLD3d8: 4139 case ARM::VLD3d16: 4140 case ARM::VLD3d32: 4141 case ARM::VLD1d64T: 4142 case ARM::VLD3d8_UPD: 4143 case ARM::VLD3d16_UPD: 4144 case ARM::VLD3d32_UPD: 4145 case ARM::VLD1d64Twb_fixed: 4146 case ARM::VLD1d64Twb_register: 4147 case ARM::VLD3q8_UPD: 4148 case ARM::VLD3q16_UPD: 4149 case ARM::VLD3q32_UPD: 4150 case ARM::VLD4d8: 4151 case ARM::VLD4d16: 4152 case ARM::VLD4d32: 4153 case ARM::VLD1d64Q: 4154 case ARM::VLD4d8_UPD: 4155 case ARM::VLD4d16_UPD: 4156 case ARM::VLD4d32_UPD: 4157 case ARM::VLD1d64Qwb_fixed: 4158 case ARM::VLD1d64Qwb_register: 4159 case ARM::VLD4q8_UPD: 4160 case ARM::VLD4q16_UPD: 4161 case ARM::VLD4q32_UPD: 4162 case ARM::VLD1DUPq8: 4163 case ARM::VLD1DUPq16: 4164 case ARM::VLD1DUPq32: 4165 case ARM::VLD1DUPq8wb_fixed: 4166 case ARM::VLD1DUPq16wb_fixed: 4167 case ARM::VLD1DUPq32wb_fixed: 4168 case ARM::VLD1DUPq8wb_register: 4169 case ARM::VLD1DUPq16wb_register: 4170 case ARM::VLD1DUPq32wb_register: 4171 case ARM::VLD2DUPd8: 4172 case ARM::VLD2DUPd16: 4173 case ARM::VLD2DUPd32: 4174 case ARM::VLD2DUPd8wb_fixed: 4175 case ARM::VLD2DUPd16wb_fixed: 4176 case ARM::VLD2DUPd32wb_fixed: 4177 case ARM::VLD2DUPd8wb_register: 4178 case ARM::VLD2DUPd16wb_register: 4179 case ARM::VLD2DUPd32wb_register: 4180 case ARM::VLD4DUPd8: 4181 case ARM::VLD4DUPd16: 4182 case ARM::VLD4DUPd32: 4183 case ARM::VLD4DUPd8_UPD: 4184 case ARM::VLD4DUPd16_UPD: 4185 case ARM::VLD4DUPd32_UPD: 4186 case ARM::VLD1LNd8: 4187 case ARM::VLD1LNd16: 4188 case ARM::VLD1LNd32: 4189 case ARM::VLD1LNd8_UPD: 4190 case ARM::VLD1LNd16_UPD: 4191 case ARM::VLD1LNd32_UPD: 4192 case ARM::VLD2LNd8: 4193 case ARM::VLD2LNd16: 4194 case ARM::VLD2LNd32: 
    case ARM::VLD2LNq16:
    case ARM::VLD2LNq32:
    case ARM::VLD2LNd8_UPD:
    case ARM::VLD2LNd16_UPD:
    case ARM::VLD2LNd32_UPD:
    case ARM::VLD2LNq16_UPD:
    case ARM::VLD2LNq32_UPD:
    case ARM::VLD4LNd8:
    case ARM::VLD4LNd16:
    case ARM::VLD4LNd32:
    case ARM::VLD4LNq16:
    case ARM::VLD4LNq32:
    case ARM::VLD4LNd8_UPD:
    case ARM::VLD4LNd16_UPD:
    case ARM::VLD4LNd32_UPD:
    case ARM::VLD4LNq16_UPD:
    case ARM::VLD4LNq32_UPD:
      // If the address is not 64-bit aligned, the latencies of these
      // instructions increase by one.
      ++Adjust;
      break;
    }
  }
  return Adjust;
}

int ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                        const MachineInstr &DefMI,
                                        unsigned DefIdx,
                                        const MachineInstr &UseMI,
                                        unsigned UseIdx) const {
  // No operand latency. The caller may fall back to getInstrLatency.
  if (!ItinData || ItinData->isEmpty())
    return -1;

  const MachineOperand &DefMO = DefMI.getOperand(DefIdx);
  Register Reg = DefMO.getReg();

  const MachineInstr *ResolvedDefMI = &DefMI;
  unsigned DefAdj = 0;
  if (DefMI.isBundle())
    ResolvedDefMI =
        getBundledDefMI(&getRegisterInfo(), &DefMI, Reg, DefIdx, DefAdj);
  if (ResolvedDefMI->isCopyLike() || ResolvedDefMI->isInsertSubreg() ||
      ResolvedDefMI->isRegSequence() || ResolvedDefMI->isImplicitDef()) {
    return 1;
  }

  const MachineInstr *ResolvedUseMI = &UseMI;
  unsigned UseAdj = 0;
  if (UseMI.isBundle()) {
    ResolvedUseMI =
        getBundledUseMI(&getRegisterInfo(), UseMI, Reg, UseIdx, UseAdj);
    if (!ResolvedUseMI)
      return -1;
  }

  return getOperandLatencyImpl(
      ItinData, *ResolvedDefMI, DefIdx, ResolvedDefMI->getDesc(), DefAdj, DefMO,
      Reg, *ResolvedUseMI, UseIdx, ResolvedUseMI->getDesc(), UseAdj);
}

int ARMBaseInstrInfo::getOperandLatencyImpl(
    const InstrItineraryData *ItinData, const MachineInstr &DefMI,
    unsigned DefIdx, const MCInstrDesc &DefMCID, unsigned DefAdj,
    const MachineOperand &DefMO, unsigned Reg, const MachineInstr &UseMI,
    unsigned UseIdx, const MCInstrDesc &UseMCID, unsigned UseAdj) const {
  if (Reg == ARM::CPSR) {
    if (DefMI.getOpcode() == ARM::FMSTAT) {
      // fpscr -> cpsr stalls over 20 cycles on A8 (and earlier?)
      return Subtarget.isLikeA9() ? 1 : 20;
    }

    // CPSR set and branch can be paired in the same cycle.
    if (UseMI.isBranch())
      return 0;

    // Otherwise it takes the instruction latency (generally one).
    unsigned Latency = getInstrLatency(ItinData, DefMI);

    // For Thumb2 and -Os, prefer scheduling a CPSR-setting instruction close
    // to its uses. Instructions which are otherwise scheduled between them may
    // incur a code size penalty (not able to use the CPSR-setting 16-bit
    // instructions).
    if (Latency > 0 && Subtarget.isThumb2()) {
      const MachineFunction *MF = DefMI.getParent()->getParent();
      // FIXME: Use Function::hasOptSize().
      if (MF->getFunction().hasFnAttribute(Attribute::OptimizeForSize))
        --Latency;
    }
    return Latency;
  }

  if (DefMO.isImplicit() || UseMI.getOperand(UseIdx).isImplicit())
    return -1;

  unsigned DefAlign = DefMI.hasOneMemOperand()
                          ? (*DefMI.memoperands_begin())->getAlignment()
                          : 0;
  unsigned UseAlign = UseMI.hasOneMemOperand()
                          ?
(*UseMI.memoperands_begin())->getAlignment() 4296 : 0; 4297 4298 // Get the itinerary's latency if possible, and handle variable_ops. 4299 int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign, UseMCID, 4300 UseIdx, UseAlign); 4301 // Unable to find operand latency. The caller may resort to getInstrLatency. 4302 if (Latency < 0) 4303 return Latency; 4304 4305 // Adjust for IT block position. 4306 int Adj = DefAdj + UseAdj; 4307 4308 // Adjust for dynamic def-side opcode variants not captured by the itinerary. 4309 Adj += adjustDefLatency(Subtarget, DefMI, DefMCID, DefAlign); 4310 if (Adj >= 0 || (int)Latency > -Adj) { 4311 return Latency + Adj; 4312 } 4313 // Return the itinerary latency, which may be zero but not less than zero. 4314 return Latency; 4315 } 4316 4317 int 4318 ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, 4319 SDNode *DefNode, unsigned DefIdx, 4320 SDNode *UseNode, unsigned UseIdx) const { 4321 if (!DefNode->isMachineOpcode()) 4322 return 1; 4323 4324 const MCInstrDesc &DefMCID = get(DefNode->getMachineOpcode()); 4325 4326 if (isZeroCost(DefMCID.Opcode)) 4327 return 0; 4328 4329 if (!ItinData || ItinData->isEmpty()) 4330 return DefMCID.mayLoad() ? 3 : 1; 4331 4332 if (!UseNode->isMachineOpcode()) { 4333 int Latency = ItinData->getOperandCycle(DefMCID.getSchedClass(), DefIdx); 4334 int Adj = Subtarget.getPreISelOperandLatencyAdjustment(); 4335 int Threshold = 1 + Adj; 4336 return Latency <= Threshold ? 1 : Latency - Adj; 4337 } 4338 4339 const MCInstrDesc &UseMCID = get(UseNode->getMachineOpcode()); 4340 const MachineSDNode *DefMN = dyn_cast<MachineSDNode>(DefNode); 4341 unsigned DefAlign = !DefMN->memoperands_empty() 4342 ? (*DefMN->memoperands_begin())->getAlignment() : 0; 4343 const MachineSDNode *UseMN = dyn_cast<MachineSDNode>(UseNode); 4344 unsigned UseAlign = !UseMN->memoperands_empty() 4345 ? (*UseMN->memoperands_begin())->getAlignment() : 0; 4346 int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign, 4347 UseMCID, UseIdx, UseAlign); 4348 4349 if (Latency > 1 && 4350 (Subtarget.isCortexA8() || Subtarget.isLikeA9() || 4351 Subtarget.isCortexA7())) { 4352 // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2] 4353 // variants are one cycle cheaper. 4354 switch (DefMCID.getOpcode()) { 4355 default: break; 4356 case ARM::LDRrs: 4357 case ARM::LDRBrs: { 4358 unsigned ShOpVal = 4359 cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue(); 4360 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 4361 if (ShImm == 0 || 4362 (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)) 4363 --Latency; 4364 break; 4365 } 4366 case ARM::t2LDRs: 4367 case ARM::t2LDRBs: 4368 case ARM::t2LDRHs: 4369 case ARM::t2LDRSHs: { 4370 // Thumb2 mode: lsl only. 4371 unsigned ShAmt = 4372 cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue(); 4373 if (ShAmt == 0 || ShAmt == 2) 4374 --Latency; 4375 break; 4376 } 4377 } 4378 } else if (DefIdx == 0 && Latency > 2 && Subtarget.isSwift()) { 4379 // FIXME: Properly handle all of the latency adjustments for address 4380 // writeback. 
4381 switch (DefMCID.getOpcode()) { 4382 default: break; 4383 case ARM::LDRrs: 4384 case ARM::LDRBrs: { 4385 unsigned ShOpVal = 4386 cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue(); 4387 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 4388 if (ShImm == 0 || 4389 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 4390 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)) 4391 Latency -= 2; 4392 else if (ShImm == 1 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsr) 4393 --Latency; 4394 break; 4395 } 4396 case ARM::t2LDRs: 4397 case ARM::t2LDRBs: 4398 case ARM::t2LDRHs: 4399 case ARM::t2LDRSHs: 4400 // Thumb2 mode: lsl 0-3 only. 4401 Latency -= 2; 4402 break; 4403 } 4404 } 4405 4406 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment()) 4407 switch (DefMCID.getOpcode()) { 4408 default: break; 4409 case ARM::VLD1q8: 4410 case ARM::VLD1q16: 4411 case ARM::VLD1q32: 4412 case ARM::VLD1q64: 4413 case ARM::VLD1q8wb_register: 4414 case ARM::VLD1q16wb_register: 4415 case ARM::VLD1q32wb_register: 4416 case ARM::VLD1q64wb_register: 4417 case ARM::VLD1q8wb_fixed: 4418 case ARM::VLD1q16wb_fixed: 4419 case ARM::VLD1q32wb_fixed: 4420 case ARM::VLD1q64wb_fixed: 4421 case ARM::VLD2d8: 4422 case ARM::VLD2d16: 4423 case ARM::VLD2d32: 4424 case ARM::VLD2q8Pseudo: 4425 case ARM::VLD2q16Pseudo: 4426 case ARM::VLD2q32Pseudo: 4427 case ARM::VLD2d8wb_fixed: 4428 case ARM::VLD2d16wb_fixed: 4429 case ARM::VLD2d32wb_fixed: 4430 case ARM::VLD2q8PseudoWB_fixed: 4431 case ARM::VLD2q16PseudoWB_fixed: 4432 case ARM::VLD2q32PseudoWB_fixed: 4433 case ARM::VLD2d8wb_register: 4434 case ARM::VLD2d16wb_register: 4435 case ARM::VLD2d32wb_register: 4436 case ARM::VLD2q8PseudoWB_register: 4437 case ARM::VLD2q16PseudoWB_register: 4438 case ARM::VLD2q32PseudoWB_register: 4439 case ARM::VLD3d8Pseudo: 4440 case ARM::VLD3d16Pseudo: 4441 case ARM::VLD3d32Pseudo: 4442 case ARM::VLD1d8TPseudo: 4443 case ARM::VLD1d16TPseudo: 4444 case ARM::VLD1d32TPseudo: 4445 case ARM::VLD1d64TPseudo: 4446 case ARM::VLD1d64TPseudoWB_fixed: 4447 case ARM::VLD1d64TPseudoWB_register: 4448 case ARM::VLD3d8Pseudo_UPD: 4449 case ARM::VLD3d16Pseudo_UPD: 4450 case ARM::VLD3d32Pseudo_UPD: 4451 case ARM::VLD3q8Pseudo_UPD: 4452 case ARM::VLD3q16Pseudo_UPD: 4453 case ARM::VLD3q32Pseudo_UPD: 4454 case ARM::VLD3q8oddPseudo: 4455 case ARM::VLD3q16oddPseudo: 4456 case ARM::VLD3q32oddPseudo: 4457 case ARM::VLD3q8oddPseudo_UPD: 4458 case ARM::VLD3q16oddPseudo_UPD: 4459 case ARM::VLD3q32oddPseudo_UPD: 4460 case ARM::VLD4d8Pseudo: 4461 case ARM::VLD4d16Pseudo: 4462 case ARM::VLD4d32Pseudo: 4463 case ARM::VLD1d8QPseudo: 4464 case ARM::VLD1d16QPseudo: 4465 case ARM::VLD1d32QPseudo: 4466 case ARM::VLD1d64QPseudo: 4467 case ARM::VLD1d64QPseudoWB_fixed: 4468 case ARM::VLD1d64QPseudoWB_register: 4469 case ARM::VLD1q8HighQPseudo: 4470 case ARM::VLD1q8LowQPseudo_UPD: 4471 case ARM::VLD1q8HighTPseudo: 4472 case ARM::VLD1q8LowTPseudo_UPD: 4473 case ARM::VLD1q16HighQPseudo: 4474 case ARM::VLD1q16LowQPseudo_UPD: 4475 case ARM::VLD1q16HighTPseudo: 4476 case ARM::VLD1q16LowTPseudo_UPD: 4477 case ARM::VLD1q32HighQPseudo: 4478 case ARM::VLD1q32LowQPseudo_UPD: 4479 case ARM::VLD1q32HighTPseudo: 4480 case ARM::VLD1q32LowTPseudo_UPD: 4481 case ARM::VLD1q64HighQPseudo: 4482 case ARM::VLD1q64LowQPseudo_UPD: 4483 case ARM::VLD1q64HighTPseudo: 4484 case ARM::VLD1q64LowTPseudo_UPD: 4485 case ARM::VLD4d8Pseudo_UPD: 4486 case ARM::VLD4d16Pseudo_UPD: 4487 case ARM::VLD4d32Pseudo_UPD: 4488 case ARM::VLD4q8Pseudo_UPD: 4489 case ARM::VLD4q16Pseudo_UPD: 4490 case ARM::VLD4q32Pseudo_UPD: 4491 case 
ARM::VLD4q8oddPseudo:
  case ARM::VLD4q16oddPseudo:
  case ARM::VLD4q32oddPseudo:
  case ARM::VLD4q8oddPseudo_UPD:
  case ARM::VLD4q16oddPseudo_UPD:
  case ARM::VLD4q32oddPseudo_UPD:
  case ARM::VLD1DUPq8:
  case ARM::VLD1DUPq16:
  case ARM::VLD1DUPq32:
  case ARM::VLD1DUPq8wb_fixed:
  case ARM::VLD1DUPq16wb_fixed:
  case ARM::VLD1DUPq32wb_fixed:
  case ARM::VLD1DUPq8wb_register:
  case ARM::VLD1DUPq16wb_register:
  case ARM::VLD1DUPq32wb_register:
  case ARM::VLD2DUPd8:
  case ARM::VLD2DUPd16:
  case ARM::VLD2DUPd32:
  case ARM::VLD2DUPd8wb_fixed:
  case ARM::VLD2DUPd16wb_fixed:
  case ARM::VLD2DUPd32wb_fixed:
  case ARM::VLD2DUPd8wb_register:
  case ARM::VLD2DUPd16wb_register:
  case ARM::VLD2DUPd32wb_register:
  case ARM::VLD2DUPq8EvenPseudo:
  case ARM::VLD2DUPq8OddPseudo:
  case ARM::VLD2DUPq16EvenPseudo:
  case ARM::VLD2DUPq16OddPseudo:
  case ARM::VLD2DUPq32EvenPseudo:
  case ARM::VLD2DUPq32OddPseudo:
  case ARM::VLD3DUPq8EvenPseudo:
  case ARM::VLD3DUPq8OddPseudo:
  case ARM::VLD3DUPq16EvenPseudo:
  case ARM::VLD3DUPq16OddPseudo:
  case ARM::VLD3DUPq32EvenPseudo:
  case ARM::VLD3DUPq32OddPseudo:
  case ARM::VLD4DUPd8Pseudo:
  case ARM::VLD4DUPd16Pseudo:
  case ARM::VLD4DUPd32Pseudo:
  case ARM::VLD4DUPd8Pseudo_UPD:
  case ARM::VLD4DUPd16Pseudo_UPD:
  case ARM::VLD4DUPd32Pseudo_UPD:
  case ARM::VLD4DUPq8EvenPseudo:
  case ARM::VLD4DUPq8OddPseudo:
  case ARM::VLD4DUPq16EvenPseudo:
  case ARM::VLD4DUPq16OddPseudo:
  case ARM::VLD4DUPq32EvenPseudo:
  case ARM::VLD4DUPq32OddPseudo:
  case ARM::VLD1LNq8Pseudo:
  case ARM::VLD1LNq16Pseudo:
  case ARM::VLD1LNq32Pseudo:
  case ARM::VLD1LNq8Pseudo_UPD:
  case ARM::VLD1LNq16Pseudo_UPD:
  case ARM::VLD1LNq32Pseudo_UPD:
  case ARM::VLD2LNd8Pseudo:
  case ARM::VLD2LNd16Pseudo:
  case ARM::VLD2LNd32Pseudo:
  case ARM::VLD2LNq16Pseudo:
  case ARM::VLD2LNq32Pseudo:
  case ARM::VLD2LNd8Pseudo_UPD:
  case ARM::VLD2LNd16Pseudo_UPD:
  case ARM::VLD2LNd32Pseudo_UPD:
  case ARM::VLD2LNq16Pseudo_UPD:
  case ARM::VLD2LNq32Pseudo_UPD:
  case ARM::VLD4LNd8Pseudo:
  case ARM::VLD4LNd16Pseudo:
  case ARM::VLD4LNd32Pseudo:
  case ARM::VLD4LNq16Pseudo:
  case ARM::VLD4LNq32Pseudo:
  case ARM::VLD4LNd8Pseudo_UPD:
  case ARM::VLD4LNd16Pseudo_UPD:
  case ARM::VLD4LNd32Pseudo_UPD:
  case ARM::VLD4LNq16Pseudo_UPD:
  case ARM::VLD4LNq32Pseudo_UPD:
    // If the address is not 64-bit aligned, the latencies of these
    // instructions increase by one.
    ++Latency;
    break;
  }

  return Latency;
}

unsigned ARMBaseInstrInfo::getPredicationCost(const MachineInstr &MI) const {
  if (MI.isCopyLike() || MI.isInsertSubreg() || MI.isRegSequence() ||
      MI.isImplicitDef())
    return 0;

  if (MI.isBundle())
    return 0;

  const MCInstrDesc &MCID = MI.getDesc();

  if (MCID.isCall() || (MCID.hasImplicitDefOfPhysReg(ARM::CPSR) &&
                        !Subtarget.cheapPredicableCPSRDef())) {
    // When predicated, CPSR is an additional source operand for CPSR-updating
    // instructions; this apparently increases their latencies.
    return 1;
  }
  return 0;
}

unsigned ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                           const MachineInstr &MI,
                                           unsigned *PredCost) const {
  if (MI.isCopyLike() || MI.isInsertSubreg() || MI.isRegSequence() ||
      MI.isImplicitDef())
    return 1;

  // An instruction scheduler typically runs on unbundled instructions;
  // however, other passes may query the latency of a bundled instruction.
  if (MI.isBundle()) {
    unsigned Latency = 0;
    MachineBasicBlock::const_instr_iterator I = MI.getIterator();
    MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
    while (++I != E && I->isInsideBundle()) {
      if (I->getOpcode() != ARM::t2IT)
        Latency += getInstrLatency(ItinData, *I, PredCost);
    }
    return Latency;
  }

  const MCInstrDesc &MCID = MI.getDesc();
  if (PredCost && (MCID.isCall() || (MCID.hasImplicitDefOfPhysReg(ARM::CPSR) &&
                                     !Subtarget.cheapPredicableCPSRDef()))) {
    // When predicated, CPSR is an additional source operand for CPSR-updating
    // instructions; this apparently increases their latencies.
    *PredCost = 1;
  }
  // Be sure to call getStageLatency for an empty itinerary in case it has a
  // valid MinLatency property.
  if (!ItinData)
    return MI.mayLoad() ? 3 : 1;

  unsigned Class = MCID.getSchedClass();

  // For instructions with variable uops, use uops as latency.
  if (!ItinData->isEmpty() && ItinData->getNumMicroOps(Class) < 0)
    return getNumMicroOps(ItinData, MI);

  // For the common case, fall back on the itinerary's latency.
  unsigned Latency = ItinData->getStageLatency(Class);

  // Adjust for dynamic def-side opcode variants not captured by the itinerary.
  unsigned DefAlign =
      MI.hasOneMemOperand() ? (*MI.memoperands_begin())->getAlignment() : 0;
  int Adj = adjustDefLatency(Subtarget, MI, MCID, DefAlign);
  if (Adj >= 0 || (int)Latency > -Adj) {
    return Latency + Adj;
  }
  return Latency;
}

int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                      SDNode *Node) const {
  if (!Node->isMachineOpcode())
    return 1;

  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Opcode = Node->getMachineOpcode();
  switch (Opcode) {
  default:
    return ItinData->getStageLatency(get(Opcode).getSchedClass());
  case ARM::VLDMQIA:
  case ARM::VSTMQIA:
    return 2;
  }
}

bool ARMBaseInstrInfo::hasHighOperandLatency(const TargetSchedModel &SchedModel,
                                             const MachineRegisterInfo *MRI,
                                             const MachineInstr &DefMI,
                                             unsigned DefIdx,
                                             const MachineInstr &UseMI,
                                             unsigned UseIdx) const {
  unsigned DDomain = DefMI.getDesc().TSFlags & ARMII::DomainMask;
  unsigned UDomain = UseMI.getDesc().TSFlags & ARMII::DomainMask;
  if (Subtarget.nonpipelinedVFP() &&
      (DDomain == ARMII::DomainVFP || UDomain == ARMII::DomainVFP))
    return true;

  // Hoist VFP / NEON instructions with 4 or higher latency.
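  // (Illustrative: a VFP or NEON def whose consumer reads it 4 or more cycles
  // later is reported as high latency, so callers such as MachineLICM are
  // encouraged to hoist it out of loops.)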
4675 unsigned Latency = 4676 SchedModel.computeOperandLatency(&DefMI, DefIdx, &UseMI, UseIdx); 4677 if (Latency <= 3) 4678 return false; 4679 return DDomain == ARMII::DomainVFP || DDomain == ARMII::DomainNEON || 4680 UDomain == ARMII::DomainVFP || UDomain == ARMII::DomainNEON; 4681 } 4682 4683 bool ARMBaseInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel, 4684 const MachineInstr &DefMI, 4685 unsigned DefIdx) const { 4686 const InstrItineraryData *ItinData = SchedModel.getInstrItineraries(); 4687 if (!ItinData || ItinData->isEmpty()) 4688 return false; 4689 4690 unsigned DDomain = DefMI.getDesc().TSFlags & ARMII::DomainMask; 4691 if (DDomain == ARMII::DomainGeneral) { 4692 unsigned DefClass = DefMI.getDesc().getSchedClass(); 4693 int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx); 4694 return (DefCycle != -1 && DefCycle <= 2); 4695 } 4696 return false; 4697 } 4698 4699 bool ARMBaseInstrInfo::verifyInstruction(const MachineInstr &MI, 4700 StringRef &ErrInfo) const { 4701 if (convertAddSubFlagsOpcode(MI.getOpcode())) { 4702 ErrInfo = "Pseudo flag setting opcodes only exist in Selection DAG"; 4703 return false; 4704 } 4705 if (MI.getOpcode() == ARM::tMOVr && !Subtarget.hasV6Ops()) { 4706 // Make sure we don't generate a lo-lo mov that isn't supported. 4707 if (!ARM::hGPRRegClass.contains(MI.getOperand(0).getReg()) && 4708 !ARM::hGPRRegClass.contains(MI.getOperand(1).getReg())) { 4709 ErrInfo = "Non-flag-setting Thumb1 mov is v6-only"; 4710 return false; 4711 } 4712 } 4713 if (MI.getOpcode() == ARM::tPUSH || 4714 MI.getOpcode() == ARM::tPOP || 4715 MI.getOpcode() == ARM::tPOP_RET) { 4716 for (int i = 2, e = MI.getNumOperands(); i < e; ++i) { 4717 if (MI.getOperand(i).isImplicit() || 4718 !MI.getOperand(i).isReg()) 4719 continue; 4720 Register Reg = MI.getOperand(i).getReg(); 4721 if (Reg < ARM::R0 || Reg > ARM::R7) { 4722 if (!(MI.getOpcode() == ARM::tPUSH && Reg == ARM::LR) && 4723 !(MI.getOpcode() == ARM::tPOP_RET && Reg == ARM::PC)) { 4724 ErrInfo = "Unsupported register in Thumb1 push/pop"; 4725 return false; 4726 } 4727 } 4728 } 4729 } 4730 return true; 4731 } 4732 4733 // LoadStackGuard has so far only been implemented for MachO. Different code 4734 // sequence is needed for other targets. 
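// A minimal sketch of the expansion below for an indirect (non-lazy pointer)
// symbol, assuming the subclass passes ARM-mode opcodes (LoadImmOpc =
// MOVi32imm, LoadOpc = LDRi12); the actual opcodes are supplied by the caller:
//
//   %reg = MOVi32imm @__stack_chk_guard (MO_NONLAZY)
//   %reg = LDRi12 %reg, 0, 14, %noreg   ; load the non-lazy pointer
//   %reg = LDRi12 %reg, 0, 14, %noreg   ; load the guard value itself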
4735 void ARMBaseInstrInfo::expandLoadStackGuardBase(MachineBasicBlock::iterator MI, 4736 unsigned LoadImmOpc, 4737 unsigned LoadOpc) const { 4738 assert(!Subtarget.isROPI() && !Subtarget.isRWPI() && 4739 "ROPI/RWPI not currently supported with stack guard"); 4740 4741 MachineBasicBlock &MBB = *MI->getParent(); 4742 DebugLoc DL = MI->getDebugLoc(); 4743 Register Reg = MI->getOperand(0).getReg(); 4744 const GlobalValue *GV = 4745 cast<GlobalValue>((*MI->memoperands_begin())->getValue()); 4746 MachineInstrBuilder MIB; 4747 4748 BuildMI(MBB, MI, DL, get(LoadImmOpc), Reg) 4749 .addGlobalAddress(GV, 0, ARMII::MO_NONLAZY); 4750 4751 if (Subtarget.isGVIndirectSymbol(GV)) { 4752 MIB = BuildMI(MBB, MI, DL, get(LoadOpc), Reg); 4753 MIB.addReg(Reg, RegState::Kill).addImm(0); 4754 auto Flags = MachineMemOperand::MOLoad | 4755 MachineMemOperand::MODereferenceable | 4756 MachineMemOperand::MOInvariant; 4757 MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand( 4758 MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 4, 4); 4759 MIB.addMemOperand(MMO).add(predOps(ARMCC::AL)); 4760 } 4761 4762 MIB = BuildMI(MBB, MI, DL, get(LoadOpc), Reg); 4763 MIB.addReg(Reg, RegState::Kill) 4764 .addImm(0) 4765 .cloneMemRefs(*MI) 4766 .add(predOps(ARMCC::AL)); 4767 } 4768 4769 bool 4770 ARMBaseInstrInfo::isFpMLxInstruction(unsigned Opcode, unsigned &MulOpc, 4771 unsigned &AddSubOpc, 4772 bool &NegAcc, bool &HasLane) const { 4773 DenseMap<unsigned, unsigned>::const_iterator I = MLxEntryMap.find(Opcode); 4774 if (I == MLxEntryMap.end()) 4775 return false; 4776 4777 const ARM_MLxEntry &Entry = ARM_MLxTable[I->second]; 4778 MulOpc = Entry.MulOpc; 4779 AddSubOpc = Entry.AddSubOpc; 4780 NegAcc = Entry.NegAcc; 4781 HasLane = Entry.HasLane; 4782 return true; 4783 } 4784 4785 //===----------------------------------------------------------------------===// 4786 // Execution domains. 4787 //===----------------------------------------------------------------------===// 4788 // 4789 // Some instructions go down the NEON pipeline, some go down the VFP pipeline, 4790 // and some can go down both. The vmov instructions go down the VFP pipeline, 4791 // but they can be changed to vorr equivalents that are executed by the NEON 4792 // pipeline. 4793 // 4794 // We use the following execution domain numbering: 4795 // 4796 enum ARMExeDomain { 4797 ExeGeneric = 0, 4798 ExeVFP = 1, 4799 ExeNEON = 2 4800 }; 4801 4802 // 4803 // Also see ARMInstrFormats.td and Domain* enums in ARMBaseInfo.h 4804 // 4805 std::pair<uint16_t, uint16_t> 4806 ARMBaseInstrInfo::getExecutionDomain(const MachineInstr &MI) const { 4807 // If we don't have access to NEON instructions then we won't be able 4808 // to swizzle anything to the NEON domain. Check to make sure. 4809 if (Subtarget.hasNEON()) { 4810 // VMOVD, VMOVRS and VMOVSR are VFP instructions, but can be changed to NEON 4811 // if they are not predicated. 4812 if (MI.getOpcode() == ARM::VMOVD && !isPredicated(MI)) 4813 return std::make_pair(ExeVFP, (1 << ExeVFP) | (1 << ExeNEON)); 4814 4815 // CortexA9 is particularly picky about mixing the two and wants these 4816 // converted. 4817 if (Subtarget.useNEONForFPMovs() && !isPredicated(MI) && 4818 (MI.getOpcode() == ARM::VMOVRS || MI.getOpcode() == ARM::VMOVSR || 4819 MI.getOpcode() == ARM::VMOVS)) 4820 return std::make_pair(ExeVFP, (1 << ExeVFP) | (1 << ExeNEON)); 4821 } 4822 // No other instructions can be swizzled, so just determine their domain. 
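  // (Illustrative: for an unpredicated VMOVD the pair returned above is
  // (ExeVFP, (1 << ExeVFP) | (1 << ExeNEON)): the instruction currently
  // executes as VFP, but setExecutionDomain below may rewrite it, e.g. into a
  // NEON VORRd.)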
4823 unsigned Domain = MI.getDesc().TSFlags & ARMII::DomainMask; 4824 4825 if (Domain & ARMII::DomainNEON) 4826 return std::make_pair(ExeNEON, 0); 4827 4828 // Certain instructions can go either way on Cortex-A8. 4829 // Treat them as NEON instructions. 4830 if ((Domain & ARMII::DomainNEONA8) && Subtarget.isCortexA8()) 4831 return std::make_pair(ExeNEON, 0); 4832 4833 if (Domain & ARMII::DomainVFP) 4834 return std::make_pair(ExeVFP, 0); 4835 4836 return std::make_pair(ExeGeneric, 0); 4837 } 4838 4839 static unsigned getCorrespondingDRegAndLane(const TargetRegisterInfo *TRI, 4840 unsigned SReg, unsigned &Lane) { 4841 unsigned DReg = TRI->getMatchingSuperReg(SReg, ARM::ssub_0, &ARM::DPRRegClass); 4842 Lane = 0; 4843 4844 if (DReg != ARM::NoRegister) 4845 return DReg; 4846 4847 Lane = 1; 4848 DReg = TRI->getMatchingSuperReg(SReg, ARM::ssub_1, &ARM::DPRRegClass); 4849 4850 assert(DReg && "S-register with no D super-register?"); 4851 return DReg; 4852 } 4853 4854 /// getImplicitSPRUseForDPRUse - Given a use of a DPR register and lane, 4855 /// set ImplicitSReg to a register number that must be marked as implicit-use or 4856 /// zero if no register needs to be defined as implicit-use. 4857 /// 4858 /// If the function cannot determine if an SPR should be marked implicit use or 4859 /// not, it returns false. 4860 /// 4861 /// This function handles cases where an instruction is being modified from taking 4862 /// an SPR to a DPR[Lane]. A use of the DPR is being added, which may conflict 4863 /// with an earlier def of an SPR corresponding to DPR[Lane^1] (i.e. the other 4864 /// lane of the DPR). 4865 /// 4866 /// If the other SPR is defined, an implicit-use of it should be added. Else, 4867 /// (including the case where the DPR itself is defined), it should not. 4868 /// 4869 static bool getImplicitSPRUseForDPRUse(const TargetRegisterInfo *TRI, 4870 MachineInstr &MI, unsigned DReg, 4871 unsigned Lane, unsigned &ImplicitSReg) { 4872 // If the DPR is defined or used already, the other SPR lane will be chained 4873 // correctly, so there is nothing to be done. 4874 if (MI.definesRegister(DReg, TRI) || MI.readsRegister(DReg, TRI)) { 4875 ImplicitSReg = 0; 4876 return true; 4877 } 4878 4879 // Otherwise we need to go searching to see if the SPR is set explicitly. 4880 ImplicitSReg = TRI->getSubReg(DReg, 4881 (Lane & 1) ? ARM::ssub_0 : ARM::ssub_1); 4882 MachineBasicBlock::LivenessQueryResult LQR = 4883 MI.getParent()->computeRegisterLiveness(TRI, ImplicitSReg, MI); 4884 4885 if (LQR == MachineBasicBlock::LQR_Live) 4886 return true; 4887 else if (LQR == MachineBasicBlock::LQR_Unknown) 4888 return false; 4889 4890 // If the register is known not to be live, there is no need to add an 4891 // implicit-use. 4892 ImplicitSReg = 0; 4893 return true; 4894 } 4895 4896 void ARMBaseInstrInfo::setExecutionDomain(MachineInstr &MI, 4897 unsigned Domain) const { 4898 unsigned DstReg, SrcReg, DReg; 4899 unsigned Lane; 4900 MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI); 4901 const TargetRegisterInfo *TRI = &getRegisterInfo(); 4902 switch (MI.getOpcode()) { 4903 default: 4904 llvm_unreachable("cannot handle opcode!"); 4905 break; 4906 case ARM::VMOVD: 4907 if (Domain != ExeNEON) 4908 break; 4909 4910 // Zap the predicate operands. 4911 assert(!isPredicated(MI) && "Cannot predicate a VORRd"); 4912 4913 // Make sure we've got NEON instructions. 
4914 assert(Subtarget.hasNEON() && "VORRd requires NEON"); 4915 4916 // Source instruction is %DDst = VMOVD %DSrc, 14, %noreg (; implicits) 4917 DstReg = MI.getOperand(0).getReg(); 4918 SrcReg = MI.getOperand(1).getReg(); 4919 4920 for (unsigned i = MI.getDesc().getNumOperands(); i; --i) 4921 MI.RemoveOperand(i - 1); 4922 4923 // Change to a %DDst = VORRd %DSrc, %DSrc, 14, %noreg (; implicits) 4924 MI.setDesc(get(ARM::VORRd)); 4925 MIB.addReg(DstReg, RegState::Define) 4926 .addReg(SrcReg) 4927 .addReg(SrcReg) 4928 .add(predOps(ARMCC::AL)); 4929 break; 4930 case ARM::VMOVRS: 4931 if (Domain != ExeNEON) 4932 break; 4933 assert(!isPredicated(MI) && "Cannot predicate a VGETLN"); 4934 4935 // Source instruction is %RDst = VMOVRS %SSrc, 14, %noreg (; implicits) 4936 DstReg = MI.getOperand(0).getReg(); 4937 SrcReg = MI.getOperand(1).getReg(); 4938 4939 for (unsigned i = MI.getDesc().getNumOperands(); i; --i) 4940 MI.RemoveOperand(i - 1); 4941 4942 DReg = getCorrespondingDRegAndLane(TRI, SrcReg, Lane); 4943 4944 // Convert to %RDst = VGETLNi32 %DSrc, Lane, 14, %noreg (; imps) 4945 // Note that DSrc has been widened and the other lane may be undef, which 4946 // contaminates the entire register. 4947 MI.setDesc(get(ARM::VGETLNi32)); 4948 MIB.addReg(DstReg, RegState::Define) 4949 .addReg(DReg, RegState::Undef) 4950 .addImm(Lane) 4951 .add(predOps(ARMCC::AL)); 4952 4953 // The old source should be an implicit use, otherwise we might think it 4954 // was dead before here. 4955 MIB.addReg(SrcReg, RegState::Implicit); 4956 break; 4957 case ARM::VMOVSR: { 4958 if (Domain != ExeNEON) 4959 break; 4960 assert(!isPredicated(MI) && "Cannot predicate a VSETLN"); 4961 4962 // Source instruction is %SDst = VMOVSR %RSrc, 14, %noreg (; implicits) 4963 DstReg = MI.getOperand(0).getReg(); 4964 SrcReg = MI.getOperand(1).getReg(); 4965 4966 DReg = getCorrespondingDRegAndLane(TRI, DstReg, Lane); 4967 4968 unsigned ImplicitSReg; 4969 if (!getImplicitSPRUseForDPRUse(TRI, MI, DReg, Lane, ImplicitSReg)) 4970 break; 4971 4972 for (unsigned i = MI.getDesc().getNumOperands(); i; --i) 4973 MI.RemoveOperand(i - 1); 4974 4975 // Convert to %DDst = VSETLNi32 %DDst, %RSrc, Lane, 14, %noreg (; imps) 4976 // Again DDst may be undefined at the beginning of this instruction. 4977 MI.setDesc(get(ARM::VSETLNi32)); 4978 MIB.addReg(DReg, RegState::Define) 4979 .addReg(DReg, getUndefRegState(!MI.readsRegister(DReg, TRI))) 4980 .addReg(SrcReg) 4981 .addImm(Lane) 4982 .add(predOps(ARMCC::AL)); 4983 4984 // The narrower destination must be marked as set to keep previous chains 4985 // in place. 
    MIB.addReg(DstReg, RegState::Define | RegState::Implicit);
    if (ImplicitSReg != 0)
      MIB.addReg(ImplicitSReg, RegState::Implicit);
    break;
  }
  case ARM::VMOVS: {
    if (Domain != ExeNEON)
      break;

    // Source instruction is %SDst = VMOVS %SSrc, 14, %noreg (; implicits)
    DstReg = MI.getOperand(0).getReg();
    SrcReg = MI.getOperand(1).getReg();

    unsigned DstLane = 0, SrcLane = 0, DDst, DSrc;
    DDst = getCorrespondingDRegAndLane(TRI, DstReg, DstLane);
    DSrc = getCorrespondingDRegAndLane(TRI, SrcReg, SrcLane);

    unsigned ImplicitSReg;
    if (!getImplicitSPRUseForDPRUse(TRI, MI, DSrc, SrcLane, ImplicitSReg))
      break;

    for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
      MI.RemoveOperand(i - 1);

    if (DSrc == DDst) {
      // Destination can be:
      //   %DDst = VDUPLN32d %DDst, Lane, 14, %noreg (; implicits)
      MI.setDesc(get(ARM::VDUPLN32d));
      MIB.addReg(DDst, RegState::Define)
          .addReg(DDst, getUndefRegState(!MI.readsRegister(DDst, TRI)))
          .addImm(SrcLane)
          .add(predOps(ARMCC::AL));

      // Neither the source nor the destination is naturally represented any
      // more, so add them in manually.
      MIB.addReg(DstReg, RegState::Implicit | RegState::Define);
      MIB.addReg(SrcReg, RegState::Implicit);
      if (ImplicitSReg != 0)
        MIB.addReg(ImplicitSReg, RegState::Implicit);
      break;
    }

    // In general there's no single instruction that can perform an S <-> S
    // move in NEON space, but a pair of VEXT instructions *can* do the
    // job. It turns out that the VEXTs needed will only use DSrc once, with
    // the position based purely on the combination of lane-0 and lane-1
    // involved. For example
    //   vmov s0, s2 -> vext.32 d0, d0, d1, #1  vext.32 d0, d0, d0, #1
    //   vmov s1, s3 -> vext.32 d0, d1, d0, #1  vext.32 d0, d0, d0, #1
    //   vmov s0, s3 -> vext.32 d0, d0, d0, #1  vext.32 d0, d1, d0, #1
    //   vmov s1, s2 -> vext.32 d0, d0, d0, #1  vext.32 d0, d0, d1, #1
    //
    // Pattern of the MachineInstrs is:
    //   %DDst = VEXTd32 %DSrc1, %DSrc2, Lane, 14, %noreg (; implicits)
    MachineInstrBuilder NewMIB;
    NewMIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(ARM::VEXTd32),
                     DDst);

    // On the first instruction, both DSrc and DDst may be undef if present,
    // specifically when the original instruction didn't have them as an
    // <imp-use>.
    unsigned CurReg = SrcLane == 1 && DstLane == 1 ? DSrc : DDst;
    bool CurUndef = !MI.readsRegister(CurReg, TRI);
    NewMIB.addReg(CurReg, getUndefRegState(CurUndef));

    CurReg = SrcLane == 0 && DstLane == 0 ? DSrc : DDst;
    CurUndef = !MI.readsRegister(CurReg, TRI);
    NewMIB.addReg(CurReg, getUndefRegState(CurUndef))
        .addImm(1)
        .add(predOps(ARMCC::AL));

    if (SrcLane == DstLane)
      NewMIB.addReg(SrcReg, RegState::Implicit);

    MI.setDesc(get(ARM::VEXTd32));
    MIB.addReg(DDst, RegState::Define);

    // On the second instruction, DDst has definitely been defined above, so
    // it is not undef. DSrc, if present, can be undef as above.
    CurReg = SrcLane == 1 && DstLane == 0 ? DSrc : DDst;
    CurUndef = CurReg == DSrc && !MI.readsRegister(CurReg, TRI);
    MIB.addReg(CurReg, getUndefRegState(CurUndef));

    CurReg = SrcLane == 0 && DstLane == 1 ?

    // In general there's no single instruction that can perform an S <-> S
    // move in NEON space, but a pair of VEXT instructions *can* do the
    // job. It turns out that the VEXTs needed will only use DSrc once, with
    // the position based purely on the combination of lane-0 and lane-1
    // involved. For example
    //   vmov s0, s2 -> vext.32 d0, d0, d1, #1  vext.32 d0, d0, d0, #1
    //   vmov s1, s3 -> vext.32 d0, d1, d0, #1  vext.32 d0, d0, d0, #1
    //   vmov s0, s3 -> vext.32 d0, d0, d0, #1  vext.32 d0, d1, d0, #1
    //   vmov s1, s2 -> vext.32 d0, d0, d0, #1  vext.32 d0, d0, d1, #1
    //
    // Pattern of the MachineInstrs is:
    //   %DDst = VEXTd32 %DSrc1, %DSrc2, Lane, 14, %noreg (; implicits)
    MachineInstrBuilder NewMIB;
    NewMIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(ARM::VEXTd32),
                     DDst);

    // On the first instruction, both DSrc and DDst may be undef if present.
    // Specifically when the original instruction didn't have them as an
    // <imp-use>.
    unsigned CurReg = SrcLane == 1 && DstLane == 1 ? DSrc : DDst;
    bool CurUndef = !MI.readsRegister(CurReg, TRI);
    NewMIB.addReg(CurReg, getUndefRegState(CurUndef));

    CurReg = SrcLane == 0 && DstLane == 0 ? DSrc : DDst;
    CurUndef = !MI.readsRegister(CurReg, TRI);
    NewMIB.addReg(CurReg, getUndefRegState(CurUndef))
        .addImm(1)
        .add(predOps(ARMCC::AL));

    if (SrcLane == DstLane)
      NewMIB.addReg(SrcReg, RegState::Implicit);

    MI.setDesc(get(ARM::VEXTd32));
    MIB.addReg(DDst, RegState::Define);

    // On the second instruction, DDst has definitely been defined above, so
    // it is not undef. DSrc, if present, can be undef as above.
    CurReg = SrcLane == 1 && DstLane == 0 ? DSrc : DDst;
    CurUndef = CurReg == DSrc && !MI.readsRegister(CurReg, TRI);
    MIB.addReg(CurReg, getUndefRegState(CurUndef));

    CurReg = SrcLane == 0 && DstLane == 1 ? DSrc : DDst;
    CurUndef = CurReg == DSrc && !MI.readsRegister(CurReg, TRI);
    MIB.addReg(CurReg, getUndefRegState(CurUndef))
        .addImm(1)
        .add(predOps(ARMCC::AL));

    if (SrcLane != DstLane)
      MIB.addReg(SrcReg, RegState::Implicit);

    // As before, the original destination is no longer represented, add it
    // implicitly.
    MIB.addReg(DstReg, RegState::Define | RegState::Implicit);
    if (ImplicitSReg != 0)
      MIB.addReg(ImplicitSReg, RegState::Implicit);
    break;
  }
  }
}

//===----------------------------------------------------------------------===//
// Partial register updates
//===----------------------------------------------------------------------===//
//
// Swift renames NEON registers with 64-bit granularity. That means any
// instruction writing an S-reg implicitly reads the containing D-reg. The
// problem is mostly avoided by translating f32 operations to v2f32 operations
// on D-registers, but f32 loads are still a problem.
//
// These instructions can load an f32 into a NEON register:
//
// VLDRS - Only writes S, partial D update.
// VLD1LNd32 - Writes all D-regs, explicit partial D update, 2 uops.
// VLD1DUPd32 - Writes all D-regs, no partial reg update, 2 uops.
//
// FCONSTD can be used as a dependency-breaking instruction.
unsigned ARMBaseInstrInfo::getPartialRegUpdateClearance(
    const MachineInstr &MI, unsigned OpNum,
    const TargetRegisterInfo *TRI) const {
  auto PartialUpdateClearance = Subtarget.getPartialUpdateClearance();
  if (!PartialUpdateClearance)
    return 0;

  assert(TRI && "Need TRI instance");

  const MachineOperand &MO = MI.getOperand(OpNum);
  if (MO.readsReg())
    return 0;
  Register Reg = MO.getReg();
  int UseOp = -1;

  switch (MI.getOpcode()) {
  // Normal instructions writing only an S-register.
  case ARM::VLDRS:
  case ARM::FCONSTS:
  case ARM::VMOVSR:
  case ARM::VMOVv8i8:
  case ARM::VMOVv4i16:
  case ARM::VMOVv2i32:
  case ARM::VMOVv2f32:
  case ARM::VMOVv1i64:
    UseOp = MI.findRegisterUseOperandIdx(Reg, false, TRI);
    break;

  // Explicitly reads the dependency.
  case ARM::VLD1LNd32:
    UseOp = 3;
    break;
  default:
    return 0;
  }

  // If this instruction actually reads a value from Reg, there is no unwanted
  // dependency.
  if (UseOp != -1 && MI.getOperand(UseOp).readsReg())
    return 0;

  // We must be able to clobber the whole D-reg.
  if (Register::isVirtualRegister(Reg)) {
    // Virtual register must be a def undef foo:ssub_0 operand.
    if (!MO.getSubReg() || MI.readsVirtualRegister(Reg))
      return 0;
  } else if (ARM::SPRRegClass.contains(Reg)) {
    // Physical register: MI must define the full D-reg.
    unsigned DReg = TRI->getMatchingSuperReg(Reg, ARM::ssub_0,
                                             &ARM::DPRRegClass);
    if (!DReg || !MI.definesRegister(DReg, TRI))
      return 0;
  }

  // MI has an unwanted D-register dependency.
  // Avoid defs in the previous N instructions.
  return PartialUpdateClearance;
}
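
// Illustrative stall this avoids (registers hypothetical): on Swift,
//   %d0 = VADDfd %d1, %d2, ...
//   %s0 = VLDRS ...
// the load writes only s0, so with 64-bit renaming it must merge its result
// into d0 and therefore waits on the unrelated VADD. A nonzero clearance asks
// the scheduler to keep such defs apart, or lets breakPartialRegDependency()
// below insert an FCONSTD to sever the chain.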

// Break a partial register dependency after getPartialRegUpdateClearance
// returned non-zero.
void ARMBaseInstrInfo::breakPartialRegDependency(
    MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const {
  assert(OpNum < MI.getDesc().getNumDefs() && "OpNum is not a def");
  assert(TRI && "Need TRI instance");

  const MachineOperand &MO = MI.getOperand(OpNum);
  Register Reg = MO.getReg();
  assert(Register::isPhysicalRegister(Reg) &&
         "Can't break virtual register dependencies.");
  unsigned DReg = Reg;

  // If MI defines an S-reg, find the corresponding D super-register.
  if (ARM::SPRRegClass.contains(Reg)) {
    DReg = ARM::D0 + (Reg - ARM::S0) / 2;
    assert(TRI->isSuperRegister(Reg, DReg) && "Register enums broken");
  }

  assert(ARM::DPRRegClass.contains(DReg) && "Can only break D-reg deps");
  assert(MI.definesRegister(DReg, TRI) && "MI doesn't clobber full D-reg");

  // FIXME: In some cases, VLDRS can be changed to a VLD1DUPd32 which defines
  // the full D-register by loading the same value to both lanes. The
  // instruction is micro-coded with 2 uops, so don't do this until we can
  // properly schedule micro-coded instructions. The dispatcher stalls cause
  // too big regressions.

  // Insert the dependency-breaking FCONSTD before MI.
  // 96 is the encoding of 0.5, but the actual value doesn't matter here.
  BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(ARM::FCONSTD), DReg)
      .addImm(96)
      .add(predOps(ARMCC::AL));
  MI.addRegisterKilled(DReg, TRI, true);
}

bool ARMBaseInstrInfo::hasNOP() const {
  return Subtarget.getFeatureBits()[ARM::HasV6KOps];
}

bool ARMBaseInstrInfo::isSwiftFastImmShift(const MachineInstr *MI) const {
  if (MI->getNumOperands() < 4)
    return true;
  unsigned ShOpVal = MI->getOperand(3).getImm();
  unsigned ShImm = ARM_AM::getSORegOffset(ShOpVal);
  // Swift supports faster shifts for: lsl 2, lsl 1, and lsr 1.
  if ((ShImm == 1 && ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsr) ||
      ((ShImm == 1 || ShImm == 2) &&
       ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsl))
    return true;

  return false;
}
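
// For example (illustrative): "add r0, r1, r2, lsl #2" uses one of the fast
// Swift shift amounts above, whereas "add r0, r1, r2, lsl #3" does not and is
// reported as slow.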

bool ARMBaseInstrInfo::getRegSequenceLikeInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert(DefIdx < MI.getDesc().getNumDefs() && "Invalid definition index");
  assert(MI.isRegSequenceLike() && "Invalid kind of instruction");

  switch (MI.getOpcode()) {
  case ARM::VMOVDRR:
    // dX = VMOVDRR rY, rZ
    // is the same as:
    // dX = REG_SEQUENCE rY, ssub_0, rZ, ssub_1
    // Populate the InputRegs accordingly.
    // rY
    const MachineOperand *MOReg = &MI.getOperand(1);
    if (!MOReg->isUndef())
      InputRegs.push_back(RegSubRegPairAndIdx(MOReg->getReg(),
                                              MOReg->getSubReg(), ARM::ssub_0));
    // rZ
    MOReg = &MI.getOperand(2);
    if (!MOReg->isUndef())
      InputRegs.push_back(RegSubRegPairAndIdx(MOReg->getReg(),
                                              MOReg->getSubReg(), ARM::ssub_1));
    return true;
  }
  llvm_unreachable("Target dependent opcode missing");
}

bool ARMBaseInstrInfo::getExtractSubregLikeInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert(DefIdx < MI.getDesc().getNumDefs() && "Invalid definition index");
  assert(MI.isExtractSubregLike() && "Invalid kind of instruction");

  switch (MI.getOpcode()) {
  case ARM::VMOVRRD:
    // rX, rY = VMOVRRD dZ
    // is the same as:
    // rX = EXTRACT_SUBREG dZ, ssub_0
    // rY = EXTRACT_SUBREG dZ, ssub_1
    const MachineOperand &MOReg = MI.getOperand(2);
    if (MOReg.isUndef())
      return false;
    InputReg.Reg = MOReg.getReg();
    InputReg.SubReg = MOReg.getSubReg();
    InputReg.SubIdx = DefIdx == 0 ? ARM::ssub_0 : ARM::ssub_1;
    return true;
  }
  llvm_unreachable("Target dependent opcode missing");
}

bool ARMBaseInstrInfo::getInsertSubregLikeInputs(
    const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg,
    RegSubRegPairAndIdx &InsertedReg) const {
  assert(DefIdx < MI.getDesc().getNumDefs() && "Invalid definition index");
  assert(MI.isInsertSubregLike() && "Invalid kind of instruction");

  switch (MI.getOpcode()) {
  case ARM::VSETLNi32:
    // dX = VSETLNi32 dY, rZ, imm
    const MachineOperand &MOBaseReg = MI.getOperand(1);
    const MachineOperand &MOInsertedReg = MI.getOperand(2);
    if (MOInsertedReg.isUndef())
      return false;
    const MachineOperand &MOIndex = MI.getOperand(3);
    BaseReg.Reg = MOBaseReg.getReg();
    BaseReg.SubReg = MOBaseReg.getSubReg();

    InsertedReg.Reg = MOInsertedReg.getReg();
    InsertedReg.SubReg = MOInsertedReg.getSubReg();
    InsertedReg.SubIdx = MOIndex.getImm() == 0 ? ARM::ssub_0 : ARM::ssub_1;
    return true;
  }
  llvm_unreachable("Target dependent opcode missing");
}
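
// Illustrative reading of the hook above (registers hypothetical):
//   dX = VSETLNi32 dY, rZ, 0
// is reported to generic callers as the equivalent of
//   dX = INSERT_SUBREG dY, rZ, ssub_0
// with imm == 1 mapping to ssub_1 instead.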

std::pair<unsigned, unsigned>
ARMBaseInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  const unsigned Mask = ARMII::MO_OPTION_MASK;
  return std::make_pair(TF & Mask, TF & ~Mask);
}

ArrayRef<std::pair<unsigned, const char *>>
ARMBaseInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace ARMII;

  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_LO16, "arm-lo16"}, {MO_HI16, "arm-hi16"}};
  return makeArrayRef(TargetFlags);
}

ArrayRef<std::pair<unsigned, const char *>>
ARMBaseInstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
  using namespace ARMII;

  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_COFFSTUB, "arm-coffstub"},
      {MO_GOT, "arm-got"},
      {MO_SBREL, "arm-sbrel"},
      {MO_DLLIMPORT, "arm-dllimport"},
      {MO_SECREL, "arm-secrel"},
      {MO_NONLAZY, "arm-nonlazy"}};
  return makeArrayRef(TargetFlags);
}

bool llvm::registerDefinedBetween(unsigned Reg,
                                  MachineBasicBlock::iterator From,
                                  MachineBasicBlock::iterator To,
                                  const TargetRegisterInfo *TRI) {
  for (auto I = From; I != To; ++I)
    if (I->modifiesRegister(Reg, TRI))
      return true;
  return false;
}

MachineInstr *llvm::findCMPToFoldIntoCBZ(MachineInstr *Br,
                                         const TargetRegisterInfo *TRI) {
  // Search backwards to the instruction that defines CPSR. This may or may
  // not be a CMP; we check that after this loop. If we find another
  // instruction that reads CPSR, we return nullptr.
  MachineBasicBlock::iterator CmpMI = Br;
  while (CmpMI != Br->getParent()->begin()) {
    --CmpMI;
    if (CmpMI->modifiesRegister(ARM::CPSR, TRI))
      break;
    if (CmpMI->readsRegister(ARM::CPSR, TRI))
      break;
  }

  // Check that this inst is a CMP r[0-7], #0 and that the register
  // is not redefined between the cmp and the br.
  if (CmpMI->getOpcode() != ARM::tCMPi8 && CmpMI->getOpcode() != ARM::t2CMPri)
    return nullptr;
  Register Reg = CmpMI->getOperand(0).getReg();
  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(*CmpMI, PredReg);
  if (Pred != ARMCC::AL || CmpMI->getOperand(1).getImm() != 0)
    return nullptr;
  if (!isARMLowRegister(Reg))
    return nullptr;
  if (registerDefinedBetween(Reg, CmpMI->getNextNode(), Br, TRI))
    return nullptr;

  return &*CmpMI;
}
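
// Illustrative fold enabled by this helper (registers hypothetical):
//   tCMPi8 %r1, 0
//   ...                      ; nothing here redefines %r1 or touches CPSR
//   conditional branch on eq
// can be rewritten by its callers into a single "cbz r1, <target>", since r1
// is a low register and the compare is against zero.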

unsigned llvm::ConstantMaterializationCost(unsigned Val,
                                           const ARMSubtarget *Subtarget,
                                           bool ForCodesize) {
  if (Subtarget->isThumb()) {
    if (Val <= 255) // MOV
      return ForCodesize ? 2 : 1;
    if (Subtarget->hasV6T2Ops() && (Val <= 0xffff ||                    // MOVW
                                    ARM_AM::getT2SOImmVal(Val) != -1 || // MOV
                                    ARM_AM::getT2SOImmVal(~Val) != -1)) // MVN
      return ForCodesize ? 4 : 1;
    if (Val <= 510) // MOV + ADDi8
      return ForCodesize ? 4 : 2;
    if (~Val <= 255) // MOV + MVN
      return ForCodesize ? 4 : 2;
    if (ARM_AM::isThumbImmShiftedVal(Val)) // MOV + LSL
      return ForCodesize ? 4 : 2;
  } else {
    if (ARM_AM::getSOImmVal(Val) != -1) // MOV
      return ForCodesize ? 4 : 1;
    if (ARM_AM::getSOImmVal(~Val) != -1) // MVN
      return ForCodesize ? 4 : 1;
    if (Subtarget->hasV6T2Ops() && Val <= 0xffff) // MOVW
      return ForCodesize ? 4 : 1;
    if (ARM_AM::isSOImmTwoPartVal(Val)) // two instrs
      return ForCodesize ? 8 : 2;
  }
  if (Subtarget->useMovt()) // MOVW + MOVT
    return ForCodesize ? 8 : 2;
  return ForCodesize ? 8 : 3; // Literal pool load
}

bool llvm::HasLowerConstantMaterializationCost(unsigned Val1, unsigned Val2,
                                               const ARMSubtarget *Subtarget,
                                               bool ForCodesize) {
  // Check with ForCodesize
  unsigned Cost1 = ConstantMaterializationCost(Val1, Subtarget, ForCodesize);
  unsigned Cost2 = ConstantMaterializationCost(Val2, Subtarget, ForCodesize);
  if (Cost1 < Cost2)
    return true;
  if (Cost1 > Cost2)
    return false;

  // If they are equal, try with !ForCodesize
  return ConstantMaterializationCost(Val1, Subtarget, !ForCodesize) <
         ConstantMaterializationCost(Val2, Subtarget, !ForCodesize);
}
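
// Illustrative cost walk-through (value hypothetical): on a Thumb1-only
// target, Val = 300 fails the Val <= 255 and V6T2 checks but passes
// Val <= 510, so it is materialized as MOV + ADDi8: cost 4 when ForCodesize
// (bytes), otherwise 2 (instructions).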