//===-- ARMBaseInstrInfo.cpp - ARM Instruction Information ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the Base ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMConstantPoolValue.h"
#include "ARMFeatures.h"
#include "ARMHazardRecognizer.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/MultiHazardRecognizer.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <new>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "arm-instrinfo"

#define GET_INSTRINFO_CTOR_DTOR
#include "ARMGenInstrInfo.inc"

static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
               cl::desc("Enable ARM 2-addr to 3-addr conv"));

/// ARM_MLxEntry - Record information about MLA / MLS instructions.
struct ARM_MLxEntry {
  uint16_t MLxOpc;    // MLA / MLS opcode
  uint16_t MulOpc;    // Expanded multiplication opcode
  uint16_t AddSubOpc; // Expanded add / sub opcode
  bool NegAcc;        // True if the acc is negated before the add / sub.
  bool HasLane;       // True if instruction has an extra "lane" operand.
};

static const ARM_MLxEntry ARM_MLxTable[] = {
  // MLxOpc,          MulOpc,           AddSubOpc,       NegAcc, HasLane
  // fp scalar ops
  { ARM::VMLAS,       ARM::VMULS,       ARM::VADDS,      false,  false },
  { ARM::VMLSS,       ARM::VMULS,       ARM::VSUBS,      false,  false },
  { ARM::VMLAD,       ARM::VMULD,       ARM::VADDD,      false,  false },
  { ARM::VMLSD,       ARM::VMULD,       ARM::VSUBD,      false,  false },
  { ARM::VNMLAS,      ARM::VNMULS,      ARM::VSUBS,      true,   false },
  { ARM::VNMLSS,      ARM::VMULS,       ARM::VSUBS,      true,   false },
  { ARM::VNMLAD,      ARM::VNMULD,      ARM::VSUBD,      true,   false },
  { ARM::VNMLSD,      ARM::VMULD,       ARM::VSUBD,      true,   false },

  // fp SIMD ops
  { ARM::VMLAfd,      ARM::VMULfd,      ARM::VADDfd,     false,  false },
  { ARM::VMLSfd,      ARM::VMULfd,      ARM::VSUBfd,     false,  false },
  { ARM::VMLAfq,      ARM::VMULfq,      ARM::VADDfq,     false,  false },
  { ARM::VMLSfq,      ARM::VMULfq,      ARM::VSUBfq,     false,  false },
  { ARM::VMLAslfd,    ARM::VMULslfd,    ARM::VADDfd,     false,  true  },
  { ARM::VMLSslfd,    ARM::VMULslfd,    ARM::VSUBfd,     false,  true  },
  { ARM::VMLAslfq,    ARM::VMULslfq,    ARM::VADDfq,     false,  true  },
  { ARM::VMLSslfq,    ARM::VMULslfq,    ARM::VSUBfq,     false,  true  },
};

ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
  : ARMGenInstrInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
    Subtarget(STI) {
  for (unsigned i = 0, e = array_lengthof(ARM_MLxTable); i != e; ++i) {
    if (!MLxEntryMap.insert(std::make_pair(ARM_MLxTable[i].MLxOpc, i)).second)
      llvm_unreachable("Duplicated entries?");
    MLxHazardOpcodes.insert(ARM_MLxTable[i].AddSubOpc);
    MLxHazardOpcodes.insert(ARM_MLxTable[i].MulOpc);
  }
}

// Use a ScoreboardHazardRecognizer for prepass ARM scheduling. TargetInstrInfo
// currently defaults to no prepass hazard recognizer.
ScheduleHazardRecognizer *
ARMBaseInstrInfo::CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                                               const ScheduleDAG *DAG) const {
  if (usePreRAHazardRecognizer()) {
    const InstrItineraryData *II =
        static_cast<const ARMSubtarget *>(STI)->getInstrItineraryData();
    return new ScoreboardHazardRecognizer(II, DAG, "pre-RA-sched");
  }
  return TargetInstrInfo::CreateTargetHazardRecognizer(STI, DAG);
}

ScheduleHazardRecognizer *ARMBaseInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  MultiHazardRecognizer *MHR = new MultiHazardRecognizer();

  if (Subtarget.isThumb2() || Subtarget.hasVFP2Base())
    MHR->AddHazardRecognizer(std::make_unique<ARMHazardRecognizerFPMLx>());

  auto BHR = TargetInstrInfo::CreateTargetPostRAHazardRecognizer(II, DAG);
  if (BHR)
    MHR->AddHazardRecognizer(std::unique_ptr<ScheduleHazardRecognizer>(BHR));
  return MHR;
}

MachineInstr *ARMBaseInstrInfo::convertToThreeAddress(
    MachineFunction::iterator &MFI, MachineInstr &MI, LiveVariables *LV) const {
  // FIXME: Thumb2 support.

  if (!EnableARM3Addr)
    return nullptr;

  MachineFunction &MF = *MI.getParent()->getParent();
  uint64_t TSFlags = MI.getDesc().TSFlags;
  bool isPre = false;
  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
  default: return nullptr;
  case ARMII::IndexModePre:
    isPre = true;
    break;
  case ARMII::IndexModePost:
    break;
  }

  // Try splitting an indexed load/store to an un-indexed one plus an add/sub
  // operation.
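  // For example, a post-indexed "LDR r0, [r1], r2" becomes an unindexed
  // "LDR r0, [r1]" followed by "ADD r1, r1, r2" (a pre-indexed form puts the
  // ADD first). The split is only worthwhile when the base update fits in a
  // single add/sub instruction, which the code below checks.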
  unsigned MemOpc = getUnindexedOpcode(MI.getOpcode());
  if (MemOpc == 0)
    return nullptr;

  MachineInstr *UpdateMI = nullptr;
  MachineInstr *MemMI = nullptr;
  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
  const MCInstrDesc &MCID = MI.getDesc();
  unsigned NumOps = MCID.getNumOperands();
  bool isLoad = !MI.mayStore();
  const MachineOperand &WB = isLoad ? MI.getOperand(1) : MI.getOperand(0);
  const MachineOperand &Base = MI.getOperand(2);
  const MachineOperand &Offset = MI.getOperand(NumOps - 3);
  Register WBReg = WB.getReg();
  Register BaseReg = Base.getReg();
  Register OffReg = Offset.getReg();
  unsigned OffImm = MI.getOperand(NumOps - 2).getImm();
  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI.getOperand(NumOps - 1).getImm();
  switch (AddrMode) {
  default: llvm_unreachable("Unknown indexed op!");
  case ARMII::AddrMode2: {
    bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM2Offset(OffImm);
    if (OffReg == 0) {
      if (ARM_AM::getSOImmVal(Amt) == -1)
        // Can't encode it in a so_imm operand. This transformation will
        // add more than 1 instruction. Abandon!
        return nullptr;
      UpdateMI = BuildMI(MF, MI.getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
                     .addReg(BaseReg)
                     .addImm(Amt)
                     .add(predOps(Pred))
                     .add(condCodeOp());
    } else if (Amt != 0) {
      ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
      unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
      UpdateMI = BuildMI(MF, MI.getDebugLoc(),
                         get(isSub ? ARM::SUBrsi : ARM::ADDrsi), WBReg)
                     .addReg(BaseReg)
                     .addReg(OffReg)
                     .addReg(0)
                     .addImm(SOOpc)
                     .add(predOps(Pred))
                     .add(condCodeOp());
    } else
      UpdateMI = BuildMI(MF, MI.getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
                     .addReg(BaseReg)
                     .addReg(OffReg)
                     .add(predOps(Pred))
                     .add(condCodeOp());
    break;
  }
  case ARMII::AddrMode3: {
    bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM3Offset(OffImm);
    if (OffReg == 0)
      // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
      UpdateMI = BuildMI(MF, MI.getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
                     .addReg(BaseReg)
                     .addImm(Amt)
                     .add(predOps(Pred))
                     .add(condCodeOp());
    else
      UpdateMI = BuildMI(MF, MI.getDebugLoc(),
                         get(isSub ?
                                     ARM::SUBrr : ARM::ADDrr), WBReg)
                     .addReg(BaseReg)
                     .addReg(OffReg)
                     .add(predOps(Pred))
                     .add(condCodeOp());
    break;
  }
  }

  std::vector<MachineInstr*> NewMIs;
  if (isPre) {
    if (isLoad)
      MemMI =
          BuildMI(MF, MI.getDebugLoc(), get(MemOpc), MI.getOperand(0).getReg())
              .addReg(WBReg)
              .addImm(0)
              .addImm(Pred);
    else
      MemMI = BuildMI(MF, MI.getDebugLoc(), get(MemOpc))
                  .addReg(MI.getOperand(1).getReg())
                  .addReg(WBReg)
                  .addReg(0)
                  .addImm(0)
                  .addImm(Pred);
    NewMIs.push_back(MemMI);
    NewMIs.push_back(UpdateMI);
  } else {
    if (isLoad)
      MemMI =
          BuildMI(MF, MI.getDebugLoc(), get(MemOpc), MI.getOperand(0).getReg())
              .addReg(BaseReg)
              .addImm(0)
              .addImm(Pred);
    else
      MemMI = BuildMI(MF, MI.getDebugLoc(), get(MemOpc))
                  .addReg(MI.getOperand(1).getReg())
                  .addReg(BaseReg)
                  .addReg(0)
                  .addImm(0)
                  .addImm(Pred);
    if (WB.isDead())
      UpdateMI->getOperand(0).setIsDead();
    NewMIs.push_back(UpdateMI);
    NewMIs.push_back(MemMI);
  }

  // Transfer LiveVariables states, kill / dead info.
  if (LV) {
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg() && Register::isVirtualRegister(MO.getReg())) {
        Register Reg = MO.getReg();

        LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
        if (MO.isDef()) {
          MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
          if (MO.isDead())
            LV->addVirtualRegisterDead(Reg, *NewMI);
        }
        if (MO.isUse() && MO.isKill()) {
          for (unsigned j = 0; j < 2; ++j) {
            // Look at the two new MI's in reverse order.
            MachineInstr *NewMI = NewMIs[j];
            if (!NewMI->readsRegister(Reg))
              continue;
            LV->addVirtualRegisterKilled(Reg, *NewMI);
            if (VI.removeKill(MI))
              VI.Kills.push_back(NewMI);
            break;
          }
        }
      }
    }
  }

  MachineBasicBlock::iterator MBBI = MI.getIterator();
  MFI->insert(MBBI, NewMIs[1]);
  MFI->insert(MBBI, NewMIs[0]);
  return NewMIs[0];
}

// Branch analysis.
bool ARMBaseInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  TBB = nullptr;
  FBB = nullptr;

  MachineBasicBlock::instr_iterator I = MBB.instr_end();
  if (I == MBB.instr_begin())
    return false; // Empty blocks are easy.
  --I;

  // Walk backwards from the end of the basic block until the branch is
  // analyzed or we give up.
  while (isPredicated(*I) || I->isTerminator() || I->isDebugValue()) {
    // Flag to be raised on unanalyzeable instructions. This is useful in cases
    // where we want to clean up on the end of the basic block before we bail
    // out.
    bool CantAnalyze = false;

    // Skip over DEBUG values and predicated nonterminators.
    while (I->isDebugInstr() || !I->isTerminator()) {
      if (I == MBB.instr_begin())
        return false;
      --I;
    }

    if (isIndirectBranchOpcode(I->getOpcode()) ||
        isJumpTableBranchOpcode(I->getOpcode())) {
      // Indirect branches and jump tables can't be analyzed, but we still want
      // to clean up any instructions at the tail of the basic block.
      CantAnalyze = true;
    } else if (isUncondBranchOpcode(I->getOpcode())) {
      TBB = I->getOperand(0).getMBB();
    } else if (isCondBranchOpcode(I->getOpcode())) {
      // Bail out if we encounter multiple conditional branches.
      if (!Cond.empty())
        return true;

      assert(!FBB && "FBB should have been null.");
      FBB = TBB;
      TBB = I->getOperand(0).getMBB();
      Cond.push_back(I->getOperand(1));
      Cond.push_back(I->getOperand(2));
    } else if (I->isReturn()) {
      // Returns can't be analyzed, but we should run cleanup.
      CantAnalyze = true;
    } else {
      // We encountered an unrecognized terminator. Bail out immediately.
      return true;
    }

    // Cleanup code - to be run for unpredicated unconditional branches and
    // returns.
    if (!isPredicated(*I) &&
        (isUncondBranchOpcode(I->getOpcode()) ||
         isIndirectBranchOpcode(I->getOpcode()) ||
         isJumpTableBranchOpcode(I->getOpcode()) ||
         I->isReturn())) {
      // Forget any previous conditional branch information - it no longer
      // applies.
      Cond.clear();
      FBB = nullptr;

      // If we can modify the function, delete everything below this
      // unconditional branch.
      if (AllowModify) {
        MachineBasicBlock::iterator DI = std::next(I);
        while (DI != MBB.instr_end()) {
          MachineInstr &InstToDelete = *DI;
          ++DI;
          InstToDelete.eraseFromParent();
        }
      }
    }

    if (CantAnalyze) {
      // We may not be able to analyze the block, but we could still have
      // an unconditional branch as the last instruction in the block, which
      // just branches to its layout successor. If this is the case, then just
      // remove it if we're allowed to make modifications.
      if (AllowModify && !isPredicated(MBB.back()) &&
          isUncondBranchOpcode(MBB.back().getOpcode()) &&
          TBB && MBB.isLayoutSuccessor(TBB))
        removeBranch(MBB);
      return true;
    }

    if (I == MBB.instr_begin())
      return false;

    --I;
  }

  // We made it past the terminators without bailing out - we must have
  // analyzed this branch successfully.
  return false;
}

unsigned ARMBaseInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                        int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

unsigned ARMBaseInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                        MachineBasicBlock *TBB,
                                        MachineBasicBlock *FBB,
                                        ArrayRef<MachineOperand> Cond,
                                        const DebugLoc &DL,
                                        int *BytesAdded) const {
  assert(!BytesAdded && "code size not handled");
  ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
  int BOpc = !AFI->isThumbFunction()
    ? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
  int BccOpc = !AFI->isThumbFunction()
    ? ARM::Bcc : (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc);
  bool isThumb = AFI->isThumbFunction() || AFI->isThumb2Function();

  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "ARM branch conditions have two components!");

  // For conditional branches, we use addOperand to preserve CPSR flags.

  if (!FBB) {
    if (Cond.empty()) { // Unconditional branch?
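      // Note: the Thumb unconditional branches (tB/t2B) still carry predicate
      // operands at the MI level, so they get an explicit AL predicate here,
      // while the ARM-mode B has no predicate operands at all.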
      if (isThumb)
        BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB).add(predOps(ARMCC::AL));
      else
        BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
    } else
      BuildMI(&MBB, DL, get(BccOpc))
          .addMBB(TBB)
          .addImm(Cond[0].getImm())
          .add(Cond[1]);
    return 1;
  }

  // Two-way conditional branch.
  BuildMI(&MBB, DL, get(BccOpc))
      .addMBB(TBB)
      .addImm(Cond[0].getImm())
      .add(Cond[1]);
  if (isThumb)
    BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB).add(predOps(ARMCC::AL));
  else
    BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
  return 2;
}

bool ARMBaseInstrInfo::
reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
  Cond[0].setImm(ARMCC::getOppositeCondition(CC));
  return false;
}

bool ARMBaseInstrInfo::isPredicated(const MachineInstr &MI) const {
  if (MI.isBundle()) {
    MachineBasicBlock::const_instr_iterator I = MI.getIterator();
    MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
    while (++I != E && I->isInsideBundle()) {
      int PIdx = I->findFirstPredOperandIdx();
      if (PIdx != -1 && I->getOperand(PIdx).getImm() != ARMCC::AL)
        return true;
    }
    return false;
  }

  int PIdx = MI.findFirstPredOperandIdx();
  return PIdx != -1 && MI.getOperand(PIdx).getImm() != ARMCC::AL;
}

std::string ARMBaseInstrInfo::createMIROperandComment(
    const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
    const TargetRegisterInfo *TRI) const {

  // First, let's see if there is a generic comment for this operand
  std::string GenericComment =
      TargetInstrInfo::createMIROperandComment(MI, Op, OpIdx, TRI);
  if (!GenericComment.empty())
    return GenericComment;

  // If not, check if we have an immediate operand.
  if (Op.getType() != MachineOperand::MO_Immediate)
    return std::string();

  // And print its corresponding condition code if the immediate is a
  // predicate.
  int FirstPredOp = MI.findFirstPredOperandIdx();
  if (FirstPredOp != (int) OpIdx)
    return std::string();

  std::string CC = "CC::";
  CC += ARMCondCodeToString((ARMCC::CondCodes)Op.getImm());
  return CC;
}

bool ARMBaseInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  unsigned Opc = MI.getOpcode();
  if (isUncondBranchOpcode(Opc)) {
    MI.setDesc(get(getMatchingCondBranchOpcode(Opc)));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
        .addImm(Pred[0].getImm())
        .addReg(Pred[1].getReg());
    return true;
  }

  int PIdx = MI.findFirstPredOperandIdx();
  if (PIdx != -1) {
    MachineOperand &PMO = MI.getOperand(PIdx);
    PMO.setImm(Pred[0].getImm());
    MI.getOperand(PIdx+1).setReg(Pred[1].getReg());

    // Thumb 1 arithmetic instructions do not set CPSR when executed inside an
    // IT block. This affects how they are printed.
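    // Record that here by clearing the optional CPSR def operand, so that
    // later passes and the printer treat this as the non-flag-setting form
    // (e.g. "add" rather than "adds").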
    const MCInstrDesc &MCID = MI.getDesc();
    if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
      assert(MCID.OpInfo[1].isOptionalDef() &&
             "CPSR def isn't expected operand");
      assert((MI.getOperand(1).isDead() ||
              MI.getOperand(1).getReg() != ARM::CPSR) &&
             "if conversion tried to stop defining used CPSR");
      MI.getOperand(1).setReg(ARM::NoRegister);
    }

    return true;
  }
  return false;
}

bool ARMBaseInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
                                         ArrayRef<MachineOperand> Pred2) const {
  if (Pred1.size() > 2 || Pred2.size() > 2)
    return false;

  ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
  ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
  if (CC1 == CC2)
    return true;

  switch (CC1) {
  default:
    return false;
  case ARMCC::AL:
    return true;
  case ARMCC::HS:
    return CC2 == ARMCC::HI;
  case ARMCC::LS:
    return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
  case ARMCC::GE:
    return CC2 == ARMCC::GT;
  case ARMCC::LE:
    return CC2 == ARMCC::LT;
  }
}

bool ARMBaseInstrInfo::ClobbersPredicate(MachineInstr &MI,
                                         std::vector<MachineOperand> &Pred,
                                         bool SkipDead) const {
  bool Found = false;
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    bool ClobbersCPSR = MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR);
    bool IsCPSR = MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR;
    if (ClobbersCPSR || IsCPSR) {

      // Filter out T1 instructions that have a dead CPSR,
      // allowing IT blocks to be generated containing T1 instructions.
      const MCInstrDesc &MCID = MI.getDesc();
      if (MCID.TSFlags & ARMII::ThumbArithFlagSetting && MO.isDead() &&
          SkipDead)
        continue;

      Pred.push_back(MO);
      Found = true;
    }
  }

  return Found;
}

bool ARMBaseInstrInfo::isCPSRDefined(const MachineInstr &MI) {
  for (const auto &MO : MI.operands())
    if (MO.isReg() && MO.getReg() == ARM::CPSR && MO.isDef() && !MO.isDead())
      return true;
  return false;
}

static bool isEligibleForITBlock(const MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default: return true;
  case ARM::tADC:   // ADC (register) T1
  case ARM::tADDi3: // ADD (immediate) T1
  case ARM::tADDi8: // ADD (immediate) T2
  case ARM::tADDrr: // ADD (register) T1
  case ARM::tAND:   // AND (register) T1
  case ARM::tASRri: // ASR (immediate) T1
  case ARM::tASRrr: // ASR (register) T1
  case ARM::tBIC:   // BIC (register) T1
  case ARM::tEOR:   // EOR (register) T1
  case ARM::tLSLri: // LSL (immediate) T1
  case ARM::tLSLrr: // LSL (register) T1
  case ARM::tLSRri: // LSR (immediate) T1
  case ARM::tLSRrr: // LSR (register) T1
  case ARM::tMUL:   // MUL T1
  case ARM::tMVN:   // MVN (register) T1
  case ARM::tORR:   // ORR (register) T1
  case ARM::tROR:   // ROR (register) T1
  case ARM::tRSB:   // RSB (immediate) T1
  case ARM::tSBC:   // SBC (register) T1
  case ARM::tSUBi3: // SUB (immediate) T1
  case ARM::tSUBi8: // SUB (immediate) T2
  case ARM::tSUBrr: // SUB (register) T1
    return !ARMBaseInstrInfo::isCPSRDefined(*MI);
  }
}

/// isPredicable - Return true if the specified instruction can be predicated.
/// By default, this returns true for every instruction with a
/// PredicateOperand.
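/// The ARM override below additionally rejects bundles, instructions that are
/// not eligible for an IT block, NEON-domain instructions (which have no
/// conditional ARM encoding and are deprecated inside Thumb2 IT blocks), and,
/// under the restricted-IT model, anything outside the ARMv8 IT rules.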
bool ARMBaseInstrInfo::isPredicable(const MachineInstr &MI) const {
  if (!MI.isPredicable())
    return false;

  if (MI.isBundle())
    return false;

  if (!isEligibleForITBlock(&MI))
    return false;

  const ARMFunctionInfo *AFI =
      MI.getParent()->getParent()->getInfo<ARMFunctionInfo>();

  // Neon instructions in Thumb2 IT blocks are deprecated, see ARMARM.
  // In their ARM encoding, they can't be encoded in a conditional form.
  if ((MI.getDesc().TSFlags & ARMII::DomainMask) == ARMII::DomainNEON)
    return false;

  if (AFI->isThumb2Function()) {
    if (getSubtarget().restrictIT())
      return isV8EligibleForIT(&MI);
  }

  return true;
}

namespace llvm {

template <> bool IsCPSRDead<MachineInstr>(const MachineInstr *MI) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isUndef() || MO.isUse())
      continue;
    if (MO.getReg() != ARM::CPSR)
      continue;
    if (!MO.isDead())
      return false;
  }
  // all definitions of CPSR are dead
  return true;
}

} // end namespace llvm

/// GetInstSize - Return the size of the specified MachineInstr.
///
unsigned ARMBaseInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  const MachineBasicBlock &MBB = *MI.getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  const MCInstrDesc &MCID = MI.getDesc();
  if (MCID.getSize())
    return MCID.getSize();

  switch (MI.getOpcode()) {
  default:
    // pseudo-instruction sizes are zero.
    return 0;
  case TargetOpcode::BUNDLE:
    return getInstBundleLength(MI);
  case ARM::MOVi16_ga_pcrel:
  case ARM::MOVTi16_ga_pcrel:
  case ARM::t2MOVi16_ga_pcrel:
  case ARM::t2MOVTi16_ga_pcrel:
    return 4;
  case ARM::MOVi32imm:
  case ARM::t2MOVi32imm:
    return 8;
  case ARM::CONSTPOOL_ENTRY:
  case ARM::JUMPTABLE_INSTS:
  case ARM::JUMPTABLE_ADDRS:
  case ARM::JUMPTABLE_TBB:
  case ARM::JUMPTABLE_TBH:
    // If this machine instr is a constant pool entry, its size is recorded as
    // operand #2.
    return MI.getOperand(2).getImm();
  case ARM::Int_eh_sjlj_longjmp:
    return 16;
  case ARM::tInt_eh_sjlj_longjmp:
    return 10;
  case ARM::tInt_WIN_eh_sjlj_longjmp:
    return 12;
  case ARM::Int_eh_sjlj_setjmp:
  case ARM::Int_eh_sjlj_setjmp_nofp:
    return 20;
  case ARM::tInt_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp_nofp:
    return 12;
  case ARM::SPACE:
    return MI.getOperand(1).getImm();
  case ARM::INLINEASM:
  case ARM::INLINEASM_BR: {
    // If this machine instr is an inline asm, measure it.
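    // getInlineAsmLength only gives a conservative estimate. ARM-mode
    // instructions are always 4 bytes wide, so round the estimate up to a
    // word boundary; Thumb functions may mix 2- and 4-byte encodings, so no
    // such rounding is applied there.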
    unsigned Size = getInlineAsmLength(MI.getOperand(0).getSymbolName(), *MAI);
    if (!MF->getInfo<ARMFunctionInfo>()->isThumbFunction())
      Size = alignTo(Size, 4);
    return Size;
  }
  }
}

unsigned ARMBaseInstrInfo::getInstBundleLength(const MachineInstr &MI) const {
  unsigned Size = 0;
  MachineBasicBlock::const_instr_iterator I = MI.getIterator();
  MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
    Size += getInstSizeInBytes(*I);
  }
  return Size;
}

void ARMBaseInstrInfo::copyFromCPSR(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg, bool KillSrc,
                                    const ARMSubtarget &Subtarget) const {
  unsigned Opc = Subtarget.isThumb()
                     ? (Subtarget.isMClass() ? ARM::t2MRS_M : ARM::t2MRS_AR)
                     : ARM::MRS;

  MachineInstrBuilder MIB =
      BuildMI(MBB, I, I->getDebugLoc(), get(Opc), DestReg);

  // There is only 1 A/R class MRS instruction, and it always refers to
  // APSR. However, there are lots of other possibilities on M-class cores.
  if (Subtarget.isMClass())
    MIB.addImm(0x800);

  MIB.add(predOps(ARMCC::AL))
      .addReg(ARM::CPSR, RegState::Implicit | getKillRegState(KillSrc));
}

void ARMBaseInstrInfo::copyToCPSR(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I,
                                  unsigned SrcReg, bool KillSrc,
                                  const ARMSubtarget &Subtarget) const {
  unsigned Opc = Subtarget.isThumb()
                     ? (Subtarget.isMClass() ? ARM::t2MSR_M : ARM::t2MSR_AR)
                     : ARM::MSR;

  MachineInstrBuilder MIB = BuildMI(MBB, I, I->getDebugLoc(), get(Opc));

  if (Subtarget.isMClass())
    MIB.addImm(0x800);
  else
    MIB.addImm(8);

  MIB.addReg(SrcReg, getKillRegState(KillSrc))
      .add(predOps(ARMCC::AL))
      .addReg(ARM::CPSR, RegState::Implicit | RegState::Define);
}

void llvm::addUnpredicatedMveVpredNOp(MachineInstrBuilder &MIB) {
  MIB.addImm(ARMVCC::None);
  MIB.addReg(0);
}

void llvm::addUnpredicatedMveVpredROp(MachineInstrBuilder &MIB,
                                      Register DestReg) {
  addUnpredicatedMveVpredNOp(MIB);
  MIB.addReg(DestReg, RegState::Undef);
}

void llvm::addPredicatedMveVpredNOp(MachineInstrBuilder &MIB, unsigned Cond) {
  MIB.addImm(Cond);
  MIB.addReg(ARM::VPR, RegState::Implicit);
}

void llvm::addPredicatedMveVpredROp(MachineInstrBuilder &MIB,
                                    unsigned Cond, unsigned Inactive) {
  addPredicatedMveVpredNOp(MIB, Cond);
  MIB.addReg(Inactive);
}

void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I,
                                   const DebugLoc &DL, MCRegister DestReg,
                                   MCRegister SrcReg, bool KillSrc) const {
  bool GPRDest = ARM::GPRRegClass.contains(DestReg);
  bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);

  if (GPRDest && GPRSrc) {
    BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());
    return;
  }

  bool SPRDest = ARM::SPRRegClass.contains(DestReg);
  bool SPRSrc = ARM::SPRRegClass.contains(SrcReg);

  unsigned Opc = 0;
  if (SPRDest && SPRSrc)
    Opc = ARM::VMOVS;
  else if (GPRDest && SPRSrc)
    Opc = ARM::VMOVRS;
  else if (SPRDest && GPRSrc)
    Opc = ARM::VMOVSR;
  else if (ARM::DPRRegClass.contains(DestReg, SrcReg) && Subtarget.hasFP64())
    Opc = ARM::VMOVD;
  else if (ARM::QPRRegClass.contains(DestReg, SrcReg))
    Opc
        = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;

  if (Opc) {
    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc), DestReg);
    MIB.addReg(SrcReg, getKillRegState(KillSrc));
    if (Opc == ARM::VORRq || Opc == ARM::MVE_VORR)
      MIB.addReg(SrcReg, getKillRegState(KillSrc));
    if (Opc == ARM::MVE_VORR)
      addUnpredicatedMveVpredROp(MIB, DestReg);
    else
      MIB.add(predOps(ARMCC::AL));
    return;
  }

  // Handle register classes that require multiple instructions.
  unsigned BeginIdx = 0;
  unsigned SubRegs = 0;
  int Spacing = 1;

  // Use VORRq when possible.
  if (ARM::QQPRRegClass.contains(DestReg, SrcReg)) {
    Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
    BeginIdx = ARM::qsub_0;
    SubRegs = 2;
  } else if (ARM::QQQQPRRegClass.contains(DestReg, SrcReg)) {
    Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
    BeginIdx = ARM::qsub_0;
    SubRegs = 4;
  // Fall back to VMOVD.
  } else if (ARM::DPairRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 2;
  } else if (ARM::DTripleRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 3;
  } else if (ARM::DQuadRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 4;
  } else if (ARM::GPRPairRegClass.contains(DestReg, SrcReg)) {
    Opc = Subtarget.isThumb2() ? ARM::tMOVr : ARM::MOVr;
    BeginIdx = ARM::gsub_0;
    SubRegs = 2;
  } else if (ARM::DPairSpcRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 2;
    Spacing = 2;
  } else if (ARM::DTripleSpcRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 3;
    Spacing = 2;
  } else if (ARM::DQuadSpcRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 4;
    Spacing = 2;
  } else if (ARM::DPRRegClass.contains(DestReg, SrcReg) &&
             !Subtarget.hasFP64()) {
    Opc = ARM::VMOVS;
    BeginIdx = ARM::ssub_0;
    SubRegs = 2;
  } else if (SrcReg == ARM::CPSR) {
    copyFromCPSR(MBB, I, DestReg, KillSrc, Subtarget);
    return;
  } else if (DestReg == ARM::CPSR) {
    copyToCPSR(MBB, I, SrcReg, KillSrc, Subtarget);
    return;
  } else if (DestReg == ARM::VPR) {
    assert(ARM::GPRRegClass.contains(SrcReg));
    BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMSR_P0), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .add(predOps(ARMCC::AL));
    return;
  } else if (SrcReg == ARM::VPR) {
    assert(ARM::GPRRegClass.contains(DestReg));
    BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMRS_P0), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .add(predOps(ARMCC::AL));
    return;
  } else if (DestReg == ARM::FPSCR_NZCV) {
    assert(ARM::GPRRegClass.contains(SrcReg));
    BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMSR_FPSCR_NZCVQC), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .add(predOps(ARMCC::AL));
    return;
  } else if (SrcReg == ARM::FPSCR_NZCV) {
    assert(ARM::GPRRegClass.contains(DestReg));
    BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMRS_FPSCR_NZCVQC), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .add(predOps(ARMCC::AL));
    return;
  }

  assert(Opc && "Impossible reg-to-reg copy");

  const TargetRegisterInfo *TRI = &getRegisterInfo();
  MachineInstrBuilder Mov;

  // Copy register tuples backward when the first Dest reg overlaps
  // with SrcReg.
  if (TRI->regsOverlap(SrcReg, TRI->getSubReg(DestReg, BeginIdx))) {
    BeginIdx = BeginIdx + ((SubRegs - 1) * Spacing);
    Spacing = -Spacing;
  }
#ifndef NDEBUG
  SmallSet<unsigned, 4> DstRegs;
#endif
  for (unsigned i = 0; i != SubRegs; ++i) {
    Register Dst = TRI->getSubReg(DestReg, BeginIdx + i * Spacing);
    Register Src = TRI->getSubReg(SrcReg, BeginIdx + i * Spacing);
    assert(Dst && Src && "Bad sub-register");
#ifndef NDEBUG
    assert(!DstRegs.count(Src) && "destructive vector copy");
    DstRegs.insert(Dst);
#endif
    Mov = BuildMI(MBB, I, I->getDebugLoc(), get(Opc), Dst).addReg(Src);
    // VORR (NEON or MVE) takes two source operands.
    if (Opc == ARM::VORRq || Opc == ARM::MVE_VORR) {
      Mov.addReg(Src);
    }
    // MVE VORR takes predicate operands in place of an ordinary condition.
    if (Opc == ARM::MVE_VORR)
      addUnpredicatedMveVpredROp(Mov, Dst);
    else
      Mov = Mov.add(predOps(ARMCC::AL));
    // MOVr can set CC.
    if (Opc == ARM::MOVr)
      Mov = Mov.add(condCodeOp());
  }
  // Add implicit super-register defs and kills to the last instruction.
  Mov->addRegisterDefined(DestReg, TRI);
  if (KillSrc)
    Mov->addRegisterKilled(SrcReg, TRI);
}

Optional<DestSourcePair>
ARMBaseInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
  // VMOVRRD is also a copy instruction, but it requires special handling:
  // it is a more complex form of copy, so we do not consider it here. For
  // recognition of such instructions the isExtractSubregLike MI interface
  // function could be used.
  // VORRq is considered a move only if its two source operands are the same
  // register.
  if (!MI.isMoveReg() ||
      (MI.getOpcode() == ARM::VORRq &&
       MI.getOperand(1).getReg() != MI.getOperand(2).getReg()))
    return None;
  return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
}

Optional<ParamLoadedValue>
ARMBaseInstrInfo::describeLoadedValue(const MachineInstr &MI,
                                      Register Reg) const {
  if (auto DstSrcPair = isCopyInstrImpl(MI)) {
    Register DstReg = DstSrcPair->Destination->getReg();

    // TODO: We don't handle cases where the forwarding reg is narrower/wider
    // than the copy registers. Consider for example:
    //
    //   s16 = VMOVS s0
    //   s17 = VMOVS s1
    //   call @callee(d0)
    //
    // We'd like to describe the call site value of d0 as d8, but this requires
    // gathering and merging the descriptions for the two VMOVS instructions.
    //
    // We also don't handle the reverse situation, where the forwarding reg is
    // narrower than the copy destination:
    //
    //   d8 = VMOVD d0
    //   call @callee(s1)
    //
    // We need to produce a fragment description (the call site value of s1 is
    // /not/ just d8).
    if (DstReg != Reg)
      return None;
  }
  return TargetInstrInfo::describeLoadedValue(MI, Reg);
}

const MachineInstrBuilder &
ARMBaseInstrInfo::AddDReg(MachineInstrBuilder &MIB, unsigned Reg,
                          unsigned SubIdx, unsigned State,
                          const TargetRegisterInfo *TRI) const {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (Register::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}

void ARMBaseInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    Register SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC,
                    const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  Align Alignment = MFI.getObjectAlign(FI);

  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
      MFI.getObjectSize(FI), Alignment);

  switch (TRI->getSpillSize(*RC)) {
  case 2:
    if (ARM::HPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DebugLoc(), get(ARM::VSTRH))
          .addReg(SrcReg, getKillRegState(isKill))
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 4:
    if (ARM::GPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DebugLoc(), get(ARM::STRi12))
          .addReg(SrcReg, getKillRegState(isKill))
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DebugLoc(), get(ARM::VSTRS))
          .addReg(SrcReg, getKillRegState(isKill))
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DebugLoc(), get(ARM::VSTR_P0_off))
          .addReg(SrcReg, getKillRegState(isKill))
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 8:
    if (ARM::DPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DebugLoc(), get(ARM::VSTRD))
          .addReg(SrcReg, getKillRegState(isKill))
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
      if (Subtarget.hasV5TEOps()) {
        MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::STRD));
        AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
        AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
        MIB.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      } else {
        // Fallback to STM instruction, which has existed since the dawn of
        // time.
        MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::STMIA))
                                      .addFrameIndex(FI)
                                      .addMemOperand(MMO)
                                      .add(predOps(ARMCC::AL));
        AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
        AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 16:
    if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
      // Use aligned spills if the stack can be realigned.
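      // The immediate 16 below is the guaranteed alignment of the address in
      // bytes, encoded in VST1's address operand; it is only legal when the
      // slot really is 16-byte aligned, hence the canRealignStack check.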
      if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF)) {
        BuildMI(MBB, I, DebugLoc(), get(ARM::VST1q64))
            .addFrameIndex(FI)
            .addImm(16)
            .addReg(SrcReg, getKillRegState(isKill))
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      } else {
        BuildMI(MBB, I, DebugLoc(), get(ARM::VSTMQIA))
            .addReg(SrcReg, getKillRegState(isKill))
            .addFrameIndex(FI)
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      }
    } else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
               Subtarget.hasMVEIntegerOps()) {
      auto MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::MVE_VSTRWU32));
      MIB.addReg(SrcReg, getKillRegState(isKill))
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO);
      addUnpredicatedMveVpredNOp(MIB);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 24:
    if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
      // Use aligned spills if the stack can be realigned.
      if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) &&
          Subtarget.hasNEON()) {
        BuildMI(MBB, I, DebugLoc(), get(ARM::VST1d64TPseudo))
            .addFrameIndex(FI)
            .addImm(16)
            .addReg(SrcReg, getKillRegState(isKill))
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      } else {
        MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(),
                                          get(ARM::VSTMDIA))
                                      .addFrameIndex(FI)
                                      .add(predOps(ARMCC::AL))
                                      .addMemOperand(MMO);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
        AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 32:
    if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
        ARM::DQuadRegClass.hasSubClassEq(RC)) {
      if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) &&
          Subtarget.hasNEON()) {
        // FIXME: It's possible to only store part of the QQ register if the
        // spilled def has a sub-register index.
        BuildMI(MBB, I, DebugLoc(), get(ARM::VST1d64QPseudo))
            .addFrameIndex(FI)
            .addImm(16)
            .addReg(SrcReg, getKillRegState(isKill))
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      } else {
        MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(),
                                          get(ARM::VSTMDIA))
                                      .addFrameIndex(FI)
                                      .add(predOps(ARMCC::AL))
                                      .addMemOperand(MMO);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
        AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 64:
    if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
      MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::VSTMDIA))
                                    .addFrameIndex(FI)
                                    .add(predOps(ARMCC::AL))
                                    .addMemOperand(MMO);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_4, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_5, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_6, 0, TRI);
      AddDReg(MIB, SrcReg, ARM::dsub_7, 0, TRI);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  default:
    llvm_unreachable("Unknown reg class!");
  }
}

unsigned ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                              int &FrameIndex) const {
  switch (MI.getOpcode()) {
  default: break;
  case ARM::STRrs:
  case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isReg() &&
        MI.getOperand(3).isImm() && MI.getOperand(2).getReg() == 0 &&
        MI.getOperand(3).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  case ARM::STRi12:
  case ARM::t2STRi12:
  case ARM::tSTRspi:
  case ARM::VSTRD:
  case ARM::VSTRS:
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  case ARM::VSTR_P0_off:
    if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
        MI.getOperand(1).getImm() == 0) {
      FrameIndex = MI.getOperand(0).getIndex();
      return ARM::P0;
    }
    break;
  case ARM::VST1q64:
  case ARM::VST1d64TPseudo:
  case ARM::VST1d64QPseudo:
    if (MI.getOperand(0).isFI() && MI.getOperand(2).getSubReg() == 0) {
      FrameIndex = MI.getOperand(0).getIndex();
      return MI.getOperand(2).getReg();
    }
    break;
  case ARM::VSTMQIA:
    if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned ARMBaseInstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
                                                    int &FrameIndex) const {
  SmallVector<const MachineMemOperand *, 1> Accesses;
  if (MI.mayStore() && hasStoreToStackSlot(MI, Accesses) &&
      Accesses.size() == 1) {
    FrameIndex =
        cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
            ->getFrameIndex();
    return true;
  }
  return false;
}

void ARMBaseInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     Register DestReg, int FI,
                     const TargetRegisterClass *RC,
                     const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const Align Alignment = MFI.getObjectAlign(FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
      MFI.getObjectSize(FI), Alignment);

  switch (TRI->getSpillSize(*RC)) {
  case 2:
    if (ARM::HPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DL, get(ARM::VLDRH), DestReg)
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 4:
    if (ARM::GPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DL, get(ARM::LDRi12), DestReg)
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg)
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DL, get(ARM::VLDR_P0_off), DestReg)
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 8:
    if (ARM::DPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg)
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
      MachineInstrBuilder MIB;

      if (Subtarget.hasV5TEOps()) {
        MIB = BuildMI(MBB, I, DL, get(ARM::LDRD));
        AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
        AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
        MIB.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      } else {
        // Fallback to LDM instruction, which has existed since the dawn of
        // time.
        MIB = BuildMI(MBB, I, DL, get(ARM::LDMIA))
                  .addFrameIndex(FI)
                  .addMemOperand(MMO)
                  .add(predOps(ARMCC::AL));
        MIB = AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
      }

      if (Register::isPhysicalRegister(DestReg))
        MIB.addReg(DestReg, RegState::ImplicitDefine);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 16:
    if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
      if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF)) {
        BuildMI(MBB, I, DL, get(ARM::VLD1q64), DestReg)
            .addFrameIndex(FI)
            .addImm(16)
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      } else {
        BuildMI(MBB, I, DL, get(ARM::VLDMQIA), DestReg)
            .addFrameIndex(FI)
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      }
    } else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
               Subtarget.hasMVEIntegerOps()) {
      auto MIB = BuildMI(MBB, I, DL, get(ARM::MVE_VLDRWU32), DestReg);
      MIB.addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO);
      addUnpredicatedMveVpredNOp(MIB);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 24:
    if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
      if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) &&
          Subtarget.hasNEON()) {
        BuildMI(MBB, I, DL, get(ARM::VLD1d64TPseudo), DestReg)
            .addFrameIndex(FI)
            .addImm(16)
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      } else {
        MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                                      .addFrameIndex(FI)
                                      .addMemOperand(MMO)
                                      .add(predOps(ARMCC::AL));
        MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
        if (Register::isPhysicalRegister(DestReg))
          MIB.addReg(DestReg, RegState::ImplicitDefine);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 32:
    if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
        ARM::DQuadRegClass.hasSubClassEq(RC)) {
      if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) &&
          Subtarget.hasNEON()) {
        BuildMI(MBB, I, DL, get(ARM::VLD1d64QPseudo), DestReg)
            .addFrameIndex(FI)
            .addImm(16)
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      } else {
        MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                                      .addFrameIndex(FI)
                                      .add(predOps(ARMCC::AL))
                                      .addMemOperand(MMO);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
        if (Register::isPhysicalRegister(DestReg))
          MIB.addReg(DestReg, RegState::ImplicitDefine);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 64:
    if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
      MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                                    .addFrameIndex(FI)
                                    .add(predOps(ARMCC::AL))
                                    .addMemOperand(MMO);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg,
                    ARM::dsub_2, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_7, RegState::DefineNoRead, TRI);
      if (Register::isPhysicalRegister(DestReg))
        MIB.addReg(DestReg, RegState::ImplicitDefine);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  default:
    llvm_unreachable("Unknown regclass!");
  }
}

unsigned ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                               int &FrameIndex) const {
  switch (MI.getOpcode()) {
  default: break;
  case ARM::LDRrs:
  case ARM::t2LDRs: // FIXME: don't use t2LDRs to access frame.
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isReg() &&
        MI.getOperand(3).isImm() && MI.getOperand(2).getReg() == 0 &&
        MI.getOperand(3).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  case ARM::LDRi12:
  case ARM::t2LDRi12:
  case ARM::tLDRspi:
  case ARM::VLDRD:
  case ARM::VLDRS:
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  case ARM::VLDR_P0_off:
    if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
        MI.getOperand(1).getImm() == 0) {
      FrameIndex = MI.getOperand(0).getIndex();
      return ARM::P0;
    }
    break;
  case ARM::VLD1q64:
  case ARM::VLD1d8TPseudo:
  case ARM::VLD1d16TPseudo:
  case ARM::VLD1d32TPseudo:
  case ARM::VLD1d64TPseudo:
  case ARM::VLD1d8QPseudo:
  case ARM::VLD1d16QPseudo:
  case ARM::VLD1d32QPseudo:
  case ARM::VLD1d64QPseudo:
    if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  case ARM::VLDMQIA:
    if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned ARMBaseInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
                                                     int &FrameIndex) const {
  SmallVector<const MachineMemOperand *, 1> Accesses;
  if (MI.mayLoad() && hasLoadFromStackSlot(MI, Accesses) &&
      Accesses.size() == 1) {
    FrameIndex =
        cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
            ->getFrameIndex();
    return true;
  }
  return false;
}

/// Expands MEMCPY to either LDMIA/STMIA or LDMIA_UPD/STMIA_UPD
/// depending on whether the result is used.
void ARMBaseInstrInfo::expandMEMCPY(MachineBasicBlock::iterator MI) const {
  bool isThumb1 = Subtarget.isThumb1Only();
  bool isThumb2 = Subtarget.isThumb2();
  const ARMBaseInstrInfo *TII = Subtarget.getInstrInfo();

  DebugLoc dl = MI->getDebugLoc();
  MachineBasicBlock *BB = MI->getParent();

  MachineInstrBuilder LDM, STM;
  if (isThumb1 || !MI->getOperand(1).isDead()) {
    MachineOperand LDWb(MI->getOperand(1));
    LDM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2LDMIA_UPD
                                        : isThumb1 ?
                                                     ARM::tLDMIA_UPD
                                                   : ARM::LDMIA_UPD))
              .add(LDWb);
  } else {
    LDM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2LDMIA : ARM::LDMIA));
  }

  if (isThumb1 || !MI->getOperand(0).isDead()) {
    MachineOperand STWb(MI->getOperand(0));
    STM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2STMIA_UPD
                                        : isThumb1 ? ARM::tSTMIA_UPD
                                                   : ARM::STMIA_UPD))
              .add(STWb);
  } else {
    STM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2STMIA : ARM::STMIA));
  }

  MachineOperand LDBase(MI->getOperand(3));
  LDM.add(LDBase).add(predOps(ARMCC::AL));

  MachineOperand STBase(MI->getOperand(2));
  STM.add(STBase).add(predOps(ARMCC::AL));

  // Sort the scratch registers into ascending order.
  const TargetRegisterInfo &TRI = getRegisterInfo();
  SmallVector<unsigned, 6> ScratchRegs;
  for (unsigned I = 5; I < MI->getNumOperands(); ++I)
    ScratchRegs.push_back(MI->getOperand(I).getReg());
  llvm::sort(ScratchRegs,
             [&TRI](const unsigned &Reg1, const unsigned &Reg2) -> bool {
               return TRI.getEncodingValue(Reg1) <
                      TRI.getEncodingValue(Reg2);
             });

  for (const auto &Reg : ScratchRegs) {
    LDM.addReg(Reg, RegState::Define);
    STM.addReg(Reg, RegState::Kill);
  }

  BB->erase(MI);
}

bool ARMBaseInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  if (MI.getOpcode() == TargetOpcode::LOAD_STACK_GUARD) {
    assert(getSubtarget().getTargetTriple().isOSBinFormatMachO() &&
           "LOAD_STACK_GUARD currently supported only for MachO.");
    expandLoadStackGuard(MI);
    MI.getParent()->erase(MI);
    return true;
  }

  if (MI.getOpcode() == ARM::MEMCPY) {
    expandMEMCPY(MI);
    return true;
  }

  // This hook gets to expand COPY instructions before they become
  // copyPhysReg() calls. Look for VMOVS instructions that can legally be
  // widened to VMOVD. We prefer the VMOVD when possible because it may be
  // changed into a VORR that can go down the NEON pipeline.
  if (!MI.isCopy() || Subtarget.dontWidenVMOVS() || !Subtarget.hasFP64())
    return false;

  // Look for a copy between even S-registers. That is where we keep floats
  // when using NEON v2f32 instructions for f32 arithmetic.
  Register DstRegS = MI.getOperand(0).getReg();
  Register SrcRegS = MI.getOperand(1).getReg();
  if (!ARM::SPRRegClass.contains(DstRegS, SrcRegS))
    return false;

  const TargetRegisterInfo *TRI = &getRegisterInfo();
  unsigned DstRegD = TRI->getMatchingSuperReg(DstRegS, ARM::ssub_0,
                                              &ARM::DPRRegClass);
  unsigned SrcRegD = TRI->getMatchingSuperReg(SrcRegS, ARM::ssub_0,
                                              &ARM::DPRRegClass);
  if (!DstRegD || !SrcRegD)
    return false;

  // We want to widen this into a DstRegD = VMOVD SrcRegD copy. This is only
  // legal if the COPY already defines the full DstRegD, and it isn't a
  // sub-register insertion.
  if (!MI.definesRegister(DstRegD, TRI) || MI.readsRegister(DstRegD, TRI))
    return false;

  // A dead copy shouldn't show up here, but reject it just in case.
  if (MI.getOperand(0).isDead())
    return false;

  // All clear, widen the COPY.
  LLVM_DEBUG(dbgs() << "widening: " << MI);
  MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);

  // Get rid of the old implicit-def of DstRegD. Leave it if it defines a Q-reg
  // or some other super-register.
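  // The widened VMOVD will define all of DstRegD explicitly, so an
  // implicit-def of exactly DstRegD would now be redundant; implicit defs
  // of larger super-registers are not matched below and are kept.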
1656 int ImpDefIdx = MI.findRegisterDefOperandIdx(DstRegD); 1657 if (ImpDefIdx != -1) 1658 MI.RemoveOperand(ImpDefIdx); 1659 1660 // Change the opcode and operands. 1661 MI.setDesc(get(ARM::VMOVD)); 1662 MI.getOperand(0).setReg(DstRegD); 1663 MI.getOperand(1).setReg(SrcRegD); 1664 MIB.add(predOps(ARMCC::AL)); 1665 1666 // We are now reading SrcRegD instead of SrcRegS. This may upset the 1667 // register scavenger and machine verifier, so we need to indicate that we 1668 // are reading an undefined value from SrcRegD, but a proper value from 1669 // SrcRegS. 1670 MI.getOperand(1).setIsUndef(); 1671 MIB.addReg(SrcRegS, RegState::Implicit); 1672 1673 // SrcRegD may actually contain an unrelated value in the ssub_1 1674 // sub-register. Don't kill it. Only kill the ssub_0 sub-register. 1675 if (MI.getOperand(1).isKill()) { 1676 MI.getOperand(1).setIsKill(false); 1677 MI.addRegisterKilled(SrcRegS, TRI, true); 1678 } 1679 1680 LLVM_DEBUG(dbgs() << "replaced by: " << MI); 1681 return true; 1682 } 1683 1684 /// Create a copy of a const pool value. Update CPI to the new index and return 1685 /// the label UID. 1686 static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) { 1687 MachineConstantPool *MCP = MF.getConstantPool(); 1688 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1689 1690 const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPI]; 1691 assert(MCPE.isMachineConstantPoolEntry() && 1692 "Expecting a machine constantpool entry!"); 1693 ARMConstantPoolValue *ACPV = 1694 static_cast<ARMConstantPoolValue*>(MCPE.Val.MachineCPVal); 1695 1696 unsigned PCLabelId = AFI->createPICLabelUId(); 1697 ARMConstantPoolValue *NewCPV = nullptr; 1698 1699 // FIXME: The below assumes PIC relocation model and that the function 1700 // is Thumb mode (t1 or t2). PCAdjustment would be 8 for ARM mode PIC, and 1701 // zero for non-PIC in ARM or Thumb. The callers are all of thumb LDR 1702 // instructions, so that's probably OK, but is PIC always correct when 1703 // we get here? 
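  // Each PIC constant-pool value encodes an offset relative to the PC label
  // of the load that references it, so a cloned load cannot share the
  // original entry: the code below gives each clone a fresh label UID and a
  // fresh constant-pool entry of the matching kind.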
1704 if (ACPV->isGlobalValue()) 1705 NewCPV = ARMConstantPoolConstant::Create( 1706 cast<ARMConstantPoolConstant>(ACPV)->getGV(), PCLabelId, ARMCP::CPValue, 1707 4, ACPV->getModifier(), ACPV->mustAddCurrentAddress()); 1708 else if (ACPV->isExtSymbol()) 1709 NewCPV = ARMConstantPoolSymbol:: 1710 Create(MF.getFunction().getContext(), 1711 cast<ARMConstantPoolSymbol>(ACPV)->getSymbol(), PCLabelId, 4); 1712 else if (ACPV->isBlockAddress()) 1713 NewCPV = ARMConstantPoolConstant:: 1714 Create(cast<ARMConstantPoolConstant>(ACPV)->getBlockAddress(), PCLabelId, 1715 ARMCP::CPBlockAddress, 4); 1716 else if (ACPV->isLSDA()) 1717 NewCPV = ARMConstantPoolConstant::Create(&MF.getFunction(), PCLabelId, 1718 ARMCP::CPLSDA, 4); 1719 else if (ACPV->isMachineBasicBlock()) 1720 NewCPV = ARMConstantPoolMBB:: 1721 Create(MF.getFunction().getContext(), 1722 cast<ARMConstantPoolMBB>(ACPV)->getMBB(), PCLabelId, 4); 1723 else 1724 llvm_unreachable("Unexpected ARM constantpool value type!!"); 1725 CPI = MCP->getConstantPoolIndex(NewCPV, MCPE.getAlign()); 1726 return PCLabelId; 1727 } 1728 1729 void ARMBaseInstrInfo::reMaterialize(MachineBasicBlock &MBB, 1730 MachineBasicBlock::iterator I, 1731 Register DestReg, unsigned SubIdx, 1732 const MachineInstr &Orig, 1733 const TargetRegisterInfo &TRI) const { 1734 unsigned Opcode = Orig.getOpcode(); 1735 switch (Opcode) { 1736 default: { 1737 MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig); 1738 MI->substituteRegister(Orig.getOperand(0).getReg(), DestReg, SubIdx, TRI); 1739 MBB.insert(I, MI); 1740 break; 1741 } 1742 case ARM::tLDRpci_pic: 1743 case ARM::t2LDRpci_pic: { 1744 MachineFunction &MF = *MBB.getParent(); 1745 unsigned CPI = Orig.getOperand(1).getIndex(); 1746 unsigned PCLabelId = duplicateCPV(MF, CPI); 1747 BuildMI(MBB, I, Orig.getDebugLoc(), get(Opcode), DestReg) 1748 .addConstantPoolIndex(CPI) 1749 .addImm(PCLabelId) 1750 .cloneMemRefs(Orig); 1751 break; 1752 } 1753 } 1754 } 1755 1756 MachineInstr & 1757 ARMBaseInstrInfo::duplicate(MachineBasicBlock &MBB, 1758 MachineBasicBlock::iterator InsertBefore, 1759 const MachineInstr &Orig) const { 1760 MachineInstr &Cloned = TargetInstrInfo::duplicate(MBB, InsertBefore, Orig); 1761 MachineBasicBlock::instr_iterator I = Cloned.getIterator(); 1762 for (;;) { 1763 switch (I->getOpcode()) { 1764 case ARM::tLDRpci_pic: 1765 case ARM::t2LDRpci_pic: { 1766 MachineFunction &MF = *MBB.getParent(); 1767 unsigned CPI = I->getOperand(1).getIndex(); 1768 unsigned PCLabelId = duplicateCPV(MF, CPI); 1769 I->getOperand(1).setIndex(CPI); 1770 I->getOperand(2).setImm(PCLabelId); 1771 break; 1772 } 1773 } 1774 if (!I->isBundledWithSucc()) 1775 break; 1776 ++I; 1777 } 1778 return Cloned; 1779 } 1780 1781 bool ARMBaseInstrInfo::produceSameValue(const MachineInstr &MI0, 1782 const MachineInstr &MI1, 1783 const MachineRegisterInfo *MRI) const { 1784 unsigned Opcode = MI0.getOpcode(); 1785 if (Opcode == ARM::t2LDRpci || 1786 Opcode == ARM::t2LDRpci_pic || 1787 Opcode == ARM::tLDRpci || 1788 Opcode == ARM::tLDRpci_pic || 1789 Opcode == ARM::LDRLIT_ga_pcrel || 1790 Opcode == ARM::LDRLIT_ga_pcrel_ldr || 1791 Opcode == ARM::tLDRLIT_ga_pcrel || 1792 Opcode == ARM::MOV_ga_pcrel || 1793 Opcode == ARM::MOV_ga_pcrel_ldr || 1794 Opcode == ARM::t2MOV_ga_pcrel) { 1795 if (MI1.getOpcode() != Opcode) 1796 return false; 1797 if (MI0.getNumOperands() != MI1.getNumOperands()) 1798 return false; 1799 1800 const MachineOperand &MO0 = MI0.getOperand(1); 1801 const MachineOperand &MO1 = MI1.getOperand(1); 1802 if (MO0.getOffset() != MO1.getOffset()) 
      return false;

    if (Opcode == ARM::LDRLIT_ga_pcrel ||
        Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
        Opcode == ARM::tLDRLIT_ga_pcrel ||
        Opcode == ARM::MOV_ga_pcrel ||
        Opcode == ARM::MOV_ga_pcrel_ldr ||
        Opcode == ARM::t2MOV_ga_pcrel)
      // Ignore the PC labels.
      return MO0.getGlobal() == MO1.getGlobal();

    const MachineFunction *MF = MI0.getParent()->getParent();
    const MachineConstantPool *MCP = MF->getConstantPool();
    int CPI0 = MO0.getIndex();
    int CPI1 = MO1.getIndex();
    const MachineConstantPoolEntry &MCPE0 = MCP->getConstants()[CPI0];
    const MachineConstantPoolEntry &MCPE1 = MCP->getConstants()[CPI1];
    bool isARMCP0 = MCPE0.isMachineConstantPoolEntry();
    bool isARMCP1 = MCPE1.isMachineConstantPoolEntry();
    if (isARMCP0 && isARMCP1) {
      ARMConstantPoolValue *ACPV0 =
          static_cast<ARMConstantPoolValue *>(MCPE0.Val.MachineCPVal);
      ARMConstantPoolValue *ACPV1 =
          static_cast<ARMConstantPoolValue *>(MCPE1.Val.MachineCPVal);
      return ACPV0->hasSameValue(ACPV1);
    } else if (!isARMCP0 && !isARMCP1) {
      return MCPE0.Val.ConstVal == MCPE1.Val.ConstVal;
    }
    return false;
  } else if (Opcode == ARM::PICLDR) {
    if (MI1.getOpcode() != Opcode)
      return false;
    if (MI0.getNumOperands() != MI1.getNumOperands())
      return false;

    Register Addr0 = MI0.getOperand(1).getReg();
    Register Addr1 = MI1.getOperand(1).getReg();
    if (Addr0 != Addr1) {
      if (!MRI || !Register::isVirtualRegister(Addr0) ||
          !Register::isVirtualRegister(Addr1))
        return false;

      // This assumes SSA form.
      MachineInstr *Def0 = MRI->getVRegDef(Addr0);
      MachineInstr *Def1 = MRI->getVRegDef(Addr1);
      // Check if the loaded values, e.g. a constantpool entry or a global
      // address, are the same.
      if (!produceSameValue(*Def0, *Def1, MRI))
        return false;
    }

    for (unsigned i = 3, e = MI0.getNumOperands(); i != e; ++i) {
      // %12 = PICLDR %11, 0, 14, %noreg
      const MachineOperand &MO0 = MI0.getOperand(i);
      const MachineOperand &MO1 = MI1.getOperand(i);
      if (!MO0.isIdenticalTo(MO1))
        return false;
    }
    return true;
  }

  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

/// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to
/// determine if two loads are loading from the same base address. It should
/// only return true if the base pointers are the same and the only
/// difference between the two addresses is the offset. It also returns the
/// offsets by reference.
///
/// FIXME: remove this in favor of the MachineInstr interface once pre-RA-sched
/// is permanently disabled.
bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                               int64_t &Offset1,
                                               int64_t &Offset2) const {
  // Don't worry about Thumb: just ARM and Thumb2.
  if (Subtarget.isThumb1Only()) return false;

  if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
    return false;

  switch (Load1->getMachineOpcode()) {
  default:
    return false;
  case ARM::LDRi12:
  case ARM::LDRBi12:
  case ARM::LDRD:
  case ARM::LDRH:
  case ARM::LDRSB:
  case ARM::LDRSH:
  case ARM::VLDRD:
  case ARM::VLDRS:
  case ARM::t2LDRi8:
  case ARM::t2LDRBi8:
  case ARM::t2LDRDi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRi12:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:
    break;
  }

  switch (Load2->getMachineOpcode()) {
  default:
    return false;
  case ARM::LDRi12:
  case ARM::LDRBi12:
  case ARM::LDRD:
  case ARM::LDRH:
  case ARM::LDRSB:
  case ARM::LDRSH:
  case ARM::VLDRD:
  case ARM::VLDRS:
  case ARM::t2LDRi8:
  case ARM::t2LDRBi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRi12:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:
    break;
  }

  // Check if base addresses and chain operands match.
  if (Load1->getOperand(0) != Load2->getOperand(0) ||
      Load1->getOperand(4) != Load2->getOperand(4))
    return false;

  // Index should be Reg0.
  if (Load1->getOperand(3) != Load2->getOperand(3))
    return false;

  // Determine the offsets.
  if (isa<ConstantSDNode>(Load1->getOperand(1)) &&
      isa<ConstantSDNode>(Load2->getOperand(1))) {
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(1))->getSExtValue();
    Offset2 = cast<ConstantSDNode>(Load2->getOperand(1))->getSExtValue();
    return true;
  }

  return false;
}

/// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
/// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
/// be scheduled together. On some targets if two loads are loading from
/// addresses in the same cache line, it's better if they are scheduled
/// together. This function takes two integers that represent the load offsets
/// from the common base address. It returns true if it decides it's desirable
/// to schedule the two loads together. "NumLoads" is the number of loads that
/// have already been scheduled after Load1.
///
/// FIXME: remove this in favor of the MachineInstr interface once pre-RA-sched
/// is permanently disabled.
bool ARMBaseInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                               int64_t Offset1,
                                               int64_t Offset2,
                                               unsigned NumLoads) const {
  // Don't worry about Thumb: just ARM and Thumb2.
  if (Subtarget.isThumb1Only()) return false;

  assert(Offset2 > Offset1);

  if ((Offset2 - Offset1) / 8 > 64)
    return false;

  // Check if the machine opcodes are different. If they are different
  // then we consider them to not be of the same base address,
  // EXCEPT in the case of Thumb2 byte loads where one is LDRBi8 and the
  // other is LDRBi12. In that case they are considered to be the same
  // because they are different encoding forms of the same basic instruction.
  if ((Load1->getMachineOpcode() != Load2->getMachineOpcode()) &&
      !((Load1->getMachineOpcode() == ARM::t2LDRBi8 &&
         Load2->getMachineOpcode() == ARM::t2LDRBi12) ||
        (Load1->getMachineOpcode() == ARM::t2LDRBi12 &&
         Load2->getMachineOpcode() == ARM::t2LDRBi8)))
    return false; // FIXME: overly conservative?

  // Four loads in a row should be sufficient.
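  // NumLoads is the number of loads already scheduled after Load1, so
  // refusing once it reaches 3 caps a run at four consecutive loads.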
1980 if (NumLoads >= 3) 1981 return false; 1982 1983 return true; 1984 } 1985 1986 bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr &MI, 1987 const MachineBasicBlock *MBB, 1988 const MachineFunction &MF) const { 1989 // Debug info is never a scheduling boundary. It's necessary to be explicit 1990 // due to the special treatment of IT instructions below, otherwise a 1991 // dbg_value followed by an IT will result in the IT instruction being 1992 // considered a scheduling hazard, which is wrong. It should be the actual 1993 // instruction preceding the dbg_value instruction(s), just like it is 1994 // when debug info is not present. 1995 if (MI.isDebugInstr()) 1996 return false; 1997 1998 // Terminators and labels can't be scheduled around. 1999 if (MI.isTerminator() || MI.isPosition()) 2000 return true; 2001 2002 // INLINEASM_BR can jump to another block 2003 if (MI.getOpcode() == TargetOpcode::INLINEASM_BR) 2004 return true; 2005 2006 // Treat the start of the IT block as a scheduling boundary, but schedule 2007 // t2IT along with all instructions following it. 2008 // FIXME: This is a big hammer. But the alternative is to add all potential 2009 // true and anti dependencies to IT block instructions as implicit operands 2010 // to the t2IT instruction. The added compile time and complexity does not 2011 // seem worth it. 2012 MachineBasicBlock::const_iterator I = MI; 2013 // Make sure to skip any debug instructions 2014 while (++I != MBB->end() && I->isDebugInstr()) 2015 ; 2016 if (I != MBB->end() && I->getOpcode() == ARM::t2IT) 2017 return true; 2018 2019 // Don't attempt to schedule around any instruction that defines 2020 // a stack-oriented pointer, as it's unlikely to be profitable. This 2021 // saves compile time, because it doesn't require every single 2022 // stack slot reference to depend on the instruction that does the 2023 // modification. 2024 // Calls don't actually change the stack pointer, even if they have imp-defs. 2025 // No ARM calling conventions change the stack pointer. (X86 calling 2026 // conventions sometimes do). 2027 if (!MI.isCall() && MI.definesRegister(ARM::SP)) 2028 return true; 2029 2030 return false; 2031 } 2032 2033 bool ARMBaseInstrInfo:: 2034 isProfitableToIfCvt(MachineBasicBlock &MBB, 2035 unsigned NumCycles, unsigned ExtraPredCycles, 2036 BranchProbability Probability) const { 2037 if (!NumCycles) 2038 return false; 2039 2040 // If we are optimizing for size, see if the branch in the predecessor can be 2041 // lowered to cbn?z by the constant island lowering pass, and return false if 2042 // so. This results in a shorter instruction sequence. 
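  // For example, a 'cmp rN, #0' + 'beq' pair in the predecessor can become a
  // single 16-bit 'cbz rN', which is cheaper than predicating this block.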
  if (MBB.getParent()->getFunction().hasOptSize()) {
    MachineBasicBlock *Pred = *MBB.pred_begin();
    if (!Pred->empty()) {
      MachineInstr *LastMI = &*Pred->rbegin();
      if (LastMI->getOpcode() == ARM::t2Bcc) {
        const TargetRegisterInfo *TRI = &getRegisterInfo();
        MachineInstr *CmpMI = findCMPToFoldIntoCBZ(LastMI, TRI);
        if (CmpMI)
          return false;
      }
    }
  }
  return isProfitableToIfCvt(MBB, NumCycles, ExtraPredCycles,
                             MBB, 0, 0, Probability);
}

bool ARMBaseInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &TBB,
                    unsigned TCycles, unsigned TExtra,
                    MachineBasicBlock &FBB,
                    unsigned FCycles, unsigned FExtra,
                    BranchProbability Probability) const {
  if (!TCycles)
    return false;

  // In Thumb code we often end up trading one branch for an IT block, and
  // if the block gets cloned this can increase code size. Prevent blocks
  // with multiple predecessors from being ifcvted to avoid this cloning.
  if (Subtarget.isThumb2() && TBB.getParent()->getFunction().hasMinSize()) {
    if (TBB.pred_size() != 1 || FBB.pred_size() != 1)
      return false;
  }

  // Attempt to estimate the relative costs of predication versus branching.
  // Here we scale up each component of UnpredCost to avoid precision issues
  // when scaling TCycles/FCycles by Probability.
  const unsigned ScalingUpFactor = 1024;

  unsigned PredCost = (TCycles + FCycles + TExtra + FExtra) * ScalingUpFactor;
  unsigned UnpredCost;
  if (!Subtarget.hasBranchPredictor()) {
    // When we don't have a branch predictor it's always cheaper to not take a
    // branch than take it, so we have to take that into account.
    unsigned NotTakenBranchCost = 1;
    unsigned TakenBranchCost = Subtarget.getMispredictionPenalty();
    unsigned TUnpredCycles, FUnpredCycles;
    if (!FCycles) {
      // Triangle: TBB is the fallthrough
      TUnpredCycles = TCycles + NotTakenBranchCost;
      FUnpredCycles = TakenBranchCost;
    } else {
      // Diamond: TBB is the block that is branched to, FBB is the fallthrough
      TUnpredCycles = TCycles + TakenBranchCost;
      FUnpredCycles = FCycles + NotTakenBranchCost;
      // The branch at the end of FBB will disappear when it's predicated, so
      // discount it from PredCost.
      PredCost -= 1 * ScalingUpFactor;
    }
    // The total cost is the cost of each path scaled by their probabilities.
    unsigned TUnpredCost = Probability.scale(TUnpredCycles * ScalingUpFactor);
    unsigned FUnpredCost =
        Probability.getCompl().scale(FUnpredCycles * ScalingUpFactor);
    UnpredCost = TUnpredCost + FUnpredCost;
    // When predicating, assume that the first IT can be folded away but
    // later ones cost one cycle each.
    if (Subtarget.isThumb2() && TCycles + FCycles > 4) {
      PredCost += ((TCycles + FCycles - 4) / 4) * ScalingUpFactor;
    }
  } else {
    unsigned TUnpredCost = Probability.scale(TCycles * ScalingUpFactor);
    unsigned FUnpredCost =
        Probability.getCompl().scale(FCycles * ScalingUpFactor);
    UnpredCost = TUnpredCost + FUnpredCost;
    UnpredCost += 1 * ScalingUpFactor; // The branch itself
    UnpredCost += Subtarget.getMispredictionPenalty() * ScalingUpFactor / 10;
  }

  return PredCost <= UnpredCost;
}

unsigned
ARMBaseInstrInfo::extraSizeToPredicateInstructions(const MachineFunction &MF,
                                                   unsigned NumInsts) const {
  // Thumb2 needs a 2-byte IT instruction to predicate up to 4 instructions.
  // ARM has a condition code field in every predicable instruction, using it
  // doesn't change code size.
  if (!Subtarget.isThumb2())
    return 0;

  // It's possible that the size of the IT is restricted to a single block.
  unsigned MaxInsts = Subtarget.restrictIT() ? 1 : 4;
  return divideCeil(NumInsts, MaxInsts) * 2;
}

unsigned
ARMBaseInstrInfo::predictBranchSizeForIfCvt(MachineInstr &MI) const {
  // If this branch is likely to be folded into the comparison to form a
  // CB(N)Z, then removing it won't reduce code size at all, because that will
  // just replace the CB(N)Z with a CMP.
  if (MI.getOpcode() == ARM::t2Bcc &&
      findCMPToFoldIntoCBZ(&MI, &getRegisterInfo()))
    return 0;

  unsigned Size = getInstSizeInBytes(MI);

  // For Thumb2, all branches are 32-bit instructions during the if conversion
  // pass, but may be replaced with 16-bit instructions during size reduction.
  // Since the branches considered by if conversion tend to be forward branches
  // over small basic blocks, they are very likely to be in range for the
  // narrow instructions, so we assume the final code size will be half what it
  // currently is.
  if (Subtarget.isThumb2())
    Size /= 2;

  return Size;
}

bool
ARMBaseInstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                            MachineBasicBlock &FMBB) const {
  // Reduce false anti-dependencies to let the target's out-of-order execution
  // engine do its thing.
  return Subtarget.isProfitableToUnpredicate();
}

/// getInstrPredicate - If instruction is predicated, returns its predicate
/// condition, otherwise returns AL. It also returns the condition code
/// register by reference.
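/// PredReg is set to 0 when the instruction has no predicate operand.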
2171 ARMCC::CondCodes llvm::getInstrPredicate(const MachineInstr &MI, 2172 Register &PredReg) { 2173 int PIdx = MI.findFirstPredOperandIdx(); 2174 if (PIdx == -1) { 2175 PredReg = 0; 2176 return ARMCC::AL; 2177 } 2178 2179 PredReg = MI.getOperand(PIdx+1).getReg(); 2180 return (ARMCC::CondCodes)MI.getOperand(PIdx).getImm(); 2181 } 2182 2183 unsigned llvm::getMatchingCondBranchOpcode(unsigned Opc) { 2184 if (Opc == ARM::B) 2185 return ARM::Bcc; 2186 if (Opc == ARM::tB) 2187 return ARM::tBcc; 2188 if (Opc == ARM::t2B) 2189 return ARM::t2Bcc; 2190 2191 llvm_unreachable("Unknown unconditional branch opcode!"); 2192 } 2193 2194 MachineInstr *ARMBaseInstrInfo::commuteInstructionImpl(MachineInstr &MI, 2195 bool NewMI, 2196 unsigned OpIdx1, 2197 unsigned OpIdx2) const { 2198 switch (MI.getOpcode()) { 2199 case ARM::MOVCCr: 2200 case ARM::t2MOVCCr: { 2201 // MOVCC can be commuted by inverting the condition. 2202 Register PredReg; 2203 ARMCC::CondCodes CC = getInstrPredicate(MI, PredReg); 2204 // MOVCC AL can't be inverted. Shouldn't happen. 2205 if (CC == ARMCC::AL || PredReg != ARM::CPSR) 2206 return nullptr; 2207 MachineInstr *CommutedMI = 2208 TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2); 2209 if (!CommutedMI) 2210 return nullptr; 2211 // After swapping the MOVCC operands, also invert the condition. 2212 CommutedMI->getOperand(CommutedMI->findFirstPredOperandIdx()) 2213 .setImm(ARMCC::getOppositeCondition(CC)); 2214 return CommutedMI; 2215 } 2216 } 2217 return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2); 2218 } 2219 2220 /// Identify instructions that can be folded into a MOVCC instruction, and 2221 /// return the defining instruction. 2222 MachineInstr * 2223 ARMBaseInstrInfo::canFoldIntoMOVCC(Register Reg, const MachineRegisterInfo &MRI, 2224 const TargetInstrInfo *TII) const { 2225 if (!Reg.isVirtual()) 2226 return nullptr; 2227 if (!MRI.hasOneNonDBGUse(Reg)) 2228 return nullptr; 2229 MachineInstr *MI = MRI.getVRegDef(Reg); 2230 if (!MI) 2231 return nullptr; 2232 // Check if MI can be predicated and folded into the MOVCC. 2233 if (!isPredicable(*MI)) 2234 return nullptr; 2235 // Check if MI has any non-dead defs or physreg uses. This also detects 2236 // predicated instructions which will be reading CPSR. 2237 for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) { 2238 const MachineOperand &MO = MI->getOperand(i); 2239 // Reject frame index operands, PEI can't handle the predicated pseudos. 2240 if (MO.isFI() || MO.isCPI() || MO.isJTI()) 2241 return nullptr; 2242 if (!MO.isReg()) 2243 continue; 2244 // MI can't have any tied operands, that would conflict with predication. 2245 if (MO.isTied()) 2246 return nullptr; 2247 if (Register::isPhysicalRegister(MO.getReg())) 2248 return nullptr; 2249 if (MO.isDef() && !MO.isDead()) 2250 return nullptr; 2251 } 2252 bool DontMoveAcrossStores = true; 2253 if (!MI->isSafeToMove(/* AliasAnalysis = */ nullptr, DontMoveAcrossStores)) 2254 return nullptr; 2255 return MI; 2256 } 2257 2258 bool ARMBaseInstrInfo::analyzeSelect(const MachineInstr &MI, 2259 SmallVectorImpl<MachineOperand> &Cond, 2260 unsigned &TrueOp, unsigned &FalseOp, 2261 bool &Optimizable) const { 2262 assert((MI.getOpcode() == ARM::MOVCCr || MI.getOpcode() == ARM::t2MOVCCr) && 2263 "Unknown select instruction"); 2264 // MOVCC operands: 2265 // 0: Def. 2266 // 1: True use. 2267 // 2: False use. 2268 // 3: Condition code. 2269 // 4: CPSR use. 
  TrueOp = 1;
  FalseOp = 2;
  Cond.push_back(MI.getOperand(3));
  Cond.push_back(MI.getOperand(4));
  // We can always fold a def.
  Optimizable = true;
  return false;
}

MachineInstr *
ARMBaseInstrInfo::optimizeSelect(MachineInstr &MI,
                                 SmallPtrSetImpl<MachineInstr *> &SeenMIs,
                                 bool PreferFalse) const {
  assert((MI.getOpcode() == ARM::MOVCCr || MI.getOpcode() == ARM::t2MOVCCr) &&
         "Unknown select instruction");
  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  MachineInstr *DefMI = canFoldIntoMOVCC(MI.getOperand(2).getReg(), MRI, this);
  bool Invert = !DefMI;
  if (!DefMI)
    DefMI = canFoldIntoMOVCC(MI.getOperand(1).getReg(), MRI, this);
  if (!DefMI)
    return nullptr;

  // Find new register class to use.
  MachineOperand FalseReg = MI.getOperand(Invert ? 2 : 1);
  Register DestReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *PreviousClass = MRI.getRegClass(FalseReg.getReg());
  if (!MRI.constrainRegClass(DestReg, PreviousClass))
    return nullptr;

  // Create a new predicated version of DefMI.
  // Rfalse is the first use.
  MachineInstrBuilder NewMI =
      BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), DefMI->getDesc(), DestReg);

  // Copy all the DefMI operands, excluding its (null) predicate.
  const MCInstrDesc &DefDesc = DefMI->getDesc();
  for (unsigned i = 1, e = DefDesc.getNumOperands();
       i != e && !DefDesc.OpInfo[i].isPredicate(); ++i)
    NewMI.add(DefMI->getOperand(i));

  unsigned CondCode = MI.getOperand(3).getImm();
  if (Invert)
    NewMI.addImm(ARMCC::getOppositeCondition(ARMCC::CondCodes(CondCode)));
  else
    NewMI.addImm(CondCode);
  NewMI.add(MI.getOperand(4));

  // DefMI is not the -S version that sets CPSR, so add an optional %noreg.
  if (NewMI->hasOptionalDef())
    NewMI.add(condCodeOp());

  // The output register value when the predicate is false is an implicit
  // register operand tied to the first def.
  // The tie makes the register allocator ensure the FalseReg is allocated the
  // same register as operand 0.
  FalseReg.setImplicit();
  NewMI.add(FalseReg);
  NewMI->tieOperands(0, NewMI->getNumOperands() - 1);

  // Update SeenMIs set: register newly created MI and erase removed DefMI.
  SeenMIs.insert(NewMI);
  SeenMIs.erase(DefMI);

  // If MI is inside a loop, and DefMI is outside the loop, then kill flags on
  // DefMI would be invalid when transferred inside the loop. Checking for a
  // loop is expensive, but at least remove kill flags if they are in different
  // BBs.
  if (DefMI->getParent() != MI.getParent())
    NewMI->clearKillInfo();

  // The caller will erase MI, but not DefMI.
  DefMI->eraseFromParent();
  return NewMI;
}

/// Map pseudo instructions that imply an 'S' bit onto real opcodes. Whether
/// the instruction is encoded with an 'S' bit is determined by the optional
/// CPSR def operand.
///
/// This will go away once we can teach tblgen how to set the optional CPSR def
/// operand itself.
struct AddSubFlagsOpcodePair {
  uint16_t PseudoOpc;
  uint16_t MachineOpc;
};

static const AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[] = {
    {ARM::ADDSri, ARM::ADDri},
    {ARM::ADDSrr, ARM::ADDrr},
    {ARM::ADDSrsi, ARM::ADDrsi},
    {ARM::ADDSrsr, ARM::ADDrsr},

    {ARM::SUBSri, ARM::SUBri},
    {ARM::SUBSrr, ARM::SUBrr},
    {ARM::SUBSrsi, ARM::SUBrsi},
    {ARM::SUBSrsr, ARM::SUBrsr},

    {ARM::RSBSri, ARM::RSBri},
    {ARM::RSBSrsi, ARM::RSBrsi},
    {ARM::RSBSrsr, ARM::RSBrsr},

    {ARM::tADDSi3, ARM::tADDi3},
    {ARM::tADDSi8, ARM::tADDi8},
    {ARM::tADDSrr, ARM::tADDrr},
    {ARM::tADCS, ARM::tADC},

    {ARM::tSUBSi3, ARM::tSUBi3},
    {ARM::tSUBSi8, ARM::tSUBi8},
    {ARM::tSUBSrr, ARM::tSUBrr},
    {ARM::tSBCS, ARM::tSBC},
    {ARM::tRSBS, ARM::tRSB},
    {ARM::tLSLSri, ARM::tLSLri},

    {ARM::t2ADDSri, ARM::t2ADDri},
    {ARM::t2ADDSrr, ARM::t2ADDrr},
    {ARM::t2ADDSrs, ARM::t2ADDrs},

    {ARM::t2SUBSri, ARM::t2SUBri},
    {ARM::t2SUBSrr, ARM::t2SUBrr},
    {ARM::t2SUBSrs, ARM::t2SUBrs},

    {ARM::t2RSBSri, ARM::t2RSBri},
    {ARM::t2RSBSrs, ARM::t2RSBrs},
};

unsigned llvm::convertAddSubFlagsOpcode(unsigned OldOpc) {
  for (unsigned i = 0, e = array_lengthof(AddSubFlagsOpcodeMap); i != e; ++i)
    if (OldOpc == AddSubFlagsOpcodeMap[i].PseudoOpc)
      return AddSubFlagsOpcodeMap[i].MachineOpc;
  return 0;
}

void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator &MBBI,
                                   const DebugLoc &dl, Register DestReg,
                                   Register BaseReg, int NumBytes,
                                   ARMCC::CondCodes Pred, Register PredReg,
                                   const ARMBaseInstrInfo &TII,
                                   unsigned MIFlags) {
  if (NumBytes == 0 && DestReg != BaseReg) {
    BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), DestReg)
        .addReg(BaseReg, RegState::Kill)
        .add(predOps(Pred, PredReg))
        .add(condCodeOp())
        .setMIFlags(MIFlags);
    return;
  }

  bool isSub = NumBytes < 0;
  if (isSub) NumBytes = -NumBytes;

  while (NumBytes) {
    unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes);
    unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt);
    assert(ThisVal && "Didn't extract field correctly");

    // We will handle these bits from offset, clear them.
    NumBytes &= ~ThisVal;

    assert(ARM_AM::getSOImmVal(ThisVal) != -1 && "Bit extraction didn't work?");

    // Build the new ADD / SUB.
    unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
    BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
        .addReg(BaseReg, RegState::Kill)
        .addImm(ThisVal)
        .add(predOps(Pred, PredReg))
        .add(condCodeOp())
        .setMIFlags(MIFlags);
    BaseReg = DestReg;
  }
}

bool llvm::tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget,
                                      MachineFunction &MF, MachineInstr *MI,
                                      unsigned NumBytes) {
  // This optimisation potentially adds lots of load and store
  // micro-operations, so it's only really of benefit to code-size.
  if (!Subtarget.hasMinSize())
    return false;

  // If only one register is pushed/popped, LLVM can use an LDR/STR
  // instead. We can't modify those so make sure we're dealing with an
  // instruction we understand.
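  // Illustrative sketch (the registers named here are hypothetical; the
  // actual choice below depends on encoding order and liveness): with
  // NumBytes == 8, a 'sub sp, #8' next to 'push {r4, r5}' can be folded into
  // 'push {r2, r3, r4, r5}', where r2 and r3 are pushed as undef scratch
  // values purely to move SP by the extra 8 bytes.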
  bool IsPop = isPopOpcode(MI->getOpcode());
  bool IsPush = isPushOpcode(MI->getOpcode());
  if (!IsPush && !IsPop)
    return false;

  bool IsVFPPushPop = MI->getOpcode() == ARM::VSTMDDB_UPD ||
                      MI->getOpcode() == ARM::VLDMDIA_UPD;
  bool IsT1PushPop = MI->getOpcode() == ARM::tPUSH ||
                     MI->getOpcode() == ARM::tPOP ||
                     MI->getOpcode() == ARM::tPOP_RET;

  assert((IsT1PushPop || (MI->getOperand(0).getReg() == ARM::SP &&
                          MI->getOperand(1).getReg() == ARM::SP)) &&
         "trying to fold sp update into non-sp-updating push/pop");

  // The VFP push & pop act on D-registers, so we can only correctly fold in
  // an adjustment that is a multiple of 8 bytes. Similarly, each GPR is 4
  // bytes. Don't try if this is violated.
  if (NumBytes % (IsVFPPushPop ? 8 : 4) != 0)
    return false;

  // ARM and Thumb2 push/pop insts have explicit "sp, sp" operands (+
  // pred) so the list starts at 4. Thumb1 starts after the predicate.
  int RegListIdx = IsT1PushPop ? 2 : 4;

  // Calculate the space we'll need in terms of registers.
  unsigned RegsNeeded;
  const TargetRegisterClass *RegClass;
  if (IsVFPPushPop) {
    RegsNeeded = NumBytes / 8;
    RegClass = &ARM::DPRRegClass;
  } else {
    RegsNeeded = NumBytes / 4;
    RegClass = &ARM::GPRRegClass;
  }

  // We're going to have to strip all list operands off before
  // re-adding them since the order matters, so save the existing ones
  // for later.
  SmallVector<MachineOperand, 4> RegList;

  // We're also going to need the first register transferred by this
  // instruction, which won't necessarily be the first register in the list.
  unsigned FirstRegEnc = -1;

  const TargetRegisterInfo *TRI = MF.getRegInfo().getTargetRegisterInfo();
  for (int i = MI->getNumOperands() - 1; i >= RegListIdx; --i) {
    MachineOperand &MO = MI->getOperand(i);
    RegList.push_back(MO);

    if (MO.isReg() && !MO.isImplicit() &&
        TRI->getEncodingValue(MO.getReg()) < FirstRegEnc)
      FirstRegEnc = TRI->getEncodingValue(MO.getReg());
  }

  const MCPhysReg *CSRegs = TRI->getCalleeSavedRegs(&MF);

  // Now try to find enough space in the reglist to allocate NumBytes.
  for (int CurRegEnc = FirstRegEnc - 1; CurRegEnc >= 0 && RegsNeeded;
       --CurRegEnc) {
    unsigned CurReg = RegClass->getRegister(CurRegEnc);
    if (IsT1PushPop && CurRegEnc > TRI->getEncodingValue(ARM::R7))
      continue;
    if (!IsPop) {
      // Pushing any register is completely harmless, mark the register
      // involved as undef since we don't care about its value and must not
      // restore it during stack unwinding.
      RegList.push_back(MachineOperand::CreateReg(CurReg, false, false,
                                                  false, false, true));
      --RegsNeeded;
      continue;
    }

    // However, we can only pop an extra register if it's not live. For
    // registers live within the function we might clobber a return value
    // register; the other way a register can be live here is if it's
    // callee-saved.
    if (isCalleeSavedRegister(CurReg, CSRegs) ||
        MI->getParent()->computeRegisterLiveness(TRI, CurReg, MI) !=
            MachineBasicBlock::LQR_Dead) {
      // VFP pops don't allow holes in the register list, so any skip is fatal
      // for our transformation. GPR pops do, so we should just keep looking.
      if (IsVFPPushPop)
        return false;
      else
        continue;
    }

    // Mark the unimportant registers as <def,dead> in the POP.
    RegList.push_back(MachineOperand::CreateReg(CurReg, true, false, false,
                                                true));
    --RegsNeeded;
  }

  if (RegsNeeded > 0)
    return false;

  // Finally we know we can profitably perform the optimisation so go
  // ahead: strip all existing registers off and add them back again
  // in the right order.
  for (int i = MI->getNumOperands() - 1; i >= RegListIdx; --i)
    MI->RemoveOperand(i);

  // Add the complete list back in.
  MachineInstrBuilder MIB(MF, &*MI);
  for (int i = RegList.size() - 1; i >= 0; --i)
    MIB.add(RegList[i]);

  return true;
}

bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                Register FrameReg, int &Offset,
                                const ARMBaseInstrInfo &TII) {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  bool isSub = false;

  // Memory operands in inline assembly always use AddrMode2.
  if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR)
    AddrMode = ARMII::AddrMode2;

  if (Opcode == ARM::ADDri) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();
    if (Offset == 0) {
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::MOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.RemoveOperand(FrameRegIdx+1);
      Offset = 0;
      return true;
    } else if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
      MI.setDesc(TII.get(ARM::SUBri));
    }

    // Common case: small offset, fits into instruction.
    if (ARM_AM::getSOImmVal(Offset) != -1) {
      // Replace the FrameIndex with sp / fp
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      Offset = 0;
      return true;
    }

    // Otherwise, pull as much of the immediate into this ADDri/SUBri
    // as possible.
    unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
    unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt);

    // We will handle these bits from offset, clear them.
    Offset &= ~ThisImmVal;

    // Get the properly encoded SOImmVal field.
    assert(ARM_AM::getSOImmVal(ThisImmVal) != -1 &&
           "Bit extraction didn't work?");
    MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
  } else {
    unsigned ImmIdx = 0;
    int InstrOffs = 0;
    unsigned NumBits = 0;
    unsigned Scale = 1;
    switch (AddrMode) {
    case ARMII::AddrMode_i12:
      ImmIdx = FrameRegIdx + 1;
      InstrOffs = MI.getOperand(ImmIdx).getImm();
      NumBits = 12;
      break;
    case ARMII::AddrMode2:
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 12;
      break;
    case ARMII::AddrMode3:
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      break;
    case ARMII::AddrMode4:
    case ARMII::AddrMode6:
      // Can't fold any offset even if it's zero.
2642 return false; 2643 case ARMII::AddrMode5: 2644 ImmIdx = FrameRegIdx+1; 2645 InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm()); 2646 if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub) 2647 InstrOffs *= -1; 2648 NumBits = 8; 2649 Scale = 4; 2650 break; 2651 case ARMII::AddrMode5FP16: 2652 ImmIdx = FrameRegIdx+1; 2653 InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm()); 2654 if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub) 2655 InstrOffs *= -1; 2656 NumBits = 8; 2657 Scale = 2; 2658 break; 2659 case ARMII::AddrModeT2_i7: 2660 case ARMII::AddrModeT2_i7s2: 2661 case ARMII::AddrModeT2_i7s4: 2662 ImmIdx = FrameRegIdx+1; 2663 InstrOffs = MI.getOperand(ImmIdx).getImm(); 2664 NumBits = 7; 2665 Scale = (AddrMode == ARMII::AddrModeT2_i7s2 ? 2 : 2666 AddrMode == ARMII::AddrModeT2_i7s4 ? 4 : 1); 2667 break; 2668 default: 2669 llvm_unreachable("Unsupported addressing mode!"); 2670 } 2671 2672 Offset += InstrOffs * Scale; 2673 assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!"); 2674 if (Offset < 0) { 2675 Offset = -Offset; 2676 isSub = true; 2677 } 2678 2679 // Attempt to fold address comp. if opcode has offset bits 2680 if (NumBits > 0) { 2681 // Common case: small offset, fits into instruction. 2682 MachineOperand &ImmOp = MI.getOperand(ImmIdx); 2683 int ImmedOffset = Offset / Scale; 2684 unsigned Mask = (1 << NumBits) - 1; 2685 if ((unsigned)Offset <= Mask * Scale) { 2686 // Replace the FrameIndex with sp 2687 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false); 2688 // FIXME: When addrmode2 goes away, this will simplify (like the 2689 // T2 version), as the LDR.i12 versions don't need the encoding 2690 // tricks for the offset value. 2691 if (isSub) { 2692 if (AddrMode == ARMII::AddrMode_i12) 2693 ImmedOffset = -ImmedOffset; 2694 else 2695 ImmedOffset |= 1 << NumBits; 2696 } 2697 ImmOp.ChangeToImmediate(ImmedOffset); 2698 Offset = 0; 2699 return true; 2700 } 2701 2702 // Otherwise, it didn't fit. Pull in what we can to simplify the immed. 2703 ImmedOffset = ImmedOffset & Mask; 2704 if (isSub) { 2705 if (AddrMode == ARMII::AddrMode_i12) 2706 ImmedOffset = -ImmedOffset; 2707 else 2708 ImmedOffset |= 1 << NumBits; 2709 } 2710 ImmOp.ChangeToImmediate(ImmedOffset); 2711 Offset &= ~(Mask*Scale); 2712 } 2713 } 2714 2715 Offset = (isSub) ? -Offset : Offset; 2716 return Offset == 0; 2717 } 2718 2719 /// analyzeCompare - For a comparison instruction, return the source registers 2720 /// in SrcReg and SrcReg2 if having two register operands, and the value it 2721 /// compares against in CmpValue. Return true if the comparison instruction 2722 /// can be analyzed. 
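/// For TST-style opcodes the immediate mask is returned in CmpMask instead,
/// with CmpValue set to 0.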
2723 bool ARMBaseInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg, 2724 Register &SrcReg2, int &CmpMask, 2725 int &CmpValue) const { 2726 switch (MI.getOpcode()) { 2727 default: break; 2728 case ARM::CMPri: 2729 case ARM::t2CMPri: 2730 case ARM::tCMPi8: 2731 SrcReg = MI.getOperand(0).getReg(); 2732 SrcReg2 = 0; 2733 CmpMask = ~0; 2734 CmpValue = MI.getOperand(1).getImm(); 2735 return true; 2736 case ARM::CMPrr: 2737 case ARM::t2CMPrr: 2738 case ARM::tCMPr: 2739 SrcReg = MI.getOperand(0).getReg(); 2740 SrcReg2 = MI.getOperand(1).getReg(); 2741 CmpMask = ~0; 2742 CmpValue = 0; 2743 return true; 2744 case ARM::TSTri: 2745 case ARM::t2TSTri: 2746 SrcReg = MI.getOperand(0).getReg(); 2747 SrcReg2 = 0; 2748 CmpMask = MI.getOperand(1).getImm(); 2749 CmpValue = 0; 2750 return true; 2751 } 2752 2753 return false; 2754 } 2755 2756 /// isSuitableForMask - Identify a suitable 'and' instruction that 2757 /// operates on the given source register and applies the same mask 2758 /// as a 'tst' instruction. Provide a limited look-through for copies. 2759 /// When successful, MI will hold the found instruction. 2760 static bool isSuitableForMask(MachineInstr *&MI, Register SrcReg, 2761 int CmpMask, bool CommonUse) { 2762 switch (MI->getOpcode()) { 2763 case ARM::ANDri: 2764 case ARM::t2ANDri: 2765 if (CmpMask != MI->getOperand(2).getImm()) 2766 return false; 2767 if (SrcReg == MI->getOperand(CommonUse ? 1 : 0).getReg()) 2768 return true; 2769 break; 2770 } 2771 2772 return false; 2773 } 2774 2775 /// getCmpToAddCondition - assume the flags are set by CMP(a,b), return 2776 /// the condition code if we modify the instructions such that flags are 2777 /// set by ADD(a,b,X). 2778 inline static ARMCC::CondCodes getCmpToAddCondition(ARMCC::CondCodes CC) { 2779 switch (CC) { 2780 default: return ARMCC::AL; 2781 case ARMCC::HS: return ARMCC::LO; 2782 case ARMCC::LO: return ARMCC::HS; 2783 case ARMCC::VS: return ARMCC::VS; 2784 case ARMCC::VC: return ARMCC::VC; 2785 } 2786 } 2787 2788 /// isRedundantFlagInstr - check whether the first instruction, whose only 2789 /// purpose is to update flags, can be made redundant. 2790 /// CMPrr can be made redundant by SUBrr if the operands are the same. 2791 /// CMPri can be made redundant by SUBri if the operands are the same. 2792 /// CMPrr(r0, r1) can be made redundant by ADDr[ri](r0, r1, X). 2793 /// This function can be extended later on. 
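/// For example, 'subs r0, r1, r2' sets the flags exactly as 'cmp r1, r2'
/// would, so the explicit compare can be removed.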
2794 inline static bool isRedundantFlagInstr(const MachineInstr *CmpI, 2795 Register SrcReg, Register SrcReg2, 2796 int ImmValue, const MachineInstr *OI, 2797 bool &IsThumb1) { 2798 if ((CmpI->getOpcode() == ARM::CMPrr || CmpI->getOpcode() == ARM::t2CMPrr) && 2799 (OI->getOpcode() == ARM::SUBrr || OI->getOpcode() == ARM::t2SUBrr) && 2800 ((OI->getOperand(1).getReg() == SrcReg && 2801 OI->getOperand(2).getReg() == SrcReg2) || 2802 (OI->getOperand(1).getReg() == SrcReg2 && 2803 OI->getOperand(2).getReg() == SrcReg))) { 2804 IsThumb1 = false; 2805 return true; 2806 } 2807 2808 if (CmpI->getOpcode() == ARM::tCMPr && OI->getOpcode() == ARM::tSUBrr && 2809 ((OI->getOperand(2).getReg() == SrcReg && 2810 OI->getOperand(3).getReg() == SrcReg2) || 2811 (OI->getOperand(2).getReg() == SrcReg2 && 2812 OI->getOperand(3).getReg() == SrcReg))) { 2813 IsThumb1 = true; 2814 return true; 2815 } 2816 2817 if ((CmpI->getOpcode() == ARM::CMPri || CmpI->getOpcode() == ARM::t2CMPri) && 2818 (OI->getOpcode() == ARM::SUBri || OI->getOpcode() == ARM::t2SUBri) && 2819 OI->getOperand(1).getReg() == SrcReg && 2820 OI->getOperand(2).getImm() == ImmValue) { 2821 IsThumb1 = false; 2822 return true; 2823 } 2824 2825 if (CmpI->getOpcode() == ARM::tCMPi8 && 2826 (OI->getOpcode() == ARM::tSUBi8 || OI->getOpcode() == ARM::tSUBi3) && 2827 OI->getOperand(2).getReg() == SrcReg && 2828 OI->getOperand(3).getImm() == ImmValue) { 2829 IsThumb1 = true; 2830 return true; 2831 } 2832 2833 if ((CmpI->getOpcode() == ARM::CMPrr || CmpI->getOpcode() == ARM::t2CMPrr) && 2834 (OI->getOpcode() == ARM::ADDrr || OI->getOpcode() == ARM::t2ADDrr || 2835 OI->getOpcode() == ARM::ADDri || OI->getOpcode() == ARM::t2ADDri) && 2836 OI->getOperand(0).isReg() && OI->getOperand(1).isReg() && 2837 OI->getOperand(0).getReg() == SrcReg && 2838 OI->getOperand(1).getReg() == SrcReg2) { 2839 IsThumb1 = false; 2840 return true; 2841 } 2842 2843 if (CmpI->getOpcode() == ARM::tCMPr && 2844 (OI->getOpcode() == ARM::tADDi3 || OI->getOpcode() == ARM::tADDi8 || 2845 OI->getOpcode() == ARM::tADDrr) && 2846 OI->getOperand(0).getReg() == SrcReg && 2847 OI->getOperand(2).getReg() == SrcReg2) { 2848 IsThumb1 = true; 2849 return true; 2850 } 2851 2852 return false; 2853 } 2854 2855 static bool isOptimizeCompareCandidate(MachineInstr *MI, bool &IsThumb1) { 2856 switch (MI->getOpcode()) { 2857 default: return false; 2858 case ARM::tLSLri: 2859 case ARM::tLSRri: 2860 case ARM::tLSLrr: 2861 case ARM::tLSRrr: 2862 case ARM::tSUBrr: 2863 case ARM::tADDrr: 2864 case ARM::tADDi3: 2865 case ARM::tADDi8: 2866 case ARM::tSUBi3: 2867 case ARM::tSUBi8: 2868 case ARM::tMUL: 2869 case ARM::tADC: 2870 case ARM::tSBC: 2871 case ARM::tRSB: 2872 case ARM::tAND: 2873 case ARM::tORR: 2874 case ARM::tEOR: 2875 case ARM::tBIC: 2876 case ARM::tMVN: 2877 case ARM::tASRri: 2878 case ARM::tASRrr: 2879 case ARM::tROR: 2880 IsThumb1 = true; 2881 LLVM_FALLTHROUGH; 2882 case ARM::RSBrr: 2883 case ARM::RSBri: 2884 case ARM::RSCrr: 2885 case ARM::RSCri: 2886 case ARM::ADDrr: 2887 case ARM::ADDri: 2888 case ARM::ADCrr: 2889 case ARM::ADCri: 2890 case ARM::SUBrr: 2891 case ARM::SUBri: 2892 case ARM::SBCrr: 2893 case ARM::SBCri: 2894 case ARM::t2RSBri: 2895 case ARM::t2ADDrr: 2896 case ARM::t2ADDri: 2897 case ARM::t2ADCrr: 2898 case ARM::t2ADCri: 2899 case ARM::t2SUBrr: 2900 case ARM::t2SUBri: 2901 case ARM::t2SBCrr: 2902 case ARM::t2SBCri: 2903 case ARM::ANDrr: 2904 case ARM::ANDri: 2905 case ARM::t2ANDrr: 2906 case ARM::t2ANDri: 2907 case ARM::ORRrr: 2908 case ARM::ORRri: 2909 case ARM::t2ORRrr: 2910 case 
ARM::t2ORRri: 2911 case ARM::EORrr: 2912 case ARM::EORri: 2913 case ARM::t2EORrr: 2914 case ARM::t2EORri: 2915 case ARM::t2LSRri: 2916 case ARM::t2LSRrr: 2917 case ARM::t2LSLri: 2918 case ARM::t2LSLrr: 2919 return true; 2920 } 2921 } 2922 2923 /// optimizeCompareInstr - Convert the instruction supplying the argument to the 2924 /// comparison into one that sets the zero bit in the flags register; 2925 /// Remove a redundant Compare instruction if an earlier instruction can set the 2926 /// flags in the same way as Compare. 2927 /// E.g. SUBrr(r1,r2) and CMPrr(r1,r2). We also handle the case where two 2928 /// operands are swapped: SUBrr(r1,r2) and CMPrr(r2,r1), by updating the 2929 /// condition code of instructions which use the flags. 2930 bool ARMBaseInstrInfo::optimizeCompareInstr( 2931 MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int CmpMask, 2932 int CmpValue, const MachineRegisterInfo *MRI) const { 2933 // Get the unique definition of SrcReg. 2934 MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg); 2935 if (!MI) return false; 2936 2937 // Masked compares sometimes use the same register as the corresponding 'and'. 2938 if (CmpMask != ~0) { 2939 if (!isSuitableForMask(MI, SrcReg, CmpMask, false) || isPredicated(*MI)) { 2940 MI = nullptr; 2941 for (MachineRegisterInfo::use_instr_iterator 2942 UI = MRI->use_instr_begin(SrcReg), UE = MRI->use_instr_end(); 2943 UI != UE; ++UI) { 2944 if (UI->getParent() != CmpInstr.getParent()) 2945 continue; 2946 MachineInstr *PotentialAND = &*UI; 2947 if (!isSuitableForMask(PotentialAND, SrcReg, CmpMask, true) || 2948 isPredicated(*PotentialAND)) 2949 continue; 2950 MI = PotentialAND; 2951 break; 2952 } 2953 if (!MI) return false; 2954 } 2955 } 2956 2957 // Get ready to iterate backward from CmpInstr. 2958 MachineBasicBlock::iterator I = CmpInstr, E = MI, 2959 B = CmpInstr.getParent()->begin(); 2960 2961 // Early exit if CmpInstr is at the beginning of the BB. 2962 if (I == B) return false; 2963 2964 // There are two possible candidates which can be changed to set CPSR: 2965 // One is MI, the other is a SUB or ADD instruction. 2966 // For CMPrr(r1,r2), we are looking for SUB(r1,r2), SUB(r2,r1), or 2967 // ADDr[ri](r1, r2, X). 2968 // For CMPri(r1, CmpValue), we are looking for SUBri(r1, CmpValue). 2969 MachineInstr *SubAdd = nullptr; 2970 if (SrcReg2 != 0) 2971 // MI is not a candidate for CMPrr. 2972 MI = nullptr; 2973 else if (MI->getParent() != CmpInstr.getParent() || CmpValue != 0) { 2974 // Conservatively refuse to convert an instruction which isn't in the same 2975 // BB as the comparison. 2976 // For CMPri w/ CmpValue != 0, a SubAdd may still be a candidate. 2977 // Thus we cannot return here. 2978 if (CmpInstr.getOpcode() == ARM::CMPri || 2979 CmpInstr.getOpcode() == ARM::t2CMPri || 2980 CmpInstr.getOpcode() == ARM::tCMPi8) 2981 MI = nullptr; 2982 else 2983 return false; 2984 } 2985 2986 bool IsThumb1 = false; 2987 if (MI && !isOptimizeCompareCandidate(MI, IsThumb1)) 2988 return false; 2989 2990 // We also want to do this peephole for cases like this: if (a*b == 0), 2991 // and optimise away the CMP instruction from the generated code sequence: 2992 // MULS, MOVS, MOVS, CMP. Here the MOVS instructions load the boolean values 2993 // resulting from the select instruction, but these MOVS instructions for 2994 // Thumb1 (V6M) are flag setting and are thus preventing this optimisation. 
2995 // However, if we only have MOVS instructions in between the CMP and the 2996 // other instruction (the MULS in this example), then the CPSR is dead so we 2997 // can safely reorder the sequence into: MOVS, MOVS, MULS, CMP. We do this 2998 // reordering and then continue the analysis hoping we can eliminate the 2999 // CMP. This peephole works on the vregs, so is still in SSA form. As a 3000 // consequence, the movs won't redefine/kill the MUL operands which would 3001 // make this reordering illegal. 3002 const TargetRegisterInfo *TRI = &getRegisterInfo(); 3003 if (MI && IsThumb1) { 3004 --I; 3005 if (I != E && !MI->readsRegister(ARM::CPSR, TRI)) { 3006 bool CanReorder = true; 3007 for (; I != E; --I) { 3008 if (I->getOpcode() != ARM::tMOVi8) { 3009 CanReorder = false; 3010 break; 3011 } 3012 } 3013 if (CanReorder) { 3014 MI = MI->removeFromParent(); 3015 E = CmpInstr; 3016 CmpInstr.getParent()->insert(E, MI); 3017 } 3018 } 3019 I = CmpInstr; 3020 E = MI; 3021 } 3022 3023 // Check that CPSR isn't set between the comparison instruction and the one we 3024 // want to change. At the same time, search for SubAdd. 3025 bool SubAddIsThumb1 = false; 3026 do { 3027 const MachineInstr &Instr = *--I; 3028 3029 // Check whether CmpInstr can be made redundant by the current instruction. 3030 if (isRedundantFlagInstr(&CmpInstr, SrcReg, SrcReg2, CmpValue, &Instr, 3031 SubAddIsThumb1)) { 3032 SubAdd = &*I; 3033 break; 3034 } 3035 3036 // Allow E (which was initially MI) to be SubAdd but do not search before E. 3037 if (I == E) 3038 break; 3039 3040 if (Instr.modifiesRegister(ARM::CPSR, TRI) || 3041 Instr.readsRegister(ARM::CPSR, TRI)) 3042 // This instruction modifies or uses CPSR after the one we want to 3043 // change. We can't do this transformation. 3044 return false; 3045 3046 if (I == B) { 3047 // In some cases, we scan the use-list of an instruction for an AND; 3048 // that AND is in the same BB, but may not be scheduled before the 3049 // corresponding TST. In that case, bail out. 3050 // 3051 // FIXME: We could try to reschedule the AND. 3052 return false; 3053 } 3054 } while (true); 3055 3056 // Return false if no candidates exist. 3057 if (!MI && !SubAdd) 3058 return false; 3059 3060 // If we found a SubAdd, use it as it will be closer to the CMP 3061 if (SubAdd) { 3062 MI = SubAdd; 3063 IsThumb1 = SubAddIsThumb1; 3064 } 3065 3066 // We can't use a predicated instruction - it doesn't always write the flags. 3067 if (isPredicated(*MI)) 3068 return false; 3069 3070 // Scan forward for the use of CPSR 3071 // When checking against MI: if it's a conditional code that requires 3072 // checking of the V bit or C bit, then this is not safe to do. 3073 // It is safe to remove CmpInstr if CPSR is redefined or killed. 3074 // If we are done with the basic block, we need to check whether CPSR is 3075 // live-out. 3076 SmallVector<std::pair<MachineOperand*, ARMCC::CondCodes>, 4> 3077 OperandsToUpdate; 3078 bool isSafe = false; 3079 I = CmpInstr; 3080 E = CmpInstr.getParent()->end(); 3081 while (!isSafe && ++I != E) { 3082 const MachineInstr &Instr = *I; 3083 for (unsigned IO = 0, EO = Instr.getNumOperands(); 3084 !isSafe && IO != EO; ++IO) { 3085 const MachineOperand &MO = Instr.getOperand(IO); 3086 if (MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) { 3087 isSafe = true; 3088 break; 3089 } 3090 if (!MO.isReg() || MO.getReg() != ARM::CPSR) 3091 continue; 3092 if (MO.isDef()) { 3093 isSafe = true; 3094 break; 3095 } 3096 // Condition code is after the operand before CPSR except for VSELs. 
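      // That is, for an ordinary predicated user the condition immediate is
      // operand IO - 1; the VSEL variants instead imply their condition in
      // the opcode and are handled by the explicit cases below.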
3097 ARMCC::CondCodes CC; 3098 bool IsInstrVSel = true; 3099 switch (Instr.getOpcode()) { 3100 default: 3101 IsInstrVSel = false; 3102 CC = (ARMCC::CondCodes)Instr.getOperand(IO - 1).getImm(); 3103 break; 3104 case ARM::VSELEQD: 3105 case ARM::VSELEQS: 3106 case ARM::VSELEQH: 3107 CC = ARMCC::EQ; 3108 break; 3109 case ARM::VSELGTD: 3110 case ARM::VSELGTS: 3111 case ARM::VSELGTH: 3112 CC = ARMCC::GT; 3113 break; 3114 case ARM::VSELGED: 3115 case ARM::VSELGES: 3116 case ARM::VSELGEH: 3117 CC = ARMCC::GE; 3118 break; 3119 case ARM::VSELVSD: 3120 case ARM::VSELVSS: 3121 case ARM::VSELVSH: 3122 CC = ARMCC::VS; 3123 break; 3124 } 3125 3126 if (SubAdd) { 3127 // If we have SUB(r1, r2) and CMP(r2, r1), the condition code based 3128 // on CMP needs to be updated to be based on SUB. 3129 // If we have ADD(r1, r2, X) and CMP(r1, r2), the condition code also 3130 // needs to be modified. 3131 // Push the condition code operands to OperandsToUpdate. 3132 // If it is safe to remove CmpInstr, the condition code of these 3133 // operands will be modified. 3134 unsigned Opc = SubAdd->getOpcode(); 3135 bool IsSub = Opc == ARM::SUBrr || Opc == ARM::t2SUBrr || 3136 Opc == ARM::SUBri || Opc == ARM::t2SUBri || 3137 Opc == ARM::tSUBrr || Opc == ARM::tSUBi3 || 3138 Opc == ARM::tSUBi8; 3139 unsigned OpI = Opc != ARM::tSUBrr ? 1 : 2; 3140 if (!IsSub || 3141 (SrcReg2 != 0 && SubAdd->getOperand(OpI).getReg() == SrcReg2 && 3142 SubAdd->getOperand(OpI + 1).getReg() == SrcReg)) { 3143 // VSel doesn't support condition code update. 3144 if (IsInstrVSel) 3145 return false; 3146 // Ensure we can swap the condition. 3147 ARMCC::CondCodes NewCC = (IsSub ? getSwappedCondition(CC) : getCmpToAddCondition(CC)); 3148 if (NewCC == ARMCC::AL) 3149 return false; 3150 OperandsToUpdate.push_back( 3151 std::make_pair(&((*I).getOperand(IO - 1)), NewCC)); 3152 } 3153 } else { 3154 // No SubAdd, so this is x = <op> y, z; cmp x, 0. 3155 switch (CC) { 3156 case ARMCC::EQ: // Z 3157 case ARMCC::NE: // Z 3158 case ARMCC::MI: // N 3159 case ARMCC::PL: // N 3160 case ARMCC::AL: // none 3161 // CPSR can be used multiple times, we should continue. 3162 break; 3163 case ARMCC::HS: // C 3164 case ARMCC::LO: // C 3165 case ARMCC::VS: // V 3166 case ARMCC::VC: // V 3167 case ARMCC::HI: // C Z 3168 case ARMCC::LS: // C Z 3169 case ARMCC::GE: // N V 3170 case ARMCC::LT: // N V 3171 case ARMCC::GT: // Z N V 3172 case ARMCC::LE: // Z N V 3173 // The instruction uses the V bit or C bit which is not safe. 3174 return false; 3175 } 3176 } 3177 } 3178 } 3179 3180 // If CPSR is not killed nor re-defined, we should check whether it is 3181 // live-out. If it is live-out, do not optimize. 3182 if (!isSafe) { 3183 MachineBasicBlock *MBB = CmpInstr.getParent(); 3184 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(), 3185 SE = MBB->succ_end(); SI != SE; ++SI) 3186 if ((*SI)->isLiveIn(ARM::CPSR)) 3187 return false; 3188 } 3189 3190 // Toggle the optional operand to CPSR (if it exists - in Thumb1 we always 3191 // set CPSR so this is represented as an explicit output) 3192 if (!IsThumb1) { 3193 MI->getOperand(5).setReg(ARM::CPSR); 3194 MI->getOperand(5).setIsDef(true); 3195 } 3196 assert(!isPredicated(*MI) && "Can't use flags from predicated instruction"); 3197 CmpInstr.eraseFromParent(); 3198 3199 // Modify the condition code of operands in OperandsToUpdate. 3200 // Since we have SUB(r1, r2) and CMP(r2, r1), the condition code needs to 3201 // be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc. 
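// (GT/LT, GE/LE, HI/LO and HS/LS swap; EQ and NE stay as they are because
// equality is symmetric in the operands.)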
3202 for (unsigned i = 0, e = OperandsToUpdate.size(); i < e; i++) 3203 OperandsToUpdate[i].first->setImm(OperandsToUpdate[i].second); 3204 3205 MI->clearRegisterDeads(ARM::CPSR); 3206 3207 return true; 3208 } 3209 3210 bool ARMBaseInstrInfo::shouldSink(const MachineInstr &MI) const { 3211 // Do not sink MI if it might be used to optimize a redundant compare. 3212 // We heuristically only look at the instruction immediately following MI to 3213 // avoid potentially searching the entire basic block. 3214 if (isPredicated(MI)) 3215 return true; 3216 MachineBasicBlock::const_iterator Next = &MI; 3217 ++Next; 3218 Register SrcReg, SrcReg2; 3219 int CmpMask, CmpValue; 3220 bool IsThumb1; 3221 if (Next != MI.getParent()->end() && 3222 analyzeCompare(*Next, SrcReg, SrcReg2, CmpMask, CmpValue) && 3223 isRedundantFlagInstr(&*Next, SrcReg, SrcReg2, CmpValue, &MI, IsThumb1)) 3224 return false; 3225 return true; 3226 } 3227 3228 bool ARMBaseInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, 3229 Register Reg, 3230 MachineRegisterInfo *MRI) const { 3231 // Fold large immediates into add, sub, or, xor. 3232 unsigned DefOpc = DefMI.getOpcode(); 3233 if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm) 3234 return false; 3235 if (!DefMI.getOperand(1).isImm()) 3236 // Could be t2MOVi32imm @xx 3237 return false; 3238 3239 if (!MRI->hasOneNonDBGUse(Reg)) 3240 return false; 3241 3242 const MCInstrDesc &DefMCID = DefMI.getDesc(); 3243 if (DefMCID.hasOptionalDef()) { 3244 unsigned NumOps = DefMCID.getNumOperands(); 3245 const MachineOperand &MO = DefMI.getOperand(NumOps - 1); 3246 if (MO.getReg() == ARM::CPSR && !MO.isDead()) 3247 // If DefMI defines CPSR and it is not dead, it's obviously not safe 3248 // to delete DefMI. 3249 return false; 3250 } 3251 3252 const MCInstrDesc &UseMCID = UseMI.getDesc(); 3253 if (UseMCID.hasOptionalDef()) { 3254 unsigned NumOps = UseMCID.getNumOperands(); 3255 if (UseMI.getOperand(NumOps - 1).getReg() == ARM::CPSR) 3256 // If the instruction sets the flag, do not attempt this optimization 3257 // since it may change the semantics of the code. 3258 return false; 3259 } 3260 3261 unsigned UseOpc = UseMI.getOpcode(); 3262 unsigned NewUseOpc = 0; 3263 uint32_t ImmVal = (uint32_t)DefMI.getOperand(1).getImm(); 3264 uint32_t SOImmValV1 = 0, SOImmValV2 = 0; 3265 bool Commute = false; 3266 switch (UseOpc) { 3267 default: return false; 3268 case ARM::SUBrr: 3269 case ARM::ADDrr: 3270 case ARM::ORRrr: 3271 case ARM::EORrr: 3272 case ARM::t2SUBrr: 3273 case ARM::t2ADDrr: 3274 case ARM::t2ORRrr: 3275 case ARM::t2EORrr: { 3276 Commute = UseMI.getOperand(2).getReg() != Reg; 3277 switch (UseOpc) { 3278 default: break; 3279 case ARM::ADDrr: 3280 case ARM::SUBrr: 3281 if (UseOpc == ARM::SUBrr && Commute) 3282 return false; 3283 3284 // ADD/SUB are special because they're essentially the same operation, so 3285 // we can handle a larger range of immediates. 3286 if (ARM_AM::isSOImmTwoPartVal(ImmVal)) 3287 NewUseOpc = UseOpc == ARM::ADDrr ? ARM::ADDri : ARM::SUBri; 3288 else if (ARM_AM::isSOImmTwoPartVal(-ImmVal)) { 3289 ImmVal = -ImmVal; 3290 NewUseOpc = UseOpc == ARM::ADDrr ? 
ARM::SUBri : ARM::ADDri;
3291 } else
3292 return false;
3293 SOImmValV1 = (uint32_t)ARM_AM::getSOImmTwoPartFirst(ImmVal);
3294 SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal);
3295 break;
3296 case ARM::ORRrr:
3297 case ARM::EORrr:
3298 if (!ARM_AM::isSOImmTwoPartVal(ImmVal))
3299 return false;
3300 SOImmValV1 = (uint32_t)ARM_AM::getSOImmTwoPartFirst(ImmVal);
3301 SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal);
3302 switch (UseOpc) {
3303 default: break;
3304 case ARM::ORRrr: NewUseOpc = ARM::ORRri; break;
3305 case ARM::EORrr: NewUseOpc = ARM::EORri; break;
3306 }
3307 break;
3308 case ARM::t2ADDrr:
3309 case ARM::t2SUBrr: {
3310 if (UseOpc == ARM::t2SUBrr && Commute)
3311 return false;
3312
3313 // ADD/SUB are special because they're essentially the same operation, so
3314 // we can handle a larger range of immediates.
3315 const bool ToSP = DefMI.getOperand(0).getReg() == ARM::SP;
3316 const unsigned t2ADD = ToSP ? ARM::t2ADDspImm : ARM::t2ADDri;
3317 const unsigned t2SUB = ToSP ? ARM::t2SUBspImm : ARM::t2SUBri;
3318 if (ARM_AM::isT2SOImmTwoPartVal(ImmVal))
3319 NewUseOpc = UseOpc == ARM::t2ADDrr ? t2ADD : t2SUB;
3320 else if (ARM_AM::isT2SOImmTwoPartVal(-ImmVal)) {
3321 ImmVal = -ImmVal;
3322 NewUseOpc = UseOpc == ARM::t2ADDrr ? t2SUB : t2ADD;
3323 } else
3324 return false;
3325 SOImmValV1 = (uint32_t)ARM_AM::getT2SOImmTwoPartFirst(ImmVal);
3326 SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal);
3327 break;
3328 }
3329 case ARM::t2ORRrr:
3330 case ARM::t2EORrr:
3331 if (!ARM_AM::isT2SOImmTwoPartVal(ImmVal))
3332 return false;
3333 SOImmValV1 = (uint32_t)ARM_AM::getT2SOImmTwoPartFirst(ImmVal);
3334 SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal);
3335 switch (UseOpc) {
3336 default: break;
3337 case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri; break;
3338 case ARM::t2EORrr: NewUseOpc = ARM::t2EORri; break;
3339 }
3340 break;
3341 }
3342 }
3343 }
3344
3345 unsigned OpIdx = Commute ? 2 : 1;
3346 Register Reg1 = UseMI.getOperand(OpIdx).getReg();
3347 bool isKill = UseMI.getOperand(OpIdx).isKill();
3348 const TargetRegisterClass *TRC = MRI->getRegClass(Reg);
3349 Register NewReg = MRI->createVirtualRegister(TRC);
3350 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), get(NewUseOpc),
3351 NewReg)
3352 .addReg(Reg1, getKillRegState(isKill))
3353 .addImm(SOImmValV1)
3354 .add(predOps(ARMCC::AL))
3355 .add(condCodeOp());
3356 UseMI.setDesc(get(NewUseOpc));
3357 UseMI.getOperand(1).setReg(NewReg);
3358 UseMI.getOperand(1).setIsKill();
3359 UseMI.getOperand(2).ChangeToImmediate(SOImmValV2);
3360 DefMI.eraseFromParent();
3361 // FIXME: t2ADDrr should be split, as different rules apply when writing to SP,
3362 // just as t2ADDri was split into t2ADDri and t2ADDspImm.
3363 // Then the below code will not be needed, as the input/output register
3364 // classes will be rgpr or gprSP.
3365 // For now, we fix the UseMI operand explicitly here: 3366 switch(NewUseOpc){ 3367 case ARM::t2ADDspImm: 3368 case ARM::t2SUBspImm: 3369 case ARM::t2ADDri: 3370 case ARM::t2SUBri: 3371 MRI->setRegClass(UseMI.getOperand(0).getReg(), TRC); 3372 } 3373 return true; 3374 } 3375 3376 static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData, 3377 const MachineInstr &MI) { 3378 switch (MI.getOpcode()) { 3379 default: { 3380 const MCInstrDesc &Desc = MI.getDesc(); 3381 int UOps = ItinData->getNumMicroOps(Desc.getSchedClass()); 3382 assert(UOps >= 0 && "bad # UOps"); 3383 return UOps; 3384 } 3385 3386 case ARM::LDRrs: 3387 case ARM::LDRBrs: 3388 case ARM::STRrs: 3389 case ARM::STRBrs: { 3390 unsigned ShOpVal = MI.getOperand(3).getImm(); 3391 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 3392 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 3393 if (!isSub && 3394 (ShImm == 0 || 3395 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 3396 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 3397 return 1; 3398 return 2; 3399 } 3400 3401 case ARM::LDRH: 3402 case ARM::STRH: { 3403 if (!MI.getOperand(2).getReg()) 3404 return 1; 3405 3406 unsigned ShOpVal = MI.getOperand(3).getImm(); 3407 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 3408 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 3409 if (!isSub && 3410 (ShImm == 0 || 3411 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 3412 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 3413 return 1; 3414 return 2; 3415 } 3416 3417 case ARM::LDRSB: 3418 case ARM::LDRSH: 3419 return (ARM_AM::getAM3Op(MI.getOperand(3).getImm()) == ARM_AM::sub) ? 3 : 2; 3420 3421 case ARM::LDRSB_POST: 3422 case ARM::LDRSH_POST: { 3423 Register Rt = MI.getOperand(0).getReg(); 3424 Register Rm = MI.getOperand(3).getReg(); 3425 return (Rt == Rm) ? 4 : 3; 3426 } 3427 3428 case ARM::LDR_PRE_REG: 3429 case ARM::LDRB_PRE_REG: { 3430 Register Rt = MI.getOperand(0).getReg(); 3431 Register Rm = MI.getOperand(3).getReg(); 3432 if (Rt == Rm) 3433 return 3; 3434 unsigned ShOpVal = MI.getOperand(4).getImm(); 3435 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 3436 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 3437 if (!isSub && 3438 (ShImm == 0 || 3439 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 3440 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 3441 return 2; 3442 return 3; 3443 } 3444 3445 case ARM::STR_PRE_REG: 3446 case ARM::STRB_PRE_REG: { 3447 unsigned ShOpVal = MI.getOperand(4).getImm(); 3448 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 3449 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 3450 if (!isSub && 3451 (ShImm == 0 || 3452 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 3453 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 3454 return 2; 3455 return 3; 3456 } 3457 3458 case ARM::LDRH_PRE: 3459 case ARM::STRH_PRE: { 3460 Register Rt = MI.getOperand(0).getReg(); 3461 Register Rm = MI.getOperand(3).getReg(); 3462 if (!Rm) 3463 return 2; 3464 if (Rt == Rm) 3465 return 3; 3466 return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 3 : 2; 3467 } 3468 3469 case ARM::LDR_POST_REG: 3470 case ARM::LDRB_POST_REG: 3471 case ARM::LDRH_POST: { 3472 Register Rt = MI.getOperand(0).getReg(); 3473 Register Rm = MI.getOperand(3).getReg(); 3474 return (Rt == Rm) ? 
3 : 2; 3475 } 3476 3477 case ARM::LDR_PRE_IMM: 3478 case ARM::LDRB_PRE_IMM: 3479 case ARM::LDR_POST_IMM: 3480 case ARM::LDRB_POST_IMM: 3481 case ARM::STRB_POST_IMM: 3482 case ARM::STRB_POST_REG: 3483 case ARM::STRB_PRE_IMM: 3484 case ARM::STRH_POST: 3485 case ARM::STR_POST_IMM: 3486 case ARM::STR_POST_REG: 3487 case ARM::STR_PRE_IMM: 3488 return 2; 3489 3490 case ARM::LDRSB_PRE: 3491 case ARM::LDRSH_PRE: { 3492 Register Rm = MI.getOperand(3).getReg(); 3493 if (Rm == 0) 3494 return 3; 3495 Register Rt = MI.getOperand(0).getReg(); 3496 if (Rt == Rm) 3497 return 4; 3498 unsigned ShOpVal = MI.getOperand(4).getImm(); 3499 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 3500 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 3501 if (!isSub && 3502 (ShImm == 0 || 3503 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 3504 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 3505 return 3; 3506 return 4; 3507 } 3508 3509 case ARM::LDRD: { 3510 Register Rt = MI.getOperand(0).getReg(); 3511 Register Rn = MI.getOperand(2).getReg(); 3512 Register Rm = MI.getOperand(3).getReg(); 3513 if (Rm) 3514 return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 4 3515 : 3; 3516 return (Rt == Rn) ? 3 : 2; 3517 } 3518 3519 case ARM::STRD: { 3520 Register Rm = MI.getOperand(3).getReg(); 3521 if (Rm) 3522 return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 4 3523 : 3; 3524 return 2; 3525 } 3526 3527 case ARM::LDRD_POST: 3528 case ARM::t2LDRD_POST: 3529 return 3; 3530 3531 case ARM::STRD_POST: 3532 case ARM::t2STRD_POST: 3533 return 4; 3534 3535 case ARM::LDRD_PRE: { 3536 Register Rt = MI.getOperand(0).getReg(); 3537 Register Rn = MI.getOperand(3).getReg(); 3538 Register Rm = MI.getOperand(4).getReg(); 3539 if (Rm) 3540 return (ARM_AM::getAM3Op(MI.getOperand(5).getImm()) == ARM_AM::sub) ? 5 3541 : 4; 3542 return (Rt == Rn) ? 4 : 3; 3543 } 3544 3545 case ARM::t2LDRD_PRE: { 3546 Register Rt = MI.getOperand(0).getReg(); 3547 Register Rn = MI.getOperand(3).getReg(); 3548 return (Rt == Rn) ? 4 : 3; 3549 } 3550 3551 case ARM::STRD_PRE: { 3552 Register Rm = MI.getOperand(4).getReg(); 3553 if (Rm) 3554 return (ARM_AM::getAM3Op(MI.getOperand(5).getImm()) == ARM_AM::sub) ? 5 3555 : 4; 3556 return 3; 3557 } 3558 3559 case ARM::t2STRD_PRE: 3560 return 3; 3561 3562 case ARM::t2LDR_POST: 3563 case ARM::t2LDRB_POST: 3564 case ARM::t2LDRB_PRE: 3565 case ARM::t2LDRSBi12: 3566 case ARM::t2LDRSBi8: 3567 case ARM::t2LDRSBpci: 3568 case ARM::t2LDRSBs: 3569 case ARM::t2LDRH_POST: 3570 case ARM::t2LDRH_PRE: 3571 case ARM::t2LDRSBT: 3572 case ARM::t2LDRSB_POST: 3573 case ARM::t2LDRSB_PRE: 3574 case ARM::t2LDRSH_POST: 3575 case ARM::t2LDRSH_PRE: 3576 case ARM::t2LDRSHi12: 3577 case ARM::t2LDRSHi8: 3578 case ARM::t2LDRSHpci: 3579 case ARM::t2LDRSHs: 3580 return 2; 3581 3582 case ARM::t2LDRDi8: { 3583 Register Rt = MI.getOperand(0).getReg(); 3584 Register Rn = MI.getOperand(2).getReg(); 3585 return (Rt == Rn) ? 3 : 2; 3586 } 3587 3588 case ARM::t2STRB_POST: 3589 case ARM::t2STRB_PRE: 3590 case ARM::t2STRBs: 3591 case ARM::t2STRDi8: 3592 case ARM::t2STRH_POST: 3593 case ARM::t2STRH_PRE: 3594 case ARM::t2STRHs: 3595 case ARM::t2STR_POST: 3596 case ARM::t2STR_PRE: 3597 case ARM::t2STRs: 3598 return 2; 3599 } 3600 } 3601 3602 // Return the number of 32-bit words loaded by LDM or stored by STM. If this 3603 // can't be easily determined return 0 (missing MachineMemOperand). 
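// For example, an LDM whose attached memoperands cover 16 bytes reports
// four 32-bit addresses; with no memoperands attached it reports 0.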
3604 // 3605 // FIXME: The current MachineInstr design does not support relying on machine 3606 // mem operands to determine the width of a memory access. Instead, we expect 3607 // the target to provide this information based on the instruction opcode and 3608 // operands. However, using MachineMemOperand is the best solution now for 3609 // two reasons: 3610 // 3611 // 1) getNumMicroOps tries to infer LDM memory width from the total number of MI 3612 // operands. This is much more dangerous than using the MachineMemOperand 3613 // sizes because CodeGen passes can insert/remove optional machine operands. In 3614 // fact, it's totally incorrect for preRA passes and appears to be wrong for 3615 // postRA passes as well. 3616 // 3617 // 2) getNumLDMAddresses is only used by the scheduling machine model and any 3618 // machine model that calls this should handle the unknown (zero size) case. 3619 // 3620 // Long term, we should require a target hook that verifies MachineMemOperand 3621 // sizes during MC lowering. That target hook should be local to MC lowering 3622 // because we can't ensure that it is aware of other MI forms. Doing this will 3623 // ensure that MachineMemOperands are correctly propagated through all passes. 3624 unsigned ARMBaseInstrInfo::getNumLDMAddresses(const MachineInstr &MI) const { 3625 unsigned Size = 0; 3626 for (MachineInstr::mmo_iterator I = MI.memoperands_begin(), 3627 E = MI.memoperands_end(); 3628 I != E; ++I) { 3629 Size += (*I)->getSize(); 3630 } 3631 // FIXME: The scheduler currently can't handle values larger than 16. But 3632 // the values can actually go up to 32 for floating-point load/store 3633 // multiple (VLDMIA etc.). Also, the way this code is reasoning about memory 3634 // operations isn't right; we could end up with "extra" memory operands for 3635 // various reasons, like tail merge merging two memory operations. 3636 return std::min(Size / 4, 16U); 3637 } 3638 3639 static unsigned getNumMicroOpsSingleIssuePlusExtras(unsigned Opc, 3640 unsigned NumRegs) { 3641 unsigned UOps = 1 + NumRegs; // 1 for address computation. 3642 switch (Opc) { 3643 default: 3644 break; 3645 case ARM::VLDMDIA_UPD: 3646 case ARM::VLDMDDB_UPD: 3647 case ARM::VLDMSIA_UPD: 3648 case ARM::VLDMSDB_UPD: 3649 case ARM::VSTMDIA_UPD: 3650 case ARM::VSTMDDB_UPD: 3651 case ARM::VSTMSIA_UPD: 3652 case ARM::VSTMSDB_UPD: 3653 case ARM::LDMIA_UPD: 3654 case ARM::LDMDA_UPD: 3655 case ARM::LDMDB_UPD: 3656 case ARM::LDMIB_UPD: 3657 case ARM::STMIA_UPD: 3658 case ARM::STMDA_UPD: 3659 case ARM::STMDB_UPD: 3660 case ARM::STMIB_UPD: 3661 case ARM::tLDMIA_UPD: 3662 case ARM::tSTMIA_UPD: 3663 case ARM::t2LDMIA_UPD: 3664 case ARM::t2LDMDB_UPD: 3665 case ARM::t2STMIA_UPD: 3666 case ARM::t2STMDB_UPD: 3667 ++UOps; // One for base register writeback. 3668 break; 3669 case ARM::LDMIA_RET: 3670 case ARM::tPOP_RET: 3671 case ARM::t2LDMIA_RET: 3672 UOps += 2; // One for base reg wb, one for write to pc. 
3673 break;
3674 }
3675 return UOps;
3676 }
3677
3678 unsigned ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
3679 const MachineInstr &MI) const {
3680 if (!ItinData || ItinData->isEmpty())
3681 return 1;
3682
3683 const MCInstrDesc &Desc = MI.getDesc();
3684 unsigned Class = Desc.getSchedClass();
3685 int ItinUOps = ItinData->getNumMicroOps(Class);
3686 if (ItinUOps >= 0) {
3687 if (Subtarget.isSwift() && (Desc.mayLoad() || Desc.mayStore()))
3688 return getNumMicroOpsSwiftLdSt(ItinData, MI);
3689
3690 return ItinUOps;
3691 }
3692
3693 unsigned Opc = MI.getOpcode();
3694 switch (Opc) {
3695 default:
3696 llvm_unreachable("Unexpected multi-uops instruction!");
3697 case ARM::VLDMQIA:
3698 case ARM::VSTMQIA:
3699 return 2;
3700
3701 // The number of uOps for load / store multiple is determined by the number
3702 // of registers.
3703 //
3704 // On Cortex-A8, each pair of register loads / stores can be scheduled on the
3705 // same cycle. The scheduling for the first load / store must be done
3706 // separately by assuming the address is not 64-bit aligned.
3707 //
3708 // On Cortex-A9, the formula is simply (#reg / 2) + (#reg % 2). If the address
3709 // is not 64-bit aligned, then AGU would take an extra cycle. For VFP / NEON
3710 // load / store multiple, the formula is (#reg / 2) + (#reg % 2) + 1.
3711 case ARM::VLDMDIA:
3712 case ARM::VLDMDIA_UPD:
3713 case ARM::VLDMDDB_UPD:
3714 case ARM::VLDMSIA:
3715 case ARM::VLDMSIA_UPD:
3716 case ARM::VLDMSDB_UPD:
3717 case ARM::VSTMDIA:
3718 case ARM::VSTMDIA_UPD:
3719 case ARM::VSTMDDB_UPD:
3720 case ARM::VSTMSIA:
3721 case ARM::VSTMSIA_UPD:
3722 case ARM::VSTMSDB_UPD: {
3723 unsigned NumRegs = MI.getNumOperands() - Desc.getNumOperands();
3724 return (NumRegs / 2) + (NumRegs % 2) + 1;
3725 }
3726
3727 case ARM::LDMIA_RET:
3728 case ARM::LDMIA:
3729 case ARM::LDMDA:
3730 case ARM::LDMDB:
3731 case ARM::LDMIB:
3732 case ARM::LDMIA_UPD:
3733 case ARM::LDMDA_UPD:
3734 case ARM::LDMDB_UPD:
3735 case ARM::LDMIB_UPD:
3736 case ARM::STMIA:
3737 case ARM::STMDA:
3738 case ARM::STMDB:
3739 case ARM::STMIB:
3740 case ARM::STMIA_UPD:
3741 case ARM::STMDA_UPD:
3742 case ARM::STMDB_UPD:
3743 case ARM::STMIB_UPD:
3744 case ARM::tLDMIA:
3745 case ARM::tLDMIA_UPD:
3746 case ARM::tSTMIA_UPD:
3747 case ARM::tPOP_RET:
3748 case ARM::tPOP:
3749 case ARM::tPUSH:
3750 case ARM::t2LDMIA_RET:
3751 case ARM::t2LDMIA:
3752 case ARM::t2LDMDB:
3753 case ARM::t2LDMIA_UPD:
3754 case ARM::t2LDMDB_UPD:
3755 case ARM::t2STMIA:
3756 case ARM::t2STMDB:
3757 case ARM::t2STMIA_UPD:
3758 case ARM::t2STMDB_UPD: {
3759 unsigned NumRegs = MI.getNumOperands() - Desc.getNumOperands() + 1;
3760 switch (Subtarget.getLdStMultipleTiming()) {
3761 case ARMSubtarget::SingleIssuePlusExtras:
3762 return getNumMicroOpsSingleIssuePlusExtras(Opc, NumRegs);
3763 case ARMSubtarget::SingleIssue:
3764 // Assume the worst.
3765 return NumRegs;
3766 case ARMSubtarget::DoubleIssue: {
3767 if (NumRegs < 4)
3768 return 2;
3769 // 4 registers would be issued: 2, 2.
3770 // 5 registers would be issued: 2, 2, 1.
3771 unsigned UOps = (NumRegs / 2);
3772 if (NumRegs % 2)
3773 ++UOps;
3774 return UOps;
3775 }
3776 case ARMSubtarget::DoubleIssueCheckUnalignedAccess: {
3777 unsigned UOps = (NumRegs / 2);
3778 // If there is an odd number of registers or if it's not 64-bit aligned,
3779 // then it takes an extra AGU (Address Generation Unit) cycle.
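// For example: four registers at a 64-bit aligned address issue in 2 uops;
// four registers at an unaligned (or unknown) address issue in 3; five
// registers issue in 3 either way, since the odd count already adds the
// extra cycle.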
3780 if ((NumRegs % 2) || !MI.hasOneMemOperand() ||
3781 (*MI.memoperands_begin())->getAlign() < Align(8))
3782 ++UOps;
3783 return UOps;
3784 }
3785 }
3786 }
3787 }
3788 llvm_unreachable("Didn't find the number of microops");
3789 }
3790
3791 int
3792 ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData,
3793 const MCInstrDesc &DefMCID,
3794 unsigned DefClass,
3795 unsigned DefIdx, unsigned DefAlign) const {
3796 int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
3797 if (RegNo <= 0)
3798 // Def is the address writeback.
3799 return ItinData->getOperandCycle(DefClass, DefIdx);
3800
3801 int DefCycle;
3802 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3803 // (regno / 2) + (regno % 2) + 1
3804 DefCycle = RegNo / 2 + 1;
3805 if (RegNo % 2)
3806 ++DefCycle;
3807 } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3808 DefCycle = RegNo;
3809 bool isSLoad = false;
3810
3811 switch (DefMCID.getOpcode()) {
3812 default: break;
3813 case ARM::VLDMSIA:
3814 case ARM::VLDMSIA_UPD:
3815 case ARM::VLDMSDB_UPD:
3816 isSLoad = true;
3817 break;
3818 }
3819
3820 // If there is an odd number of 'S' registers or if it's not 64-bit aligned,
3821 // then it takes an extra cycle.
3822 if ((isSLoad && (RegNo % 2)) || DefAlign < 8)
3823 ++DefCycle;
3824 } else {
3825 // Assume the worst.
3826 DefCycle = RegNo + 2;
3827 }
3828
3829 return DefCycle;
3830 }
3831
3832 int
3833 ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData,
3834 const MCInstrDesc &DefMCID,
3835 unsigned DefClass,
3836 unsigned DefIdx, unsigned DefAlign) const {
3837 int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
3838 if (RegNo <= 0)
3839 // Def is the address writeback.
3840 return ItinData->getOperandCycle(DefClass, DefIdx);
3841
3842 int DefCycle;
3843 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3844 // 4 registers would be issued: 1, 2, 1.
3845 // 5 registers would be issued: 1, 2, 2.
3846 DefCycle = RegNo / 2;
3847 if (DefCycle < 1)
3848 DefCycle = 1;
3849 // Result latency is issue cycle + 2: E2.
3850 DefCycle += 2;
3851 } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3852 DefCycle = (RegNo / 2);
3853 // If there is an odd number of registers or if it's not 64-bit aligned,
3854 // then it takes an extra AGU (Address Generation Unit) cycle.
3855 if ((RegNo % 2) || DefAlign < 8)
3856 ++DefCycle;
3857 // Result latency is AGU cycles + 2.
3858 DefCycle += 2;
3859 } else {
3860 // Assume the worst.
3861 DefCycle = RegNo + 2;
3862 }
3863
3864 return DefCycle;
3865 }
3866
3867 int
3868 ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData,
3869 const MCInstrDesc &UseMCID,
3870 unsigned UseClass,
3871 unsigned UseIdx, unsigned UseAlign) const {
3872 int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
3873 if (RegNo <= 0)
3874 return ItinData->getOperandCycle(UseClass, UseIdx);
3875
3876 int UseCycle;
3877 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3878 // (regno / 2) + (regno % 2) + 1
3879 UseCycle = RegNo / 2 + 1;
3880 if (RegNo % 2)
3881 ++UseCycle;
3882 } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3883 UseCycle = RegNo;
3884 bool isSStore = false;
3885
3886 switch (UseMCID.getOpcode()) {
3887 default: break;
3888 case ARM::VSTMSIA:
3889 case ARM::VSTMSIA_UPD:
3890 case ARM::VSTMSDB_UPD:
3891 isSStore = true;
3892 break;
3893 }
3894
3895 // If there is an odd number of 'S' registers or if it's not 64-bit aligned,
3896 // then it takes an extra cycle.
3897 if ((isSStore && (RegNo % 2)) || UseAlign < 8)
3898 ++UseCycle;
3899 } else {
3900 // Assume the worst.
3901 UseCycle = RegNo + 2;
3902 }
3903
3904 return UseCycle;
3905 }
3906
3907 int
3908 ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData,
3909 const MCInstrDesc &UseMCID,
3910 unsigned UseClass,
3911 unsigned UseIdx, unsigned UseAlign) const {
3912 int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
3913 if (RegNo <= 0)
3914 return ItinData->getOperandCycle(UseClass, UseIdx);
3915
3916 int UseCycle;
3917 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3918 UseCycle = RegNo / 2;
3919 if (UseCycle < 2)
3920 UseCycle = 2;
3921 // Read in E3.
3922 UseCycle += 2;
3923 } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3924 UseCycle = (RegNo / 2);
3925 // If there is an odd number of registers or if it's not 64-bit aligned,
3926 // then it takes an extra AGU (Address Generation Unit) cycle.
3927 if ((RegNo % 2) || UseAlign < 8)
3928 ++UseCycle;
3929 } else {
3930 // Assume the worst.
3931 UseCycle = 1;
3932 }
3933 return UseCycle;
3934 }
3935
3936 int
3937 ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
3938 const MCInstrDesc &DefMCID,
3939 unsigned DefIdx, unsigned DefAlign,
3940 const MCInstrDesc &UseMCID,
3941 unsigned UseIdx, unsigned UseAlign) const {
3942 unsigned DefClass = DefMCID.getSchedClass();
3943 unsigned UseClass = UseMCID.getSchedClass();
3944
3945 if (DefIdx < DefMCID.getNumDefs() && UseIdx < UseMCID.getNumOperands())
3946 return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
3947
3948 // This may be a def / use of a variable_ops instruction; the operand
3949 // latency might be determinable dynamically. Let the target try to
3950 // figure it out.
3951 int DefCycle = -1;
3952 bool LdmBypass = false;
3953 switch (DefMCID.getOpcode()) {
3954 default:
3955 DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
3956 break;
3957
3958 case ARM::VLDMDIA:
3959 case ARM::VLDMDIA_UPD:
3960 case ARM::VLDMDDB_UPD:
3961 case ARM::VLDMSIA:
3962 case ARM::VLDMSIA_UPD:
3963 case ARM::VLDMSDB_UPD:
3964 DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
3965 break;
3966
3967 case ARM::LDMIA_RET:
3968 case ARM::LDMIA:
3969 case ARM::LDMDA:
3970 case ARM::LDMDB:
3971 case ARM::LDMIB:
3972 case ARM::LDMIA_UPD:
3973 case ARM::LDMDA_UPD:
3974 case ARM::LDMDB_UPD:
3975 case ARM::LDMIB_UPD:
3976 case ARM::tLDMIA:
3977 case ARM::tLDMIA_UPD:
3978 case ARM::tPUSH:
3979 case ARM::t2LDMIA_RET:
3980 case ARM::t2LDMIA:
3981 case ARM::t2LDMDB:
3982 case ARM::t2LDMIA_UPD:
3983 case ARM::t2LDMDB_UPD:
3984 LdmBypass = true;
3985 DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
3986 break;
3987 }
3988
3989 if (DefCycle == -1)
3990 // We can't seem to determine the result latency of the def; assume it's 2.
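// (Two is a conservative default; the matching use-side fallback below
// assumes the operand is read in the first stage.)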
3991 DefCycle = 2; 3992 3993 int UseCycle = -1; 3994 switch (UseMCID.getOpcode()) { 3995 default: 3996 UseCycle = ItinData->getOperandCycle(UseClass, UseIdx); 3997 break; 3998 3999 case ARM::VSTMDIA: 4000 case ARM::VSTMDIA_UPD: 4001 case ARM::VSTMDDB_UPD: 4002 case ARM::VSTMSIA: 4003 case ARM::VSTMSIA_UPD: 4004 case ARM::VSTMSDB_UPD: 4005 UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign); 4006 break; 4007 4008 case ARM::STMIA: 4009 case ARM::STMDA: 4010 case ARM::STMDB: 4011 case ARM::STMIB: 4012 case ARM::STMIA_UPD: 4013 case ARM::STMDA_UPD: 4014 case ARM::STMDB_UPD: 4015 case ARM::STMIB_UPD: 4016 case ARM::tSTMIA_UPD: 4017 case ARM::tPOP_RET: 4018 case ARM::tPOP: 4019 case ARM::t2STMIA: 4020 case ARM::t2STMDB: 4021 case ARM::t2STMIA_UPD: 4022 case ARM::t2STMDB_UPD: 4023 UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign); 4024 break; 4025 } 4026 4027 if (UseCycle == -1) 4028 // Assume it's read in the first stage. 4029 UseCycle = 1; 4030 4031 UseCycle = DefCycle - UseCycle + 1; 4032 if (UseCycle > 0) { 4033 if (LdmBypass) { 4034 // It's a variable_ops instruction so we can't use DefIdx here. Just use 4035 // first def operand. 4036 if (ItinData->hasPipelineForwarding(DefClass, DefMCID.getNumOperands()-1, 4037 UseClass, UseIdx)) 4038 --UseCycle; 4039 } else if (ItinData->hasPipelineForwarding(DefClass, DefIdx, 4040 UseClass, UseIdx)) { 4041 --UseCycle; 4042 } 4043 } 4044 4045 return UseCycle; 4046 } 4047 4048 static const MachineInstr *getBundledDefMI(const TargetRegisterInfo *TRI, 4049 const MachineInstr *MI, unsigned Reg, 4050 unsigned &DefIdx, unsigned &Dist) { 4051 Dist = 0; 4052 4053 MachineBasicBlock::const_iterator I = MI; ++I; 4054 MachineBasicBlock::const_instr_iterator II = std::prev(I.getInstrIterator()); 4055 assert(II->isInsideBundle() && "Empty bundle?"); 4056 4057 int Idx = -1; 4058 while (II->isInsideBundle()) { 4059 Idx = II->findRegisterDefOperandIdx(Reg, false, true, TRI); 4060 if (Idx != -1) 4061 break; 4062 --II; 4063 ++Dist; 4064 } 4065 4066 assert(Idx != -1 && "Cannot find bundled definition!"); 4067 DefIdx = Idx; 4068 return &*II; 4069 } 4070 4071 static const MachineInstr *getBundledUseMI(const TargetRegisterInfo *TRI, 4072 const MachineInstr &MI, unsigned Reg, 4073 unsigned &UseIdx, unsigned &Dist) { 4074 Dist = 0; 4075 4076 MachineBasicBlock::const_instr_iterator II = ++MI.getIterator(); 4077 assert(II->isInsideBundle() && "Empty bundle?"); 4078 MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end(); 4079 4080 // FIXME: This doesn't properly handle multiple uses. 4081 int Idx = -1; 4082 while (II != E && II->isInsideBundle()) { 4083 Idx = II->findRegisterUseOperandIdx(Reg, false, TRI); 4084 if (Idx != -1) 4085 break; 4086 if (II->getOpcode() != ARM::t2IT) 4087 ++Dist; 4088 ++II; 4089 } 4090 4091 if (Idx == -1) { 4092 Dist = 0; 4093 return nullptr; 4094 } 4095 4096 UseIdx = Idx; 4097 return &*II; 4098 } 4099 4100 /// Return the number of cycles to add to (or subtract from) the static 4101 /// itinerary based on the def opcode and alignment. The caller will ensure that 4102 /// adjusted latency is at least one cycle. 4103 static int adjustDefLatency(const ARMSubtarget &Subtarget, 4104 const MachineInstr &DefMI, 4105 const MCInstrDesc &DefMCID, unsigned DefAlign) { 4106 int Adjust = 0; 4107 if (Subtarget.isCortexA8() || Subtarget.isLikeA9() || Subtarget.isCortexA7()) { 4108 // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2] 4109 // variants are one cycle cheaper. 
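// For example (illustrative operands): LDR r0, [r1, r2] and
// LDR r0, [r1, r2, lsl #2] get the cheaper latency below, while
// LDR r0, [r1, r2, lsl #1] does not.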
4110 switch (DefMCID.getOpcode()) { 4111 default: break; 4112 case ARM::LDRrs: 4113 case ARM::LDRBrs: { 4114 unsigned ShOpVal = DefMI.getOperand(3).getImm(); 4115 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 4116 if (ShImm == 0 || 4117 (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)) 4118 --Adjust; 4119 break; 4120 } 4121 case ARM::t2LDRs: 4122 case ARM::t2LDRBs: 4123 case ARM::t2LDRHs: 4124 case ARM::t2LDRSHs: { 4125 // Thumb2 mode: lsl only. 4126 unsigned ShAmt = DefMI.getOperand(3).getImm(); 4127 if (ShAmt == 0 || ShAmt == 2) 4128 --Adjust; 4129 break; 4130 } 4131 } 4132 } else if (Subtarget.isSwift()) { 4133 // FIXME: Properly handle all of the latency adjustments for address 4134 // writeback. 4135 switch (DefMCID.getOpcode()) { 4136 default: break; 4137 case ARM::LDRrs: 4138 case ARM::LDRBrs: { 4139 unsigned ShOpVal = DefMI.getOperand(3).getImm(); 4140 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 4141 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 4142 if (!isSub && 4143 (ShImm == 0 || 4144 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 4145 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 4146 Adjust -= 2; 4147 else if (!isSub && 4148 ShImm == 1 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsr) 4149 --Adjust; 4150 break; 4151 } 4152 case ARM::t2LDRs: 4153 case ARM::t2LDRBs: 4154 case ARM::t2LDRHs: 4155 case ARM::t2LDRSHs: { 4156 // Thumb2 mode: lsl only. 4157 unsigned ShAmt = DefMI.getOperand(3).getImm(); 4158 if (ShAmt == 0 || ShAmt == 1 || ShAmt == 2 || ShAmt == 3) 4159 Adjust -= 2; 4160 break; 4161 } 4162 } 4163 } 4164 4165 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment()) { 4166 switch (DefMCID.getOpcode()) { 4167 default: break; 4168 case ARM::VLD1q8: 4169 case ARM::VLD1q16: 4170 case ARM::VLD1q32: 4171 case ARM::VLD1q64: 4172 case ARM::VLD1q8wb_fixed: 4173 case ARM::VLD1q16wb_fixed: 4174 case ARM::VLD1q32wb_fixed: 4175 case ARM::VLD1q64wb_fixed: 4176 case ARM::VLD1q8wb_register: 4177 case ARM::VLD1q16wb_register: 4178 case ARM::VLD1q32wb_register: 4179 case ARM::VLD1q64wb_register: 4180 case ARM::VLD2d8: 4181 case ARM::VLD2d16: 4182 case ARM::VLD2d32: 4183 case ARM::VLD2q8: 4184 case ARM::VLD2q16: 4185 case ARM::VLD2q32: 4186 case ARM::VLD2d8wb_fixed: 4187 case ARM::VLD2d16wb_fixed: 4188 case ARM::VLD2d32wb_fixed: 4189 case ARM::VLD2q8wb_fixed: 4190 case ARM::VLD2q16wb_fixed: 4191 case ARM::VLD2q32wb_fixed: 4192 case ARM::VLD2d8wb_register: 4193 case ARM::VLD2d16wb_register: 4194 case ARM::VLD2d32wb_register: 4195 case ARM::VLD2q8wb_register: 4196 case ARM::VLD2q16wb_register: 4197 case ARM::VLD2q32wb_register: 4198 case ARM::VLD3d8: 4199 case ARM::VLD3d16: 4200 case ARM::VLD3d32: 4201 case ARM::VLD1d64T: 4202 case ARM::VLD3d8_UPD: 4203 case ARM::VLD3d16_UPD: 4204 case ARM::VLD3d32_UPD: 4205 case ARM::VLD1d64Twb_fixed: 4206 case ARM::VLD1d64Twb_register: 4207 case ARM::VLD3q8_UPD: 4208 case ARM::VLD3q16_UPD: 4209 case ARM::VLD3q32_UPD: 4210 case ARM::VLD4d8: 4211 case ARM::VLD4d16: 4212 case ARM::VLD4d32: 4213 case ARM::VLD1d64Q: 4214 case ARM::VLD4d8_UPD: 4215 case ARM::VLD4d16_UPD: 4216 case ARM::VLD4d32_UPD: 4217 case ARM::VLD1d64Qwb_fixed: 4218 case ARM::VLD1d64Qwb_register: 4219 case ARM::VLD4q8_UPD: 4220 case ARM::VLD4q16_UPD: 4221 case ARM::VLD4q32_UPD: 4222 case ARM::VLD1DUPq8: 4223 case ARM::VLD1DUPq16: 4224 case ARM::VLD1DUPq32: 4225 case ARM::VLD1DUPq8wb_fixed: 4226 case ARM::VLD1DUPq16wb_fixed: 4227 case ARM::VLD1DUPq32wb_fixed: 4228 case ARM::VLD1DUPq8wb_register: 4229 case ARM::VLD1DUPq16wb_register: 4230 case 
ARM::VLD1DUPq32wb_register:
4231 case ARM::VLD2DUPd8:
4232 case ARM::VLD2DUPd16:
4233 case ARM::VLD2DUPd32:
4234 case ARM::VLD2DUPd8wb_fixed:
4235 case ARM::VLD2DUPd16wb_fixed:
4236 case ARM::VLD2DUPd32wb_fixed:
4237 case ARM::VLD2DUPd8wb_register:
4238 case ARM::VLD2DUPd16wb_register:
4239 case ARM::VLD2DUPd32wb_register:
4240 case ARM::VLD4DUPd8:
4241 case ARM::VLD4DUPd16:
4242 case ARM::VLD4DUPd32:
4243 case ARM::VLD4DUPd8_UPD:
4244 case ARM::VLD4DUPd16_UPD:
4245 case ARM::VLD4DUPd32_UPD:
4246 case ARM::VLD1LNd8:
4247 case ARM::VLD1LNd16:
4248 case ARM::VLD1LNd32:
4249 case ARM::VLD1LNd8_UPD:
4250 case ARM::VLD1LNd16_UPD:
4251 case ARM::VLD1LNd32_UPD:
4252 case ARM::VLD2LNd8:
4253 case ARM::VLD2LNd16:
4254 case ARM::VLD2LNd32:
4255 case ARM::VLD2LNq16:
4256 case ARM::VLD2LNq32:
4257 case ARM::VLD2LNd8_UPD:
4258 case ARM::VLD2LNd16_UPD:
4259 case ARM::VLD2LNd32_UPD:
4260 case ARM::VLD2LNq16_UPD:
4261 case ARM::VLD2LNq32_UPD:
4262 case ARM::VLD4LNd8:
4263 case ARM::VLD4LNd16:
4264 case ARM::VLD4LNd32:
4265 case ARM::VLD4LNq16:
4266 case ARM::VLD4LNq32:
4267 case ARM::VLD4LNd8_UPD:
4268 case ARM::VLD4LNd16_UPD:
4269 case ARM::VLD4LNd32_UPD:
4270 case ARM::VLD4LNq16_UPD:
4271 case ARM::VLD4LNq32_UPD:
4272 // If the address is not 64-bit aligned, the latencies of these
4273 // instructions increase by one.
4274 ++Adjust;
4275 break;
4276 }
4277 }
4278 return Adjust;
4279 }
4280
4281 int ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
4282 const MachineInstr &DefMI,
4283 unsigned DefIdx,
4284 const MachineInstr &UseMI,
4285 unsigned UseIdx) const {
4286 // No operand latency. The caller may fall back to getInstrLatency.
4287 if (!ItinData || ItinData->isEmpty())
4288 return -1;
4289
4290 const MachineOperand &DefMO = DefMI.getOperand(DefIdx);
4291 Register Reg = DefMO.getReg();
4292
4293 const MachineInstr *ResolvedDefMI = &DefMI;
4294 unsigned DefAdj = 0;
4295 if (DefMI.isBundle())
4296 ResolvedDefMI =
4297 getBundledDefMI(&getRegisterInfo(), &DefMI, Reg, DefIdx, DefAdj);
4298 if (ResolvedDefMI->isCopyLike() || ResolvedDefMI->isInsertSubreg() ||
4299 ResolvedDefMI->isRegSequence() || ResolvedDefMI->isImplicitDef()) {
4300 return 1;
4301 }
4302
4303 const MachineInstr *ResolvedUseMI = &UseMI;
4304 unsigned UseAdj = 0;
4305 if (UseMI.isBundle()) {
4306 ResolvedUseMI =
4307 getBundledUseMI(&getRegisterInfo(), UseMI, Reg, UseIdx, UseAdj);
4308 if (!ResolvedUseMI)
4309 return -1;
4310 }
4311
4312 return getOperandLatencyImpl(
4313 ItinData, *ResolvedDefMI, DefIdx, ResolvedDefMI->getDesc(), DefAdj, DefMO,
4314 Reg, *ResolvedUseMI, UseIdx, ResolvedUseMI->getDesc(), UseAdj);
4315 }
4316
4317 int ARMBaseInstrInfo::getOperandLatencyImpl(
4318 const InstrItineraryData *ItinData, const MachineInstr &DefMI,
4319 unsigned DefIdx, const MCInstrDesc &DefMCID, unsigned DefAdj,
4320 const MachineOperand &DefMO, unsigned Reg, const MachineInstr &UseMI,
4321 unsigned UseIdx, const MCInstrDesc &UseMCID, unsigned UseAdj) const {
4322 if (Reg == ARM::CPSR) {
4323 if (DefMI.getOpcode() == ARM::FMSTAT) {
4324 // fpscr -> cpsr stalls over 20 cycles on A8 (and earlier?)
4325 return Subtarget.isLikeA9() ? 1 : 20;
4326 }
4327
4328 // CPSR set and branch can be paired in the same cycle.
4329 if (UseMI.isBranch())
4330 return 0;
4331
4332 // Otherwise it takes the instruction latency (generally one).
4333 unsigned Latency = getInstrLatency(ItinData, DefMI);
4334
4335 // For Thumb2 and -Os, prefer scheduling a CPSR-setting instruction close to
4336 // its uses.
Instructions which are otherwise scheduled between them may 4337 // incur a code size penalty (not able to use the CPSR setting 16-bit 4338 // instructions). 4339 if (Latency > 0 && Subtarget.isThumb2()) { 4340 const MachineFunction *MF = DefMI.getParent()->getParent(); 4341 // FIXME: Use Function::hasOptSize(). 4342 if (MF->getFunction().hasFnAttribute(Attribute::OptimizeForSize)) 4343 --Latency; 4344 } 4345 return Latency; 4346 } 4347 4348 if (DefMO.isImplicit() || UseMI.getOperand(UseIdx).isImplicit()) 4349 return -1; 4350 4351 unsigned DefAlign = DefMI.hasOneMemOperand() 4352 ? (*DefMI.memoperands_begin())->getAlign().value() 4353 : 0; 4354 unsigned UseAlign = UseMI.hasOneMemOperand() 4355 ? (*UseMI.memoperands_begin())->getAlign().value() 4356 : 0; 4357 4358 // Get the itinerary's latency if possible, and handle variable_ops. 4359 int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign, UseMCID, 4360 UseIdx, UseAlign); 4361 // Unable to find operand latency. The caller may resort to getInstrLatency. 4362 if (Latency < 0) 4363 return Latency; 4364 4365 // Adjust for IT block position. 4366 int Adj = DefAdj + UseAdj; 4367 4368 // Adjust for dynamic def-side opcode variants not captured by the itinerary. 4369 Adj += adjustDefLatency(Subtarget, DefMI, DefMCID, DefAlign); 4370 if (Adj >= 0 || (int)Latency > -Adj) { 4371 return Latency + Adj; 4372 } 4373 // Return the itinerary latency, which may be zero but not less than zero. 4374 return Latency; 4375 } 4376 4377 int 4378 ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, 4379 SDNode *DefNode, unsigned DefIdx, 4380 SDNode *UseNode, unsigned UseIdx) const { 4381 if (!DefNode->isMachineOpcode()) 4382 return 1; 4383 4384 const MCInstrDesc &DefMCID = get(DefNode->getMachineOpcode()); 4385 4386 if (isZeroCost(DefMCID.Opcode)) 4387 return 0; 4388 4389 if (!ItinData || ItinData->isEmpty()) 4390 return DefMCID.mayLoad() ? 3 : 1; 4391 4392 if (!UseNode->isMachineOpcode()) { 4393 int Latency = ItinData->getOperandCycle(DefMCID.getSchedClass(), DefIdx); 4394 int Adj = Subtarget.getPreISelOperandLatencyAdjustment(); 4395 int Threshold = 1 + Adj; 4396 return Latency <= Threshold ? 1 : Latency - Adj; 4397 } 4398 4399 const MCInstrDesc &UseMCID = get(UseNode->getMachineOpcode()); 4400 auto *DefMN = cast<MachineSDNode>(DefNode); 4401 unsigned DefAlign = !DefMN->memoperands_empty() 4402 ? (*DefMN->memoperands_begin())->getAlign().value() 4403 : 0; 4404 auto *UseMN = cast<MachineSDNode>(UseNode); 4405 unsigned UseAlign = !UseMN->memoperands_empty() 4406 ? (*UseMN->memoperands_begin())->getAlign().value() 4407 : 0; 4408 int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign, 4409 UseMCID, UseIdx, UseAlign); 4410 4411 if (Latency > 1 && 4412 (Subtarget.isCortexA8() || Subtarget.isLikeA9() || 4413 Subtarget.isCortexA7())) { 4414 // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2] 4415 // variants are one cycle cheaper. 4416 switch (DefMCID.getOpcode()) { 4417 default: break; 4418 case ARM::LDRrs: 4419 case ARM::LDRBrs: { 4420 unsigned ShOpVal = 4421 cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue(); 4422 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 4423 if (ShImm == 0 || 4424 (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)) 4425 --Latency; 4426 break; 4427 } 4428 case ARM::t2LDRs: 4429 case ARM::t2LDRBs: 4430 case ARM::t2LDRHs: 4431 case ARM::t2LDRSHs: { 4432 // Thumb2 mode: lsl only. 
4433 unsigned ShAmt = 4434 cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue(); 4435 if (ShAmt == 0 || ShAmt == 2) 4436 --Latency; 4437 break; 4438 } 4439 } 4440 } else if (DefIdx == 0 && Latency > 2 && Subtarget.isSwift()) { 4441 // FIXME: Properly handle all of the latency adjustments for address 4442 // writeback. 4443 switch (DefMCID.getOpcode()) { 4444 default: break; 4445 case ARM::LDRrs: 4446 case ARM::LDRBrs: { 4447 unsigned ShOpVal = 4448 cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue(); 4449 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 4450 if (ShImm == 0 || 4451 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 4452 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)) 4453 Latency -= 2; 4454 else if (ShImm == 1 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsr) 4455 --Latency; 4456 break; 4457 } 4458 case ARM::t2LDRs: 4459 case ARM::t2LDRBs: 4460 case ARM::t2LDRHs: 4461 case ARM::t2LDRSHs: 4462 // Thumb2 mode: lsl 0-3 only. 4463 Latency -= 2; 4464 break; 4465 } 4466 } 4467 4468 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment()) 4469 switch (DefMCID.getOpcode()) { 4470 default: break; 4471 case ARM::VLD1q8: 4472 case ARM::VLD1q16: 4473 case ARM::VLD1q32: 4474 case ARM::VLD1q64: 4475 case ARM::VLD1q8wb_register: 4476 case ARM::VLD1q16wb_register: 4477 case ARM::VLD1q32wb_register: 4478 case ARM::VLD1q64wb_register: 4479 case ARM::VLD1q8wb_fixed: 4480 case ARM::VLD1q16wb_fixed: 4481 case ARM::VLD1q32wb_fixed: 4482 case ARM::VLD1q64wb_fixed: 4483 case ARM::VLD2d8: 4484 case ARM::VLD2d16: 4485 case ARM::VLD2d32: 4486 case ARM::VLD2q8Pseudo: 4487 case ARM::VLD2q16Pseudo: 4488 case ARM::VLD2q32Pseudo: 4489 case ARM::VLD2d8wb_fixed: 4490 case ARM::VLD2d16wb_fixed: 4491 case ARM::VLD2d32wb_fixed: 4492 case ARM::VLD2q8PseudoWB_fixed: 4493 case ARM::VLD2q16PseudoWB_fixed: 4494 case ARM::VLD2q32PseudoWB_fixed: 4495 case ARM::VLD2d8wb_register: 4496 case ARM::VLD2d16wb_register: 4497 case ARM::VLD2d32wb_register: 4498 case ARM::VLD2q8PseudoWB_register: 4499 case ARM::VLD2q16PseudoWB_register: 4500 case ARM::VLD2q32PseudoWB_register: 4501 case ARM::VLD3d8Pseudo: 4502 case ARM::VLD3d16Pseudo: 4503 case ARM::VLD3d32Pseudo: 4504 case ARM::VLD1d8TPseudo: 4505 case ARM::VLD1d16TPseudo: 4506 case ARM::VLD1d32TPseudo: 4507 case ARM::VLD1d64TPseudo: 4508 case ARM::VLD1d64TPseudoWB_fixed: 4509 case ARM::VLD1d64TPseudoWB_register: 4510 case ARM::VLD3d8Pseudo_UPD: 4511 case ARM::VLD3d16Pseudo_UPD: 4512 case ARM::VLD3d32Pseudo_UPD: 4513 case ARM::VLD3q8Pseudo_UPD: 4514 case ARM::VLD3q16Pseudo_UPD: 4515 case ARM::VLD3q32Pseudo_UPD: 4516 case ARM::VLD3q8oddPseudo: 4517 case ARM::VLD3q16oddPseudo: 4518 case ARM::VLD3q32oddPseudo: 4519 case ARM::VLD3q8oddPseudo_UPD: 4520 case ARM::VLD3q16oddPseudo_UPD: 4521 case ARM::VLD3q32oddPseudo_UPD: 4522 case ARM::VLD4d8Pseudo: 4523 case ARM::VLD4d16Pseudo: 4524 case ARM::VLD4d32Pseudo: 4525 case ARM::VLD1d8QPseudo: 4526 case ARM::VLD1d16QPseudo: 4527 case ARM::VLD1d32QPseudo: 4528 case ARM::VLD1d64QPseudo: 4529 case ARM::VLD1d64QPseudoWB_fixed: 4530 case ARM::VLD1d64QPseudoWB_register: 4531 case ARM::VLD1q8HighQPseudo: 4532 case ARM::VLD1q8LowQPseudo_UPD: 4533 case ARM::VLD1q8HighTPseudo: 4534 case ARM::VLD1q8LowTPseudo_UPD: 4535 case ARM::VLD1q16HighQPseudo: 4536 case ARM::VLD1q16LowQPseudo_UPD: 4537 case ARM::VLD1q16HighTPseudo: 4538 case ARM::VLD1q16LowTPseudo_UPD: 4539 case ARM::VLD1q32HighQPseudo: 4540 case ARM::VLD1q32LowQPseudo_UPD: 4541 case ARM::VLD1q32HighTPseudo: 4542 case ARM::VLD1q32LowTPseudo_UPD: 4543 case ARM::VLD1q64HighQPseudo: 
4544 case ARM::VLD1q64LowQPseudo_UPD:
4545 case ARM::VLD1q64HighTPseudo:
4546 case ARM::VLD1q64LowTPseudo_UPD:
4547 case ARM::VLD4d8Pseudo_UPD:
4548 case ARM::VLD4d16Pseudo_UPD:
4549 case ARM::VLD4d32Pseudo_UPD:
4550 case ARM::VLD4q8Pseudo_UPD:
4551 case ARM::VLD4q16Pseudo_UPD:
4552 case ARM::VLD4q32Pseudo_UPD:
4553 case ARM::VLD4q8oddPseudo:
4554 case ARM::VLD4q16oddPseudo:
4555 case ARM::VLD4q32oddPseudo:
4556 case ARM::VLD4q8oddPseudo_UPD:
4557 case ARM::VLD4q16oddPseudo_UPD:
4558 case ARM::VLD4q32oddPseudo_UPD:
4559 case ARM::VLD1DUPq8:
4560 case ARM::VLD1DUPq16:
4561 case ARM::VLD1DUPq32:
4562 case ARM::VLD1DUPq8wb_fixed:
4563 case ARM::VLD1DUPq16wb_fixed:
4564 case ARM::VLD1DUPq32wb_fixed:
4565 case ARM::VLD1DUPq8wb_register:
4566 case ARM::VLD1DUPq16wb_register:
4567 case ARM::VLD1DUPq32wb_register:
4568 case ARM::VLD2DUPd8:
4569 case ARM::VLD2DUPd16:
4570 case ARM::VLD2DUPd32:
4571 case ARM::VLD2DUPd8wb_fixed:
4572 case ARM::VLD2DUPd16wb_fixed:
4573 case ARM::VLD2DUPd32wb_fixed:
4574 case ARM::VLD2DUPd8wb_register:
4575 case ARM::VLD2DUPd16wb_register:
4576 case ARM::VLD2DUPd32wb_register:
4577 case ARM::VLD2DUPq8EvenPseudo:
4578 case ARM::VLD2DUPq8OddPseudo:
4579 case ARM::VLD2DUPq16EvenPseudo:
4580 case ARM::VLD2DUPq16OddPseudo:
4581 case ARM::VLD2DUPq32EvenPseudo:
4582 case ARM::VLD2DUPq32OddPseudo:
4583 case ARM::VLD3DUPq8EvenPseudo:
4584 case ARM::VLD3DUPq8OddPseudo:
4585 case ARM::VLD3DUPq16EvenPseudo:
4586 case ARM::VLD3DUPq16OddPseudo:
4587 case ARM::VLD3DUPq32EvenPseudo:
4588 case ARM::VLD3DUPq32OddPseudo:
4589 case ARM::VLD4DUPd8Pseudo:
4590 case ARM::VLD4DUPd16Pseudo:
4591 case ARM::VLD4DUPd32Pseudo:
4592 case ARM::VLD4DUPd8Pseudo_UPD:
4593 case ARM::VLD4DUPd16Pseudo_UPD:
4594 case ARM::VLD4DUPd32Pseudo_UPD:
4595 case ARM::VLD4DUPq8EvenPseudo:
4596 case ARM::VLD4DUPq8OddPseudo:
4597 case ARM::VLD4DUPq16EvenPseudo:
4598 case ARM::VLD4DUPq16OddPseudo:
4599 case ARM::VLD4DUPq32EvenPseudo:
4600 case ARM::VLD4DUPq32OddPseudo:
4601 case ARM::VLD1LNq8Pseudo:
4602 case ARM::VLD1LNq16Pseudo:
4603 case ARM::VLD1LNq32Pseudo:
4604 case ARM::VLD1LNq8Pseudo_UPD:
4605 case ARM::VLD1LNq16Pseudo_UPD:
4606 case ARM::VLD1LNq32Pseudo_UPD:
4607 case ARM::VLD2LNd8Pseudo:
4608 case ARM::VLD2LNd16Pseudo:
4609 case ARM::VLD2LNd32Pseudo:
4610 case ARM::VLD2LNq16Pseudo:
4611 case ARM::VLD2LNq32Pseudo:
4612 case ARM::VLD2LNd8Pseudo_UPD:
4613 case ARM::VLD2LNd16Pseudo_UPD:
4614 case ARM::VLD2LNd32Pseudo_UPD:
4615 case ARM::VLD2LNq16Pseudo_UPD:
4616 case ARM::VLD2LNq32Pseudo_UPD:
4617 case ARM::VLD4LNd8Pseudo:
4618 case ARM::VLD4LNd16Pseudo:
4619 case ARM::VLD4LNd32Pseudo:
4620 case ARM::VLD4LNq16Pseudo:
4621 case ARM::VLD4LNq32Pseudo:
4622 case ARM::VLD4LNd8Pseudo_UPD:
4623 case ARM::VLD4LNd16Pseudo_UPD:
4624 case ARM::VLD4LNd32Pseudo_UPD:
4625 case ARM::VLD4LNq16Pseudo_UPD:
4626 case ARM::VLD4LNq32Pseudo_UPD:
4627 // If the address is not 64-bit aligned, the latencies of these
4628 // instructions increase by one.
4629 ++Latency;
4630 break;
4631 }
4632
4633 return Latency;
4634 }
4635
4636 unsigned ARMBaseInstrInfo::getPredicationCost(const MachineInstr &MI) const {
4637 if (MI.isCopyLike() || MI.isInsertSubreg() || MI.isRegSequence() ||
4638 MI.isImplicitDef())
4639 return 0;
4640
4641 if (MI.isBundle())
4642 return 0;
4643
4644 const MCInstrDesc &MCID = MI.getDesc();
4645
4646 if (MCID.isCall() || (MCID.hasImplicitDefOfPhysReg(ARM::CPSR) &&
4647 !Subtarget.cheapPredicableCPSRDef())) {
4648 // When predicated, CPSR is an additional source operand for CPSR-updating
4649 // instructions; this apparently increases their latencies.
4650 return 1;
4651 }
4652 return 0;
4653 }
4654
4655 unsigned ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
4656 const MachineInstr &MI,
4657 unsigned *PredCost) const {
4658 if (MI.isCopyLike() || MI.isInsertSubreg() || MI.isRegSequence() ||
4659 MI.isImplicitDef())
4660 return 1;
4661
4662 // An instruction scheduler typically runs on unbundled instructions; however,
4663 // other passes may query the latency of a bundled instruction.
4664 if (MI.isBundle()) {
4665 unsigned Latency = 0;
4666 MachineBasicBlock::const_instr_iterator I = MI.getIterator();
4667 MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
4668 while (++I != E && I->isInsideBundle()) {
4669 if (I->getOpcode() != ARM::t2IT)
4670 Latency += getInstrLatency(ItinData, *I, PredCost);
4671 }
4672 return Latency;
4673 }
4674
4675 const MCInstrDesc &MCID = MI.getDesc();
4676 if (PredCost && (MCID.isCall() || (MCID.hasImplicitDefOfPhysReg(ARM::CPSR) &&
4677 !Subtarget.cheapPredicableCPSRDef()))) {
4678 // When predicated, CPSR is an additional source operand for CPSR-updating
4679 // instructions; this apparently increases their latencies.
4680 *PredCost = 1;
4681 }
4682 // Be sure to call getStageLatency for an empty itinerary in case it has a
4683 // valid MinLatency property.
4684 if (!ItinData)
4685 return MI.mayLoad() ? 3 : 1;
4686
4687 unsigned Class = MCID.getSchedClass();
4688
4689 // For instructions with variable uops, use uops as latency.
4690 if (!ItinData->isEmpty() && ItinData->getNumMicroOps(Class) < 0)
4691 return getNumMicroOps(ItinData, MI);
4692
4693 // For the common case, fall back on the itinerary's latency.
4694 unsigned Latency = ItinData->getStageLatency(Class);
4695
4696 // Adjust for dynamic def-side opcode variants not captured by the itinerary.
4697 unsigned DefAlign =
4698 MI.hasOneMemOperand() ?
(*MI.memoperands_begin())->getAlign().value() : 0;
4699 int Adj = adjustDefLatency(Subtarget, MI, MCID, DefAlign);
4700 if (Adj >= 0 || (int)Latency > -Adj) {
4701 return Latency + Adj;
4702 }
4703 return Latency;
4704 }
4705
4706 int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
4707 SDNode *Node) const {
4708 if (!Node->isMachineOpcode())
4709 return 1;
4710
4711 if (!ItinData || ItinData->isEmpty())
4712 return 1;
4713
4714 unsigned Opcode = Node->getMachineOpcode();
4715 switch (Opcode) {
4716 default:
4717 return ItinData->getStageLatency(get(Opcode).getSchedClass());
4718 case ARM::VLDMQIA:
4719 case ARM::VSTMQIA:
4720 return 2;
4721 }
4722 }
4723
4724 bool ARMBaseInstrInfo::hasHighOperandLatency(const TargetSchedModel &SchedModel,
4725 const MachineRegisterInfo *MRI,
4726 const MachineInstr &DefMI,
4727 unsigned DefIdx,
4728 const MachineInstr &UseMI,
4729 unsigned UseIdx) const {
4730 unsigned DDomain = DefMI.getDesc().TSFlags & ARMII::DomainMask;
4731 unsigned UDomain = UseMI.getDesc().TSFlags & ARMII::DomainMask;
4732 if (Subtarget.nonpipelinedVFP() &&
4733 (DDomain == ARMII::DomainVFP || UDomain == ARMII::DomainVFP))
4734 return true;
4735
4736 // Hoist VFP / NEON instructions with a latency of 4 or higher.
4737 unsigned Latency =
4738 SchedModel.computeOperandLatency(&DefMI, DefIdx, &UseMI, UseIdx);
4739 if (Latency <= 3)
4740 return false;
4741 return DDomain == ARMII::DomainVFP || DDomain == ARMII::DomainNEON ||
4742 UDomain == ARMII::DomainVFP || UDomain == ARMII::DomainNEON;
4743 }
4744
4745 bool ARMBaseInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
4746 const MachineInstr &DefMI,
4747 unsigned DefIdx) const {
4748 const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
4749 if (!ItinData || ItinData->isEmpty())
4750 return false;
4751
4752 unsigned DDomain = DefMI.getDesc().TSFlags & ARMII::DomainMask;
4753 if (DDomain == ARMII::DomainGeneral) {
4754 unsigned DefClass = DefMI.getDesc().getSchedClass();
4755 int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
4756 return (DefCycle != -1 && DefCycle <= 2);
4757 }
4758 return false;
4759 }
4760
4761 bool ARMBaseInstrInfo::verifyInstruction(const MachineInstr &MI,
4762 StringRef &ErrInfo) const {
4763 if (convertAddSubFlagsOpcode(MI.getOpcode())) {
4764 ErrInfo = "Pseudo flag setting opcodes only exist in Selection DAG";
4765 return false;
4766 }
4767 if (MI.getOpcode() == ARM::tMOVr && !Subtarget.hasV6Ops()) {
4768 // Make sure we don't generate a lo-lo mov that isn't supported.
4769 if (!ARM::hGPRRegClass.contains(MI.getOperand(0).getReg()) &&
4770 !ARM::hGPRRegClass.contains(MI.getOperand(1).getReg())) {
4771 ErrInfo = "Non-flag-setting Thumb1 mov is v6-only";
4772 return false;
4773 }
4774 }
4775 if (MI.getOpcode() == ARM::tPUSH ||
4776 MI.getOpcode() == ARM::tPOP ||
4777 MI.getOpcode() == ARM::tPOP_RET) {
4778 for (int i = 2, e = MI.getNumOperands(); i < e; ++i) {
4779 if (MI.getOperand(i).isImplicit() ||
4780 !MI.getOperand(i).isReg())
4781 continue;
4782 Register Reg = MI.getOperand(i).getReg();
4783 if (Reg < ARM::R0 || Reg > ARM::R7) {
4784 if (!(MI.getOpcode() == ARM::tPUSH && Reg == ARM::LR) &&
4785 !(MI.getOpcode() == ARM::tPOP_RET && Reg == ARM::PC)) {
4786 ErrInfo = "Unsupported register in Thumb1 push/pop";
4787 return false;
4788 }
4789 }
4790 }
4791 }
4792 return true;
4793 }
4794
4795 // LoadStackGuard has so far only been implemented for MachO. A different code
4796 // sequence is needed for other targets.
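// For a MachO target where the guard symbol is accessed indirectly, the
// expansion below produces, roughly (illustrative; LoadImmOpc/LoadOpc are
// whatever opcodes the caller passes in):
//   LoadImmOpc Reg, @__stack_chk_guard$non_lazy_ptr
//   LoadOpc    Reg, [Reg]   ; load the guard's address from the GOT entry
//   LoadOpc    Reg, [Reg]   ; load the guard value itself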
4797 void ARMBaseInstrInfo::expandLoadStackGuardBase(MachineBasicBlock::iterator MI, 4798 unsigned LoadImmOpc, 4799 unsigned LoadOpc) const { 4800 assert(!Subtarget.isROPI() && !Subtarget.isRWPI() && 4801 "ROPI/RWPI not currently supported with stack guard"); 4802 4803 MachineBasicBlock &MBB = *MI->getParent(); 4804 DebugLoc DL = MI->getDebugLoc(); 4805 Register Reg = MI->getOperand(0).getReg(); 4806 const GlobalValue *GV = 4807 cast<GlobalValue>((*MI->memoperands_begin())->getValue()); 4808 MachineInstrBuilder MIB; 4809 4810 BuildMI(MBB, MI, DL, get(LoadImmOpc), Reg) 4811 .addGlobalAddress(GV, 0, ARMII::MO_NONLAZY); 4812 4813 if (Subtarget.isGVIndirectSymbol(GV)) { 4814 MIB = BuildMI(MBB, MI, DL, get(LoadOpc), Reg); 4815 MIB.addReg(Reg, RegState::Kill).addImm(0); 4816 auto Flags = MachineMemOperand::MOLoad | 4817 MachineMemOperand::MODereferenceable | 4818 MachineMemOperand::MOInvariant; 4819 MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand( 4820 MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 4, Align(4)); 4821 MIB.addMemOperand(MMO).add(predOps(ARMCC::AL)); 4822 } 4823 4824 MIB = BuildMI(MBB, MI, DL, get(LoadOpc), Reg); 4825 MIB.addReg(Reg, RegState::Kill) 4826 .addImm(0) 4827 .cloneMemRefs(*MI) 4828 .add(predOps(ARMCC::AL)); 4829 } 4830 4831 bool 4832 ARMBaseInstrInfo::isFpMLxInstruction(unsigned Opcode, unsigned &MulOpc, 4833 unsigned &AddSubOpc, 4834 bool &NegAcc, bool &HasLane) const { 4835 DenseMap<unsigned, unsigned>::const_iterator I = MLxEntryMap.find(Opcode); 4836 if (I == MLxEntryMap.end()) 4837 return false; 4838 4839 const ARM_MLxEntry &Entry = ARM_MLxTable[I->second]; 4840 MulOpc = Entry.MulOpc; 4841 AddSubOpc = Entry.AddSubOpc; 4842 NegAcc = Entry.NegAcc; 4843 HasLane = Entry.HasLane; 4844 return true; 4845 } 4846 4847 //===----------------------------------------------------------------------===// 4848 // Execution domains. 4849 //===----------------------------------------------------------------------===// 4850 // 4851 // Some instructions go down the NEON pipeline, some go down the VFP pipeline, 4852 // and some can go down both. The vmov instructions go down the VFP pipeline, 4853 // but they can be changed to vorr equivalents that are executed by the NEON 4854 // pipeline. 4855 // 4856 // We use the following execution domain numbering: 4857 // 4858 enum ARMExeDomain { 4859 ExeGeneric = 0, 4860 ExeVFP = 1, 4861 ExeNEON = 2 4862 }; 4863 4864 // 4865 // Also see ARMInstrFormats.td and Domain* enums in ARMBaseInfo.h 4866 // 4867 std::pair<uint16_t, uint16_t> 4868 ARMBaseInstrInfo::getExecutionDomain(const MachineInstr &MI) const { 4869 // If we don't have access to NEON instructions then we won't be able 4870 // to swizzle anything to the NEON domain. Check to make sure. 4871 if (Subtarget.hasNEON()) { 4872 // VMOVD, VMOVRS and VMOVSR are VFP instructions, but can be changed to NEON 4873 // if they are not predicated. 4874 if (MI.getOpcode() == ARM::VMOVD && !isPredicated(MI)) 4875 return std::make_pair(ExeVFP, (1 << ExeVFP) | (1 << ExeNEON)); 4876 4877 // CortexA9 is particularly picky about mixing the two and wants these 4878 // converted. 4879 if (Subtarget.useNEONForFPMovs() && !isPredicated(MI) && 4880 (MI.getOpcode() == ARM::VMOVRS || MI.getOpcode() == ARM::VMOVSR || 4881 MI.getOpcode() == ARM::VMOVS)) 4882 return std::make_pair(ExeVFP, (1 << ExeVFP) | (1 << ExeNEON)); 4883 } 4884 // No other instructions can be swizzled, so just determine their domain. 
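// (For the swizzlable cases above, setExecutionDomain below performs the
// actual rewrite, e.g. turning a VMOVD into VORRd DDst, DSrc, DSrc.)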
4885 unsigned Domain = MI.getDesc().TSFlags & ARMII::DomainMask;
4886
4887 if (Domain & ARMII::DomainNEON)
4888 return std::make_pair(ExeNEON, 0);
4889
4890 // Certain instructions can go either way on Cortex-A8.
4891 // Treat them as NEON instructions.
4892 if ((Domain & ARMII::DomainNEONA8) && Subtarget.isCortexA8())
4893 return std::make_pair(ExeNEON, 0);
4894
4895 if (Domain & ARMII::DomainVFP)
4896 return std::make_pair(ExeVFP, 0);
4897
4898 return std::make_pair(ExeGeneric, 0);
4899 }
4900
4901 static unsigned getCorrespondingDRegAndLane(const TargetRegisterInfo *TRI,
4902 unsigned SReg, unsigned &Lane) {
4903 unsigned DReg = TRI->getMatchingSuperReg(SReg, ARM::ssub_0, &ARM::DPRRegClass);
4904 Lane = 0;
4905
4906 if (DReg != ARM::NoRegister)
4907 return DReg;
4908
4909 Lane = 1;
4910 DReg = TRI->getMatchingSuperReg(SReg, ARM::ssub_1, &ARM::DPRRegClass);
4911
4912 assert(DReg && "S-register with no D super-register?");
4913 return DReg;
4914 }
4915
4916 /// getImplicitSPRUseForDPRUse - Given a use of a DPR register and lane,
4917 /// set ImplicitSReg to a register number that must be marked as an implicit
4918 /// use, or to zero if no extra register needs to be marked.
4919 ///
4920 /// If the function cannot determine if an SPR should be marked implicit use or
4921 /// not, it returns false.
4922 ///
4923 /// This function handles cases where an instruction is being modified from taking
4924 /// an SPR to a DPR[Lane]. A use of the DPR is being added, which may conflict
4925 /// with an earlier def of an SPR corresponding to DPR[Lane^1] (i.e. the other
4926 /// lane of the DPR).
4927 ///
4928 /// If the other SPR is defined, an implicit-use of it should be added.
4929 /// Otherwise (including the case where the DPR itself is defined), it should not be.
4930 ///
4931 static bool getImplicitSPRUseForDPRUse(const TargetRegisterInfo *TRI,
4932 MachineInstr &MI, unsigned DReg,
4933 unsigned Lane, unsigned &ImplicitSReg) {
4934 // If the DPR is defined or used already, the other SPR lane will be chained
4935 // correctly, so there is nothing to be done.
4936 if (MI.definesRegister(DReg, TRI) || MI.readsRegister(DReg, TRI)) {
4937 ImplicitSReg = 0;
4938 return true;
4939 }
4940
4941 // Otherwise we need to go searching to see if the SPR is set explicitly.
4942 ImplicitSReg = TRI->getSubReg(DReg,
4943 (Lane & 1) ? ARM::ssub_0 : ARM::ssub_1);
4944 MachineBasicBlock::LivenessQueryResult LQR =
4945 MI.getParent()->computeRegisterLiveness(TRI, ImplicitSReg, MI);
4946
4947 if (LQR == MachineBasicBlock::LQR_Live)
4948 return true;
4949 else if (LQR == MachineBasicBlock::LQR_Unknown)
4950 return false;
4951
4952 // If the register is known not to be live, there is no need to add an
4953 // implicit-use.
4954 ImplicitSReg = 0;
4955 return true;
4956 }
4957
4958 void ARMBaseInstrInfo::setExecutionDomain(MachineInstr &MI,
4959 unsigned Domain) const {
4960 unsigned DstReg, SrcReg, DReg;
4961 unsigned Lane;
4962 MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
4963 const TargetRegisterInfo *TRI = &getRegisterInfo();
4964 switch (MI.getOpcode()) {
4965 default:
4966 llvm_unreachable("cannot handle opcode!");
4967 break;
4968 case ARM::VMOVD:
4969 if (Domain != ExeNEON)
4970 break;
4971
4972 // Zap the predicate operands.
4973 assert(!isPredicated(MI) && "Cannot predicate a VORRd");
4974
4975 // Make sure we've got NEON instructions.
4976 assert(Subtarget.hasNEON() && "VORRd requires NEON");
4977
4978 // Source instruction is %DDst = VMOVD %DSrc, 14, %noreg (; implicits)
4979 DstReg = MI.getOperand(0).getReg();
4980 SrcReg = MI.getOperand(1).getReg();
4981
4982 for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
4983 MI.RemoveOperand(i - 1);
4984
4985 // Change to a %DDst = VORRd %DSrc, %DSrc, 14, %noreg (; implicits)
4986 MI.setDesc(get(ARM::VORRd));
4987 MIB.addReg(DstReg, RegState::Define)
4988 .addReg(SrcReg)
4989 .addReg(SrcReg)
4990 .add(predOps(ARMCC::AL));
4991 break;
4992 case ARM::VMOVRS:
4993 if (Domain != ExeNEON)
4994 break;
4995 assert(!isPredicated(MI) && "Cannot predicate a VGETLN");
4996
4997 // Source instruction is %RDst = VMOVRS %SSrc, 14, %noreg (; implicits)
4998 DstReg = MI.getOperand(0).getReg();
4999 SrcReg = MI.getOperand(1).getReg();
5000
5001 for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
5002 MI.RemoveOperand(i - 1);
5003
5004 DReg = getCorrespondingDRegAndLane(TRI, SrcReg, Lane);
5005
5006 // Convert to %RDst = VGETLNi32 %DSrc, Lane, 14, %noreg (; imps)
5007 // Note that DSrc has been widened and the other lane may be undef, which
5008 // contaminates the entire register.
5009 MI.setDesc(get(ARM::VGETLNi32));
5010 MIB.addReg(DstReg, RegState::Define)
5011 .addReg(DReg, RegState::Undef)
5012 .addImm(Lane)
5013 .add(predOps(ARMCC::AL));
5014
5015 // The old source should be an implicit use; otherwise we might think it
5016 // was dead before here.
5017 MIB.addReg(SrcReg, RegState::Implicit);
5018 break;
5019 case ARM::VMOVSR: {
5020 if (Domain != ExeNEON)
5021 break;
5022 assert(!isPredicated(MI) && "Cannot predicate a VSETLN");
5023
5024 // Source instruction is %SDst = VMOVSR %RSrc, 14, %noreg (; implicits)
5025 DstReg = MI.getOperand(0).getReg();
5026 SrcReg = MI.getOperand(1).getReg();
5027
5028 DReg = getCorrespondingDRegAndLane(TRI, DstReg, Lane);
5029
5030 unsigned ImplicitSReg;
5031 if (!getImplicitSPRUseForDPRUse(TRI, MI, DReg, Lane, ImplicitSReg))
5032 break;
5033
5034 for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
5035 MI.RemoveOperand(i - 1);
5036
5037 // Convert to %DDst = VSETLNi32 %DDst, %RSrc, Lane, 14, %noreg (; imps)
5038 // Again DDst may be undefined at the beginning of this instruction.
5039 MI.setDesc(get(ARM::VSETLNi32));
5040 MIB.addReg(DReg, RegState::Define)
5041 .addReg(DReg, getUndefRegState(!MI.readsRegister(DReg, TRI)))
5042 .addReg(SrcReg)
5043 .addImm(Lane)
5044 .add(predOps(ARMCC::AL));
5045
5046 // The narrower destination must be marked as set to keep previous chains
5047 // in place.
void ARMBaseInstrInfo::setExecutionDomain(MachineInstr &MI,
                                          unsigned Domain) const {
  unsigned DstReg, SrcReg, DReg;
  unsigned Lane;
  MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("cannot handle opcode!");
    break;
  case ARM::VMOVD:
    if (Domain != ExeNEON)
      break;

    // Zap the predicate operands.
    assert(!isPredicated(MI) && "Cannot predicate a VORRd");

    // Make sure we've got NEON instructions.
    assert(Subtarget.hasNEON() && "VORRd requires NEON");

    // Source instruction is %DDst = VMOVD %DSrc, 14, %noreg (; implicits)
    DstReg = MI.getOperand(0).getReg();
    SrcReg = MI.getOperand(1).getReg();

    for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
      MI.RemoveOperand(i - 1);

    // Change to a %DDst = VORRd %DSrc, %DSrc, 14, %noreg (; implicits)
    MI.setDesc(get(ARM::VORRd));
    MIB.addReg(DstReg, RegState::Define)
        .addReg(SrcReg)
        .addReg(SrcReg)
        .add(predOps(ARMCC::AL));
    break;
  case ARM::VMOVRS:
    if (Domain != ExeNEON)
      break;
    assert(!isPredicated(MI) && "Cannot predicate a VGETLN");

    // Source instruction is %RDst = VMOVRS %SSrc, 14, %noreg (; implicits)
    DstReg = MI.getOperand(0).getReg();
    SrcReg = MI.getOperand(1).getReg();

    for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
      MI.RemoveOperand(i - 1);

    DReg = getCorrespondingDRegAndLane(TRI, SrcReg, Lane);

    // Convert to %RDst = VGETLNi32 %DSrc, Lane, 14, %noreg (; imps)
    // Note that DSrc has been widened and the other lane may be undef, which
    // contaminates the entire register.
    MI.setDesc(get(ARM::VGETLNi32));
    MIB.addReg(DstReg, RegState::Define)
        .addReg(DReg, RegState::Undef)
        .addImm(Lane)
        .add(predOps(ARMCC::AL));

    // The old source should be an implicit use, otherwise we might think it
    // was dead before here.
    MIB.addReg(SrcReg, RegState::Implicit);
    break;
  case ARM::VMOVSR: {
    if (Domain != ExeNEON)
      break;
    assert(!isPredicated(MI) && "Cannot predicate a VSETLN");

    // Source instruction is %SDst = VMOVSR %RSrc, 14, %noreg (; implicits)
    DstReg = MI.getOperand(0).getReg();
    SrcReg = MI.getOperand(1).getReg();

    DReg = getCorrespondingDRegAndLane(TRI, DstReg, Lane);

    unsigned ImplicitSReg;
    if (!getImplicitSPRUseForDPRUse(TRI, MI, DReg, Lane, ImplicitSReg))
      break;

    for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
      MI.RemoveOperand(i - 1);

    // Convert to %DDst = VSETLNi32 %DDst, %RSrc, Lane, 14, %noreg (; imps)
    // Again DDst may be undefined at the beginning of this instruction.
    MI.setDesc(get(ARM::VSETLNi32));
    MIB.addReg(DReg, RegState::Define)
        .addReg(DReg, getUndefRegState(!MI.readsRegister(DReg, TRI)))
        .addReg(SrcReg)
        .addImm(Lane)
        .add(predOps(ARMCC::AL));

    // The narrower destination must be marked as set to keep previous chains
    // in place.
    MIB.addReg(DstReg, RegState::Define | RegState::Implicit);
    if (ImplicitSReg != 0)
      MIB.addReg(ImplicitSReg, RegState::Implicit);
    break;
  }
  case ARM::VMOVS: {
    if (Domain != ExeNEON)
      break;

    // Source instruction is %SDst = VMOVS %SSrc, 14, %noreg (; implicits)
    DstReg = MI.getOperand(0).getReg();
    SrcReg = MI.getOperand(1).getReg();

    unsigned DstLane = 0, SrcLane = 0, DDst, DSrc;
    DDst = getCorrespondingDRegAndLane(TRI, DstReg, DstLane);
    DSrc = getCorrespondingDRegAndLane(TRI, SrcReg, SrcLane);

    unsigned ImplicitSReg;
    if (!getImplicitSPRUseForDPRUse(TRI, MI, DSrc, SrcLane, ImplicitSReg))
      break;

    for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
      MI.RemoveOperand(i - 1);

    if (DSrc == DDst) {
      // Destination can be:
      //   %DDst = VDUPLN32d %DDst, Lane, 14, %noreg (; implicits)
      MI.setDesc(get(ARM::VDUPLN32d));
      MIB.addReg(DDst, RegState::Define)
          .addReg(DDst, getUndefRegState(!MI.readsRegister(DDst, TRI)))
          .addImm(SrcLane)
          .add(predOps(ARMCC::AL));

      // Neither the source nor the destination is naturally represented any
      // more, so add them in manually.
      MIB.addReg(DstReg, RegState::Implicit | RegState::Define);
      MIB.addReg(SrcReg, RegState::Implicit);
      if (ImplicitSReg != 0)
        MIB.addReg(ImplicitSReg, RegState::Implicit);
      break;
    }

    // In general there's no single instruction that can perform an S <-> S
    // move in NEON space, but a pair of VEXT instructions *can* do the
    // job. It turns out that the VEXTs needed will only use DSrc once, with
    // the position based purely on the combination of lane-0 and lane-1
    // involved. For example
    //   vmov s0, s2 -> vext.32 d0, d0, d1, #1  vext.32 d0, d0, d0, #1
    //   vmov s1, s3 -> vext.32 d0, d1, d0, #1  vext.32 d0, d0, d0, #1
    //   vmov s0, s3 -> vext.32 d0, d0, d0, #1  vext.32 d0, d1, d0, #1
    //   vmov s1, s2 -> vext.32 d0, d0, d0, #1  vext.32 d0, d0, d1, #1
    //
    // Pattern of the MachineInstrs is:
    //   %DDst = VEXTd32 %DSrc1, %DSrc2, Lane, 14, %noreg (;implicits)
    MachineInstrBuilder NewMIB;
    NewMIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(ARM::VEXTd32),
                     DDst);

    // On the first instruction, both DSrc and DDst may be undef if present.
    // Specifically when the original instruction didn't have them as an
    // <imp-use>.
    unsigned CurReg = SrcLane == 1 && DstLane == 1 ? DSrc : DDst;
    bool CurUndef = !MI.readsRegister(CurReg, TRI);
    NewMIB.addReg(CurReg, getUndefRegState(CurUndef));

    CurReg = SrcLane == 0 && DstLane == 0 ? DSrc : DDst;
    CurUndef = !MI.readsRegister(CurReg, TRI);
    NewMIB.addReg(CurReg, getUndefRegState(CurUndef))
        .addImm(1)
        .add(predOps(ARMCC::AL));

    if (SrcLane == DstLane)
      NewMIB.addReg(SrcReg, RegState::Implicit);

    MI.setDesc(get(ARM::VEXTd32));
    MIB.addReg(DDst, RegState::Define);

    // On the second instruction, DDst has definitely been defined above, so
    // it is not undef. DSrc, if present, can be undef as above.
    CurReg = SrcLane == 1 && DstLane == 0 ? DSrc : DDst;
    CurUndef = CurReg == DSrc && !MI.readsRegister(CurReg, TRI);
    MIB.addReg(CurReg, getUndefRegState(CurUndef));

    CurReg = SrcLane == 0 && DstLane == 1 ? DSrc : DDst;
    CurUndef = CurReg == DSrc && !MI.readsRegister(CurReg, TRI);
    MIB.addReg(CurReg, getUndefRegState(CurUndef))
        .addImm(1)
        .add(predOps(ARMCC::AL));

    if (SrcLane != DstLane)
      MIB.addReg(SrcReg, RegState::Implicit);

    // As before, the original destination is no longer represented, add it
    // implicitly.
    MIB.addReg(DstReg, RegState::Define | RegState::Implicit);
    if (ImplicitSReg != 0)
      MIB.addReg(ImplicitSReg, RegState::Implicit);
    break;
  }
  }
}
//===----------------------------------------------------------------------===//
// Partial register updates
//===----------------------------------------------------------------------===//
//
// Swift renames NEON registers with 64-bit granularity. That means any
// instruction writing an S-reg implicitly reads the containing D-reg. The
// problem is mostly avoided by translating f32 operations to v2f32 operations
// on D-registers, but f32 loads are still a problem.
//
// These instructions can load an f32 into a NEON register:
//
// VLDRS      - Only writes S, partial D update.
// VLD1LNd32  - Writes all D-regs, explicit partial D update, 2 uops.
// VLD1DUPd32 - Writes all D-regs, no partial reg update, 2 uops.
//
// FCONSTD can be used as a dependency-breaking instruction.
unsigned ARMBaseInstrInfo::getPartialRegUpdateClearance(
    const MachineInstr &MI, unsigned OpNum,
    const TargetRegisterInfo *TRI) const {
  auto PartialUpdateClearance = Subtarget.getPartialUpdateClearance();
  if (!PartialUpdateClearance)
    return 0;

  assert(TRI && "Need TRI instance");

  const MachineOperand &MO = MI.getOperand(OpNum);
  if (MO.readsReg())
    return 0;
  Register Reg = MO.getReg();
  int UseOp = -1;

  switch (MI.getOpcode()) {
  // Normal instructions writing only an S-register.
  case ARM::VLDRS:
  case ARM::FCONSTS:
  case ARM::VMOVSR:
  case ARM::VMOVv8i8:
  case ARM::VMOVv4i16:
  case ARM::VMOVv2i32:
  case ARM::VMOVv2f32:
  case ARM::VMOVv1i64:
    UseOp = MI.findRegisterUseOperandIdx(Reg, false, TRI);
    break;

  // Explicitly reads the dependency.
  case ARM::VLD1LNd32:
    UseOp = 3;
    break;
  default:
    return 0;
  }

  // If this instruction actually reads a value from Reg, there is no unwanted
  // dependency.
  if (UseOp != -1 && MI.getOperand(UseOp).readsReg())
    return 0;

  // We must be able to clobber the whole D-reg.
  if (Register::isVirtualRegister(Reg)) {
    // Virtual register must be a def undef foo:ssub_0 operand.
    if (!MO.getSubReg() || MI.readsVirtualRegister(Reg))
      return 0;
  } else if (ARM::SPRRegClass.contains(Reg)) {
    // Physical register: MI must define the full D-reg.
    unsigned DReg = TRI->getMatchingSuperReg(Reg, ARM::ssub_0,
                                             &ARM::DPRRegClass);
    if (!DReg || !MI.definesRegister(DReg, TRI))
      return 0;
  }

  // MI has an unwanted D-register dependency.
  // Avoid defs in the previous N instructions.
  return PartialUpdateClearance;
}
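// An illustrative instance of the hazard this guards against:
//
//   %d0 = ...             ; some older def of d0
//   %s0 = VLDRS ...       ; writes only lane 0 of d0, so on Swift it carries
//                         ; a false dependency on the older d0 def
//
// When fewer than PartialUpdateClearance instructions separate the two defs,
// the register allocator is expected to call breakPartialRegDependency()
// below to insert a full-width def of d0.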
// Break a partial register dependency after getPartialRegUpdateClearance
// returned non-zero.
void ARMBaseInstrInfo::breakPartialRegDependency(
    MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const {
  assert(OpNum < MI.getDesc().getNumDefs() && "OpNum is not a def");
  assert(TRI && "Need TRI instance");

  const MachineOperand &MO = MI.getOperand(OpNum);
  Register Reg = MO.getReg();
  assert(Register::isPhysicalRegister(Reg) &&
         "Can't break virtual register dependencies.");
  unsigned DReg = Reg;

  // If MI defines an S-reg, find the corresponding D super-register.
  if (ARM::SPRRegClass.contains(Reg)) {
    DReg = ARM::D0 + (Reg - ARM::S0) / 2;
    assert(TRI->isSuperRegister(Reg, DReg) && "Register enums broken");
  }

  assert(ARM::DPRRegClass.contains(DReg) && "Can only break D-reg deps");
  assert(MI.definesRegister(DReg, TRI) && "MI doesn't clobber full D-reg");

  // FIXME: In some cases, VLDRS can be changed to a VLD1DUPd32 which defines
  // the full D-register by loading the same value to both lanes. The
  // instruction is micro-coded with 2 uops, so don't do this until we can
  // properly schedule micro-coded instructions. The dispatcher stalls cause
  // too big regressions.

  // Insert the dependency-breaking FCONSTD before MI.
  // 96 is the encoding of 0.5, but the actual value doesn't matter here.
  BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(ARM::FCONSTD), DReg)
      .addImm(96)
      .add(predOps(ARMCC::AL));
  MI.addRegisterKilled(DReg, TRI, true);
}
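// After the rewrite above, the sequence looks roughly like this (a sketch;
// the exact operands depend on the instruction being fixed up):
//
//   FCONSTD %d0, 96       ; vmov.f64 d0, #0.5 - full-width def, value unused
//   %s0 = VLDRS ...       ; now depends only on the FCONSTD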
bool ARMBaseInstrInfo::hasNOP() const {
  return Subtarget.getFeatureBits()[ARM::HasV6KOps];
}

bool ARMBaseInstrInfo::isSwiftFastImmShift(const MachineInstr *MI) const {
  if (MI->getNumOperands() < 4)
    return true;
  unsigned ShOpVal = MI->getOperand(3).getImm();
  unsigned ShImm = ARM_AM::getSORegOffset(ShOpVal);
  // Swift supports faster shifts for: lsl 2, lsl 1, and lsr 1.
  if ((ShImm == 1 && ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsr) ||
      ((ShImm == 1 || ShImm == 2) &&
       ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsl))
    return true;

  return false;
}
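// For example (illustrative): in an instruction like
//   add r0, r1, r2, lsl #2
// operand 3 encodes "lsl #2", which Swift shifts at full speed, so this
// returns true; "lsl #3" in the same position would return false.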
bool ARMBaseInstrInfo::getRegSequenceLikeInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert(DefIdx < MI.getDesc().getNumDefs() && "Invalid definition index");
  assert(MI.isRegSequenceLike() && "Invalid kind of instruction");

  switch (MI.getOpcode()) {
  case ARM::VMOVDRR:
    // dX = VMOVDRR rY, rZ
    // is the same as:
    // dX = REG_SEQUENCE rY, ssub_0, rZ, ssub_1
    // Populate the InputRegs accordingly.
    // rY
    const MachineOperand *MOReg = &MI.getOperand(1);
    if (!MOReg->isUndef())
      InputRegs.push_back(RegSubRegPairAndIdx(
          MOReg->getReg(), MOReg->getSubReg(), ARM::ssub_0));
    // rZ
    MOReg = &MI.getOperand(2);
    if (!MOReg->isUndef())
      InputRegs.push_back(RegSubRegPairAndIdx(
          MOReg->getReg(), MOReg->getSubReg(), ARM::ssub_1));
    return true;
  }
  llvm_unreachable("Target dependent opcode missing");
}

bool ARMBaseInstrInfo::getExtractSubregLikeInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert(DefIdx < MI.getDesc().getNumDefs() && "Invalid definition index");
  assert(MI.isExtractSubregLike() && "Invalid kind of instruction");

  switch (MI.getOpcode()) {
  case ARM::VMOVRRD:
    // rX, rY = VMOVRRD dZ
    // is the same as:
    // rX = EXTRACT_SUBREG dZ, ssub_0
    // rY = EXTRACT_SUBREG dZ, ssub_1
    const MachineOperand &MOReg = MI.getOperand(2);
    if (MOReg.isUndef())
      return false;
    InputReg.Reg = MOReg.getReg();
    InputReg.SubReg = MOReg.getSubReg();
    InputReg.SubIdx = DefIdx == 0 ? ARM::ssub_0 : ARM::ssub_1;
    return true;
  }
  llvm_unreachable("Target dependent opcode missing");
}

bool ARMBaseInstrInfo::getInsertSubregLikeInputs(
    const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg,
    RegSubRegPairAndIdx &InsertedReg) const {
  assert(DefIdx < MI.getDesc().getNumDefs() && "Invalid definition index");
  assert(MI.isInsertSubregLike() && "Invalid kind of instruction");

  switch (MI.getOpcode()) {
  case ARM::VSETLNi32:
    // dX = VSETLNi32 dY, rZ, imm
    const MachineOperand &MOBaseReg = MI.getOperand(1);
    const MachineOperand &MOInsertedReg = MI.getOperand(2);
    if (MOInsertedReg.isUndef())
      return false;
    const MachineOperand &MOIndex = MI.getOperand(3);
    BaseReg.Reg = MOBaseReg.getReg();
    BaseReg.SubReg = MOBaseReg.getSubReg();

    InsertedReg.Reg = MOInsertedReg.getReg();
    InsertedReg.SubReg = MOInsertedReg.getSubReg();
    InsertedReg.SubIdx = MOIndex.getImm() == 0 ? ARM::ssub_0 : ARM::ssub_1;
    return true;
  }
  llvm_unreachable("Target dependent opcode missing");
}

std::pair<unsigned, unsigned>
ARMBaseInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  const unsigned Mask = ARMII::MO_OPTION_MASK;
  return std::make_pair(TF & Mask, TF & ~Mask);
}
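// For example, a flag word of (MO_LO16 | MO_NONLAZY) decomposes into MO_LO16
// as the direct flag and MO_NONLAZY as the bitmask flag, matching the two
// serializable flag tables below.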
ArrayRef<std::pair<unsigned, const char *>>
ARMBaseInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace ARMII;

  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_LO16, "arm-lo16"}, {MO_HI16, "arm-hi16"}};
  return makeArrayRef(TargetFlags);
}

ArrayRef<std::pair<unsigned, const char *>>
ARMBaseInstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
  using namespace ARMII;

  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_COFFSTUB, "arm-coffstub"},
      {MO_GOT, "arm-got"},
      {MO_SBREL, "arm-sbrel"},
      {MO_DLLIMPORT, "arm-dllimport"},
      {MO_SECREL, "arm-secrel"},
      {MO_NONLAZY, "arm-nonlazy"}};
  return makeArrayRef(TargetFlags);
}

Optional<RegImmPair> ARMBaseInstrInfo::isAddImmediate(const MachineInstr &MI,
                                                      Register Reg) const {
  int Sign = 1;
  unsigned Opcode = MI.getOpcode();
  int64_t Offset = 0;

  // TODO: Handle cases where Reg is a super- or sub-register of the
  // destination register.
  const MachineOperand &Op0 = MI.getOperand(0);
  if (!Op0.isReg() || Reg != Op0.getReg())
    return None;

  // We describe SUBri or ADDri instructions.
  if (Opcode == ARM::SUBri)
    Sign = -1;
  else if (Opcode != ARM::ADDri)
    return None;

  // TODO: Third operand can be global address (usually some string). Since
  //       strings can be relocated we cannot calculate their offsets for
  //       now.
  if (!MI.getOperand(1).isReg() || !MI.getOperand(2).isImm())
    return None;

  Offset = MI.getOperand(2).getImm() * Sign;
  return RegImmPair{MI.getOperand(1).getReg(), Offset};
}
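// Examples of what this describes (illustrative):
//   %r0 = ADDri %r1, 16, ...  with Reg = r0  -->  RegImmPair{r1, +16}
//   %r0 = SUBri %r1, 16, ...  with Reg = r0  -->  RegImmPair{r1, -16}
// Any other opcode, a mismatched Reg, or a non-immediate third operand
// yields None.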
bool llvm::registerDefinedBetween(unsigned Reg,
                                  MachineBasicBlock::iterator From,
                                  MachineBasicBlock::iterator To,
                                  const TargetRegisterInfo *TRI) {
  for (auto I = From; I != To; ++I)
    if (I->modifiesRegister(Reg, TRI))
      return true;
  return false;
}

MachineInstr *llvm::findCMPToFoldIntoCBZ(MachineInstr *Br,
                                         const TargetRegisterInfo *TRI) {
  // Search backwards to the instruction that defines CPSR. This may or may
  // not be a CMP; we check that after this loop. If we find another
  // instruction that reads CPSR, we return nullptr.
  MachineBasicBlock::iterator CmpMI = Br;
  while (CmpMI != Br->getParent()->begin()) {
    --CmpMI;
    if (CmpMI->modifiesRegister(ARM::CPSR, TRI))
      break;
    if (CmpMI->readsRegister(ARM::CPSR, TRI))
      break;
  }

  // Check that this inst is a CMP r[0-7], #0 and that the register
  // is not redefined between the cmp and the br.
  if (CmpMI->getOpcode() != ARM::tCMPi8 && CmpMI->getOpcode() != ARM::t2CMPri)
    return nullptr;
  Register Reg = CmpMI->getOperand(0).getReg();
  Register PredReg;
  ARMCC::CondCodes Pred = getInstrPredicate(*CmpMI, PredReg);
  if (Pred != ARMCC::AL || CmpMI->getOperand(1).getImm() != 0)
    return nullptr;
  if (!isARMLowRegister(Reg))
    return nullptr;
  if (registerDefinedBetween(Reg, CmpMI->getNextNode(), Br, TRI))
    return nullptr;

  return &*CmpMI;
}
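// In other words, this recognizes shapes like (Thumb, illustrative):
//   tCMPi8 %r3, 0      ; low register, compared against zero, unpredicated
//   ...                ; r3 and CPSR untouched in between
//   Bcc %bb.1, eq
// and returns the CMP so the caller can fold the compare and branch into a
// CBZ/CBNZ.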
unsigned llvm::ConstantMaterializationCost(unsigned Val,
                                           const ARMSubtarget *Subtarget,
                                           bool ForCodesize) {
  if (Subtarget->isThumb()) {
    if (Val <= 255) // MOV
      return ForCodesize ? 2 : 1;
    if (Subtarget->hasV6T2Ops() && (Val <= 0xffff ||                    // MOV
                                    ARM_AM::getT2SOImmVal(Val) != -1 || // MOVW
                                    ARM_AM::getT2SOImmVal(~Val) != -1)) // MVN
      return ForCodesize ? 4 : 1;
    if (Val <= 510) // MOV + ADDi8
      return ForCodesize ? 4 : 2;
    if (~Val <= 255) // MOV + MVN
      return ForCodesize ? 4 : 2;
    if (ARM_AM::isThumbImmShiftedVal(Val)) // MOV + LSL
      return ForCodesize ? 4 : 2;
  } else {
    if (ARM_AM::getSOImmVal(Val) != -1) // MOV
      return ForCodesize ? 4 : 1;
    if (ARM_AM::getSOImmVal(~Val) != -1) // MVN
      return ForCodesize ? 4 : 1;
    if (Subtarget->hasV6T2Ops() && Val <= 0xffff) // MOVW
      return ForCodesize ? 4 : 1;
    if (ARM_AM::isSOImmTwoPartVal(Val)) // two instrs
      return ForCodesize ? 8 : 2;
    if (ARM_AM::isSOImmTwoPartValNeg(Val)) // two instrs
      return ForCodesize ? 8 : 2;
  }
  if (Subtarget->useMovt()) // MOVW + MOVT
    return ForCodesize ? 8 : 2;
  return ForCodesize ? 8 : 3; // Literal pool load
}
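// A few sample costs (bytes when ForCodesize, else instruction counts):
//   Thumb2, Val = 200        -> MOV          : 2 bytes / 1 instruction
//   Thumb2, Val = 0x12345678 -> MOVW + MOVT  : 8 bytes / 2 instructions
//                               (assuming MOVW/MOVT are available)
//   ARM,    Val = 0xFF00     -> MOV, rotated : 4 bytes / 1 instruction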
bool llvm::HasLowerConstantMaterializationCost(unsigned Val1, unsigned Val2,
                                               const ARMSubtarget *Subtarget,
                                               bool ForCodesize) {
  // Check with ForCodesize
  unsigned Cost1 = ConstantMaterializationCost(Val1, Subtarget, ForCodesize);
  unsigned Cost2 = ConstantMaterializationCost(Val2, Subtarget, ForCodesize);
  if (Cost1 < Cost2)
    return true;
  if (Cost1 > Cost2)
    return false;

  // If they are equal, try with !ForCodesize
  return ConstantMaterializationCost(Val1, Subtarget, !ForCodesize) <
         ConstantMaterializationCost(Val2, Subtarget, !ForCodesize);
}

/// Constants defining how certain sequences should be outlined.
/// This encompasses how an outlined function should be called, and what kind
/// of frame should be emitted for that outlined function.
///
/// \p MachineOutlinerTailCall implies that the function is being created from
/// a sequence of instructions ending in a return.
///
/// That is,
///
/// I1                                OUTLINED_FUNCTION:
/// I2 --> B OUTLINED_FUNCTION        I1
/// BX LR                             I2
///                                   BX LR
///
/// +-------------------------+--------+-----+
/// |                         | Thumb2 | ARM |
/// +-------------------------+--------+-----+
/// | Call overhead in Bytes  |      4 |   4 |
/// | Frame overhead in Bytes |      0 |   0 |
/// | Stack fixup required    |     No |  No |
/// +-------------------------+--------+-----+
///
/// \p MachineOutlinerThunk implies that the function is being created from
/// a sequence of instructions ending in a call. The outlined function is
/// called with a BL instruction, and the outlined function tail-calls the
/// original call destination.
///
/// That is,
///
/// I1                                OUTLINED_FUNCTION:
/// I2 --> BL OUTLINED_FUNCTION       I1
/// BL f                              I2
///                                   B f
///
/// +-------------------------+--------+-----+
/// |                         | Thumb2 | ARM |
/// +-------------------------+--------+-----+
/// | Call overhead in Bytes  |      4 |   4 |
/// | Frame overhead in Bytes |      0 |   0 |
/// | Stack fixup required    |     No |  No |
/// +-------------------------+--------+-----+
///
/// \p MachineOutlinerNoLRSave implies that the function should be called using
/// a BL instruction, but doesn't require LR to be saved and restored. This
/// happens when LR is known to be dead.
///
/// That is,
///
/// I1                                OUTLINED_FUNCTION:
/// I2 --> BL OUTLINED_FUNCTION       I1
/// I3                                I2
///                                   I3
///                                   BX LR
///
/// +-------------------------+--------+-----+
/// |                         | Thumb2 | ARM |
/// +-------------------------+--------+-----+
/// | Call overhead in Bytes  |      4 |   4 |
/// | Frame overhead in Bytes |      4 |   4 |
/// | Stack fixup required    |     No |  No |
/// +-------------------------+--------+-----+
///
/// \p MachineOutlinerRegSave implies that the function should be called with a
/// save and restore of LR to an available register. This allows us to avoid
/// stack fixups. Note that this outlining variant is compatible with the
/// NoLRSave case.
///
/// That is,
///
/// I1     Save LR                    OUTLINED_FUNCTION:
/// I2 --> BL OUTLINED_FUNCTION       I1
/// I3     Restore LR                 I2
///                                   I3
///                                   BX LR
///
/// +-------------------------+--------+-----+
/// |                         | Thumb2 | ARM |
/// +-------------------------+--------+-----+
/// | Call overhead in Bytes  |      8 |  12 |
/// | Frame overhead in Bytes |      2 |   4 |
/// | Stack fixup required    |     No |  No |
/// +-------------------------+--------+-----+
///
/// \p MachineOutlinerDefault implies that the function should be called with
/// a save and restore of LR to the stack.
///
/// That is,
///
/// I1     Save LR                    OUTLINED_FUNCTION:
/// I2 --> BL OUTLINED_FUNCTION       I1
/// I3     Restore LR                 I2
///                                   I3
///                                   BX LR
///
/// +-------------------------+--------+-----+
/// |                         | Thumb2 | ARM |
/// +-------------------------+--------+-----+
/// | Call overhead in Bytes  |      8 |  12 |
/// | Frame overhead in Bytes |      2 |   4 |
/// | Stack fixup required    |    Yes | Yes |
/// +-------------------------+--------+-----+

enum MachineOutlinerClass {
  MachineOutlinerTailCall,
  MachineOutlinerThunk,
  MachineOutlinerNoLRSave,
  MachineOutlinerRegSave,
  MachineOutlinerDefault
};

enum MachineOutlinerMBBFlags {
  LRUnavailableSomewhere = 0x2,
  HasCalls = 0x4,
  UnsafeRegsDead = 0x8
};

struct OutlinerCosts {
  const int CallTailCall;
  const int FrameTailCall;
  const int CallThunk;
  const int FrameThunk;
  const int CallNoLRSave;
  const int FrameNoLRSave;
  const int CallRegSave;
  const int FrameRegSave;
  const int CallDefault;
  const int FrameDefault;
  const int SaveRestoreLROnStack;

  OutlinerCosts(const ARMSubtarget &target)
      : CallTailCall(target.isThumb() ? 4 : 4),
        FrameTailCall(target.isThumb() ? 0 : 0),
        CallThunk(target.isThumb() ? 4 : 4),
        FrameThunk(target.isThumb() ? 0 : 0),
        CallNoLRSave(target.isThumb() ? 4 : 4),
        FrameNoLRSave(target.isThumb() ? 4 : 4),
        CallRegSave(target.isThumb() ? 8 : 12),
        FrameRegSave(target.isThumb() ? 2 : 4),
        CallDefault(target.isThumb() ? 8 : 12),
        FrameDefault(target.isThumb() ? 2 : 4),
        SaveRestoreLROnStack(target.isThumb() ? 8 : 8) {}
};

unsigned
ARMBaseInstrInfo::findRegisterToSaveLRTo(const outliner::Candidate &C) const {
  assert(C.LRUWasSet && "LRU wasn't set?");
  MachineFunction *MF = C.getMF();
  const ARMBaseRegisterInfo *ARI = static_cast<const ARMBaseRegisterInfo *>(
      MF->getSubtarget().getRegisterInfo());

  BitVector regsReserved = ARI->getReservedRegs(*MF);
  // Check if there is an available register across the sequence that we can
  // use.
  for (unsigned Reg : ARM::rGPRRegClass) {
    if (!(Reg < regsReserved.size() && regsReserved.test(Reg)) &&
        Reg != ARM::LR &&  // LR is not reserved, but don't use it.
        Reg != ARM::R12 && // R12 is not guaranteed to be preserved.
        C.LRU.available(Reg) && C.UsedInSequence.available(Reg))
      return Reg;
  }

  // No suitable register. Return 0.
  return 0u;
}
// Compute liveness of LR at the point after the interval [I, E), which
// denotes a *backward* iteration through instructions. Used only for return
// basic blocks, which do not end with a tail call.
static bool isLRAvailable(const TargetRegisterInfo &TRI,
                          MachineBasicBlock::reverse_iterator I,
                          MachineBasicBlock::reverse_iterator E) {
  // At the end of the function, LR is dead.
  bool Live = false;
  for (; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Check defs of LR.
    if (MI.modifiesRegister(ARM::LR, &TRI))
      Live = false;

    // Check uses of LR.
    unsigned Opcode = MI.getOpcode();
    if (Opcode == ARM::BX_RET || Opcode == ARM::MOVPCLR ||
        Opcode == ARM::SUBS_PC_LR || Opcode == ARM::tBX_RET ||
        Opcode == ARM::tBXNS_RET) {
      // These instructions use LR, but it's not an (explicit or implicit)
      // operand.
      Live = true;
      continue;
    }
    if (MI.readsRegister(ARM::LR, &TRI))
      Live = true;
  }
  return !Live;
}

outliner::OutlinedFunction ARMBaseInstrInfo::getOutliningCandidateInfo(
    std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
  outliner::Candidate &FirstCand = RepeatedSequenceLocs[0];
  unsigned SequenceSize =
      std::accumulate(FirstCand.front(), std::next(FirstCand.back()), 0,
                      [this](unsigned Sum, const MachineInstr &MI) {
                        return Sum + getInstSizeInBytes(MI);
                      });

  // Properties about candidate MBBs that hold for all of them.
  unsigned FlagsSetInAll = 0xF;

  // Compute liveness information for each candidate, and set FlagsSetInAll.
  const TargetRegisterInfo &TRI = getRegisterInfo();
  std::for_each(
      RepeatedSequenceLocs.begin(), RepeatedSequenceLocs.end(),
      [&FlagsSetInAll](outliner::Candidate &C) { FlagsSetInAll &= C.Flags; });

  // According to the ARM Procedure Call Standard, the following are
  // undefined on entry/exit from a function call:
  //
  // * Register R12(IP),
  // * Condition codes (and thus the CPSR register)
  //
  // Since we control the instructions which are part of the outlined regions
  // we don't need to be fully compliant with the AAPCS, but we have to
  // guarantee that if a veneer is inserted at link time the code is still
  // correct. Because of this, we can't outline any sequence of instructions
  // where one of these registers is live into/across it. Thus, we need to
  // delete those candidates.
  auto CantGuaranteeValueAcrossCall = [&TRI](outliner::Candidate &C) {
    // If the unsafe registers in this block are all dead, then we don't need
    // to compute liveness here.
    if (C.Flags & UnsafeRegsDead)
      return false;
    C.initLRU(TRI);
    LiveRegUnits LRU = C.LRU;
    return (!LRU.available(ARM::R12) || !LRU.available(ARM::CPSR));
  };

  // Are there any candidates where those registers are live?
  if (!(FlagsSetInAll & UnsafeRegsDead)) {
    // Erase every candidate that violates the restrictions above. (It could be
    // true that we have viable candidates, so it's not worth bailing out in
    // the case that, say, 1 out of 20 candidates violate the restrictions.)
    RepeatedSequenceLocs.erase(std::remove_if(RepeatedSequenceLocs.begin(),
                                              RepeatedSequenceLocs.end(),
                                              CantGuaranteeValueAcrossCall),
                               RepeatedSequenceLocs.end());

    // If the sequence doesn't have enough candidates left, then we're done.
    if (RepeatedSequenceLocs.size() < 2)
      return outliner::OutlinedFunction();
  }

  // At this point, we have only "safe" candidates to outline. Figure out
  // frame + call instruction information.

  unsigned LastInstrOpcode = RepeatedSequenceLocs[0].back()->getOpcode();

  // Helper lambda which sets call information for every candidate.
  auto SetCandidateCallInfo =
      [&RepeatedSequenceLocs](unsigned CallID, unsigned NumBytesForCall) {
        for (outliner::Candidate &C : RepeatedSequenceLocs)
          C.setCallInfo(CallID, NumBytesForCall);
      };

  OutlinerCosts Costs(Subtarget);
  unsigned FrameID = MachineOutlinerDefault;
  unsigned NumBytesToCreateFrame = Costs.FrameDefault;

  // If the last instruction in any candidate is a terminator, then we should
  // tail call all of the candidates.
  if (RepeatedSequenceLocs[0].back()->isTerminator()) {
    FrameID = MachineOutlinerTailCall;
    NumBytesToCreateFrame = Costs.FrameTailCall;
    SetCandidateCallInfo(MachineOutlinerTailCall, Costs.CallTailCall);
  } else if (LastInstrOpcode == ARM::BL || LastInstrOpcode == ARM::BLX ||
             LastInstrOpcode == ARM::tBL || LastInstrOpcode == ARM::tBLXr ||
             LastInstrOpcode == ARM::tBLXi) {
    FrameID = MachineOutlinerThunk;
    NumBytesToCreateFrame = Costs.FrameThunk;
    SetCandidateCallInfo(MachineOutlinerThunk, Costs.CallThunk);
  } else {
    // We need to decide how to emit calls + frames. We can always emit the
    // same frame if we don't need to save to the stack. If we have to save to
    // the stack, then we need a different frame.
    unsigned NumBytesNoStackCalls = 0;
    std::vector<outliner::Candidate> CandidatesWithoutStackFixups;

    for (outliner::Candidate &C : RepeatedSequenceLocs) {
      C.initLRU(TRI);
      // LR liveness is overestimated in return blocks, unless they end with a
      // tail call.
      const auto Last = C.getMBB()->rbegin();
      const bool LRIsAvailable =
          C.getMBB()->isReturnBlock() && !Last->isCall()
              ? isLRAvailable(TRI, Last,
                              (MachineBasicBlock::reverse_iterator)C.front())
              : C.LRU.available(ARM::LR);
      if (LRIsAvailable) {
        FrameID = MachineOutlinerNoLRSave;
        NumBytesNoStackCalls += Costs.CallNoLRSave;
        C.setCallInfo(MachineOutlinerNoLRSave, Costs.CallNoLRSave);
        CandidatesWithoutStackFixups.push_back(C);
      }

      // Is an unused register available? If so, we won't modify the stack, so
      // we can outline with the same frame type as those that don't save LR.
      else if (findRegisterToSaveLRTo(C)) {
        FrameID = MachineOutlinerRegSave;
        NumBytesNoStackCalls += Costs.CallRegSave;
        C.setCallInfo(MachineOutlinerRegSave, Costs.CallRegSave);
        CandidatesWithoutStackFixups.push_back(C);
      }

      // Is SP used in the sequence at all? If not, we don't have to modify
      // the stack, so we are guaranteed to get the same frame.
      else if (C.UsedInSequence.available(ARM::SP)) {
        NumBytesNoStackCalls += Costs.CallDefault;
        C.setCallInfo(MachineOutlinerDefault, Costs.CallDefault);
        CandidatesWithoutStackFixups.push_back(C);
      } else
        return outliner::OutlinedFunction();
    }

    // Does every candidate's MBB contain a call? If so, then we might have a
    // call in the range.
    if (FlagsSetInAll & MachineOutlinerMBBFlags::HasCalls) {
      // Check if the range contains a call. These require a save + restore of
      // the link register.
      if (std::any_of(FirstCand.front(), FirstCand.back(),
                      [](const MachineInstr &MI) { return MI.isCall(); }))
        NumBytesToCreateFrame += Costs.SaveRestoreLROnStack;

      // Handle the last instruction separately. If it is a tail call, then the
      // last instruction is already a call and we don't want to save + restore
      // in that case. However, it could be possible that the last instruction
      // is a call without it being valid to tail call this sequence. We should
      // consider this as well.
      else if (FrameID != MachineOutlinerThunk &&
               FrameID != MachineOutlinerTailCall && FirstCand.back()->isCall())
        NumBytesToCreateFrame += Costs.SaveRestoreLROnStack;
    }
    RepeatedSequenceLocs = CandidatesWithoutStackFixups;
  }

  return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
                                    NumBytesToCreateFrame, FrameID);
}
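// Putting the cost model together (illustrative, Thumb2 numbers from
// OutlinerCosts): a candidate where LR is live across the sequence but a
// scratch register is free is costed as MachineOutlinerRegSave (8-byte call
// overhead); if no register is free but the sequence never touches SP, it
// falls back to MachineOutlinerDefault (8-byte call plus stack fixups); if
// SP is used as well, outlining of the whole set is abandoned.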
bool ARMBaseInstrInfo::isFunctionSafeToOutlineFrom(
    MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
  const Function &F = MF.getFunction();

  // Can F be deduplicated by the linker? If it can, don't outline from it.
  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
    return false;

  // Don't outline from functions with section markings; the program could
  // expect that all the code is in the named section.
  // FIXME: Allow outlining from multiple functions with the same section
  // marking.
  if (F.hasSection())
    return false;

  // FIXME: Thumb1 outlining is not handled
  if (MF.getInfo<ARMFunctionInfo>()->isThumb1OnlyFunction())
    return false;

  // It's safe to outline from MF.
  return true;
}

bool ARMBaseInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                              unsigned &Flags) const {
  // Check if LR is available through all of the MBB. If it's not, then set
  // a flag.
  assert(MBB.getParent()->getRegInfo().tracksLiveness() &&
         "Suitable Machine Function for outlining must track liveness");

  LiveRegUnits LRU(getRegisterInfo());

  std::for_each(MBB.rbegin(), MBB.rend(),
                [&LRU](MachineInstr &MI) { LRU.accumulate(MI); });

  // Check if each of the unsafe registers is available...
  bool R12AvailableInBlock = LRU.available(ARM::R12);
  bool CPSRAvailableInBlock = LRU.available(ARM::CPSR);

  // If all of these are dead (and not live out), we know we don't have to
  // check them later.
  if (R12AvailableInBlock && CPSRAvailableInBlock)
    Flags |= MachineOutlinerMBBFlags::UnsafeRegsDead;

  // Now, add the live outs to the set.
  LRU.addLiveOuts(MBB);

  // If any of these registers is available in the MBB, but is also live out
  // of the block, then we know outlining is unsafe.
  if (R12AvailableInBlock && !LRU.available(ARM::R12))
    return false;
  if (CPSRAvailableInBlock && !LRU.available(ARM::CPSR))
    return false;

  // Check if there's a call inside this MachineBasicBlock. If there is, then
  // set a flag.
  if (any_of(MBB, [](MachineInstr &MI) { return MI.isCall(); }))
    Flags |= MachineOutlinerMBBFlags::HasCalls;

  // LR liveness is overestimated in return blocks.

  bool LRIsAvailable =
      MBB.isReturnBlock() && !MBB.back().isCall()
          ? isLRAvailable(getRegisterInfo(), MBB.rbegin(), MBB.rend())
          : LRU.available(ARM::LR);
  if (!LRIsAvailable)
    Flags |= MachineOutlinerMBBFlags::LRUnavailableSomewhere;

  return true;
}

outliner::InstrType
ARMBaseInstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT,
                                   unsigned Flags) const {
  MachineInstr &MI = *MIT;
  const TargetRegisterInfo *TRI = &getRegisterInfo();

  // Be conservative with inline ASM
  if (MI.isInlineAsm())
    return outliner::InstrType::Illegal;

  // Don't allow debug values to impact outlining type.
  if (MI.isDebugInstr() || MI.isIndirectDebugValue())
    return outliner::InstrType::Invisible;

  // At this point, KILL or IMPLICIT_DEF instructions don't really tell us much
  // so we can go ahead and skip over them.
  if (MI.isKill() || MI.isImplicitDef())
    return outliner::InstrType::Invisible;

  // PIC instructions contain labels, outlining them would break offset
  // computing.
  unsigned Opc = MI.getOpcode();
  if (Opc == ARM::tPICADD || Opc == ARM::PICADD || Opc == ARM::PICSTR ||
      Opc == ARM::PICSTRB || Opc == ARM::PICSTRH || Opc == ARM::PICLDR ||
      Opc == ARM::PICLDRB || Opc == ARM::PICLDRH || Opc == ARM::PICLDRSB ||
      Opc == ARM::PICLDRSH || Opc == ARM::t2LDRpci_pic ||
      Opc == ARM::t2MOVi16_ga_pcrel || Opc == ARM::t2MOVTi16_ga_pcrel ||
      Opc == ARM::t2MOV_ga_pcrel)
    return outliner::InstrType::Illegal;

  // Be conservative with ARMv8.1 MVE instructions.
  if (Opc == ARM::t2BF_LabelPseudo || Opc == ARM::t2DoLoopStart ||
      Opc == ARM::t2WhileLoopStart || Opc == ARM::t2LoopDec ||
      Opc == ARM::t2LoopEnd)
    return outliner::InstrType::Illegal;

  const MCInstrDesc &MCID = MI.getDesc();
  uint64_t MIFlags = MCID.TSFlags;
  if ((MIFlags & ARMII::DomainMask) == ARMII::DomainMVE)
    return outliner::InstrType::Illegal;

  // Is this a terminator for a basic block?
  if (MI.isTerminator()) {
    // Don't outline if the branch is not unconditional.
    if (isPredicated(MI))
      return outliner::InstrType::Illegal;

    // Is this the end of a function?
    if (MI.getParent()->succ_empty())
      return outliner::InstrType::Legal;

    // It's not, so don't outline it.
    return outliner::InstrType::Illegal;
  }

  // Make sure none of the operands are un-outlinable.
  for (const MachineOperand &MOP : MI.operands()) {
    if (MOP.isCPI() || MOP.isJTI() || MOP.isCFIIndex() || MOP.isFI() ||
        MOP.isTargetIndex())
      return outliner::InstrType::Illegal;
  }

  // Don't outline if the link register or program counter value is used.
  if (MI.readsRegister(ARM::LR, TRI) || MI.readsRegister(ARM::PC, TRI))
    return outliner::InstrType::Illegal;

  if (MI.isCall()) {
    // Get the function associated with the call. Look at each operand and find
    // the one that represents the callee and get its name.
    const Function *Callee = nullptr;
    for (const MachineOperand &MOP : MI.operands()) {
      if (MOP.isGlobal()) {
        Callee = dyn_cast<Function>(MOP.getGlobal());
        break;
      }
    }

    // Don't outline calls to "mcount"-like functions; in particular, Linux
    // kernel function tracing relies on them.
    if (Callee &&
        (Callee->getName() == "\01__gnu_mcount_nc" ||
         Callee->getName() == "\01mcount" || Callee->getName() == "__mcount"))
      return outliner::InstrType::Illegal;

    // If we don't know anything about the callee, assume it depends on the
    // stack layout of the caller. In that case, it's only legal to outline
    // as a tail-call. Explicitly list the call instructions we know about so
    // we don't get unexpected results with call pseudo-instructions.
    auto UnknownCallOutlineType = outliner::InstrType::Illegal;
    if (Opc == ARM::BL || Opc == ARM::tBL || Opc == ARM::BLX ||
        Opc == ARM::tBLXr || Opc == ARM::tBLXi)
      UnknownCallOutlineType = outliner::InstrType::LegalTerminator;

    if (!Callee)
      return UnknownCallOutlineType;

    // We have a function we have information about. Check if it's something we
    // can safely outline.
    MachineFunction *MF = MI.getParent()->getParent();
    MachineFunction *CalleeMF = MF->getMMI().getMachineFunction(*Callee);

    // We don't know what's going on with the callee at all. Don't touch it.
    if (!CalleeMF)
      return UnknownCallOutlineType;

    // Check if we know anything about the callee saves on the function. If we
    // don't, then don't touch it, since that implies that we haven't computed
    // anything about its stack frame yet.
    MachineFrameInfo &MFI = CalleeMF->getFrameInfo();
    if (!MFI.isCalleeSavedInfoValid() || MFI.getStackSize() > 0 ||
        MFI.getNumObjects() > 0)
      return UnknownCallOutlineType;

    // At this point, we can say that CalleeMF ought not to pass anything on
    // the stack. Therefore, we can outline it.
    return outliner::InstrType::Legal;
  }

  // Since calls are handled, don't touch LR or PC
  if (MI.modifiesRegister(ARM::LR, TRI) || MI.modifiesRegister(ARM::PC, TRI))
    return outliner::InstrType::Illegal;

  // Does this use the stack?
  if (MI.modifiesRegister(ARM::SP, TRI) || MI.readsRegister(ARM::SP, TRI)) {
    // True if there is no chance that any outlined candidate from this range
    // could require stack fixups. That is, both
    // * LR is available in the range (No save/restore around call)
    // * The range doesn't include calls (No save/restore in outlined frame)
    // are true.
    // FIXME: This is very restrictive; the flags check the whole block,
    // not just the bit we will try to outline.
    bool MightNeedStackFixUp =
        (Flags & (MachineOutlinerMBBFlags::LRUnavailableSomewhere |
                  MachineOutlinerMBBFlags::HasCalls));

    if (!MightNeedStackFixUp)
      return outliner::InstrType::Legal;

    return outliner::InstrType::Illegal;
  }

  // Be conservative with IT blocks.
  if (MI.readsRegister(ARM::ITSTATE, TRI) ||
      MI.modifiesRegister(ARM::ITSTATE, TRI))
    return outliner::InstrType::Illegal;

  // Don't outline positions.
  if (MI.isPosition())
    return outliner::InstrType::Illegal;

  return outliner::InstrType::Legal;
}

void ARMBaseInstrInfo::saveLROnStack(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator &It) const {
  unsigned Opc = Subtarget.isThumb() ? ARM::t2STR_PRE : ARM::STR_PRE_IMM;
  int Align = -Subtarget.getStackAlignment().value();
  BuildMI(MBB, It, DebugLoc(), get(Opc), ARM::SP)
      .addReg(ARM::LR, RegState::Kill)
      .addReg(ARM::SP)
      .addImm(Align)
      .add(predOps(ARMCC::AL));
}

void ARMBaseInstrInfo::restoreLRFromStack(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator &It) const {
  unsigned Opc = Subtarget.isThumb() ? ARM::t2LDR_POST : ARM::LDR_POST_IMM;
  MachineInstrBuilder MIB = BuildMI(MBB, It, DebugLoc(), get(Opc), ARM::LR)
                                .addReg(ARM::SP, RegState::Define)
                                .addReg(ARM::SP);
  if (!Subtarget.isThumb())
    MIB.addReg(0);
  MIB.addImm(Subtarget.getStackAlignment().value()).add(predOps(ARMCC::AL));
}
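// Together these two helpers bracket an outlined call roughly as follows
// (Thumb2, assuming an 8-byte stack alignment):
//
//   str lr, [sp, #-8]!    ; saveLROnStack
//   bl  OUTLINED_FUNCTION
//   ldr lr, [sp], #8      ; restoreLRFromStack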
void ARMBaseInstrInfo::buildOutlinedFrame(
    MachineBasicBlock &MBB, MachineFunction &MF,
    const outliner::OutlinedFunction &OF) const {
  // For thunk outlining, rewrite the last instruction from a call to a
  // tail-call.
  if (OF.FrameConstructionID == MachineOutlinerThunk) {
    MachineInstr *Call = &*--MBB.instr_end();
    bool isThumb = Subtarget.isThumb();
    unsigned FuncOp = isThumb ? 2 : 0;
    unsigned Opc = Call->getOperand(FuncOp).isReg()
                       ? isThumb ? ARM::tTAILJMPr : ARM::TAILJMPr
                       : isThumb ? Subtarget.isTargetMachO() ? ARM::tTAILJMPd
                                                             : ARM::tTAILJMPdND
                                 : ARM::TAILJMPd;
    MachineInstrBuilder MIB = BuildMI(MBB, MBB.end(), DebugLoc(), get(Opc))
                                  .add(Call->getOperand(FuncOp));
    if (isThumb && !Call->getOperand(FuncOp).isReg())
      MIB.add(predOps(ARMCC::AL));
    Call->eraseFromParent();
  }

  // Is there a call in the outlined range?
  auto IsNonTailCall = [](MachineInstr &MI) {
    return MI.isCall() && !MI.isReturn();
  };
  if (std::any_of(MBB.instr_begin(), MBB.instr_end(), IsNonTailCall)) {
    MachineBasicBlock::iterator It = MBB.begin();
    MachineBasicBlock::iterator Et = MBB.end();

    if (OF.FrameConstructionID == MachineOutlinerTailCall ||
        OF.FrameConstructionID == MachineOutlinerThunk)
      Et = std::prev(MBB.end());

    // Since we have to save and restore LR, we need to add it to the liveins
    // if it is not already part of the set. This is sufficient, since outlined
    // functions only have one block.
    if (!MBB.isLiveIn(ARM::LR))
      MBB.addLiveIn(ARM::LR);

    // Insert a save before the outlined region
    saveLROnStack(MBB, It);

    unsigned StackAlignment = Subtarget.getStackAlignment().value();
    const TargetSubtargetInfo &STI = MF.getSubtarget();
    const MCRegisterInfo *MRI = STI.getRegisterInfo();
    unsigned DwarfReg = MRI->getDwarfRegNum(ARM::LR, true);
    // Add a CFI saying the stack was moved down.
    int64_t StackPosEntry = MF.addFrameInst(
        MCCFIInstruction::cfiDefCfaOffset(nullptr, StackAlignment));
    BuildMI(MBB, It, DebugLoc(), get(ARM::CFI_INSTRUCTION))
        .addCFIIndex(StackPosEntry)
        .setMIFlags(MachineInstr::FrameSetup);

    // Add a CFI saying that the LR that we want to find is now higher than
    // before.
    int64_t LRPosEntry = MF.addFrameInst(
        MCCFIInstruction::createOffset(nullptr, DwarfReg, StackAlignment));
    BuildMI(MBB, It, DebugLoc(), get(ARM::CFI_INSTRUCTION))
        .addCFIIndex(LRPosEntry)
        .setMIFlags(MachineInstr::FrameSetup);

    // Insert a restore before the terminator for the function. Restore LR.
    restoreLRFromStack(MBB, Et);
  }

  // If this is a tail call outlined function, then there's already a return.
  if (OF.FrameConstructionID == MachineOutlinerTailCall ||
      OF.FrameConstructionID == MachineOutlinerThunk)
    return;

  // Here we have to insert the return ourselves. Get the correct opcode from
  // the current feature set.
  BuildMI(MBB, MBB.end(), DebugLoc(), get(Subtarget.getReturnOpcode()))
      .add(predOps(ARMCC::AL));
}

MachineBasicBlock::iterator ARMBaseInstrInfo::insertOutlinedCall(
    Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
    MachineFunction &MF, const outliner::Candidate &C) const {
  MachineInstrBuilder MIB;
  MachineBasicBlock::iterator CallPt;
  unsigned Opc;
  bool isThumb = Subtarget.isThumb();

  // Are we tail calling?
  if (C.CallConstructionID == MachineOutlinerTailCall) {
    // If yes, then we can just branch to the label.
    Opc = isThumb
              ? Subtarget.isTargetMachO() ? ARM::tTAILJMPd : ARM::tTAILJMPdND
              : ARM::TAILJMPd;
    MIB = BuildMI(MF, DebugLoc(), get(Opc))
              .addGlobalAddress(M.getNamedValue(MF.getName()));
    if (isThumb)
      MIB.add(predOps(ARMCC::AL));
    It = MBB.insert(It, MIB);
    return It;
  }

  // Create the call instruction.
  Opc = isThumb ? ARM::tBL : ARM::BL;
  MachineInstrBuilder CallMIB = BuildMI(MF, DebugLoc(), get(Opc));
  if (isThumb)
    CallMIB.add(predOps(ARMCC::AL));
  CallMIB.addGlobalAddress(M.getNamedValue(MF.getName()));

  if (C.CallConstructionID == MachineOutlinerNoLRSave ||
      C.CallConstructionID == MachineOutlinerThunk) {
    // No LR save is required, so just insert the call.
    It = MBB.insert(It, CallMIB);
    return It;
  }

  // Can we save to a register?
  if (C.CallConstructionID == MachineOutlinerRegSave) {
    unsigned Reg = findRegisterToSaveLRTo(C);
    assert(Reg != 0 && "No callee-saved register available?");

    // Save and restore LR from that register.
    copyPhysReg(MBB, It, DebugLoc(), Reg, ARM::LR, true);
    CallPt = MBB.insert(It, CallMIB);
    copyPhysReg(MBB, It, DebugLoc(), ARM::LR, Reg, true);
    It--;
    return CallPt;
  }
  // We have the default case. Save and restore from SP.
  saveLROnStack(MBB, It);
  CallPt = MBB.insert(It, CallMIB);
  restoreLRFromStack(MBB, It);
  It--;
  return CallPt;
}
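// For the MachineOutlinerRegSave case above, the emitted sequence looks
// something like this (illustrative, with r4 standing in for the register
// found by findRegisterToSaveLRTo):
//
//   mov r4, lr
//   bl  OUTLINED_FUNCTION
//   mov lr, r4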
bool ARMBaseInstrInfo::shouldOutlineFromFunctionByDefault(
    MachineFunction &MF) const {
  return Subtarget.isMClass() && MF.getFunction().hasMinSize();
}

bool ARMBaseInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
                                                         AAResults *AA) const {
  // Try hard to rematerialize any VCTPs because if we spill P0, it will block
  // the tail predication conversion. This means that the element count
  // register has to be live for longer, but that has to be better than
  // spill/restore and VPT predication.
  return isVCTP(&MI) && !isPredicated(MI);
}