//===-- ARMBaseInstrInfo.cpp - ARM Instruction Information ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the Base ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMConstantPoolValue.h"
#include "ARMFeatures.h"
#include "ARMHazardRecognizer.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "MVETailPredUtils.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/MultiHazardRecognizer.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <new>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "arm-instrinfo"

#define GET_INSTRINFO_CTOR_DTOR
#include "ARMGenInstrInfo.inc"

static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
               cl::desc("Enable ARM 2-addr to 3-addr conv"));

/// ARM_MLxEntry - Record information about MLA / MLS instructions.
struct ARM_MLxEntry {
  uint16_t MLxOpc;    // MLA / MLS opcode
  uint16_t MulOpc;    // Expanded multiplication opcode
  uint16_t AddSubOpc; // Expanded add / sub opcode
  bool NegAcc;        // True if the acc is negated before the add / sub.
  bool HasLane;       // True if instruction has an extra "lane" operand.
};

static const ARM_MLxEntry ARM_MLxTable[] = {
  // MLxOpc,          MulOpc,         AddSubOpc,   NegAcc, HasLane
  // fp scalar ops
  { ARM::VMLAS,       ARM::VMULS,     ARM::VADDS,  false,  false },
  { ARM::VMLSS,       ARM::VMULS,     ARM::VSUBS,  false,  false },
  { ARM::VMLAD,       ARM::VMULD,     ARM::VADDD,  false,  false },
  { ARM::VMLSD,       ARM::VMULD,     ARM::VSUBD,  false,  false },
  { ARM::VNMLAS,      ARM::VNMULS,    ARM::VSUBS,  true,   false },
  { ARM::VNMLSS,      ARM::VMULS,     ARM::VSUBS,  true,   false },
  { ARM::VNMLAD,      ARM::VNMULD,    ARM::VSUBD,  true,   false },
  { ARM::VNMLSD,      ARM::VMULD,     ARM::VSUBD,  true,   false },

  // fp SIMD ops
  { ARM::VMLAfd,      ARM::VMULfd,    ARM::VADDfd, false,  false },
  { ARM::VMLSfd,      ARM::VMULfd,    ARM::VSUBfd, false,  false },
  { ARM::VMLAfq,      ARM::VMULfq,    ARM::VADDfq, false,  false },
  { ARM::VMLSfq,      ARM::VMULfq,    ARM::VSUBfq, false,  false },
  { ARM::VMLAslfd,    ARM::VMULslfd,  ARM::VADDfd, false,  true  },
  { ARM::VMLSslfd,    ARM::VMULslfd,  ARM::VSUBfd, false,  true  },
  { ARM::VMLAslfq,    ARM::VMULslfq,  ARM::VADDfq, false,  true  },
  { ARM::VMLSslfq,    ARM::VMULslfq,  ARM::VSUBfq, false,  true  },
};

ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget &STI)
    : ARMGenInstrInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
      Subtarget(STI) {
  for (unsigned i = 0, e = array_lengthof(ARM_MLxTable); i != e; ++i) {
    if (!MLxEntryMap.insert(std::make_pair(ARM_MLxTable[i].MLxOpc, i)).second)
      llvm_unreachable("Duplicated entries?");
    MLxHazardOpcodes.insert(ARM_MLxTable[i].AddSubOpc);
    MLxHazardOpcodes.insert(ARM_MLxTable[i].MulOpc);
  }
}

// Use a ScoreboardHazardRecognizer for prepass ARM scheduling. TargetInstrInfo
// currently defaults to no prepass hazard recognizer.
ScheduleHazardRecognizer *
ARMBaseInstrInfo::CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                                               const ScheduleDAG *DAG) const {
  if (usePreRAHazardRecognizer()) {
    const InstrItineraryData *II =
        static_cast<const ARMSubtarget *>(STI)->getInstrItineraryData();
    return new ScoreboardHazardRecognizer(II, DAG, "pre-RA-sched");
  }
  return TargetInstrInfo::CreateTargetHazardRecognizer(STI, DAG);
}

// Called during:
// - pre-RA scheduling
// - post-RA scheduling when FeatureUseMISched is set
ScheduleHazardRecognizer *ARMBaseInstrInfo::CreateTargetMIHazardRecognizer(
    const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
  MultiHazardRecognizer *MHR = new MultiHazardRecognizer();

  // We would like to restrict this hazard recognizer to only
  // post-RA scheduling; we can tell that we're post-RA because we don't
  // track VRegLiveness.
  // Cortex-M7: the TRM indicates that there is a single ITCM bank and two
  // DTCM banks, selected by bit 2 of the address. Assume that TCMs are in use.
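  // For example (illustrative, assuming an 8-byte-aligned base in DTCM):
  //   ldr r0, [r1]       ; bit 2 clear -> DTCM bank 0
  //   ldr r2, [r1, #4]   ; bit 2 set   -> DTCM bank 1, no conflict
  //   ldr r3, [r1, #8]   ; bit 2 clear -> bank 0 again, may conflict with
  //                      ; the first load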
  if (Subtarget.isCortexM7() && !DAG->hasVRegLiveness())
    MHR->AddHazardRecognizer(
        std::make_unique<ARMBankConflictHazardRecognizer>(DAG, 0x4, true));

  // Not inserting ARMHazardRecognizerFPMLx because that would change
  // legacy behavior.

  auto BHR = TargetInstrInfo::CreateTargetMIHazardRecognizer(II, DAG);
  MHR->AddHazardRecognizer(std::unique_ptr<ScheduleHazardRecognizer>(BHR));
  return MHR;
}

// Called during post-RA scheduling when FeatureUseMISched is not set
ScheduleHazardRecognizer *ARMBaseInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  MultiHazardRecognizer *MHR = new MultiHazardRecognizer();

  if (Subtarget.isThumb2() || Subtarget.hasVFP2Base())
    MHR->AddHazardRecognizer(std::make_unique<ARMHazardRecognizerFPMLx>());

  auto BHR = TargetInstrInfo::CreateTargetPostRAHazardRecognizer(II, DAG);
  if (BHR)
    MHR->AddHazardRecognizer(std::unique_ptr<ScheduleHazardRecognizer>(BHR));
  return MHR;
}

MachineInstr *ARMBaseInstrInfo::convertToThreeAddress(MachineInstr &MI,
                                                      LiveVariables *LV) const {
  // FIXME: Thumb2 support.

  if (!EnableARM3Addr)
    return nullptr;

  MachineFunction &MF = *MI.getParent()->getParent();
  uint64_t TSFlags = MI.getDesc().TSFlags;
  bool isPre = false;
  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
  default:
    return nullptr;
  case ARMII::IndexModePre:
    isPre = true;
    break;
  case ARMII::IndexModePost:
    break;
  }

  // Try splitting an indexed load/store to an un-indexed one plus an add/sub
  // operation.
  unsigned MemOpc = getUnindexedOpcode(MI.getOpcode());
  if (MemOpc == 0)
    return nullptr;

  MachineInstr *UpdateMI = nullptr;
  MachineInstr *MemMI = nullptr;
  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
  const MCInstrDesc &MCID = MI.getDesc();
  unsigned NumOps = MCID.getNumOperands();
  bool isLoad = !MI.mayStore();
  const MachineOperand &WB = isLoad ? MI.getOperand(1) : MI.getOperand(0);
  const MachineOperand &Base = MI.getOperand(2);
  const MachineOperand &Offset = MI.getOperand(NumOps - 3);
  Register WBReg = WB.getReg();
  Register BaseReg = Base.getReg();
  Register OffReg = Offset.getReg();
  unsigned OffImm = MI.getOperand(NumOps - 2).getImm();
  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI.getOperand(NumOps - 1).getImm();
  switch (AddrMode) {
  default:
    llvm_unreachable("Unknown indexed op!");
  case ARMII::AddrMode2: {
    bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM2Offset(OffImm);
    if (OffReg == 0) {
      if (ARM_AM::getSOImmVal(Amt) == -1)
        // Can't encode it in a so_imm operand. This transformation will
        // add more than 1 instruction. Abandon!
        return nullptr;
      UpdateMI = BuildMI(MF, MI.getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
                     .addReg(BaseReg)
                     .addImm(Amt)
                     .add(predOps(Pred))
                     .add(condCodeOp());
    } else if (Amt != 0) {
      ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
      unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
      UpdateMI = BuildMI(MF, MI.getDebugLoc(),
                         get(isSub ? ARM::SUBrsi : ARM::ADDrsi), WBReg)
                     .addReg(BaseReg)
                     .addReg(OffReg)
                     .addReg(0)
                     .addImm(SOOpc)
                     .add(predOps(Pred))
                     .add(condCodeOp());
    } else
      UpdateMI = BuildMI(MF, MI.getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
                     .addReg(BaseReg)
                     .addReg(OffReg)
                     .add(predOps(Pred))
                     .add(condCodeOp());
    break;
  }
  case ARMII::AddrMode3: {
    bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM3Offset(OffImm);
    if (OffReg == 0)
      // The immediate is 8 bits; it's guaranteed to fit in a so_imm operand.
      UpdateMI = BuildMI(MF, MI.getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
                     .addReg(BaseReg)
                     .addImm(Amt)
                     .add(predOps(Pred))
                     .add(condCodeOp());
    else
      UpdateMI = BuildMI(MF, MI.getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
                     .addReg(BaseReg)
                     .addReg(OffReg)
                     .add(predOps(Pred))
                     .add(condCodeOp());
    break;
  }
  }

  std::vector<MachineInstr *> NewMIs;
  if (isPre) {
    if (isLoad)
      MemMI =
          BuildMI(MF, MI.getDebugLoc(), get(MemOpc), MI.getOperand(0).getReg())
              .addReg(WBReg)
              .addImm(0)
              .addImm(Pred);
    else
      MemMI = BuildMI(MF, MI.getDebugLoc(), get(MemOpc))
                  .addReg(MI.getOperand(1).getReg())
                  .addReg(WBReg)
                  .addReg(0)
                  .addImm(0)
                  .addImm(Pred);
    NewMIs.push_back(MemMI);
    NewMIs.push_back(UpdateMI);
  } else {
    if (isLoad)
      MemMI =
          BuildMI(MF, MI.getDebugLoc(), get(MemOpc), MI.getOperand(0).getReg())
              .addReg(BaseReg)
              .addImm(0)
              .addImm(Pred);
    else
      MemMI = BuildMI(MF, MI.getDebugLoc(), get(MemOpc))
                  .addReg(MI.getOperand(1).getReg())
                  .addReg(BaseReg)
                  .addReg(0)
                  .addImm(0)
                  .addImm(Pred);
    if (WB.isDead())
      UpdateMI->getOperand(0).setIsDead();
    NewMIs.push_back(UpdateMI);
    NewMIs.push_back(MemMI);
  }

  // Transfer LiveVariables states, kill / dead info.
  if (LV) {
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg() && Register::isVirtualRegister(MO.getReg())) {
        Register Reg = MO.getReg();

        LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
        if (MO.isDef()) {
          MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
          if (MO.isDead())
            LV->addVirtualRegisterDead(Reg, *NewMI);
        }
        if (MO.isUse() && MO.isKill()) {
          for (unsigned j = 0; j < 2; ++j) {
            // Look at the two new MIs in reverse order.
            MachineInstr *NewMI = NewMIs[j];
            if (!NewMI->readsRegister(Reg))
              continue;
            LV->addVirtualRegisterKilled(Reg, *NewMI);
            if (VI.removeKill(MI))
              VI.Kills.push_back(NewMI);
            break;
          }
        }
      }
    }
  }

  MachineBasicBlock &MBB = *MI.getParent();
  MBB.insert(MI, NewMIs[1]);
  MBB.insert(MI, NewMIs[0]);
  return NewMIs[0];
}

// Branch analysis.
bool ARMBaseInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  TBB = nullptr;
  FBB = nullptr;

  MachineBasicBlock::instr_iterator I = MBB.instr_end();
  if (I == MBB.instr_begin())
    return false; // Empty blocks are easy.
  --I;

  // Walk backwards from the end of the basic block until the branch is
  // analyzed or we give up.
  while (isPredicated(*I) || I->isTerminator() || I->isDebugValue()) {
    // Flag to be raised on unanalyzeable instructions. This is useful in cases
    // where we want to clean up at the end of the basic block before we bail
    // out.
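    // For example (illustrative): in a block ending
    //   bx lr
    //   b .LBB0_3
    // the return itself can't be analyzed, but when AllowModify is true the
    // dead trailing branch below it is still erased by the cleanup code.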
    bool CantAnalyze = false;

    // Skip over DEBUG values, predicated nonterminators and speculation
    // barrier terminators.
    while (I->isDebugInstr() || !I->isTerminator() ||
           isSpeculationBarrierEndBBOpcode(I->getOpcode()) ||
           I->getOpcode() == ARM::t2DoLoopStartTP) {
      if (I == MBB.instr_begin())
        return false;
      --I;
    }

    if (isIndirectBranchOpcode(I->getOpcode()) ||
        isJumpTableBranchOpcode(I->getOpcode())) {
      // Indirect branches and jump tables can't be analyzed, but we still want
      // to clean up any instructions at the tail of the basic block.
      CantAnalyze = true;
    } else if (isUncondBranchOpcode(I->getOpcode())) {
      TBB = I->getOperand(0).getMBB();
    } else if (isCondBranchOpcode(I->getOpcode())) {
      // Bail out if we encounter multiple conditional branches.
      if (!Cond.empty())
        return true;

      assert(!FBB && "FBB should have been null.");
      FBB = TBB;
      TBB = I->getOperand(0).getMBB();
      Cond.push_back(I->getOperand(1));
      Cond.push_back(I->getOperand(2));
    } else if (I->isReturn()) {
      // Returns can't be analyzed, but we should run cleanup.
      CantAnalyze = true;
    } else {
      // We encountered an unrecognized terminator. Bail out immediately.
      return true;
    }

    // Cleanup code - to be run for unpredicated unconditional branches and
    // returns.
    if (!isPredicated(*I) &&
        (isUncondBranchOpcode(I->getOpcode()) ||
         isIndirectBranchOpcode(I->getOpcode()) ||
         isJumpTableBranchOpcode(I->getOpcode()) ||
         I->isReturn())) {
      // Forget any previous conditional branch information - it no longer
      // applies.
      Cond.clear();
      FBB = nullptr;

      // If we can modify the function, delete everything below this
      // unconditional branch.
      if (AllowModify) {
        MachineBasicBlock::iterator DI = std::next(I);
        while (DI != MBB.instr_end()) {
          MachineInstr &InstToDelete = *DI;
          ++DI;
          // Speculation barriers must not be deleted.
          if (isSpeculationBarrierEndBBOpcode(InstToDelete.getOpcode()))
            continue;
          InstToDelete.eraseFromParent();
        }
      }
    }

    if (CantAnalyze) {
      // We may not be able to analyze the block, but we could still have
      // an unconditional branch as the last instruction in the block, which
      // just branches to the layout successor. If this is the case, then just
      // remove it if we're allowed to make modifications.
      if (AllowModify && !isPredicated(MBB.back()) &&
          isUncondBranchOpcode(MBB.back().getOpcode()) &&
          TBB && MBB.isLayoutSuccessor(TBB))
        removeBranch(MBB);
      return true;
    }

    if (I == MBB.instr_begin())
      return false;

    --I;
  }

  // We made it past the terminators without bailing out - we must have
  // analyzed this branch successfully.
  return false;
}

unsigned ARMBaseInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                        int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin())
    return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
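  // At this point the block ended in the usual two-terminator pattern, e.g.
  // (illustrative):
  //   bne .LBB0_2   <- conditional branch, erased below
  //   b   .LBB0_3   <- unconditional branch, erased above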
  I->eraseFromParent();
  return 2;
}

unsigned ARMBaseInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                        MachineBasicBlock *TBB,
                                        MachineBasicBlock *FBB,
                                        ArrayRef<MachineOperand> Cond,
                                        const DebugLoc &DL,
                                        int *BytesAdded) const {
  assert(!BytesAdded && "code size not handled");
  ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
  int BOpc = !AFI->isThumbFunction()
                 ? ARM::B
                 : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
  int BccOpc = !AFI->isThumbFunction()
                   ? ARM::Bcc
                   : (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc);
  bool isThumb = AFI->isThumbFunction() || AFI->isThumb2Function();

  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "ARM branch conditions have two components!");

  // For conditional branches, we use addOperand to preserve CPSR flags.

  if (!FBB) {
    if (Cond.empty()) { // Unconditional branch?
      if (isThumb)
        BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB).add(predOps(ARMCC::AL));
      else
        BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
    } else
      BuildMI(&MBB, DL, get(BccOpc))
          .addMBB(TBB)
          .addImm(Cond[0].getImm())
          .add(Cond[1]);
    return 1;
  }

  // Two-way conditional branch.
  BuildMI(&MBB, DL, get(BccOpc))
      .addMBB(TBB)
      .addImm(Cond[0].getImm())
      .add(Cond[1]);
  if (isThumb)
    BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB).add(predOps(ARMCC::AL));
  else
    BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
  return 2;
}

bool ARMBaseInstrInfo::
reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
  Cond[0].setImm(ARMCC::getOppositeCondition(CC));
  return false;
}

bool ARMBaseInstrInfo::isPredicated(const MachineInstr &MI) const {
  if (MI.isBundle()) {
    MachineBasicBlock::const_instr_iterator I = MI.getIterator();
    MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
    while (++I != E && I->isInsideBundle()) {
      int PIdx = I->findFirstPredOperandIdx();
      if (PIdx != -1 && I->getOperand(PIdx).getImm() != ARMCC::AL)
        return true;
    }
    return false;
  }

  int PIdx = MI.findFirstPredOperandIdx();
  return PIdx != -1 && MI.getOperand(PIdx).getImm() != ARMCC::AL;
}

std::string ARMBaseInstrInfo::createMIROperandComment(
    const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
    const TargetRegisterInfo *TRI) const {

  // First, let's see if there is a generic comment for this operand
  std::string GenericComment =
      TargetInstrInfo::createMIROperandComment(MI, Op, OpIdx, TRI);
  if (!GenericComment.empty())
    return GenericComment;

  // If not, check if we have an immediate operand.
  if (Op.getType() != MachineOperand::MO_Immediate)
    return std::string();

  // And print its corresponding condition code if the immediate is a
  // predicate.
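  // For example (illustrative): a predicate immediate of 0 is ARMCC::EQ, so
  // the operand is printed in MIR with the trailing comment "CC::eq".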
  int FirstPredOp = MI.findFirstPredOperandIdx();
  if (FirstPredOp != (int)OpIdx)
    return std::string();

  std::string CC = "CC::";
  CC += ARMCondCodeToString((ARMCC::CondCodes)Op.getImm());
  return CC;
}

bool ARMBaseInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  unsigned Opc = MI.getOpcode();
  if (isUncondBranchOpcode(Opc)) {
    MI.setDesc(get(getMatchingCondBranchOpcode(Opc)));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
        .addImm(Pred[0].getImm())
        .addReg(Pred[1].getReg());
    return true;
  }

  int PIdx = MI.findFirstPredOperandIdx();
  if (PIdx != -1) {
    MachineOperand &PMO = MI.getOperand(PIdx);
    PMO.setImm(Pred[0].getImm());
    MI.getOperand(PIdx + 1).setReg(Pred[1].getReg());

    // Thumb 1 arithmetic instructions do not set CPSR when executed inside an
    // IT block. This affects how they are printed.
    const MCInstrDesc &MCID = MI.getDesc();
    if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
      assert(MCID.OpInfo[1].isOptionalDef() &&
             "CPSR def isn't expected operand");
      assert((MI.getOperand(1).isDead() ||
              MI.getOperand(1).getReg() != ARM::CPSR) &&
             "if conversion tried to stop defining used CPSR");
      MI.getOperand(1).setReg(ARM::NoRegister);
    }

    return true;
  }
  return false;
}

bool ARMBaseInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
                                         ArrayRef<MachineOperand> Pred2) const {
  if (Pred1.size() > 2 || Pred2.size() > 2)
    return false;

  ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
  ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
  if (CC1 == CC2)
    return true;

  switch (CC1) {
  default:
    return false;
  case ARMCC::AL:
    return true;
  case ARMCC::HS:
    return CC2 == ARMCC::HI;
  case ARMCC::LS:
    return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
  case ARMCC::GE:
    return CC2 == ARMCC::GT;
  case ARMCC::LE:
    return CC2 == ARMCC::LT;
  }
}

bool ARMBaseInstrInfo::ClobbersPredicate(MachineInstr &MI,
                                         std::vector<MachineOperand> &Pred,
                                         bool SkipDead) const {
  bool Found = false;
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    bool ClobbersCPSR = MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR);
    bool IsCPSR = MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR;
    if (ClobbersCPSR || IsCPSR) {

      // Filter out T1 instructions that have a dead CPSR,
      // allowing IT blocks to be generated containing T1 instructions.
      const MCInstrDesc &MCID = MI.getDesc();
      if (MCID.TSFlags & ARMII::ThumbArithFlagSetting && MO.isDead() &&
          SkipDead)
        continue;

      Pred.push_back(MO);
      Found = true;
    }
  }

  return Found;
}

bool ARMBaseInstrInfo::isCPSRDefined(const MachineInstr &MI) {
  for (const auto &MO : MI.operands())
    if (MO.isReg() && MO.getReg() == ARM::CPSR && MO.isDef() && !MO.isDead())
      return true;
  return false;
}

static bool isEligibleForITBlock(const MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default:
    return true;
  case ARM::tADC:   // ADC (register) T1
  case ARM::tADDi3: // ADD (immediate) T1
  case ARM::tADDi8: // ADD (immediate) T2
  case ARM::tADDrr: // ADD (register) T1
  case ARM::tAND:   // AND (register) T1
  case ARM::tASRri: // ASR (immediate) T1
  case ARM::tASRrr: // ASR (register) T1
  case ARM::tBIC:   // BIC (register) T1
  case ARM::tEOR:   // EOR (register) T1
  case ARM::tLSLri: // LSL (immediate) T1
  case ARM::tLSLrr: // LSL (register) T1
  case ARM::tLSRri: // LSR (immediate) T1
  case ARM::tLSRrr: // LSR (register) T1
  case ARM::tMUL:   // MUL T1
  case ARM::tMVN:   // MVN (register) T1
  case ARM::tORR:   // ORR (register) T1
  case ARM::tROR:   // ROR (register) T1
  case ARM::tRSB:   // RSB (immediate) T1
  case ARM::tSBC:   // SBC (register) T1
  case ARM::tSUBi3: // SUB (immediate) T1
  case ARM::tSUBi8: // SUB (immediate) T2
  case ARM::tSUBrr: // SUB (register) T1
    return !ARMBaseInstrInfo::isCPSRDefined(*MI);
  }
}

/// isPredicable - Return true if the specified instruction can be predicated.
/// By default, this returns true for every instruction with a
/// PredicateOperand.
bool ARMBaseInstrInfo::isPredicable(const MachineInstr &MI) const {
  if (!MI.isPredicable())
    return false;

  if (MI.isBundle())
    return false;

  if (!isEligibleForITBlock(&MI))
    return false;

  const MachineFunction *MF = MI.getParent()->getParent();
  const ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>();

  // Neon instructions in Thumb2 IT blocks are deprecated, see ARMARM.
  // In their ARM encoding, they can't be encoded in a conditional form.
  if ((MI.getDesc().TSFlags & ARMII::DomainMask) == ARMII::DomainNEON)
    return false;

  // Make indirect control flow changes unpredicable when SLS mitigation is
  // enabled.
  const ARMSubtarget &ST = MF->getSubtarget<ARMSubtarget>();
  if (ST.hardenSlsRetBr() && isIndirectControlFlowNotComingBack(MI))
    return false;
  if (ST.hardenSlsBlr() && isIndirectCall(MI))
    return false;

  if (AFI->isThumb2Function()) {
    if (getSubtarget().restrictIT())
      return isV8EligibleForIT(&MI);
  }

  return true;
}

namespace llvm {

template <> bool IsCPSRDead<MachineInstr>(const MachineInstr *MI) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isUndef() || MO.isUse())
      continue;
    if (MO.getReg() != ARM::CPSR)
      continue;
    if (!MO.isDead())
      return false;
  }
  // all definitions of CPSR are dead
  return true;
}

} // end namespace llvm

/// GetInstSize - Return the size of the specified MachineInstr.
unsigned ARMBaseInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  const MachineBasicBlock &MBB = *MI.getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  const MCInstrDesc &MCID = MI.getDesc();
  if (MCID.getSize())
    return MCID.getSize();

  switch (MI.getOpcode()) {
  default:
    // pseudo-instruction sizes are zero.
    return 0;
  case TargetOpcode::BUNDLE:
    return getInstBundleLength(MI);
  case ARM::MOVi16_ga_pcrel:
  case ARM::MOVTi16_ga_pcrel:
  case ARM::t2MOVi16_ga_pcrel:
  case ARM::t2MOVTi16_ga_pcrel:
    return 4;
  case ARM::MOVi32imm:
  case ARM::t2MOVi32imm:
    return 8;
  case ARM::CONSTPOOL_ENTRY:
  case ARM::JUMPTABLE_INSTS:
  case ARM::JUMPTABLE_ADDRS:
  case ARM::JUMPTABLE_TBB:
  case ARM::JUMPTABLE_TBH:
    // If this machine instr is a constant pool entry, its size is recorded as
    // operand #2.
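    // For example (illustrative): a CONSTPOOL_ENTRY holding a 4-byte constant
    // carries 4 here, and the JUMPTABLE_* pseudos record their total byte
    // size the same way.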
    return MI.getOperand(2).getImm();
  case ARM::Int_eh_sjlj_longjmp:
    return 16;
  case ARM::tInt_eh_sjlj_longjmp:
    return 10;
  case ARM::tInt_WIN_eh_sjlj_longjmp:
    return 12;
  case ARM::Int_eh_sjlj_setjmp:
  case ARM::Int_eh_sjlj_setjmp_nofp:
    return 20;
  case ARM::tInt_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp_nofp:
    return 12;
  case ARM::SPACE:
    return MI.getOperand(1).getImm();
  case ARM::INLINEASM:
  case ARM::INLINEASM_BR: {
    // If this machine instr is an inline asm, measure it.
    unsigned Size = getInlineAsmLength(MI.getOperand(0).getSymbolName(), *MAI);
    if (!MF->getInfo<ARMFunctionInfo>()->isThumbFunction())
      Size = alignTo(Size, 4);
    return Size;
  }
  case ARM::SpeculationBarrierISBDSBEndBB:
  case ARM::t2SpeculationBarrierISBDSBEndBB:
    // This gets lowered to two 4-byte instructions.
    return 8;
  case ARM::SpeculationBarrierSBEndBB:
  case ARM::t2SpeculationBarrierSBEndBB:
    // This gets lowered to one 4-byte instruction.
    return 4;
  }
}

unsigned ARMBaseInstrInfo::getInstBundleLength(const MachineInstr &MI) const {
  unsigned Size = 0;
  MachineBasicBlock::const_instr_iterator I = MI.getIterator();
  MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
    Size += getInstSizeInBytes(*I);
  }
  return Size;
}

void ARMBaseInstrInfo::copyFromCPSR(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg, bool KillSrc,
                                    const ARMSubtarget &Subtarget) const {
  unsigned Opc = Subtarget.isThumb()
                     ? (Subtarget.isMClass() ? ARM::t2MRS_M : ARM::t2MRS_AR)
                     : ARM::MRS;

  MachineInstrBuilder MIB =
      BuildMI(MBB, I, I->getDebugLoc(), get(Opc), DestReg);

  // There is only 1 A/R class MRS instruction, and it always refers to
  // APSR. However, there are lots of other possibilities on M-class cores.
  if (Subtarget.isMClass())
    MIB.addImm(0x800);

  MIB.add(predOps(ARMCC::AL))
      .addReg(ARM::CPSR, RegState::Implicit | getKillRegState(KillSrc));
}

void ARMBaseInstrInfo::copyToCPSR(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I,
                                  unsigned SrcReg, bool KillSrc,
                                  const ARMSubtarget &Subtarget) const {
  unsigned Opc = Subtarget.isThumb()
                     ? (Subtarget.isMClass() ? ARM::t2MSR_M : ARM::t2MSR_AR)
                     : ARM::MSR;

  MachineInstrBuilder MIB = BuildMI(MBB, I, I->getDebugLoc(), get(Opc));

  if (Subtarget.isMClass())
    MIB.addImm(0x800);
  else
    MIB.addImm(8);

  MIB.addReg(SrcReg, getKillRegState(KillSrc))
      .add(predOps(ARMCC::AL))
      .addReg(ARM::CPSR, RegState::Implicit | RegState::Define);
}

void llvm::addUnpredicatedMveVpredNOp(MachineInstrBuilder &MIB) {
  MIB.addImm(ARMVCC::None);
  MIB.addReg(0);
  MIB.addReg(0); // tp_reg
}

void llvm::addUnpredicatedMveVpredROp(MachineInstrBuilder &MIB,
                                      Register DestReg) {
  addUnpredicatedMveVpredNOp(MIB);
  MIB.addReg(DestReg, RegState::Undef);
}

void llvm::addPredicatedMveVpredNOp(MachineInstrBuilder &MIB, unsigned Cond) {
  MIB.addImm(Cond);
  MIB.addReg(ARM::VPR, RegState::Implicit);
  MIB.addReg(0); // tp_reg
}

void llvm::addPredicatedMveVpredROp(MachineInstrBuilder &MIB,
                                    unsigned Cond, unsigned Inactive) {
  addPredicatedMveVpredNOp(MIB, Cond);
  MIB.addReg(Inactive);
}

void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I,
                                   const DebugLoc &DL, MCRegister DestReg,
                                   MCRegister SrcReg, bool KillSrc) const {
  bool GPRDest = ARM::GPRRegClass.contains(DestReg);
  bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);

  if (GPRDest && GPRSrc) {
    BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());
    return;
  }

  bool SPRDest = ARM::SPRRegClass.contains(DestReg);
  bool SPRSrc = ARM::SPRRegClass.contains(SrcReg);

  unsigned Opc = 0;
  if (SPRDest && SPRSrc)
    Opc = ARM::VMOVS;
  else if (GPRDest && SPRSrc)
    Opc = ARM::VMOVRS;
  else if (SPRDest && GPRSrc)
    Opc = ARM::VMOVSR;
  else if (ARM::DPRRegClass.contains(DestReg, SrcReg) && Subtarget.hasFP64())
    Opc = ARM::VMOVD;
  else if (ARM::QPRRegClass.contains(DestReg, SrcReg))
    Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MQPRCopy;

  if (Opc) {
    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc), DestReg);
    MIB.addReg(SrcReg, getKillRegState(KillSrc));
    if (Opc == ARM::VORRq || Opc == ARM::MVE_VORR)
      MIB.addReg(SrcReg, getKillRegState(KillSrc));
    if (Opc == ARM::MVE_VORR)
      addUnpredicatedMveVpredROp(MIB, DestReg);
    else if (Opc != ARM::MQPRCopy)
      MIB.add(predOps(ARMCC::AL));
    return;
  }

  // Handle register classes that require multiple instructions.
  unsigned BeginIdx = 0;
  unsigned SubRegs = 0;
  int Spacing = 1;

  // Use VORRq when possible.
  if (ARM::QQPRRegClass.contains(DestReg, SrcReg)) {
    Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
    BeginIdx = ARM::qsub_0;
    SubRegs = 2;
  } else if (ARM::QQQQPRRegClass.contains(DestReg, SrcReg)) {
    Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
    BeginIdx = ARM::qsub_0;
    SubRegs = 4;
    // Fall back to VMOVD.
  } else if (ARM::DPairRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 2;
  } else if (ARM::DTripleRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 3;
  } else if (ARM::DQuadRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 4;
  } else if (ARM::GPRPairRegClass.contains(DestReg, SrcReg)) {
    Opc = Subtarget.isThumb2() ? ARM::tMOVr : ARM::MOVr;
    BeginIdx = ARM::gsub_0;
    SubRegs = 2;
  } else if (ARM::DPairSpcRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 2;
    Spacing = 2;
  } else if (ARM::DTripleSpcRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 3;
    Spacing = 2;
  } else if (ARM::DQuadSpcRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 4;
    Spacing = 2;
  } else if (ARM::DPRRegClass.contains(DestReg, SrcReg) &&
             !Subtarget.hasFP64()) {
    Opc = ARM::VMOVS;
    BeginIdx = ARM::ssub_0;
    SubRegs = 2;
  } else if (SrcReg == ARM::CPSR) {
    copyFromCPSR(MBB, I, DestReg, KillSrc, Subtarget);
    return;
  } else if (DestReg == ARM::CPSR) {
    copyToCPSR(MBB, I, SrcReg, KillSrc, Subtarget);
    return;
  } else if (DestReg == ARM::VPR) {
    assert(ARM::GPRRegClass.contains(SrcReg));
    BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMSR_P0), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .add(predOps(ARMCC::AL));
    return;
  } else if (SrcReg == ARM::VPR) {
    assert(ARM::GPRRegClass.contains(DestReg));
    BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMRS_P0), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .add(predOps(ARMCC::AL));
    return;
  } else if (DestReg == ARM::FPSCR_NZCV) {
    assert(ARM::GPRRegClass.contains(SrcReg));
    BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMSR_FPSCR_NZCVQC), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .add(predOps(ARMCC::AL));
    return;
  } else if (SrcReg == ARM::FPSCR_NZCV) {
    assert(ARM::GPRRegClass.contains(DestReg));
    BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMRS_FPSCR_NZCVQC), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .add(predOps(ARMCC::AL));
    return;
  }

  assert(Opc && "Impossible reg-to-reg copy");

  const TargetRegisterInfo *TRI = &getRegisterInfo();
  MachineInstrBuilder Mov;

  // Copy register tuples backward when the first Dest reg overlaps with
  // SrcReg.
  if (TRI->regsOverlap(SrcReg, TRI->getSubReg(DestReg, BeginIdx))) {
    BeginIdx = BeginIdx + ((SubRegs - 1) * Spacing);
    Spacing = -Spacing;
  }
#ifndef NDEBUG
  SmallSet<unsigned, 4> DstRegs;
#endif
  for (unsigned i = 0; i != SubRegs; ++i) {
    Register Dst = TRI->getSubReg(DestReg, BeginIdx + i * Spacing);
    Register Src = TRI->getSubReg(SrcReg, BeginIdx + i * Spacing);
    assert(Dst && Src && "Bad sub-register");
#ifndef NDEBUG
    assert(!DstRegs.count(Src) && "destructive vector copy");
    DstRegs.insert(Dst);
#endif
    Mov = BuildMI(MBB, I, I->getDebugLoc(), get(Opc), Dst).addReg(Src);
    // VORR (NEON or MVE) takes two source operands.
    if (Opc == ARM::VORRq || Opc == ARM::MVE_VORR) {
      Mov.addReg(Src);
    }
    // MVE VORR takes predicate operands in place of an ordinary condition.
    if (Opc == ARM::MVE_VORR)
      addUnpredicatedMveVpredROp(Mov, Dst);
    else
      Mov = Mov.add(predOps(ARMCC::AL));
    // MOVr can set CC.
    if (Opc == ARM::MOVr)
      Mov = Mov.add(condCodeOp());
  }
  // Add implicit super-register defs and kills to the last instruction.
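  // For example (illustrative): a NEON copy q4_q5 = COPY q0_q1 expands to
  //   vorr q4, q0, q0
  //   vorr q5, q1, q1
  // and the implicit operands added below keep the liveness of the full
  // tuple visible to the machine verifier.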
  Mov->addRegisterDefined(DestReg, TRI);
  if (KillSrc)
    Mov->addRegisterKilled(SrcReg, TRI);
}

Optional<DestSourcePair>
ARMBaseInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
  // VMOVRRD is also a copy instruction, but it requires special handling:
  // it is a more complex form of copy, so we do not consider it here. The
  // isExtractSubregLike MI interface function could be used to recognize
  // such instructions.
  // VORRq is considered a move only if its two inputs are the same register.
  if (!MI.isMoveReg() ||
      (MI.getOpcode() == ARM::VORRq &&
       MI.getOperand(1).getReg() != MI.getOperand(2).getReg()))
    return None;
  return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
}

Optional<ParamLoadedValue>
ARMBaseInstrInfo::describeLoadedValue(const MachineInstr &MI,
                                      Register Reg) const {
  if (auto DstSrcPair = isCopyInstrImpl(MI)) {
    Register DstReg = DstSrcPair->Destination->getReg();

    // TODO: We don't handle cases where the forwarding reg is narrower/wider
    // than the copy registers. Consider for example:
    //
    //   s16 = VMOVS s0
    //   s17 = VMOVS s1
    //   call @callee(d0)
    //
    // We'd like to describe the call site value of d0 as d8, but this requires
    // gathering and merging the descriptions for the two VMOVS instructions.
    //
    // We also don't handle the reverse situation, where the forwarding reg is
    // narrower than the copy destination:
    //
    //   d8 = VMOVD d0
    //   call @callee(s1)
    //
    // We need to produce a fragment description (the call site value of s1 is
    // /not/ just d8).
    if (DstReg != Reg)
      return None;
  }
  return TargetInstrInfo::describeLoadedValue(MI, Reg);
}

const MachineInstrBuilder &
ARMBaseInstrInfo::AddDReg(MachineInstrBuilder &MIB, unsigned Reg,
                          unsigned SubIdx, unsigned State,
                          const TargetRegisterInfo *TRI) const {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (Register::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}

void ARMBaseInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    Register SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC,
                    const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  Align Alignment = MFI.getObjectAlign(FI);

  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
      MFI.getObjectSize(FI), Alignment);

  switch (TRI->getSpillSize(*RC)) {
  case 2:
    if (ARM::HPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DebugLoc(), get(ARM::VSTRH))
          .addReg(SrcReg, getKillRegState(isKill))
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 4:
    if (ARM::GPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DebugLoc(), get(ARM::STRi12))
          .addReg(SrcReg, getKillRegState(isKill))
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DebugLoc(), get(ARM::VSTRS))
          .addReg(SrcReg, getKillRegState(isKill))
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DebugLoc(), get(ARM::VSTR_P0_off))
          .addReg(SrcReg, getKillRegState(isKill))
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 8:
    if (ARM::DPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DebugLoc(), get(ARM::VSTRD))
          .addReg(SrcReg, getKillRegState(isKill))
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
      if (Subtarget.hasV5TEOps()) {
        MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::STRD));
        AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
        AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
        MIB.addFrameIndex(FI)
            .addReg(0)
            .addImm(0)
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      } else {
        // Fallback to STM instruction, which has existed since the dawn of
        // time.
        MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::STMIA))
                                      .addFrameIndex(FI)
                                      .addMemOperand(MMO)
                                      .add(predOps(ARMCC::AL));
        AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
        AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 16:
    if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
      // Use aligned spills if the stack can be realigned.
      if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF)) {
        BuildMI(MBB, I, DebugLoc(), get(ARM::VST1q64))
            .addFrameIndex(FI)
            .addImm(16)
            .addReg(SrcReg, getKillRegState(isKill))
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      } else {
        BuildMI(MBB, I, DebugLoc(), get(ARM::VSTMQIA))
            .addReg(SrcReg, getKillRegState(isKill))
            .addFrameIndex(FI)
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      }
    } else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
               Subtarget.hasMVEIntegerOps()) {
      auto MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::MVE_VSTRWU32));
      MIB.addReg(SrcReg, getKillRegState(isKill))
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO);
      addUnpredicatedMveVpredNOp(MIB);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 24:
    if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
      // Use aligned spills if the stack can be realigned.
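      // For example (illustrative): with a realignable 16-byte-aligned slot
      // this emits a single aligned vst1.64 of {d0, d1, d2}; otherwise it
      // falls back to the VSTMDIA sequence below.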
      if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) &&
          Subtarget.hasNEON()) {
        BuildMI(MBB, I, DebugLoc(), get(ARM::VST1d64TPseudo))
            .addFrameIndex(FI)
            .addImm(16)
            .addReg(SrcReg, getKillRegState(isKill))
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      } else {
        MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(),
                                          get(ARM::VSTMDIA))
                                      .addFrameIndex(FI)
                                      .add(predOps(ARMCC::AL))
                                      .addMemOperand(MMO);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
        AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 32:
    if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
        ARM::MQQPRRegClass.hasSubClassEq(RC) ||
        ARM::DQuadRegClass.hasSubClassEq(RC)) {
      if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) &&
          Subtarget.hasNEON()) {
        // FIXME: It's possible to only store part of the QQ register if the
        // spilled def has a sub-register index.
        BuildMI(MBB, I, DebugLoc(), get(ARM::VST1d64QPseudo))
            .addFrameIndex(FI)
            .addImm(16)
            .addReg(SrcReg, getKillRegState(isKill))
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      } else if (Subtarget.hasMVEIntegerOps()) {
        BuildMI(MBB, I, DebugLoc(), get(ARM::MQQPRStore))
            .addReg(SrcReg, getKillRegState(isKill))
            .addFrameIndex(FI)
            .addMemOperand(MMO);
      } else {
        MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(),
                                          get(ARM::VSTMDIA))
                                      .addFrameIndex(FI)
                                      .add(predOps(ARMCC::AL))
                                      .addMemOperand(MMO);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
        AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 64:
    if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
        Subtarget.hasMVEIntegerOps()) {
      BuildMI(MBB, I, DebugLoc(), get(ARM::MQQQQPRStore))
          .addReg(SrcReg, getKillRegState(isKill))
          .addFrameIndex(FI)
          .addMemOperand(MMO);
    } else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
      MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::VSTMDIA))
                                    .addFrameIndex(FI)
                                    .add(predOps(ARMCC::AL))
                                    .addMemOperand(MMO);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_4, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_5, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_6, 0, TRI);
      AddDReg(MIB, SrcReg, ARM::dsub_7, 0, TRI);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  default:
    llvm_unreachable("Unknown reg class!");
  }
}

unsigned ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                              int &FrameIndex) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case ARM::STRrs:
  case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
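    // A scaled-register store only refers to a plain stack slot when no
    // offset register is supplied (register operand 2 is 0) and the shift
    // immediate is zero; that is what the operand checks below test for.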
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isReg() &&
        MI.getOperand(3).isImm() && MI.getOperand(2).getReg() == 0 &&
        MI.getOperand(3).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  case ARM::STRi12:
  case ARM::t2STRi12:
  case ARM::tSTRspi:
  case ARM::VSTRD:
  case ARM::VSTRS:
  case ARM::VSTR_P0_off:
  case ARM::MVE_VSTRWU32:
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  case ARM::VST1q64:
  case ARM::VST1d64TPseudo:
  case ARM::VST1d64QPseudo:
    if (MI.getOperand(0).isFI() && MI.getOperand(2).getSubReg() == 0) {
      FrameIndex = MI.getOperand(0).getIndex();
      return MI.getOperand(2).getReg();
    }
    break;
  case ARM::VSTMQIA:
    if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  case ARM::MQQPRStore:
  case ARM::MQQQQPRStore:
    if (MI.getOperand(1).isFI()) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned ARMBaseInstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
                                                    int &FrameIndex) const {
  SmallVector<const MachineMemOperand *, 1> Accesses;
  if (MI.mayStore() && hasStoreToStackSlot(MI, Accesses) &&
      Accesses.size() == 1) {
    FrameIndex =
        cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
            ->getFrameIndex();
    return true;
  }
  return false;
}

void ARMBaseInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     Register DestReg, int FI,
                     const TargetRegisterClass *RC,
                     const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const Align Alignment = MFI.getObjectAlign(FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
      MFI.getObjectSize(FI), Alignment);

  switch (TRI->getSpillSize(*RC)) {
  case 2:
    if (ARM::HPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DL, get(ARM::VLDRH), DestReg)
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 4:
    if (ARM::GPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DL, get(ARM::LDRi12), DestReg)
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg)
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DL, get(ARM::VLDR_P0_off), DestReg)
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 8:
    if (ARM::DPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg)
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
      MachineInstrBuilder MIB;

      if (Subtarget.hasV5TEOps()) {
        MIB = BuildMI(MBB, I, DL, get(ARM::LDRD));
        AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
        AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
        MIB.addFrameIndex(FI)
            .addReg(0)
            .addImm(0)
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      } else {
        // Fallback to LDM instruction, which has existed since the dawn of
        // time.
        MIB = BuildMI(MBB, I, DL, get(ARM::LDMIA))
                  .addFrameIndex(FI)
                  .addMemOperand(MMO)
                  .add(predOps(ARMCC::AL));
        MIB = AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
      }

      if (Register::isPhysicalRegister(DestReg))
        MIB.addReg(DestReg, RegState::ImplicitDefine);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 16:
    if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
      if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF)) {
        BuildMI(MBB, I, DL, get(ARM::VLD1q64), DestReg)
            .addFrameIndex(FI)
            .addImm(16)
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      } else {
        BuildMI(MBB, I, DL, get(ARM::VLDMQIA), DestReg)
            .addFrameIndex(FI)
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      }
    } else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
               Subtarget.hasMVEIntegerOps()) {
      auto MIB = BuildMI(MBB, I, DL, get(ARM::MVE_VLDRWU32), DestReg);
      MIB.addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO);
      addUnpredicatedMveVpredNOp(MIB);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 24:
    if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
      if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) &&
          Subtarget.hasNEON()) {
        BuildMI(MBB, I, DL, get(ARM::VLD1d64TPseudo), DestReg)
            .addFrameIndex(FI)
            .addImm(16)
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      } else {
        MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                                      .addFrameIndex(FI)
                                      .addMemOperand(MMO)
                                      .add(predOps(ARMCC::AL));
        MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
        if (Register::isPhysicalRegister(DestReg))
          MIB.addReg(DestReg, RegState::ImplicitDefine);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 32:
    if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
        ARM::MQQPRRegClass.hasSubClassEq(RC) ||
        ARM::DQuadRegClass.hasSubClassEq(RC)) {
      if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) &&
          Subtarget.hasNEON()) {
        BuildMI(MBB, I, DL, get(ARM::VLD1d64QPseudo), DestReg)
            .addFrameIndex(FI)
            .addImm(16)
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      } else if (Subtarget.hasMVEIntegerOps()) {
        BuildMI(MBB, I, DL, get(ARM::MQQPRLoad), DestReg)
            .addFrameIndex(FI)
            .addMemOperand(MMO);
      } else {
        MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                                      .addFrameIndex(FI)
                                      .add(predOps(ARMCC::AL))
                                      .addMemOperand(MMO);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
        if (Register::isPhysicalRegister(DestReg))
          MIB.addReg(DestReg, RegState::ImplicitDefine);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 64:
    if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
        Subtarget.hasMVEIntegerOps()) {
      BuildMI(MBB, I, DL, get(ARM::MQQQQPRLoad), DestReg)
          .addFrameIndex(FI)
          .addMemOperand(MMO);
    } else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
      MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                                    .addFrameIndex(FI)
                                    .add(predOps(ARMCC::AL))
                                    .addMemOperand(MMO);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_7, RegState::DefineNoRead, TRI);
      if (Register::isPhysicalRegister(DestReg))
        MIB.addReg(DestReg, RegState::ImplicitDefine);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  default:
    llvm_unreachable("Unknown regclass!");
  }
}

unsigned ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                               int &FrameIndex) const {
  switch (MI.getOpcode()) {
  default:
    break;
  case ARM::LDRrs:
  case ARM::t2LDRs: // FIXME: don't use t2LDRs to access frame.
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isReg() &&
        MI.getOperand(3).isImm() && MI.getOperand(2).getReg() == 0 &&
        MI.getOperand(3).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  case ARM::LDRi12:
  case ARM::t2LDRi12:
  case ARM::tLDRspi:
  case ARM::VLDRD:
  case ARM::VLDRS:
  case ARM::VLDR_P0_off:
  case ARM::MVE_VLDRWU32:
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  case ARM::VLD1q64:
  case ARM::VLD1d8TPseudo:
  case ARM::VLD1d16TPseudo:
  case ARM::VLD1d32TPseudo:
  case ARM::VLD1d64TPseudo:
  case ARM::VLD1d8QPseudo:
  case ARM::VLD1d16QPseudo:
  case ARM::VLD1d32QPseudo:
  case ARM::VLD1d64QPseudo:
    if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  case ARM::VLDMQIA:
    if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  case ARM::MQQPRLoad:
  case ARM::MQQQQPRLoad:
    if (MI.getOperand(1).isFI()) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned ARMBaseInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
                                                     int &FrameIndex) const {
  SmallVector<const MachineMemOperand *, 1> Accesses;
  if (MI.mayLoad() && hasLoadFromStackSlot(MI, Accesses) &&
      Accesses.size() == 1) {
    FrameIndex =
        cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
            ->getFrameIndex();
    return true;
  }
  return false;
}

/// Expands MEMCPY to either LDMIA/STMIA or LDMIA_UPD/STMIA_UPD
/// depending on whether the result is used.
void ARMBaseInstrInfo::expandMEMCPY(MachineBasicBlock::iterator MI) const {
  bool isThumb1 = Subtarget.isThumb1Only();
  bool isThumb2 = Subtarget.isThumb2();
  const ARMBaseInstrInfo *TII = Subtarget.getInstrInfo();

  DebugLoc dl = MI->getDebugLoc();
  MachineBasicBlock *BB = MI->getParent();

  MachineInstrBuilder LDM, STM;
  if (isThumb1 || !MI->getOperand(1).isDead()) {
    MachineOperand LDWb(MI->getOperand(1));
    LDM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2LDMIA_UPD
                                        : isThumb1 ? ARM::tLDMIA_UPD
                                                   : ARM::LDMIA_UPD))
              .add(LDWb);
  } else {
    LDM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2LDMIA : ARM::LDMIA));
  }

  if (isThumb1 || !MI->getOperand(0).isDead()) {
    MachineOperand STWb(MI->getOperand(0));
    STM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2STMIA_UPD
                                        : isThumb1 ? ARM::tSTMIA_UPD
                                                   : ARM::STMIA_UPD))
              .add(STWb);
  } else {
    STM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2STMIA : ARM::STMIA));
  }

  MachineOperand LDBase(MI->getOperand(3));
  LDM.add(LDBase).add(predOps(ARMCC::AL));

  MachineOperand STBase(MI->getOperand(2));
  STM.add(STBase).add(predOps(ARMCC::AL));

  // Sort the scratch registers into ascending order.
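  // LDM/STM register lists are encoded as a bitmask, so the registers must
  // appear in ascending encoding order. For example (illustrative), scratch
  // registers {r4, r2, r3} end up emitted as "ldm r1!, {r2, r3, r4}".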
1665 const TargetRegisterInfo &TRI = getRegisterInfo(); 1666 SmallVector<unsigned, 6> ScratchRegs; 1667 for(unsigned I = 5; I < MI->getNumOperands(); ++I) 1668 ScratchRegs.push_back(MI->getOperand(I).getReg()); 1669 llvm::sort(ScratchRegs, 1670 [&TRI](const unsigned &Reg1, const unsigned &Reg2) -> bool { 1671 return TRI.getEncodingValue(Reg1) < 1672 TRI.getEncodingValue(Reg2); 1673 }); 1674 1675 for (const auto &Reg : ScratchRegs) { 1676 LDM.addReg(Reg, RegState::Define); 1677 STM.addReg(Reg, RegState::Kill); 1678 } 1679 1680 BB->erase(MI); 1681 } 1682 1683 bool ARMBaseInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { 1684 if (MI.getOpcode() == TargetOpcode::LOAD_STACK_GUARD) { 1685 expandLoadStackGuard(MI); 1686 MI.getParent()->erase(MI); 1687 return true; 1688 } 1689 1690 if (MI.getOpcode() == ARM::MEMCPY) { 1691 expandMEMCPY(MI); 1692 return true; 1693 } 1694 1695 // This hook gets to expand COPY instructions before they become 1696 // copyPhysReg() calls. Look for VMOVS instructions that can legally be 1697 // widened to VMOVD. We prefer the VMOVD when possible because it may be 1698 // changed into a VORR that can go down the NEON pipeline. 1699 if (!MI.isCopy() || Subtarget.dontWidenVMOVS() || !Subtarget.hasFP64()) 1700 return false; 1701 1702 // Look for a copy between even S-registers. That is where we keep floats 1703 // when using NEON v2f32 instructions for f32 arithmetic. 1704 Register DstRegS = MI.getOperand(0).getReg(); 1705 Register SrcRegS = MI.getOperand(1).getReg(); 1706 if (!ARM::SPRRegClass.contains(DstRegS, SrcRegS)) 1707 return false; 1708 1709 const TargetRegisterInfo *TRI = &getRegisterInfo(); 1710 unsigned DstRegD = TRI->getMatchingSuperReg(DstRegS, ARM::ssub_0, 1711 &ARM::DPRRegClass); 1712 unsigned SrcRegD = TRI->getMatchingSuperReg(SrcRegS, ARM::ssub_0, 1713 &ARM::DPRRegClass); 1714 if (!DstRegD || !SrcRegD) 1715 return false; 1716 1717 // We want to widen this into a DstRegD = VMOVD SrcRegD copy. This is only 1718 // legal if the COPY already defines the full DstRegD, and it isn't a 1719 // sub-register insertion. 1720 if (!MI.definesRegister(DstRegD, TRI) || MI.readsRegister(DstRegD, TRI)) 1721 return false; 1722 1723 // A dead copy shouldn't show up here, but reject it just in case. 1724 if (MI.getOperand(0).isDead()) 1725 return false; 1726 1727 // All clear, widen the COPY. 1728 LLVM_DEBUG(dbgs() << "widening: " << MI); 1729 MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI); 1730 1731 // Get rid of the old implicit-def of DstRegD. Leave it if it defines a Q-reg 1732 // or some other super-register. 1733 int ImpDefIdx = MI.findRegisterDefOperandIdx(DstRegD); 1734 if (ImpDefIdx != -1) 1735 MI.RemoveOperand(ImpDefIdx); 1736 1737 // Change the opcode and operands. 1738 MI.setDesc(get(ARM::VMOVD)); 1739 MI.getOperand(0).setReg(DstRegD); 1740 MI.getOperand(1).setReg(SrcRegD); 1741 MIB.add(predOps(ARMCC::AL)); 1742 1743 // We are now reading SrcRegD instead of SrcRegS. This may upset the 1744 // register scavenger and machine verifier, so we need to indicate that we 1745 // are reading an undefined value from SrcRegD, but a proper value from 1746 // SrcRegS. 1747 MI.getOperand(1).setIsUndef(); 1748 MIB.addReg(SrcRegS, RegState::Implicit); 1749 1750 // SrcRegD may actually contain an unrelated value in the ssub_1 1751 // sub-register. Don't kill it. Only kill the ssub_0 sub-register. 
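  // E.g. if the original COPY killed %s0, we clear the kill on the %d0 use
  // and instead mark the implicit %s0 operand as killed, leaving %s1 alive.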
1752 if (MI.getOperand(1).isKill()) { 1753 MI.getOperand(1).setIsKill(false); 1754 MI.addRegisterKilled(SrcRegS, TRI, true); 1755 } 1756 1757 LLVM_DEBUG(dbgs() << "replaced by: " << MI); 1758 return true; 1759 } 1760 1761 /// Create a copy of a const pool value. Update CPI to the new index and return 1762 /// the label UID. 1763 static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) { 1764 MachineConstantPool *MCP = MF.getConstantPool(); 1765 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1766 1767 const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPI]; 1768 assert(MCPE.isMachineConstantPoolEntry() && 1769 "Expecting a machine constantpool entry!"); 1770 ARMConstantPoolValue *ACPV = 1771 static_cast<ARMConstantPoolValue*>(MCPE.Val.MachineCPVal); 1772 1773 unsigned PCLabelId = AFI->createPICLabelUId(); 1774 ARMConstantPoolValue *NewCPV = nullptr; 1775 1776 // FIXME: The below assumes PIC relocation model and that the function 1777 // is Thumb mode (t1 or t2). PCAdjustment would be 8 for ARM mode PIC, and 1778 // zero for non-PIC in ARM or Thumb. The callers are all of thumb LDR 1779 // instructions, so that's probably OK, but is PIC always correct when 1780 // we get here? 1781 if (ACPV->isGlobalValue()) 1782 NewCPV = ARMConstantPoolConstant::Create( 1783 cast<ARMConstantPoolConstant>(ACPV)->getGV(), PCLabelId, ARMCP::CPValue, 1784 4, ACPV->getModifier(), ACPV->mustAddCurrentAddress()); 1785 else if (ACPV->isExtSymbol()) 1786 NewCPV = ARMConstantPoolSymbol:: 1787 Create(MF.getFunction().getContext(), 1788 cast<ARMConstantPoolSymbol>(ACPV)->getSymbol(), PCLabelId, 4); 1789 else if (ACPV->isBlockAddress()) 1790 NewCPV = ARMConstantPoolConstant:: 1791 Create(cast<ARMConstantPoolConstant>(ACPV)->getBlockAddress(), PCLabelId, 1792 ARMCP::CPBlockAddress, 4); 1793 else if (ACPV->isLSDA()) 1794 NewCPV = ARMConstantPoolConstant::Create(&MF.getFunction(), PCLabelId, 1795 ARMCP::CPLSDA, 4); 1796 else if (ACPV->isMachineBasicBlock()) 1797 NewCPV = ARMConstantPoolMBB:: 1798 Create(MF.getFunction().getContext(), 1799 cast<ARMConstantPoolMBB>(ACPV)->getMBB(), PCLabelId, 4); 1800 else 1801 llvm_unreachable("Unexpected ARM constantpool value type!!"); 1802 CPI = MCP->getConstantPoolIndex(NewCPV, MCPE.getAlign()); 1803 return PCLabelId; 1804 } 1805 1806 void ARMBaseInstrInfo::reMaterialize(MachineBasicBlock &MBB, 1807 MachineBasicBlock::iterator I, 1808 Register DestReg, unsigned SubIdx, 1809 const MachineInstr &Orig, 1810 const TargetRegisterInfo &TRI) const { 1811 unsigned Opcode = Orig.getOpcode(); 1812 switch (Opcode) { 1813 default: { 1814 MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig); 1815 MI->substituteRegister(Orig.getOperand(0).getReg(), DestReg, SubIdx, TRI); 1816 MBB.insert(I, MI); 1817 break; 1818 } 1819 case ARM::tLDRpci_pic: 1820 case ARM::t2LDRpci_pic: { 1821 MachineFunction &MF = *MBB.getParent(); 1822 unsigned CPI = Orig.getOperand(1).getIndex(); 1823 unsigned PCLabelId = duplicateCPV(MF, CPI); 1824 BuildMI(MBB, I, Orig.getDebugLoc(), get(Opcode), DestReg) 1825 .addConstantPoolIndex(CPI) 1826 .addImm(PCLabelId) 1827 .cloneMemRefs(Orig); 1828 break; 1829 } 1830 } 1831 } 1832 1833 MachineInstr & 1834 ARMBaseInstrInfo::duplicate(MachineBasicBlock &MBB, 1835 MachineBasicBlock::iterator InsertBefore, 1836 const MachineInstr &Orig) const { 1837 MachineInstr &Cloned = TargetInstrInfo::duplicate(MBB, InsertBefore, Orig); 1838 MachineBasicBlock::instr_iterator I = Cloned.getIterator(); 1839 for (;;) { 1840 switch (I->getOpcode()) { 1841 case ARM::tLDRpci_pic: 
    case ARM::t2LDRpci_pic: {
      MachineFunction &MF = *MBB.getParent();
      unsigned CPI = I->getOperand(1).getIndex();
      unsigned PCLabelId = duplicateCPV(MF, CPI);
      I->getOperand(1).setIndex(CPI);
      I->getOperand(2).setImm(PCLabelId);
      break;
    }
    }
    if (!I->isBundledWithSucc())
      break;
    ++I;
  }
  return Cloned;
}

bool ARMBaseInstrInfo::produceSameValue(const MachineInstr &MI0,
                                        const MachineInstr &MI1,
                                        const MachineRegisterInfo *MRI) const {
  unsigned Opcode = MI0.getOpcode();
  if (Opcode == ARM::t2LDRpci ||
      Opcode == ARM::t2LDRpci_pic ||
      Opcode == ARM::tLDRpci ||
      Opcode == ARM::tLDRpci_pic ||
      Opcode == ARM::LDRLIT_ga_pcrel ||
      Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
      Opcode == ARM::tLDRLIT_ga_pcrel ||
      Opcode == ARM::MOV_ga_pcrel ||
      Opcode == ARM::MOV_ga_pcrel_ldr ||
      Opcode == ARM::t2MOV_ga_pcrel) {
    if (MI1.getOpcode() != Opcode)
      return false;
    if (MI0.getNumOperands() != MI1.getNumOperands())
      return false;

    const MachineOperand &MO0 = MI0.getOperand(1);
    const MachineOperand &MO1 = MI1.getOperand(1);
    if (MO0.getOffset() != MO1.getOffset())
      return false;

    if (Opcode == ARM::LDRLIT_ga_pcrel ||
        Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
        Opcode == ARM::tLDRLIT_ga_pcrel ||
        Opcode == ARM::MOV_ga_pcrel ||
        Opcode == ARM::MOV_ga_pcrel_ldr ||
        Opcode == ARM::t2MOV_ga_pcrel)
      // Ignore the PC labels.
      return MO0.getGlobal() == MO1.getGlobal();

    const MachineFunction *MF = MI0.getParent()->getParent();
    const MachineConstantPool *MCP = MF->getConstantPool();
    int CPI0 = MO0.getIndex();
    int CPI1 = MO1.getIndex();
    const MachineConstantPoolEntry &MCPE0 = MCP->getConstants()[CPI0];
    const MachineConstantPoolEntry &MCPE1 = MCP->getConstants()[CPI1];
    bool isARMCP0 = MCPE0.isMachineConstantPoolEntry();
    bool isARMCP1 = MCPE1.isMachineConstantPoolEntry();
    if (isARMCP0 && isARMCP1) {
      ARMConstantPoolValue *ACPV0 =
          static_cast<ARMConstantPoolValue *>(MCPE0.Val.MachineCPVal);
      ARMConstantPoolValue *ACPV1 =
          static_cast<ARMConstantPoolValue *>(MCPE1.Val.MachineCPVal);
      return ACPV0->hasSameValue(ACPV1);
    } else if (!isARMCP0 && !isARMCP1) {
      return MCPE0.Val.ConstVal == MCPE1.Val.ConstVal;
    }
    return false;
  } else if (Opcode == ARM::PICLDR) {
    if (MI1.getOpcode() != Opcode)
      return false;
    if (MI0.getNumOperands() != MI1.getNumOperands())
      return false;

    Register Addr0 = MI0.getOperand(1).getReg();
    Register Addr1 = MI1.getOperand(1).getReg();
    if (Addr0 != Addr1) {
      if (!MRI || !Register::isVirtualRegister(Addr0) ||
          !Register::isVirtualRegister(Addr1))
        return false;

      // This assumes SSA form.
      MachineInstr *Def0 = MRI->getVRegDef(Addr0);
      MachineInstr *Def1 = MRI->getVRegDef(Addr1);
      // Check if the loaded values, e.g. a constant-pool entry for a global
      // address, are the same.
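      // (This typically means both address registers were defined by loads
      // of the same constant-pool entry.)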
      if (!produceSameValue(*Def0, *Def1, MRI))
        return false;
    }

    for (unsigned i = 3, e = MI0.getNumOperands(); i != e; ++i) {
      // %12 = PICLDR %11, 0, 14, %noreg
      const MachineOperand &MO0 = MI0.getOperand(i);
      const MachineOperand &MO1 = MI1.getOperand(i);
      if (!MO0.isIdenticalTo(MO1))
        return false;
    }
    return true;
  }

  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

/// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to
/// determine if two loads are loading from the same base address. It should
/// only return true if the base pointers are the same and the only difference
/// between the two addresses is the offset. It also returns the offsets by
/// reference.
///
/// FIXME: remove this in favor of the MachineInstr interface once pre-RA-sched
/// is permanently disabled.
bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                               int64_t &Offset1,
                                               int64_t &Offset2) const {
  // Don't worry about Thumb: just ARM and Thumb2.
  if (Subtarget.isThumb1Only()) return false;

  if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
    return false;

  switch (Load1->getMachineOpcode()) {
  default:
    return false;
  case ARM::LDRi12:
  case ARM::LDRBi12:
  case ARM::LDRD:
  case ARM::LDRH:
  case ARM::LDRSB:
  case ARM::LDRSH:
  case ARM::VLDRD:
  case ARM::VLDRS:
  case ARM::t2LDRi8:
  case ARM::t2LDRBi8:
  case ARM::t2LDRDi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRi12:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:
    break;
  }

  switch (Load2->getMachineOpcode()) {
  default:
    return false;
  case ARM::LDRi12:
  case ARM::LDRBi12:
  case ARM::LDRD:
  case ARM::LDRH:
  case ARM::LDRSB:
  case ARM::LDRSH:
  case ARM::VLDRD:
  case ARM::VLDRS:
  case ARM::t2LDRi8:
  case ARM::t2LDRBi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRi12:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:
    break;
  }

  // Check if base addresses and chain operands match.
  if (Load1->getOperand(0) != Load2->getOperand(0) ||
      Load1->getOperand(4) != Load2->getOperand(4))
    return false;

  // Index should be Reg0.
  if (Load1->getOperand(3) != Load2->getOperand(3))
    return false;

  // Determine the offsets.
  if (isa<ConstantSDNode>(Load1->getOperand(1)) &&
      isa<ConstantSDNode>(Load2->getOperand(1))) {
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(1))->getSExtValue();
    Offset2 = cast<ConstantSDNode>(Load2->getOperand(1))->getSExtValue();
    return true;
  }

  return false;
}

/// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
/// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
/// be scheduled together. On some targets if two loads are loading from
/// addresses in the same cache line, it's better if they are scheduled
/// together. This function takes two integers that represent the load offsets
/// from the common base address. It returns true if it decides it's desirable
/// to schedule the two loads together. "NumLoads" is the number of loads that
/// have already been scheduled after Load1.
///
/// FIXME: remove this in favor of the MachineInstr interface once pre-RA-sched
/// is permanently disabled.
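/// For example, with Offset1 = 0, Offset2 = 8 and fewer than three loads
/// already scheduled after Load1, two loads of the same opcode are judged
/// worth scheduling together.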
2033 bool ARMBaseInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, 2034 int64_t Offset1, int64_t Offset2, 2035 unsigned NumLoads) const { 2036 // Don't worry about Thumb: just ARM and Thumb2. 2037 if (Subtarget.isThumb1Only()) return false; 2038 2039 assert(Offset2 > Offset1); 2040 2041 if ((Offset2 - Offset1) / 8 > 64) 2042 return false; 2043 2044 // Check if the machine opcodes are different. If they are different 2045 // then we consider them to not be of the same base address, 2046 // EXCEPT in the case of Thumb2 byte loads where one is LDRBi8 and the other LDRBi12. 2047 // In this case, they are considered to be the same because they are different 2048 // encoding forms of the same basic instruction. 2049 if ((Load1->getMachineOpcode() != Load2->getMachineOpcode()) && 2050 !((Load1->getMachineOpcode() == ARM::t2LDRBi8 && 2051 Load2->getMachineOpcode() == ARM::t2LDRBi12) || 2052 (Load1->getMachineOpcode() == ARM::t2LDRBi12 && 2053 Load2->getMachineOpcode() == ARM::t2LDRBi8))) 2054 return false; // FIXME: overly conservative? 2055 2056 // Four loads in a row should be sufficient. 2057 if (NumLoads >= 3) 2058 return false; 2059 2060 return true; 2061 } 2062 2063 bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr &MI, 2064 const MachineBasicBlock *MBB, 2065 const MachineFunction &MF) const { 2066 // Debug info is never a scheduling boundary. It's necessary to be explicit 2067 // due to the special treatment of IT instructions below, otherwise a 2068 // dbg_value followed by an IT will result in the IT instruction being 2069 // considered a scheduling hazard, which is wrong. It should be the actual 2070 // instruction preceding the dbg_value instruction(s), just like it is 2071 // when debug info is not present. 2072 if (MI.isDebugInstr()) 2073 return false; 2074 2075 // Terminators and labels can't be scheduled around. 2076 if (MI.isTerminator() || MI.isPosition()) 2077 return true; 2078 2079 // INLINEASM_BR can jump to another block 2080 if (MI.getOpcode() == TargetOpcode::INLINEASM_BR) 2081 return true; 2082 2083 // Treat the start of the IT block as a scheduling boundary, but schedule 2084 // t2IT along with all instructions following it. 2085 // FIXME: This is a big hammer. But the alternative is to add all potential 2086 // true and anti dependencies to IT block instructions as implicit operands 2087 // to the t2IT instruction. The added compile time and complexity does not 2088 // seem worth it. 2089 MachineBasicBlock::const_iterator I = MI; 2090 // Make sure to skip any debug instructions 2091 while (++I != MBB->end() && I->isDebugInstr()) 2092 ; 2093 if (I != MBB->end() && I->getOpcode() == ARM::t2IT) 2094 return true; 2095 2096 // Don't attempt to schedule around any instruction that defines 2097 // a stack-oriented pointer, as it's unlikely to be profitable. This 2098 // saves compile time, because it doesn't require every single 2099 // stack slot reference to depend on the instruction that does the 2100 // modification. 2101 // Calls don't actually change the stack pointer, even if they have imp-defs. 2102 // No ARM calling conventions change the stack pointer. (X86 calling 2103 // conventions sometimes do). 
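  // A non-call SP def here is typically an explicit stack adjustment such as
  // "sp = SUBri sp, #N", which we deliberately treat as a boundary.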
  if (!MI.isCall() && MI.definesRegister(ARM::SP))
    return true;

  return false;
}

bool ARMBaseInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &MBB,
                    unsigned NumCycles, unsigned ExtraPredCycles,
                    BranchProbability Probability) const {
  if (!NumCycles)
    return false;

  // If we are optimizing for size, see if the branch in the predecessor can be
  // lowered to cbn?z by the constant island lowering pass, and return false if
  // so. This results in a shorter instruction sequence.
  if (MBB.getParent()->getFunction().hasOptSize()) {
    MachineBasicBlock *Pred = *MBB.pred_begin();
    if (!Pred->empty()) {
      MachineInstr *LastMI = &*Pred->rbegin();
      if (LastMI->getOpcode() == ARM::t2Bcc) {
        const TargetRegisterInfo *TRI = &getRegisterInfo();
        MachineInstr *CmpMI = findCMPToFoldIntoCBZ(LastMI, TRI);
        if (CmpMI)
          return false;
      }
    }
  }
  return isProfitableToIfCvt(MBB, NumCycles, ExtraPredCycles,
                             MBB, 0, 0, Probability);
}

bool ARMBaseInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &TBB,
                    unsigned TCycles, unsigned TExtra,
                    MachineBasicBlock &FBB,
                    unsigned FCycles, unsigned FExtra,
                    BranchProbability Probability) const {
  if (!TCycles)
    return false;

  // In Thumb code we often end up trading one branch for an IT block, and
  // cloning the instructions can increase code size. Prevent blocks with
  // multiple predecessors from being ifcvted to prevent this cloning.
  if (Subtarget.isThumb2() && TBB.getParent()->getFunction().hasMinSize()) {
    if (TBB.pred_size() != 1 || FBB.pred_size() != 1)
      return false;
  }

  // Attempt to estimate the relative costs of predication versus branching.
  // Here we scale up each component of UnpredCost to avoid precision issues
  // when scaling TCycles/FCycles by Probability.
  const unsigned ScalingUpFactor = 1024;

  unsigned PredCost = (TCycles + FCycles + TExtra + FExtra) * ScalingUpFactor;
  unsigned UnpredCost;
  if (!Subtarget.hasBranchPredictor()) {
    // When we don't have a branch predictor it's always cheaper to not take a
    // branch than take it, so we have to take that into account.
    unsigned NotTakenBranchCost = 1;
    unsigned TakenBranchCost = Subtarget.getMispredictionPenalty();
    unsigned TUnpredCycles, FUnpredCycles;
    if (!FCycles) {
      // Triangle: TBB is the fallthrough
      TUnpredCycles = TCycles + NotTakenBranchCost;
      FUnpredCycles = TakenBranchCost;
    } else {
      // Diamond: TBB is the block that is branched to, FBB is the fallthrough
      TUnpredCycles = TCycles + TakenBranchCost;
      FUnpredCycles = FCycles + NotTakenBranchCost;
      // The branch at the end of FBB will disappear when it's predicated, so
      // discount it from PredCost.
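      // E.g. for TCycles = FCycles = 2, no extra predication cycles, a
      // misprediction penalty of 4 and an even probability, UnpredCost is
      // roughly 0.5 * (2 + 4) + 0.5 * (2 + 1) = 4.5 cycles against a PredCost
      // of 4 - 1 = 3 cycles, so predication wins.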
      PredCost -= 1 * ScalingUpFactor;
    }
    // The total cost is the cost of each path scaled by their probabilities.
    unsigned TUnpredCost = Probability.scale(TUnpredCycles * ScalingUpFactor);
    unsigned FUnpredCost =
        Probability.getCompl().scale(FUnpredCycles * ScalingUpFactor);
    UnpredCost = TUnpredCost + FUnpredCost;
    // When predicating, assume that the first IT can be folded away but later
    // ones cost one cycle each.
    if (Subtarget.isThumb2() && TCycles + FCycles > 4) {
      PredCost += ((TCycles + FCycles - 4) / 4) * ScalingUpFactor;
    }
  } else {
    unsigned TUnpredCost = Probability.scale(TCycles * ScalingUpFactor);
    unsigned FUnpredCost =
        Probability.getCompl().scale(FCycles * ScalingUpFactor);
    UnpredCost = TUnpredCost + FUnpredCost;
    UnpredCost += 1 * ScalingUpFactor; // The branch itself
    UnpredCost += Subtarget.getMispredictionPenalty() * ScalingUpFactor / 10;
  }

  return PredCost <= UnpredCost;
}

unsigned
ARMBaseInstrInfo::extraSizeToPredicateInstructions(const MachineFunction &MF,
                                                   unsigned NumInsts) const {
  // Thumb2 needs a 2-byte IT instruction to predicate up to 4 instructions.
  // ARM has a condition code field in every predicable instruction; using it
  // doesn't change code size.
  if (!Subtarget.isThumb2())
    return 0;

  // It's possible that the size of the IT is restricted to a single
  // instruction.
  unsigned MaxInsts = Subtarget.restrictIT() ? 1 : 4;
  return divideCeil(NumInsts, MaxInsts) * 2;
}

unsigned
ARMBaseInstrInfo::predictBranchSizeForIfCvt(MachineInstr &MI) const {
  // If this branch is likely to be folded into the comparison to form a
  // CB(N)Z, then removing it won't reduce code size at all, because that will
  // just replace the CB(N)Z with a CMP.
  if (MI.getOpcode() == ARM::t2Bcc &&
      findCMPToFoldIntoCBZ(&MI, &getRegisterInfo()))
    return 0;

  unsigned Size = getInstSizeInBytes(MI);

  // For Thumb2, all branches are 32-bit instructions during the if conversion
  // pass, but may be replaced with 16-bit instructions during size reduction.
  // Since the branches considered by if conversion tend to be forward branches
  // over small basic blocks, they are very likely to be in range for the
  // narrow instructions, so we assume the final code size will be half what it
  // currently is.
  if (Subtarget.isThumb2())
    Size /= 2;

  return Size;
}

bool
ARMBaseInstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                            MachineBasicBlock &FMBB) const {
  // Reduce false anti-dependencies to let the target's out-of-order execution
  // engine do its thing.
  return Subtarget.isProfitableToUnpredicate();
}

/// getInstrPredicate - If instruction is predicated, returns its predicate
/// condition, otherwise returns AL. It also returns the condition code
/// register by reference.
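/// For example, a predicated ADDri whose predicate operands are (ARMCC::EQ,
/// %cpsr) yields ARMCC::EQ and sets PredReg to CPSR.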
2248 ARMCC::CondCodes llvm::getInstrPredicate(const MachineInstr &MI, 2249 Register &PredReg) { 2250 int PIdx = MI.findFirstPredOperandIdx(); 2251 if (PIdx == -1) { 2252 PredReg = 0; 2253 return ARMCC::AL; 2254 } 2255 2256 PredReg = MI.getOperand(PIdx+1).getReg(); 2257 return (ARMCC::CondCodes)MI.getOperand(PIdx).getImm(); 2258 } 2259 2260 unsigned llvm::getMatchingCondBranchOpcode(unsigned Opc) { 2261 if (Opc == ARM::B) 2262 return ARM::Bcc; 2263 if (Opc == ARM::tB) 2264 return ARM::tBcc; 2265 if (Opc == ARM::t2B) 2266 return ARM::t2Bcc; 2267 2268 llvm_unreachable("Unknown unconditional branch opcode!"); 2269 } 2270 2271 MachineInstr *ARMBaseInstrInfo::commuteInstructionImpl(MachineInstr &MI, 2272 bool NewMI, 2273 unsigned OpIdx1, 2274 unsigned OpIdx2) const { 2275 switch (MI.getOpcode()) { 2276 case ARM::MOVCCr: 2277 case ARM::t2MOVCCr: { 2278 // MOVCC can be commuted by inverting the condition. 2279 Register PredReg; 2280 ARMCC::CondCodes CC = getInstrPredicate(MI, PredReg); 2281 // MOVCC AL can't be inverted. Shouldn't happen. 2282 if (CC == ARMCC::AL || PredReg != ARM::CPSR) 2283 return nullptr; 2284 MachineInstr *CommutedMI = 2285 TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2); 2286 if (!CommutedMI) 2287 return nullptr; 2288 // After swapping the MOVCC operands, also invert the condition. 2289 CommutedMI->getOperand(CommutedMI->findFirstPredOperandIdx()) 2290 .setImm(ARMCC::getOppositeCondition(CC)); 2291 return CommutedMI; 2292 } 2293 } 2294 return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2); 2295 } 2296 2297 /// Identify instructions that can be folded into a MOVCC instruction, and 2298 /// return the defining instruction. 2299 MachineInstr * 2300 ARMBaseInstrInfo::canFoldIntoMOVCC(Register Reg, const MachineRegisterInfo &MRI, 2301 const TargetInstrInfo *TII) const { 2302 if (!Reg.isVirtual()) 2303 return nullptr; 2304 if (!MRI.hasOneNonDBGUse(Reg)) 2305 return nullptr; 2306 MachineInstr *MI = MRI.getVRegDef(Reg); 2307 if (!MI) 2308 return nullptr; 2309 // Check if MI can be predicated and folded into the MOVCC. 2310 if (!isPredicable(*MI)) 2311 return nullptr; 2312 // Check if MI has any non-dead defs or physreg uses. This also detects 2313 // predicated instructions which will be reading CPSR. 2314 for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) { 2315 const MachineOperand &MO = MI->getOperand(i); 2316 // Reject frame index operands, PEI can't handle the predicated pseudos. 2317 if (MO.isFI() || MO.isCPI() || MO.isJTI()) 2318 return nullptr; 2319 if (!MO.isReg()) 2320 continue; 2321 // MI can't have any tied operands, that would conflict with predication. 2322 if (MO.isTied()) 2323 return nullptr; 2324 if (Register::isPhysicalRegister(MO.getReg())) 2325 return nullptr; 2326 if (MO.isDef() && !MO.isDead()) 2327 return nullptr; 2328 } 2329 bool DontMoveAcrossStores = true; 2330 if (!MI->isSafeToMove(/* AliasAnalysis = */ nullptr, DontMoveAcrossStores)) 2331 return nullptr; 2332 return MI; 2333 } 2334 2335 bool ARMBaseInstrInfo::analyzeSelect(const MachineInstr &MI, 2336 SmallVectorImpl<MachineOperand> &Cond, 2337 unsigned &TrueOp, unsigned &FalseOp, 2338 bool &Optimizable) const { 2339 assert((MI.getOpcode() == ARM::MOVCCr || MI.getOpcode() == ARM::t2MOVCCr) && 2340 "Unknown select instruction"); 2341 // MOVCC operands: 2342 // 0: Def. 2343 // 1: True use. 2344 // 2: False use. 2345 // 3: Condition code. 2346 // 4: CPSR use. 
  TrueOp = 1;
  FalseOp = 2;
  Cond.push_back(MI.getOperand(3));
  Cond.push_back(MI.getOperand(4));
  // We can always fold a def.
  Optimizable = true;
  return false;
}

MachineInstr *
ARMBaseInstrInfo::optimizeSelect(MachineInstr &MI,
                                 SmallPtrSetImpl<MachineInstr *> &SeenMIs,
                                 bool PreferFalse) const {
  assert((MI.getOpcode() == ARM::MOVCCr || MI.getOpcode() == ARM::t2MOVCCr) &&
         "Unknown select instruction");
  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  MachineInstr *DefMI = canFoldIntoMOVCC(MI.getOperand(2).getReg(), MRI, this);
  bool Invert = !DefMI;
  if (!DefMI)
    DefMI = canFoldIntoMOVCC(MI.getOperand(1).getReg(), MRI, this);
  if (!DefMI)
    return nullptr;

  // Find new register class to use.
  MachineOperand FalseReg = MI.getOperand(Invert ? 2 : 1);
  MachineOperand TrueReg = MI.getOperand(Invert ? 1 : 2);
  Register DestReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *FalseClass = MRI.getRegClass(FalseReg.getReg());
  const TargetRegisterClass *TrueClass = MRI.getRegClass(TrueReg.getReg());
  if (!MRI.constrainRegClass(DestReg, FalseClass))
    return nullptr;
  if (!MRI.constrainRegClass(DestReg, TrueClass))
    return nullptr;

  // Create a new predicated version of DefMI.
  // Rfalse is the first use.
  MachineInstrBuilder NewMI =
      BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), DefMI->getDesc(), DestReg);

  // Copy all the DefMI operands, excluding its (null) predicate.
  const MCInstrDesc &DefDesc = DefMI->getDesc();
  for (unsigned i = 1, e = DefDesc.getNumOperands();
       i != e && !DefDesc.OpInfo[i].isPredicate(); ++i)
    NewMI.add(DefMI->getOperand(i));

  unsigned CondCode = MI.getOperand(3).getImm();
  if (Invert)
    NewMI.addImm(ARMCC::getOppositeCondition(ARMCC::CondCodes(CondCode)));
  else
    NewMI.addImm(CondCode);
  NewMI.add(MI.getOperand(4));

  // DefMI is not the -S version that sets CPSR, so add an optional %noreg.
  if (NewMI->hasOptionalDef())
    NewMI.add(condCodeOp());

  // The output register value when the predicate is false is an implicit
  // register operand tied to the first def.
  // The tie makes the register allocator ensure the FalseReg is allocated the
  // same register as operand 0.
  FalseReg.setImplicit();
  NewMI.add(FalseReg);
  NewMI->tieOperands(0, NewMI->getNumOperands() - 1);

  // Update SeenMIs set: register newly created MI and erase removed DefMI.
  SeenMIs.insert(NewMI);
  SeenMIs.erase(DefMI);

  // If MI is inside a loop, and DefMI is outside the loop, then kill flags on
  // DefMI would be invalid when transferred inside the loop. Checking for a
  // loop is expensive, but at least remove kill flags if they are in different
  // BBs.
  if (DefMI->getParent() != MI.getParent())
    NewMI->clearKillInfo();

  // The caller will erase MI, but not DefMI.
  DefMI->eraseFromParent();
  return NewMI;
}

/// Map pseudo instructions that imply an 'S' bit onto real opcodes. Whether
/// the instruction is encoded with an 'S' bit is determined by the optional
/// CPSR def operand.
///
/// This will go away once we can teach tblgen how to set the optional CPSR def
/// operand itself.
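/// For example, convertAddSubFlagsOpcode(ARM::ADDSri) returns ARM::ADDri;
/// the flag-setting behaviour is then represented by the CPSR operand rather
/// than by the opcode itself.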
struct AddSubFlagsOpcodePair {
  uint16_t PseudoOpc;
  uint16_t MachineOpc;
};

static const AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[] = {
    {ARM::ADDSri, ARM::ADDri},
    {ARM::ADDSrr, ARM::ADDrr},
    {ARM::ADDSrsi, ARM::ADDrsi},
    {ARM::ADDSrsr, ARM::ADDrsr},

    {ARM::SUBSri, ARM::SUBri},
    {ARM::SUBSrr, ARM::SUBrr},
    {ARM::SUBSrsi, ARM::SUBrsi},
    {ARM::SUBSrsr, ARM::SUBrsr},

    {ARM::RSBSri, ARM::RSBri},
    {ARM::RSBSrsi, ARM::RSBrsi},
    {ARM::RSBSrsr, ARM::RSBrsr},

    {ARM::tADDSi3, ARM::tADDi3},
    {ARM::tADDSi8, ARM::tADDi8},
    {ARM::tADDSrr, ARM::tADDrr},
    {ARM::tADCS, ARM::tADC},

    {ARM::tSUBSi3, ARM::tSUBi3},
    {ARM::tSUBSi8, ARM::tSUBi8},
    {ARM::tSUBSrr, ARM::tSUBrr},
    {ARM::tSBCS, ARM::tSBC},
    {ARM::tRSBS, ARM::tRSB},
    {ARM::tLSLSri, ARM::tLSLri},

    {ARM::t2ADDSri, ARM::t2ADDri},
    {ARM::t2ADDSrr, ARM::t2ADDrr},
    {ARM::t2ADDSrs, ARM::t2ADDrs},

    {ARM::t2SUBSri, ARM::t2SUBri},
    {ARM::t2SUBSrr, ARM::t2SUBrr},
    {ARM::t2SUBSrs, ARM::t2SUBrs},

    {ARM::t2RSBSri, ARM::t2RSBri},
    {ARM::t2RSBSrs, ARM::t2RSBrs},
};

unsigned llvm::convertAddSubFlagsOpcode(unsigned OldOpc) {
  for (unsigned i = 0, e = array_lengthof(AddSubFlagsOpcodeMap); i != e; ++i)
    if (OldOpc == AddSubFlagsOpcodeMap[i].PseudoOpc)
      return AddSubFlagsOpcodeMap[i].MachineOpc;
  return 0;
}

void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator &MBBI,
                                   const DebugLoc &dl, Register DestReg,
                                   Register BaseReg, int NumBytes,
                                   ARMCC::CondCodes Pred, Register PredReg,
                                   const ARMBaseInstrInfo &TII,
                                   unsigned MIFlags) {
  if (NumBytes == 0 && DestReg != BaseReg) {
    BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), DestReg)
        .addReg(BaseReg, RegState::Kill)
        .add(predOps(Pred, PredReg))
        .add(condCodeOp())
        .setMIFlags(MIFlags);
    return;
  }

  bool isSub = NumBytes < 0;
  if (isSub) NumBytes = -NumBytes;

  while (NumBytes) {
    unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes);
    unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt);
    assert(ThisVal && "Didn't extract field correctly");

    // We will handle these bits from offset, clear them.
    NumBytes &= ~ThisVal;

    assert(ARM_AM::getSOImmVal(ThisVal) != -1 && "Bit extraction didn't work?");

    // Build the new ADD / SUB.
    unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
    BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
        .addReg(BaseReg, RegState::Kill)
        .addImm(ThisVal)
        .add(predOps(Pred, PredReg))
        .add(condCodeOp())
        .setMIFlags(MIFlags);
    BaseReg = DestReg;
  }
}

bool llvm::tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget,
                                      MachineFunction &MF, MachineInstr *MI,
                                      unsigned NumBytes) {
  // This optimisation potentially adds lots of load and store
  // micro-operations; it's only really a benefit to code size.
  if (!Subtarget.hasMinSize())
    return false;

  // If only one register is pushed/popped, LLVM can use an LDR/STR
  // instead. We can't modify those, so make sure we're dealing with an
  // instruction we understand.
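  // E.g. folding an 8-byte "sub sp, sp, #8" into a preceding "push {r4, lr}"
  // yields "push {r2, r3, r4, lr}", with r2 and r3 stored as undef scratch
  // values.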
  bool IsPop = isPopOpcode(MI->getOpcode());
  bool IsPush = isPushOpcode(MI->getOpcode());
  if (!IsPush && !IsPop)
    return false;

  bool IsVFPPushPop = MI->getOpcode() == ARM::VSTMDDB_UPD ||
                      MI->getOpcode() == ARM::VLDMDIA_UPD;
  bool IsT1PushPop = MI->getOpcode() == ARM::tPUSH ||
                     MI->getOpcode() == ARM::tPOP ||
                     MI->getOpcode() == ARM::tPOP_RET;

  assert((IsT1PushPop || (MI->getOperand(0).getReg() == ARM::SP &&
                          MI->getOperand(1).getReg() == ARM::SP)) &&
         "trying to fold sp update into non-sp-updating push/pop");

  // The VFP push & pop act on D-registers, so we can only correctly fold in
  // an adjustment that is a multiple of 8 bytes. Similarly, rN registers are
  // 4 bytes. Don't try if this is violated.
  if (NumBytes % (IsVFPPushPop ? 8 : 4) != 0)
    return false;

  // ARM and Thumb2 push/pop insts have explicit "sp, sp" operands (+
  // pred) so the list starts at 4. Thumb1 starts after the predicate.
  int RegListIdx = IsT1PushPop ? 2 : 4;

  // Calculate the space we'll need in terms of registers.
  unsigned RegsNeeded;
  const TargetRegisterClass *RegClass;
  if (IsVFPPushPop) {
    RegsNeeded = NumBytes / 8;
    RegClass = &ARM::DPRRegClass;
  } else {
    RegsNeeded = NumBytes / 4;
    RegClass = &ARM::GPRRegClass;
  }

  // We're going to have to strip all list operands off before
  // re-adding them since the order matters, so save the existing ones
  // for later.
  SmallVector<MachineOperand, 4> RegList;

  // We're also going to need the first register transferred by this
  // instruction, which won't necessarily be the first register in the list.
  unsigned FirstRegEnc = -1;

  const TargetRegisterInfo *TRI = MF.getRegInfo().getTargetRegisterInfo();
  for (int i = MI->getNumOperands() - 1; i >= RegListIdx; --i) {
    MachineOperand &MO = MI->getOperand(i);
    RegList.push_back(MO);

    if (MO.isReg() && !MO.isImplicit() &&
        TRI->getEncodingValue(MO.getReg()) < FirstRegEnc)
      FirstRegEnc = TRI->getEncodingValue(MO.getReg());
  }

  const MCPhysReg *CSRegs = TRI->getCalleeSavedRegs(&MF);

  // Now try to find enough space in the reglist to allocate NumBytes.
  for (int CurRegEnc = FirstRegEnc - 1; CurRegEnc >= 0 && RegsNeeded;
       --CurRegEnc) {
    unsigned CurReg = RegClass->getRegister(CurRegEnc);
    if (IsT1PushPop && CurRegEnc > TRI->getEncodingValue(ARM::R7))
      continue;
    if (!IsPop) {
      // Pushing any register is completely harmless; mark the register
      // involved as undef since we don't care about its value and must not
      // restore it during stack unwinding.
      RegList.push_back(MachineOperand::CreateReg(CurReg, false, false,
                                                  false, false, true));
      --RegsNeeded;
      continue;
    }

    // However, we can only pop an extra register if it's not live. For
    // registers live within the function we might clobber a return value
    // register; the other way a register can be live here is if it's
    // callee-saved.
    if (isCalleeSavedRegister(CurReg, CSRegs) ||
        MI->getParent()->computeRegisterLiveness(TRI, CurReg, MI) !=
            MachineBasicBlock::LQR_Dead) {
      // VFP pops don't allow holes in the register list, so any skip is fatal
      // for our transformation. GPR pops do, so we should just keep looking.
      if (IsVFPPushPop)
        return false;
      else
        continue;
    }

    // Mark the unimportant registers as <def,dead> in the POP.
    RegList.push_back(MachineOperand::CreateReg(CurReg, true, false, false,
                                                true));
    --RegsNeeded;
  }

  if (RegsNeeded > 0)
    return false;

  // Finally we know we can profitably perform the optimisation so go
  // ahead: strip all existing registers off and add them back again
  // in the right order.
  for (int i = MI->getNumOperands() - 1; i >= RegListIdx; --i)
    MI->RemoveOperand(i);

  // Add the complete list back in.
  MachineInstrBuilder MIB(MF, &*MI);
  for (int i = RegList.size() - 1; i >= 0; --i)
    MIB.add(RegList[i]);

  return true;
}

bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                Register FrameReg, int &Offset,
                                const ARMBaseInstrInfo &TII) {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  bool isSub = false;

  // Memory operands in inline assembly always use AddrMode2.
  if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR)
    AddrMode = ARMII::AddrMode2;

  if (Opcode == ARM::ADDri) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();
    if (Offset == 0) {
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::MOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.RemoveOperand(FrameRegIdx+1);
      Offset = 0;
      return true;
    } else if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
      MI.setDesc(TII.get(ARM::SUBri));
    }

    // Common case: small offset, fits into instruction.
    if (ARM_AM::getSOImmVal(Offset) != -1) {
      // Replace the FrameIndex with sp / fp
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      Offset = 0;
      return true;
    }

    // Otherwise, pull as much of the immediate into this ADDri/SUBri
    // as possible.
    unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
    unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt);

    // We will handle these bits from offset, clear them.
    Offset &= ~ThisImmVal;

    // Get the properly encoded SOImmVal field.
    assert(ARM_AM::getSOImmVal(ThisImmVal) != -1 &&
           "Bit extraction didn't work?");
    MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
  } else {
    unsigned ImmIdx = 0;
    int InstrOffs = 0;
    unsigned NumBits = 0;
    unsigned Scale = 1;
    switch (AddrMode) {
    case ARMII::AddrMode_i12:
      ImmIdx = FrameRegIdx + 1;
      InstrOffs = MI.getOperand(ImmIdx).getImm();
      NumBits = 12;
      break;
    case ARMII::AddrMode2:
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 12;
      break;
    case ARMII::AddrMode3:
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      break;
    case ARMII::AddrMode4:
    case ARMII::AddrMode6:
      // Can't fold any offset even if it's zero.
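      // (These are the load/store-multiple and VLD1/VST1 forms, which have
      // no immediate offset field at all.)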
      return false;
    case ARMII::AddrMode5:
      ImmIdx = FrameRegIdx+1;
      InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 4;
      break;
    case ARMII::AddrMode5FP16:
      ImmIdx = FrameRegIdx+1;
      InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 2;
      break;
    case ARMII::AddrModeT2_i7:
    case ARMII::AddrModeT2_i7s2:
    case ARMII::AddrModeT2_i7s4:
      ImmIdx = FrameRegIdx+1;
      InstrOffs = MI.getOperand(ImmIdx).getImm();
      NumBits = 7;
      Scale = (AddrMode == ARMII::AddrModeT2_i7s2 ? 2 :
               AddrMode == ARMII::AddrModeT2_i7s4 ? 4 : 1);
      break;
    default:
      llvm_unreachable("Unsupported addressing mode!");
    }

    Offset += InstrOffs * Scale;
    assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
    if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
    }

    // Attempt to fold the address computation if the opcode has offset bits.
    if (NumBits > 0) {
      // Common case: small offset, fits into instruction.
      MachineOperand &ImmOp = MI.getOperand(ImmIdx);
      int ImmedOffset = Offset / Scale;
      unsigned Mask = (1 << NumBits) - 1;
      if ((unsigned)Offset <= Mask * Scale) {
        // Replace the FrameIndex with sp
        MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
        // FIXME: When addrmode2 goes away, this will simplify (like the
        // T2 version), as the LDR.i12 versions don't need the encoding
        // tricks for the offset value.
        if (isSub) {
          if (AddrMode == ARMII::AddrMode_i12)
            ImmedOffset = -ImmedOffset;
          else
            ImmedOffset |= 1 << NumBits;
        }
        ImmOp.ChangeToImmediate(ImmedOffset);
        Offset = 0;
        return true;
      }

      // Otherwise, it didn't fit. Pull in what we can to simplify the
      // immediate.
      ImmedOffset = ImmedOffset & Mask;
      if (isSub) {
        if (AddrMode == ARMII::AddrMode_i12)
          ImmedOffset = -ImmedOffset;
        else
          ImmedOffset |= 1 << NumBits;
      }
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset &= ~(Mask*Scale);
    }
  }

  Offset = (isSub) ? -Offset : Offset;
  return Offset == 0;
}

/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2 if the comparison has two register operands, and the
/// value it compares against in CmpValue. Return true if the comparison
/// instruction can be analyzed.
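/// For example, "CMPri %r0, 42" yields SrcReg = %r0, SrcReg2 = 0,
/// CmpMask = ~0 and CmpValue = 42.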
2804 bool ARMBaseInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg, 2805 Register &SrcReg2, int64_t &CmpMask, 2806 int64_t &CmpValue) const { 2807 switch (MI.getOpcode()) { 2808 default: break; 2809 case ARM::CMPri: 2810 case ARM::t2CMPri: 2811 case ARM::tCMPi8: 2812 SrcReg = MI.getOperand(0).getReg(); 2813 SrcReg2 = 0; 2814 CmpMask = ~0; 2815 CmpValue = MI.getOperand(1).getImm(); 2816 return true; 2817 case ARM::CMPrr: 2818 case ARM::t2CMPrr: 2819 case ARM::tCMPr: 2820 SrcReg = MI.getOperand(0).getReg(); 2821 SrcReg2 = MI.getOperand(1).getReg(); 2822 CmpMask = ~0; 2823 CmpValue = 0; 2824 return true; 2825 case ARM::TSTri: 2826 case ARM::t2TSTri: 2827 SrcReg = MI.getOperand(0).getReg(); 2828 SrcReg2 = 0; 2829 CmpMask = MI.getOperand(1).getImm(); 2830 CmpValue = 0; 2831 return true; 2832 } 2833 2834 return false; 2835 } 2836 2837 /// isSuitableForMask - Identify a suitable 'and' instruction that 2838 /// operates on the given source register and applies the same mask 2839 /// as a 'tst' instruction. Provide a limited look-through for copies. 2840 /// When successful, MI will hold the found instruction. 2841 static bool isSuitableForMask(MachineInstr *&MI, Register SrcReg, 2842 int CmpMask, bool CommonUse) { 2843 switch (MI->getOpcode()) { 2844 case ARM::ANDri: 2845 case ARM::t2ANDri: 2846 if (CmpMask != MI->getOperand(2).getImm()) 2847 return false; 2848 if (SrcReg == MI->getOperand(CommonUse ? 1 : 0).getReg()) 2849 return true; 2850 break; 2851 } 2852 2853 return false; 2854 } 2855 2856 /// getCmpToAddCondition - assume the flags are set by CMP(a,b), return 2857 /// the condition code if we modify the instructions such that flags are 2858 /// set by ADD(a,b,X). 2859 inline static ARMCC::CondCodes getCmpToAddCondition(ARMCC::CondCodes CC) { 2860 switch (CC) { 2861 default: return ARMCC::AL; 2862 case ARMCC::HS: return ARMCC::LO; 2863 case ARMCC::LO: return ARMCC::HS; 2864 case ARMCC::VS: return ARMCC::VS; 2865 case ARMCC::VC: return ARMCC::VC; 2866 } 2867 } 2868 2869 /// isRedundantFlagInstr - check whether the first instruction, whose only 2870 /// purpose is to update flags, can be made redundant. 2871 /// CMPrr can be made redundant by SUBrr if the operands are the same. 2872 /// CMPri can be made redundant by SUBri if the operands are the same. 2873 /// CMPrr(r0, r1) can be made redundant by ADDr[ri](r0, r1, X). 2874 /// This function can be extended later on. 
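/// For example, in "%r2 = SUBrr %r0, %r1" followed by "CMPrr %r0, %r1", the
/// CMP can be dropped once the SUB is rewritten to set CPSR.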
2875 inline static bool isRedundantFlagInstr(const MachineInstr *CmpI, 2876 Register SrcReg, Register SrcReg2, 2877 int64_t ImmValue, 2878 const MachineInstr *OI, 2879 bool &IsThumb1) { 2880 if ((CmpI->getOpcode() == ARM::CMPrr || CmpI->getOpcode() == ARM::t2CMPrr) && 2881 (OI->getOpcode() == ARM::SUBrr || OI->getOpcode() == ARM::t2SUBrr) && 2882 ((OI->getOperand(1).getReg() == SrcReg && 2883 OI->getOperand(2).getReg() == SrcReg2) || 2884 (OI->getOperand(1).getReg() == SrcReg2 && 2885 OI->getOperand(2).getReg() == SrcReg))) { 2886 IsThumb1 = false; 2887 return true; 2888 } 2889 2890 if (CmpI->getOpcode() == ARM::tCMPr && OI->getOpcode() == ARM::tSUBrr && 2891 ((OI->getOperand(2).getReg() == SrcReg && 2892 OI->getOperand(3).getReg() == SrcReg2) || 2893 (OI->getOperand(2).getReg() == SrcReg2 && 2894 OI->getOperand(3).getReg() == SrcReg))) { 2895 IsThumb1 = true; 2896 return true; 2897 } 2898 2899 if ((CmpI->getOpcode() == ARM::CMPri || CmpI->getOpcode() == ARM::t2CMPri) && 2900 (OI->getOpcode() == ARM::SUBri || OI->getOpcode() == ARM::t2SUBri) && 2901 OI->getOperand(1).getReg() == SrcReg && 2902 OI->getOperand(2).getImm() == ImmValue) { 2903 IsThumb1 = false; 2904 return true; 2905 } 2906 2907 if (CmpI->getOpcode() == ARM::tCMPi8 && 2908 (OI->getOpcode() == ARM::tSUBi8 || OI->getOpcode() == ARM::tSUBi3) && 2909 OI->getOperand(2).getReg() == SrcReg && 2910 OI->getOperand(3).getImm() == ImmValue) { 2911 IsThumb1 = true; 2912 return true; 2913 } 2914 2915 if ((CmpI->getOpcode() == ARM::CMPrr || CmpI->getOpcode() == ARM::t2CMPrr) && 2916 (OI->getOpcode() == ARM::ADDrr || OI->getOpcode() == ARM::t2ADDrr || 2917 OI->getOpcode() == ARM::ADDri || OI->getOpcode() == ARM::t2ADDri) && 2918 OI->getOperand(0).isReg() && OI->getOperand(1).isReg() && 2919 OI->getOperand(0).getReg() == SrcReg && 2920 OI->getOperand(1).getReg() == SrcReg2) { 2921 IsThumb1 = false; 2922 return true; 2923 } 2924 2925 if (CmpI->getOpcode() == ARM::tCMPr && 2926 (OI->getOpcode() == ARM::tADDi3 || OI->getOpcode() == ARM::tADDi8 || 2927 OI->getOpcode() == ARM::tADDrr) && 2928 OI->getOperand(0).getReg() == SrcReg && 2929 OI->getOperand(2).getReg() == SrcReg2) { 2930 IsThumb1 = true; 2931 return true; 2932 } 2933 2934 return false; 2935 } 2936 2937 static bool isOptimizeCompareCandidate(MachineInstr *MI, bool &IsThumb1) { 2938 switch (MI->getOpcode()) { 2939 default: return false; 2940 case ARM::tLSLri: 2941 case ARM::tLSRri: 2942 case ARM::tLSLrr: 2943 case ARM::tLSRrr: 2944 case ARM::tSUBrr: 2945 case ARM::tADDrr: 2946 case ARM::tADDi3: 2947 case ARM::tADDi8: 2948 case ARM::tSUBi3: 2949 case ARM::tSUBi8: 2950 case ARM::tMUL: 2951 case ARM::tADC: 2952 case ARM::tSBC: 2953 case ARM::tRSB: 2954 case ARM::tAND: 2955 case ARM::tORR: 2956 case ARM::tEOR: 2957 case ARM::tBIC: 2958 case ARM::tMVN: 2959 case ARM::tASRri: 2960 case ARM::tASRrr: 2961 case ARM::tROR: 2962 IsThumb1 = true; 2963 LLVM_FALLTHROUGH; 2964 case ARM::RSBrr: 2965 case ARM::RSBri: 2966 case ARM::RSCrr: 2967 case ARM::RSCri: 2968 case ARM::ADDrr: 2969 case ARM::ADDri: 2970 case ARM::ADCrr: 2971 case ARM::ADCri: 2972 case ARM::SUBrr: 2973 case ARM::SUBri: 2974 case ARM::SBCrr: 2975 case ARM::SBCri: 2976 case ARM::t2RSBri: 2977 case ARM::t2ADDrr: 2978 case ARM::t2ADDri: 2979 case ARM::t2ADCrr: 2980 case ARM::t2ADCri: 2981 case ARM::t2SUBrr: 2982 case ARM::t2SUBri: 2983 case ARM::t2SBCrr: 2984 case ARM::t2SBCri: 2985 case ARM::ANDrr: 2986 case ARM::ANDri: 2987 case ARM::t2ANDrr: 2988 case ARM::t2ANDri: 2989 case ARM::ORRrr: 2990 case ARM::ORRri: 2991 case ARM::t2ORRrr: 
2992 case ARM::t2ORRri: 2993 case ARM::EORrr: 2994 case ARM::EORri: 2995 case ARM::t2EORrr: 2996 case ARM::t2EORri: 2997 case ARM::t2LSRri: 2998 case ARM::t2LSRrr: 2999 case ARM::t2LSLri: 3000 case ARM::t2LSLrr: 3001 return true; 3002 } 3003 } 3004 3005 /// optimizeCompareInstr - Convert the instruction supplying the argument to the 3006 /// comparison into one that sets the zero bit in the flags register; 3007 /// Remove a redundant Compare instruction if an earlier instruction can set the 3008 /// flags in the same way as Compare. 3009 /// E.g. SUBrr(r1,r2) and CMPrr(r1,r2). We also handle the case where two 3010 /// operands are swapped: SUBrr(r1,r2) and CMPrr(r2,r1), by updating the 3011 /// condition code of instructions which use the flags. 3012 bool ARMBaseInstrInfo::optimizeCompareInstr( 3013 MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask, 3014 int64_t CmpValue, const MachineRegisterInfo *MRI) const { 3015 // Get the unique definition of SrcReg. 3016 MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg); 3017 if (!MI) return false; 3018 3019 // Masked compares sometimes use the same register as the corresponding 'and'. 3020 if (CmpMask != ~0) { 3021 if (!isSuitableForMask(MI, SrcReg, CmpMask, false) || isPredicated(*MI)) { 3022 MI = nullptr; 3023 for (MachineRegisterInfo::use_instr_iterator 3024 UI = MRI->use_instr_begin(SrcReg), UE = MRI->use_instr_end(); 3025 UI != UE; ++UI) { 3026 if (UI->getParent() != CmpInstr.getParent()) 3027 continue; 3028 MachineInstr *PotentialAND = &*UI; 3029 if (!isSuitableForMask(PotentialAND, SrcReg, CmpMask, true) || 3030 isPredicated(*PotentialAND)) 3031 continue; 3032 MI = PotentialAND; 3033 break; 3034 } 3035 if (!MI) return false; 3036 } 3037 } 3038 3039 // Get ready to iterate backward from CmpInstr. 3040 MachineBasicBlock::iterator I = CmpInstr, E = MI, 3041 B = CmpInstr.getParent()->begin(); 3042 3043 // Early exit if CmpInstr is at the beginning of the BB. 3044 if (I == B) return false; 3045 3046 // There are two possible candidates which can be changed to set CPSR: 3047 // One is MI, the other is a SUB or ADD instruction. 3048 // For CMPrr(r1,r2), we are looking for SUB(r1,r2), SUB(r2,r1), or 3049 // ADDr[ri](r1, r2, X). 3050 // For CMPri(r1, CmpValue), we are looking for SUBri(r1, CmpValue). 3051 MachineInstr *SubAdd = nullptr; 3052 if (SrcReg2 != 0) 3053 // MI is not a candidate for CMPrr. 3054 MI = nullptr; 3055 else if (MI->getParent() != CmpInstr.getParent() || CmpValue != 0) { 3056 // Conservatively refuse to convert an instruction which isn't in the same 3057 // BB as the comparison. 3058 // For CMPri w/ CmpValue != 0, a SubAdd may still be a candidate. 3059 // Thus we cannot return here. 3060 if (CmpInstr.getOpcode() == ARM::CMPri || 3061 CmpInstr.getOpcode() == ARM::t2CMPri || 3062 CmpInstr.getOpcode() == ARM::tCMPi8) 3063 MI = nullptr; 3064 else 3065 return false; 3066 } 3067 3068 bool IsThumb1 = false; 3069 if (MI && !isOptimizeCompareCandidate(MI, IsThumb1)) 3070 return false; 3071 3072 // We also want to do this peephole for cases like this: if (a*b == 0), 3073 // and optimise away the CMP instruction from the generated code sequence: 3074 // MULS, MOVS, MOVS, CMP. Here the MOVS instructions load the boolean values 3075 // resulting from the select instruction, but these MOVS instructions for 3076 // Thumb1 (V6M) are flag setting and are thus preventing this optimisation. 
3077 // However, if we only have MOVS instructions in between the CMP and the 3078 // other instruction (the MULS in this example), then the CPSR is dead so we 3079 // can safely reorder the sequence into: MOVS, MOVS, MULS, CMP. We do this 3080 // reordering and then continue the analysis hoping we can eliminate the 3081 // CMP. This peephole works on the vregs, so is still in SSA form. As a 3082 // consequence, the movs won't redefine/kill the MUL operands which would 3083 // make this reordering illegal. 3084 const TargetRegisterInfo *TRI = &getRegisterInfo(); 3085 if (MI && IsThumb1) { 3086 --I; 3087 if (I != E && !MI->readsRegister(ARM::CPSR, TRI)) { 3088 bool CanReorder = true; 3089 for (; I != E; --I) { 3090 if (I->getOpcode() != ARM::tMOVi8) { 3091 CanReorder = false; 3092 break; 3093 } 3094 } 3095 if (CanReorder) { 3096 MI = MI->removeFromParent(); 3097 E = CmpInstr; 3098 CmpInstr.getParent()->insert(E, MI); 3099 } 3100 } 3101 I = CmpInstr; 3102 E = MI; 3103 } 3104 3105 // Check that CPSR isn't set between the comparison instruction and the one we 3106 // want to change. At the same time, search for SubAdd. 3107 bool SubAddIsThumb1 = false; 3108 do { 3109 const MachineInstr &Instr = *--I; 3110 3111 // Check whether CmpInstr can be made redundant by the current instruction. 3112 if (isRedundantFlagInstr(&CmpInstr, SrcReg, SrcReg2, CmpValue, &Instr, 3113 SubAddIsThumb1)) { 3114 SubAdd = &*I; 3115 break; 3116 } 3117 3118 // Allow E (which was initially MI) to be SubAdd but do not search before E. 3119 if (I == E) 3120 break; 3121 3122 if (Instr.modifiesRegister(ARM::CPSR, TRI) || 3123 Instr.readsRegister(ARM::CPSR, TRI)) 3124 // This instruction modifies or uses CPSR after the one we want to 3125 // change. We can't do this transformation. 3126 return false; 3127 3128 if (I == B) { 3129 // In some cases, we scan the use-list of an instruction for an AND; 3130 // that AND is in the same BB, but may not be scheduled before the 3131 // corresponding TST. In that case, bail out. 3132 // 3133 // FIXME: We could try to reschedule the AND. 3134 return false; 3135 } 3136 } while (true); 3137 3138 // Return false if no candidates exist. 3139 if (!MI && !SubAdd) 3140 return false; 3141 3142 // If we found a SubAdd, use it as it will be closer to the CMP 3143 if (SubAdd) { 3144 MI = SubAdd; 3145 IsThumb1 = SubAddIsThumb1; 3146 } 3147 3148 // We can't use a predicated instruction - it doesn't always write the flags. 3149 if (isPredicated(*MI)) 3150 return false; 3151 3152 // Scan forward for the use of CPSR 3153 // When checking against MI: if it's a conditional code that requires 3154 // checking of the V bit or C bit, then this is not safe to do. 3155 // It is safe to remove CmpInstr if CPSR is redefined or killed. 3156 // If we are done with the basic block, we need to check whether CPSR is 3157 // live-out. 3158 SmallVector<std::pair<MachineOperand*, ARMCC::CondCodes>, 4> 3159 OperandsToUpdate; 3160 bool isSafe = false; 3161 I = CmpInstr; 3162 E = CmpInstr.getParent()->end(); 3163 while (!isSafe && ++I != E) { 3164 const MachineInstr &Instr = *I; 3165 for (unsigned IO = 0, EO = Instr.getNumOperands(); 3166 !isSafe && IO != EO; ++IO) { 3167 const MachineOperand &MO = Instr.getOperand(IO); 3168 if (MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) { 3169 isSafe = true; 3170 break; 3171 } 3172 if (!MO.isReg() || MO.getReg() != ARM::CPSR) 3173 continue; 3174 if (MO.isDef()) { 3175 isSafe = true; 3176 break; 3177 } 3178 // Condition code is after the operand before CPSR except for VSELs. 
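      // That is, for a CPSR use at operand index IO, the condition immediate
      // is read from operand IO - 1 below.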
3179 ARMCC::CondCodes CC; 3180 bool IsInstrVSel = true; 3181 switch (Instr.getOpcode()) { 3182 default: 3183 IsInstrVSel = false; 3184 CC = (ARMCC::CondCodes)Instr.getOperand(IO - 1).getImm(); 3185 break; 3186 case ARM::VSELEQD: 3187 case ARM::VSELEQS: 3188 case ARM::VSELEQH: 3189 CC = ARMCC::EQ; 3190 break; 3191 case ARM::VSELGTD: 3192 case ARM::VSELGTS: 3193 case ARM::VSELGTH: 3194 CC = ARMCC::GT; 3195 break; 3196 case ARM::VSELGED: 3197 case ARM::VSELGES: 3198 case ARM::VSELGEH: 3199 CC = ARMCC::GE; 3200 break; 3201 case ARM::VSELVSD: 3202 case ARM::VSELVSS: 3203 case ARM::VSELVSH: 3204 CC = ARMCC::VS; 3205 break; 3206 } 3207 3208 if (SubAdd) { 3209 // If we have SUB(r1, r2) and CMP(r2, r1), the condition code based 3210 // on CMP needs to be updated to be based on SUB. 3211 // If we have ADD(r1, r2, X) and CMP(r1, r2), the condition code also 3212 // needs to be modified. 3213 // Push the condition code operands to OperandsToUpdate. 3214 // If it is safe to remove CmpInstr, the condition code of these 3215 // operands will be modified. 3216 unsigned Opc = SubAdd->getOpcode(); 3217 bool IsSub = Opc == ARM::SUBrr || Opc == ARM::t2SUBrr || 3218 Opc == ARM::SUBri || Opc == ARM::t2SUBri || 3219 Opc == ARM::tSUBrr || Opc == ARM::tSUBi3 || 3220 Opc == ARM::tSUBi8; 3221 unsigned OpI = Opc != ARM::tSUBrr ? 1 : 2; 3222 if (!IsSub || 3223 (SrcReg2 != 0 && SubAdd->getOperand(OpI).getReg() == SrcReg2 && 3224 SubAdd->getOperand(OpI + 1).getReg() == SrcReg)) { 3225 // VSel doesn't support condition code update. 3226 if (IsInstrVSel) 3227 return false; 3228 // Ensure we can swap the condition. 3229 ARMCC::CondCodes NewCC = (IsSub ? getSwappedCondition(CC) : getCmpToAddCondition(CC)); 3230 if (NewCC == ARMCC::AL) 3231 return false; 3232 OperandsToUpdate.push_back( 3233 std::make_pair(&((*I).getOperand(IO - 1)), NewCC)); 3234 } 3235 } else { 3236 // No SubAdd, so this is x = <op> y, z; cmp x, 0. 3237 switch (CC) { 3238 case ARMCC::EQ: // Z 3239 case ARMCC::NE: // Z 3240 case ARMCC::MI: // N 3241 case ARMCC::PL: // N 3242 case ARMCC::AL: // none 3243 // CPSR can be used multiple times, we should continue. 3244 break; 3245 case ARMCC::HS: // C 3246 case ARMCC::LO: // C 3247 case ARMCC::VS: // V 3248 case ARMCC::VC: // V 3249 case ARMCC::HI: // C Z 3250 case ARMCC::LS: // C Z 3251 case ARMCC::GE: // N V 3252 case ARMCC::LT: // N V 3253 case ARMCC::GT: // Z N V 3254 case ARMCC::LE: // Z N V 3255 // The instruction uses the V bit or C bit which is not safe. 3256 return false; 3257 } 3258 } 3259 } 3260 } 3261 3262 // If CPSR is not killed nor re-defined, we should check whether it is 3263 // live-out. If it is live-out, do not optimize. 3264 if (!isSafe) { 3265 MachineBasicBlock *MBB = CmpInstr.getParent(); 3266 for (MachineBasicBlock *Succ : MBB->successors()) 3267 if (Succ->isLiveIn(ARM::CPSR)) 3268 return false; 3269 } 3270 3271 // Toggle the optional operand to CPSR (if it exists - in Thumb1 we always 3272 // set CPSR so this is represented as an explicit output) 3273 if (!IsThumb1) { 3274 MI->getOperand(5).setReg(ARM::CPSR); 3275 MI->getOperand(5).setIsDef(true); 3276 } 3277 assert(!isPredicated(*MI) && "Can't use flags from predicated instruction"); 3278 CmpInstr.eraseFromParent(); 3279 3280 // Modify the condition code of operands in OperandsToUpdate. 3281 // Since we have SUB(r1, r2) and CMP(r2, r1), the condition code needs to 3282 // be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc. 
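  // E.g. a GT user of the old "CMP r2, r1" becomes LT once it reads the
  // flags set by "SUBS r1, r2" instead.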
3283 for (unsigned i = 0, e = OperandsToUpdate.size(); i < e; i++) 3284 OperandsToUpdate[i].first->setImm(OperandsToUpdate[i].second); 3285 3286 MI->clearRegisterDeads(ARM::CPSR); 3287 3288 return true; 3289 } 3290 3291 bool ARMBaseInstrInfo::shouldSink(const MachineInstr &MI) const { 3292 // Do not sink MI if it might be used to optimize a redundant compare. 3293 // We heuristically only look at the instruction immediately following MI to 3294 // avoid potentially searching the entire basic block. 3295 if (isPredicated(MI)) 3296 return true; 3297 MachineBasicBlock::const_iterator Next = &MI; 3298 ++Next; 3299 Register SrcReg, SrcReg2; 3300 int64_t CmpMask, CmpValue; 3301 bool IsThumb1; 3302 if (Next != MI.getParent()->end() && 3303 analyzeCompare(*Next, SrcReg, SrcReg2, CmpMask, CmpValue) && 3304 isRedundantFlagInstr(&*Next, SrcReg, SrcReg2, CmpValue, &MI, IsThumb1)) 3305 return false; 3306 return true; 3307 } 3308 3309 bool ARMBaseInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, 3310 Register Reg, 3311 MachineRegisterInfo *MRI) const { 3312 // Fold large immediates into add, sub, or, xor. 3313 unsigned DefOpc = DefMI.getOpcode(); 3314 if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm) 3315 return false; 3316 if (!DefMI.getOperand(1).isImm()) 3317 // Could be t2MOVi32imm @xx 3318 return false; 3319 3320 if (!MRI->hasOneNonDBGUse(Reg)) 3321 return false; 3322 3323 const MCInstrDesc &DefMCID = DefMI.getDesc(); 3324 if (DefMCID.hasOptionalDef()) { 3325 unsigned NumOps = DefMCID.getNumOperands(); 3326 const MachineOperand &MO = DefMI.getOperand(NumOps - 1); 3327 if (MO.getReg() == ARM::CPSR && !MO.isDead()) 3328 // If DefMI defines CPSR and it is not dead, it's obviously not safe 3329 // to delete DefMI. 3330 return false; 3331 } 3332 3333 const MCInstrDesc &UseMCID = UseMI.getDesc(); 3334 if (UseMCID.hasOptionalDef()) { 3335 unsigned NumOps = UseMCID.getNumOperands(); 3336 if (UseMI.getOperand(NumOps - 1).getReg() == ARM::CPSR) 3337 // If the instruction sets the flag, do not attempt this optimization 3338 // since it may change the semantics of the code. 3339 return false; 3340 } 3341 3342 unsigned UseOpc = UseMI.getOpcode(); 3343 unsigned NewUseOpc = 0; 3344 uint32_t ImmVal = (uint32_t)DefMI.getOperand(1).getImm(); 3345 uint32_t SOImmValV1 = 0, SOImmValV2 = 0; 3346 bool Commute = false; 3347 switch (UseOpc) { 3348 default: return false; 3349 case ARM::SUBrr: 3350 case ARM::ADDrr: 3351 case ARM::ORRrr: 3352 case ARM::EORrr: 3353 case ARM::t2SUBrr: 3354 case ARM::t2ADDrr: 3355 case ARM::t2ORRrr: 3356 case ARM::t2EORrr: { 3357 Commute = UseMI.getOperand(2).getReg() != Reg; 3358 switch (UseOpc) { 3359 default: break; 3360 case ARM::ADDrr: 3361 case ARM::SUBrr: 3362 if (UseOpc == ARM::SUBrr && Commute) 3363 return false; 3364 3365 // ADD/SUB are special because they're essentially the same operation, so 3366 // we can handle a larger range of immediates. 3367 if (ARM_AM::isSOImmTwoPartVal(ImmVal)) 3368 NewUseOpc = UseOpc == ARM::ADDrr ? ARM::ADDri : ARM::SUBri; 3369 else if (ARM_AM::isSOImmTwoPartVal(-ImmVal)) { 3370 ImmVal = -ImmVal; 3371 NewUseOpc = UseOpc == ARM::ADDrr ? 
ARM::SUBri : ARM::ADDri; 3372 } else 3373 return false; 3374 SOImmValV1 = (uint32_t)ARM_AM::getSOImmTwoPartFirst(ImmVal); 3375 SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal); 3376 break; 3377 case ARM::ORRrr: 3378 case ARM::EORrr: 3379 if (!ARM_AM::isSOImmTwoPartVal(ImmVal)) 3380 return false; 3381 SOImmValV1 = (uint32_t)ARM_AM::getSOImmTwoPartFirst(ImmVal); 3382 SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal); 3383 switch (UseOpc) { 3384 default: break; 3385 case ARM::ORRrr: NewUseOpc = ARM::ORRri; break; 3386 case ARM::EORrr: NewUseOpc = ARM::EORri; break; 3387 } 3388 break; 3389 case ARM::t2ADDrr: 3390 case ARM::t2SUBrr: { 3391 if (UseOpc == ARM::t2SUBrr && Commute) 3392 return false; 3393 3394 // ADD/SUB are special because they're essentially the same operation, so 3395 // we can handle a larger range of immediates. 3396 const bool ToSP = DefMI.getOperand(0).getReg() == ARM::SP; 3397 const unsigned t2ADD = ToSP ? ARM::t2ADDspImm : ARM::t2ADDri; 3398 const unsigned t2SUB = ToSP ? ARM::t2SUBspImm : ARM::t2SUBri; 3399 if (ARM_AM::isT2SOImmTwoPartVal(ImmVal)) 3400 NewUseOpc = UseOpc == ARM::t2ADDrr ? t2ADD : t2SUB; 3401 else if (ARM_AM::isT2SOImmTwoPartVal(-ImmVal)) { 3402 ImmVal = -ImmVal; 3403 NewUseOpc = UseOpc == ARM::t2ADDrr ? t2SUB : t2ADD; 3404 } else 3405 return false; 3406 SOImmValV1 = (uint32_t)ARM_AM::getT2SOImmTwoPartFirst(ImmVal); 3407 SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal); 3408 break; 3409 } 3410 case ARM::t2ORRrr: 3411 case ARM::t2EORrr: 3412 if (!ARM_AM::isT2SOImmTwoPartVal(ImmVal)) 3413 return false; 3414 SOImmValV1 = (uint32_t)ARM_AM::getT2SOImmTwoPartFirst(ImmVal); 3415 SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal); 3416 switch (UseOpc) { 3417 default: break; 3418 case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri; break; 3419 case ARM::t2EORrr: NewUseOpc = ARM::t2EORri; break; 3420 } 3421 break; 3422 } 3423 } 3424 } 3425 3426 unsigned OpIdx = Commute ? 2 : 1; 3427 Register Reg1 = UseMI.getOperand(OpIdx).getReg(); 3428 bool isKill = UseMI.getOperand(OpIdx).isKill(); 3429 const TargetRegisterClass *TRC = MRI->getRegClass(Reg); 3430 Register NewReg = MRI->createVirtualRegister(TRC); 3431 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), get(NewUseOpc), 3432 NewReg) 3433 .addReg(Reg1, getKillRegState(isKill)) 3434 .addImm(SOImmValV1) 3435 .add(predOps(ARMCC::AL)) 3436 .add(condCodeOp()); 3437 UseMI.setDesc(get(NewUseOpc)); 3438 UseMI.getOperand(1).setReg(NewReg); 3439 UseMI.getOperand(1).setIsKill(); 3440 UseMI.getOperand(2).ChangeToImmediate(SOImmValV2); 3441 DefMI.eraseFromParent(); 3442 // FIXME: t2ADDrr should be split, as different rules apply when writing to SP, 3443 // just as t2ADDri was split into [t2ADDri, t2ADDspImm]. 3444 // Then the code below will not be needed, as the input/output register 3445 // classes will be rgpr or gprSP.
3446 // For now, we fix the UseMI operand explicitly here: 3447 switch(NewUseOpc){ 3448 case ARM::t2ADDspImm: 3449 case ARM::t2SUBspImm: 3450 case ARM::t2ADDri: 3451 case ARM::t2SUBri: 3452 MRI->constrainRegClass(UseMI.getOperand(0).getReg(), TRC); 3453 } 3454 return true; 3455 } 3456 3457 static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData, 3458 const MachineInstr &MI) { 3459 switch (MI.getOpcode()) { 3460 default: { 3461 const MCInstrDesc &Desc = MI.getDesc(); 3462 int UOps = ItinData->getNumMicroOps(Desc.getSchedClass()); 3463 assert(UOps >= 0 && "bad # UOps"); 3464 return UOps; 3465 } 3466 3467 case ARM::LDRrs: 3468 case ARM::LDRBrs: 3469 case ARM::STRrs: 3470 case ARM::STRBrs: { 3471 unsigned ShOpVal = MI.getOperand(3).getImm(); 3472 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 3473 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 3474 if (!isSub && 3475 (ShImm == 0 || 3476 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 3477 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 3478 return 1; 3479 return 2; 3480 } 3481 3482 case ARM::LDRH: 3483 case ARM::STRH: { 3484 if (!MI.getOperand(2).getReg()) 3485 return 1; 3486 3487 unsigned ShOpVal = MI.getOperand(3).getImm(); 3488 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 3489 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 3490 if (!isSub && 3491 (ShImm == 0 || 3492 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 3493 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 3494 return 1; 3495 return 2; 3496 } 3497 3498 case ARM::LDRSB: 3499 case ARM::LDRSH: 3500 return (ARM_AM::getAM3Op(MI.getOperand(3).getImm()) == ARM_AM::sub) ? 3 : 2; 3501 3502 case ARM::LDRSB_POST: 3503 case ARM::LDRSH_POST: { 3504 Register Rt = MI.getOperand(0).getReg(); 3505 Register Rm = MI.getOperand(3).getReg(); 3506 return (Rt == Rm) ? 4 : 3; 3507 } 3508 3509 case ARM::LDR_PRE_REG: 3510 case ARM::LDRB_PRE_REG: { 3511 Register Rt = MI.getOperand(0).getReg(); 3512 Register Rm = MI.getOperand(3).getReg(); 3513 if (Rt == Rm) 3514 return 3; 3515 unsigned ShOpVal = MI.getOperand(4).getImm(); 3516 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 3517 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 3518 if (!isSub && 3519 (ShImm == 0 || 3520 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 3521 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 3522 return 2; 3523 return 3; 3524 } 3525 3526 case ARM::STR_PRE_REG: 3527 case ARM::STRB_PRE_REG: { 3528 unsigned ShOpVal = MI.getOperand(4).getImm(); 3529 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 3530 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 3531 if (!isSub && 3532 (ShImm == 0 || 3533 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 3534 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 3535 return 2; 3536 return 3; 3537 } 3538 3539 case ARM::LDRH_PRE: 3540 case ARM::STRH_PRE: { 3541 Register Rt = MI.getOperand(0).getReg(); 3542 Register Rm = MI.getOperand(3).getReg(); 3543 if (!Rm) 3544 return 2; 3545 if (Rt == Rm) 3546 return 3; 3547 return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 3 : 2; 3548 } 3549 3550 case ARM::LDR_POST_REG: 3551 case ARM::LDRB_POST_REG: 3552 case ARM::LDRH_POST: { 3553 Register Rt = MI.getOperand(0).getReg(); 3554 Register Rm = MI.getOperand(3).getReg(); 3555 return (Rt == Rm) ? 
3 : 2; 3556 } 3557 3558 case ARM::LDR_PRE_IMM: 3559 case ARM::LDRB_PRE_IMM: 3560 case ARM::LDR_POST_IMM: 3561 case ARM::LDRB_POST_IMM: 3562 case ARM::STRB_POST_IMM: 3563 case ARM::STRB_POST_REG: 3564 case ARM::STRB_PRE_IMM: 3565 case ARM::STRH_POST: 3566 case ARM::STR_POST_IMM: 3567 case ARM::STR_POST_REG: 3568 case ARM::STR_PRE_IMM: 3569 return 2; 3570 3571 case ARM::LDRSB_PRE: 3572 case ARM::LDRSH_PRE: { 3573 Register Rm = MI.getOperand(3).getReg(); 3574 if (Rm == 0) 3575 return 3; 3576 Register Rt = MI.getOperand(0).getReg(); 3577 if (Rt == Rm) 3578 return 4; 3579 unsigned ShOpVal = MI.getOperand(4).getImm(); 3580 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 3581 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 3582 if (!isSub && 3583 (ShImm == 0 || 3584 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 3585 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 3586 return 3; 3587 return 4; 3588 } 3589 3590 case ARM::LDRD: { 3591 Register Rt = MI.getOperand(0).getReg(); 3592 Register Rn = MI.getOperand(2).getReg(); 3593 Register Rm = MI.getOperand(3).getReg(); 3594 if (Rm) 3595 return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 4 3596 : 3; 3597 return (Rt == Rn) ? 3 : 2; 3598 } 3599 3600 case ARM::STRD: { 3601 Register Rm = MI.getOperand(3).getReg(); 3602 if (Rm) 3603 return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 4 3604 : 3; 3605 return 2; 3606 } 3607 3608 case ARM::LDRD_POST: 3609 case ARM::t2LDRD_POST: 3610 return 3; 3611 3612 case ARM::STRD_POST: 3613 case ARM::t2STRD_POST: 3614 return 4; 3615 3616 case ARM::LDRD_PRE: { 3617 Register Rt = MI.getOperand(0).getReg(); 3618 Register Rn = MI.getOperand(3).getReg(); 3619 Register Rm = MI.getOperand(4).getReg(); 3620 if (Rm) 3621 return (ARM_AM::getAM3Op(MI.getOperand(5).getImm()) == ARM_AM::sub) ? 5 3622 : 4; 3623 return (Rt == Rn) ? 4 : 3; 3624 } 3625 3626 case ARM::t2LDRD_PRE: { 3627 Register Rt = MI.getOperand(0).getReg(); 3628 Register Rn = MI.getOperand(3).getReg(); 3629 return (Rt == Rn) ? 4 : 3; 3630 } 3631 3632 case ARM::STRD_PRE: { 3633 Register Rm = MI.getOperand(4).getReg(); 3634 if (Rm) 3635 return (ARM_AM::getAM3Op(MI.getOperand(5).getImm()) == ARM_AM::sub) ? 5 3636 : 4; 3637 return 3; 3638 } 3639 3640 case ARM::t2STRD_PRE: 3641 return 3; 3642 3643 case ARM::t2LDR_POST: 3644 case ARM::t2LDRB_POST: 3645 case ARM::t2LDRB_PRE: 3646 case ARM::t2LDRSBi12: 3647 case ARM::t2LDRSBi8: 3648 case ARM::t2LDRSBpci: 3649 case ARM::t2LDRSBs: 3650 case ARM::t2LDRH_POST: 3651 case ARM::t2LDRH_PRE: 3652 case ARM::t2LDRSBT: 3653 case ARM::t2LDRSB_POST: 3654 case ARM::t2LDRSB_PRE: 3655 case ARM::t2LDRSH_POST: 3656 case ARM::t2LDRSH_PRE: 3657 case ARM::t2LDRSHi12: 3658 case ARM::t2LDRSHi8: 3659 case ARM::t2LDRSHpci: 3660 case ARM::t2LDRSHs: 3661 return 2; 3662 3663 case ARM::t2LDRDi8: { 3664 Register Rt = MI.getOperand(0).getReg(); 3665 Register Rn = MI.getOperand(2).getReg(); 3666 return (Rt == Rn) ? 3 : 2; 3667 } 3668 3669 case ARM::t2STRB_POST: 3670 case ARM::t2STRB_PRE: 3671 case ARM::t2STRBs: 3672 case ARM::t2STRDi8: 3673 case ARM::t2STRH_POST: 3674 case ARM::t2STRH_PRE: 3675 case ARM::t2STRHs: 3676 case ARM::t2STR_POST: 3677 case ARM::t2STR_PRE: 3678 case ARM::t2STRs: 3679 return 2; 3680 } 3681 } 3682 3683 // Return the number of 32-bit words loaded by LDM or stored by STM. If this 3684 // can't be easily determined return 0 (missing MachineMemOperand). 
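// For instance (sizes invented for illustration): an LDM carrying two
// MachineMemOperands of 8 and 4 bytes would give Size == 12 below, so
// getNumLDMAddresses would report 12 / 4 == 3 words.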
3685 // 3686 // FIXME: The current MachineInstr design does not support relying on machine 3687 // mem operands to determine the width of a memory access. Instead, we expect 3688 // the target to provide this information based on the instruction opcode and 3689 // operands. However, using MachineMemOperand is the best solution now for 3690 // two reasons: 3691 // 3692 // 1) getNumMicroOps tries to infer LDM memory width from the total number of MI 3693 // operands. This is much more dangerous than using the MachineMemOperand 3694 // sizes because CodeGen passes can insert/remove optional machine operands. In 3695 // fact, it's totally incorrect for preRA passes and appears to be wrong for 3696 // postRA passes as well. 3697 // 3698 // 2) getNumLDMAddresses is only used by the scheduling machine model and any 3699 // machine model that calls this should handle the unknown (zero size) case. 3700 // 3701 // Long term, we should require a target hook that verifies MachineMemOperand 3702 // sizes during MC lowering. That target hook should be local to MC lowering 3703 // because we can't ensure that it is aware of other MI forms. Doing this will 3704 // ensure that MachineMemOperands are correctly propagated through all passes. 3705 unsigned ARMBaseInstrInfo::getNumLDMAddresses(const MachineInstr &MI) const { 3706 unsigned Size = 0; 3707 for (MachineInstr::mmo_iterator I = MI.memoperands_begin(), 3708 E = MI.memoperands_end(); 3709 I != E; ++I) { 3710 Size += (*I)->getSize(); 3711 } 3712 // FIXME: The scheduler currently can't handle values larger than 16. But 3713 // the values can actually go up to 32 for floating-point load/store 3714 // multiple (VLDMIA etc.). Also, the way this code is reasoning about memory 3715 // operations isn't right; we could end up with "extra" memory operands for 3716 // various reasons, like tail merge merging two memory operations. 3717 return std::min(Size / 4, 16U); 3718 } 3719 3720 static unsigned getNumMicroOpsSingleIssuePlusExtras(unsigned Opc, 3721 unsigned NumRegs) { 3722 unsigned UOps = 1 + NumRegs; // 1 for address computation. 3723 switch (Opc) { 3724 default: 3725 break; 3726 case ARM::VLDMDIA_UPD: 3727 case ARM::VLDMDDB_UPD: 3728 case ARM::VLDMSIA_UPD: 3729 case ARM::VLDMSDB_UPD: 3730 case ARM::VSTMDIA_UPD: 3731 case ARM::VSTMDDB_UPD: 3732 case ARM::VSTMSIA_UPD: 3733 case ARM::VSTMSDB_UPD: 3734 case ARM::LDMIA_UPD: 3735 case ARM::LDMDA_UPD: 3736 case ARM::LDMDB_UPD: 3737 case ARM::LDMIB_UPD: 3738 case ARM::STMIA_UPD: 3739 case ARM::STMDA_UPD: 3740 case ARM::STMDB_UPD: 3741 case ARM::STMIB_UPD: 3742 case ARM::tLDMIA_UPD: 3743 case ARM::tSTMIA_UPD: 3744 case ARM::t2LDMIA_UPD: 3745 case ARM::t2LDMDB_UPD: 3746 case ARM::t2STMIA_UPD: 3747 case ARM::t2STMDB_UPD: 3748 ++UOps; // One for base register writeback. 3749 break; 3750 case ARM::LDMIA_RET: 3751 case ARM::tPOP_RET: 3752 case ARM::t2LDMIA_RET: 3753 UOps += 2; // One for base reg wb, one for write to pc. 
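// Worked example of the accounting above (register list invented): for a
// "pop {r4, r5, pc}" style tPOP_RET with NumRegs == 3, this helper returns
// 1 (address) + 3 (registers) + 2 (SP writeback + PC write), i.e. 6 uops.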
3754 break; 3755 } 3756 return UOps; 3757 } 3758 3759 unsigned ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData, 3760 const MachineInstr &MI) const { 3761 if (!ItinData || ItinData->isEmpty()) 3762 return 1; 3763 3764 const MCInstrDesc &Desc = MI.getDesc(); 3765 unsigned Class = Desc.getSchedClass(); 3766 int ItinUOps = ItinData->getNumMicroOps(Class); 3767 if (ItinUOps >= 0) { 3768 if (Subtarget.isSwift() && (Desc.mayLoad() || Desc.mayStore())) 3769 return getNumMicroOpsSwiftLdSt(ItinData, MI); 3770 3771 return ItinUOps; 3772 } 3773 3774 unsigned Opc = MI.getOpcode(); 3775 switch (Opc) { 3776 default: 3777 llvm_unreachable("Unexpected multi-uops instruction!"); 3778 case ARM::VLDMQIA: 3779 case ARM::VSTMQIA: 3780 return 2; 3781 3782 // The number of uOps for load / store multiple is determined by the number 3783 // of registers. 3784 // 3785 // On Cortex-A8, each pair of register loads / stores can be scheduled on the 3786 // same cycle. The scheduling for the first load / store must be done 3787 // separately by assuming the address is not 64-bit aligned. 3788 // 3789 // On Cortex-A9, the formula is simply (#reg / 2) + (#reg % 2). If the address 3790 // is not 64-bit aligned, then the AGU would take an extra cycle. For VFP / NEON 3791 // load / store multiple, the formula is (#reg / 2) + (#reg % 2) + 1. 3792 case ARM::VLDMDIA: 3793 case ARM::VLDMDIA_UPD: 3794 case ARM::VLDMDDB_UPD: 3795 case ARM::VLDMSIA: 3796 case ARM::VLDMSIA_UPD: 3797 case ARM::VLDMSDB_UPD: 3798 case ARM::VSTMDIA: 3799 case ARM::VSTMDIA_UPD: 3800 case ARM::VSTMDDB_UPD: 3801 case ARM::VSTMSIA: 3802 case ARM::VSTMSIA_UPD: 3803 case ARM::VSTMSDB_UPD: { 3804 unsigned NumRegs = MI.getNumOperands() - Desc.getNumOperands(); 3805 return (NumRegs / 2) + (NumRegs % 2) + 1; 3806 } 3807 3808 case ARM::LDMIA_RET: 3809 case ARM::LDMIA: 3810 case ARM::LDMDA: 3811 case ARM::LDMDB: 3812 case ARM::LDMIB: 3813 case ARM::LDMIA_UPD: 3814 case ARM::LDMDA_UPD: 3815 case ARM::LDMDB_UPD: 3816 case ARM::LDMIB_UPD: 3817 case ARM::STMIA: 3818 case ARM::STMDA: 3819 case ARM::STMDB: 3820 case ARM::STMIB: 3821 case ARM::STMIA_UPD: 3822 case ARM::STMDA_UPD: 3823 case ARM::STMDB_UPD: 3824 case ARM::STMIB_UPD: 3825 case ARM::tLDMIA: 3826 case ARM::tLDMIA_UPD: 3827 case ARM::tSTMIA_UPD: 3828 case ARM::tPOP_RET: 3829 case ARM::tPOP: 3830 case ARM::tPUSH: 3831 case ARM::t2LDMIA_RET: 3832 case ARM::t2LDMIA: 3833 case ARM::t2LDMDB: 3834 case ARM::t2LDMIA_UPD: 3835 case ARM::t2LDMDB_UPD: 3836 case ARM::t2STMIA: 3837 case ARM::t2STMDB: 3838 case ARM::t2STMIA_UPD: 3839 case ARM::t2STMDB_UPD: { 3840 unsigned NumRegs = MI.getNumOperands() - Desc.getNumOperands() + 1; 3841 switch (Subtarget.getLdStMultipleTiming()) { 3842 case ARMSubtarget::SingleIssuePlusExtras: 3843 return getNumMicroOpsSingleIssuePlusExtras(Opc, NumRegs); 3844 case ARMSubtarget::SingleIssue: 3845 // Assume the worst. 3846 return NumRegs; 3847 case ARMSubtarget::DoubleIssue: { 3848 if (NumRegs < 4) 3849 return 2; 3850 // 4 registers would be issued: 2, 2. 3851 // 5 registers would be issued: 2, 2, 1. 3852 unsigned UOps = (NumRegs / 2); 3853 if (NumRegs % 2) 3854 ++UOps; 3855 return UOps; 3856 } 3857 case ARMSubtarget::DoubleIssueCheckUnalignedAccess: { 3858 unsigned UOps = (NumRegs / 2); 3859 // If there is an odd number of registers or the access is not 64-bit 3860 // aligned, it takes an extra AGU (Address Generation Unit) cycle.
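// Sketch with invented numbers: "ldmia r0, {r1-r5}" gives NumRegs == 5, so
// UOps starts at 5 / 2 == 2; the odd register count (or an address that is
// not 8-byte aligned) then costs the extra AGU cycle, for a total of 3 uops.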
3861 if ((NumRegs % 2) || !MI.hasOneMemOperand() || 3862 (*MI.memoperands_begin())->getAlign() < Align(8)) 3863 ++UOps; 3864 return UOps; 3865 } 3866 } 3867 } 3868 } 3869 llvm_unreachable("Didn't find the number of microops"); 3870 } 3871 3872 int 3873 ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData, 3874 const MCInstrDesc &DefMCID, 3875 unsigned DefClass, 3876 unsigned DefIdx, unsigned DefAlign) const { 3877 int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1; 3878 if (RegNo <= 0) 3879 // Def is the address writeback. 3880 return ItinData->getOperandCycle(DefClass, DefIdx); 3881 3882 int DefCycle; 3883 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) { 3884 // (regno / 2) + (regno % 2) + 1 3885 DefCycle = RegNo / 2 + 1; 3886 if (RegNo % 2) 3887 ++DefCycle; 3888 } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) { 3889 DefCycle = RegNo; 3890 bool isSLoad = false; 3891 3892 switch (DefMCID.getOpcode()) { 3893 default: break; 3894 case ARM::VLDMSIA: 3895 case ARM::VLDMSIA_UPD: 3896 case ARM::VLDMSDB_UPD: 3897 isSLoad = true; 3898 break; 3899 } 3900 3901 // If there is an odd number of 'S' registers or if it's not 64-bit aligned, 3902 // then it takes an extra cycle. 3903 if ((isSLoad && (RegNo % 2)) || DefAlign < 8) 3904 ++DefCycle; 3905 } else { 3906 // Assume the worst. 3907 DefCycle = RegNo + 2; 3908 } 3909 3910 return DefCycle; 3911 } 3912 3913 int 3914 ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData, 3915 const MCInstrDesc &DefMCID, 3916 unsigned DefClass, 3917 unsigned DefIdx, unsigned DefAlign) const { 3918 int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1; 3919 if (RegNo <= 0) 3920 // Def is the address writeback. 3921 return ItinData->getOperandCycle(DefClass, DefIdx); 3922 3923 int DefCycle; 3924 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) { 3925 // 4 registers would be issued: 1, 2, 1. 3926 // 5 registers would be issued: 1, 2, 2. 3927 DefCycle = RegNo / 2; 3928 if (DefCycle < 1) 3929 DefCycle = 1; 3930 // Result latency is issue cycle + 2: E2. 3931 DefCycle += 2; 3932 } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) { 3933 DefCycle = (RegNo / 2); 3934 // If there is an odd number of registers or if it's not 64-bit aligned, 3935 // then it takes an extra AGU (Address Generation Unit) cycle. 3936 if ((RegNo % 2) || DefAlign < 8) 3937 ++DefCycle; 3938 // Result latency is AGU cycles + 2. 3939 DefCycle += 2; 3940 } else { 3941 // Assume the worst. 3942 DefCycle = RegNo + 2; 3943 } 3944 3945 return DefCycle; 3946 } 3947 3948 int 3949 ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData, 3950 const MCInstrDesc &UseMCID, 3951 unsigned UseClass, 3952 unsigned UseIdx, unsigned UseAlign) const { 3953 int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1; 3954 if (RegNo <= 0) 3955 return ItinData->getOperandCycle(UseClass, UseIdx); 3956 3957 int UseCycle; 3958 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) { 3959 // (regno / 2) + (regno % 2) + 1 3960 UseCycle = RegNo / 2 + 1; 3961 if (RegNo % 2) 3962 ++UseCycle; 3963 } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) { 3964 UseCycle = RegNo; 3965 bool isSStore = false; 3966 3967 switch (UseMCID.getOpcode()) { 3968 default: break; 3969 case ARM::VSTMSIA: 3970 case ARM::VSTMSIA_UPD: 3971 case ARM::VSTMSDB_UPD: 3972 isSStore = true; 3973 break; 3974 } 3975 3976 // If there is an odd number of 'S' registers or if it's not 64-bit aligned, 3977 // then it takes an extra cycle.
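// Illustrative numbers only: for the third 'S' register of a VSTMSIA on an
// A9-like core, RegNo == 3, so UseCycle starts at 3; the odd 'S' count (or
// UseAlign < 8) then adds the extra cycle, giving 4.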
3978 if ((isSStore && (RegNo % 2)) || UseAlign < 8) 3979 ++UseCycle; 3980 } else { 3981 // Assume the worst. 3982 UseCycle = RegNo + 2; 3983 } 3984 3985 return UseCycle; 3986 } 3987 3988 int 3989 ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData, 3990 const MCInstrDesc &UseMCID, 3991 unsigned UseClass, 3992 unsigned UseIdx, unsigned UseAlign) const { 3993 int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1; 3994 if (RegNo <= 0) 3995 return ItinData->getOperandCycle(UseClass, UseIdx); 3996 3997 int UseCycle; 3998 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) { 3999 UseCycle = RegNo / 2; 4000 if (UseCycle < 2) 4001 UseCycle = 2; 4002 // Read in E3. 4003 UseCycle += 2; 4004 } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) { 4005 UseCycle = (RegNo / 2); 4006 // If there is an odd number of registers or if it's not 64-bit aligned, 4007 // then it takes an extra AGU (Address Generation Unit) cycle. 4008 if ((RegNo % 2) || UseAlign < 8) 4009 ++UseCycle; 4010 } else { 4011 // Assume the worst. 4012 UseCycle = 1; 4013 } 4014 return UseCycle; 4015 } 4016 4017 int 4018 ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, 4019 const MCInstrDesc &DefMCID, 4020 unsigned DefIdx, unsigned DefAlign, 4021 const MCInstrDesc &UseMCID, 4022 unsigned UseIdx, unsigned UseAlign) const { 4023 unsigned DefClass = DefMCID.getSchedClass(); 4024 unsigned UseClass = UseMCID.getSchedClass(); 4025 4026 if (DefIdx < DefMCID.getNumDefs() && UseIdx < UseMCID.getNumOperands()) 4027 return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx); 4028 4029 // This may be a def / use of a variable_ops instruction, in which case the 4030 // operand latency might be determinable dynamically. Let the target try to 4031 // figure it out. 4032 int DefCycle = -1; 4033 bool LdmBypass = false; 4034 switch (DefMCID.getOpcode()) { 4035 default: 4036 DefCycle = ItinData->getOperandCycle(DefClass, DefIdx); 4037 break; 4038 4039 case ARM::VLDMDIA: 4040 case ARM::VLDMDIA_UPD: 4041 case ARM::VLDMDDB_UPD: 4042 case ARM::VLDMSIA: 4043 case ARM::VLDMSIA_UPD: 4044 case ARM::VLDMSDB_UPD: 4045 DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign); 4046 break; 4047 4048 case ARM::LDMIA_RET: 4049 case ARM::LDMIA: 4050 case ARM::LDMDA: 4051 case ARM::LDMDB: 4052 case ARM::LDMIB: 4053 case ARM::LDMIA_UPD: 4054 case ARM::LDMDA_UPD: 4055 case ARM::LDMDB_UPD: 4056 case ARM::LDMIB_UPD: 4057 case ARM::tLDMIA: 4058 case ARM::tLDMIA_UPD: 4059 case ARM::tPUSH: 4060 case ARM::t2LDMIA_RET: 4061 case ARM::t2LDMIA: 4062 case ARM::t2LDMDB: 4063 case ARM::t2LDMIA_UPD: 4064 case ARM::t2LDMDB_UPD: 4065 LdmBypass = true; 4066 DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign); 4067 break; 4068 } 4069 4070 if (DefCycle == -1) 4071 // We can't seem to determine the result latency of the def; assume it's 2.
4072 DefCycle = 2; 4073 4074 int UseCycle = -1; 4075 switch (UseMCID.getOpcode()) { 4076 default: 4077 UseCycle = ItinData->getOperandCycle(UseClass, UseIdx); 4078 break; 4079 4080 case ARM::VSTMDIA: 4081 case ARM::VSTMDIA_UPD: 4082 case ARM::VSTMDDB_UPD: 4083 case ARM::VSTMSIA: 4084 case ARM::VSTMSIA_UPD: 4085 case ARM::VSTMSDB_UPD: 4086 UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign); 4087 break; 4088 4089 case ARM::STMIA: 4090 case ARM::STMDA: 4091 case ARM::STMDB: 4092 case ARM::STMIB: 4093 case ARM::STMIA_UPD: 4094 case ARM::STMDA_UPD: 4095 case ARM::STMDB_UPD: 4096 case ARM::STMIB_UPD: 4097 case ARM::tSTMIA_UPD: 4098 case ARM::tPOP_RET: 4099 case ARM::tPOP: 4100 case ARM::t2STMIA: 4101 case ARM::t2STMDB: 4102 case ARM::t2STMIA_UPD: 4103 case ARM::t2STMDB_UPD: 4104 UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign); 4105 break; 4106 } 4107 4108 if (UseCycle == -1) 4109 // Assume it's read in the first stage. 4110 UseCycle = 1; 4111 4112 UseCycle = DefCycle - UseCycle + 1; 4113 if (UseCycle > 0) { 4114 if (LdmBypass) { 4115 // It's a variable_ops instruction so we can't use DefIdx here. Just use 4116 // first def operand. 4117 if (ItinData->hasPipelineForwarding(DefClass, DefMCID.getNumOperands()-1, 4118 UseClass, UseIdx)) 4119 --UseCycle; 4120 } else if (ItinData->hasPipelineForwarding(DefClass, DefIdx, 4121 UseClass, UseIdx)) { 4122 --UseCycle; 4123 } 4124 } 4125 4126 return UseCycle; 4127 } 4128 4129 static const MachineInstr *getBundledDefMI(const TargetRegisterInfo *TRI, 4130 const MachineInstr *MI, unsigned Reg, 4131 unsigned &DefIdx, unsigned &Dist) { 4132 Dist = 0; 4133 4134 MachineBasicBlock::const_iterator I = MI; ++I; 4135 MachineBasicBlock::const_instr_iterator II = std::prev(I.getInstrIterator()); 4136 assert(II->isInsideBundle() && "Empty bundle?"); 4137 4138 int Idx = -1; 4139 while (II->isInsideBundle()) { 4140 Idx = II->findRegisterDefOperandIdx(Reg, false, true, TRI); 4141 if (Idx != -1) 4142 break; 4143 --II; 4144 ++Dist; 4145 } 4146 4147 assert(Idx != -1 && "Cannot find bundled definition!"); 4148 DefIdx = Idx; 4149 return &*II; 4150 } 4151 4152 static const MachineInstr *getBundledUseMI(const TargetRegisterInfo *TRI, 4153 const MachineInstr &MI, unsigned Reg, 4154 unsigned &UseIdx, unsigned &Dist) { 4155 Dist = 0; 4156 4157 MachineBasicBlock::const_instr_iterator II = ++MI.getIterator(); 4158 assert(II->isInsideBundle() && "Empty bundle?"); 4159 MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end(); 4160 4161 // FIXME: This doesn't properly handle multiple uses. 4162 int Idx = -1; 4163 while (II != E && II->isInsideBundle()) { 4164 Idx = II->findRegisterUseOperandIdx(Reg, false, TRI); 4165 if (Idx != -1) 4166 break; 4167 if (II->getOpcode() != ARM::t2IT) 4168 ++Dist; 4169 ++II; 4170 } 4171 4172 if (Idx == -1) { 4173 Dist = 0; 4174 return nullptr; 4175 } 4176 4177 UseIdx = Idx; 4178 return &*II; 4179 } 4180 4181 /// Return the number of cycles to add to (or subtract from) the static 4182 /// itinerary based on the def opcode and alignment. The caller will ensure that 4183 /// adjusted latency is at least one cycle. 4184 static int adjustDefLatency(const ARMSubtarget &Subtarget, 4185 const MachineInstr &DefMI, 4186 const MCInstrDesc &DefMCID, unsigned DefAlign) { 4187 int Adjust = 0; 4188 if (Subtarget.isCortexA8() || Subtarget.isLikeA9() || Subtarget.isCortexA7()) { 4189 // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2] 4190 // variants are one cycle cheaper. 
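// For example (assembly invented for illustration): on these cores
//   ldr r0, [r1, r2]          ; no shift
//   ldr r0, [r1, r2, lsl #2]  ; shift left by 2
// issue one cycle faster than, say, "ldr r0, [r1, r2, lsl #1]", which is
// what the --Adjust below models.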
4191 switch (DefMCID.getOpcode()) { 4192 default: break; 4193 case ARM::LDRrs: 4194 case ARM::LDRBrs: { 4195 unsigned ShOpVal = DefMI.getOperand(3).getImm(); 4196 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 4197 if (ShImm == 0 || 4198 (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)) 4199 --Adjust; 4200 break; 4201 } 4202 case ARM::t2LDRs: 4203 case ARM::t2LDRBs: 4204 case ARM::t2LDRHs: 4205 case ARM::t2LDRSHs: { 4206 // Thumb2 mode: lsl only. 4207 unsigned ShAmt = DefMI.getOperand(3).getImm(); 4208 if (ShAmt == 0 || ShAmt == 2) 4209 --Adjust; 4210 break; 4211 } 4212 } 4213 } else if (Subtarget.isSwift()) { 4214 // FIXME: Properly handle all of the latency adjustments for address 4215 // writeback. 4216 switch (DefMCID.getOpcode()) { 4217 default: break; 4218 case ARM::LDRrs: 4219 case ARM::LDRBrs: { 4220 unsigned ShOpVal = DefMI.getOperand(3).getImm(); 4221 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 4222 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 4223 if (!isSub && 4224 (ShImm == 0 || 4225 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 4226 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 4227 Adjust -= 2; 4228 else if (!isSub && 4229 ShImm == 1 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsr) 4230 --Adjust; 4231 break; 4232 } 4233 case ARM::t2LDRs: 4234 case ARM::t2LDRBs: 4235 case ARM::t2LDRHs: 4236 case ARM::t2LDRSHs: { 4237 // Thumb2 mode: lsl only. 4238 unsigned ShAmt = DefMI.getOperand(3).getImm(); 4239 if (ShAmt == 0 || ShAmt == 1 || ShAmt == 2 || ShAmt == 3) 4240 Adjust -= 2; 4241 break; 4242 } 4243 } 4244 } 4245 4246 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment()) { 4247 switch (DefMCID.getOpcode()) { 4248 default: break; 4249 case ARM::VLD1q8: 4250 case ARM::VLD1q16: 4251 case ARM::VLD1q32: 4252 case ARM::VLD1q64: 4253 case ARM::VLD1q8wb_fixed: 4254 case ARM::VLD1q16wb_fixed: 4255 case ARM::VLD1q32wb_fixed: 4256 case ARM::VLD1q64wb_fixed: 4257 case ARM::VLD1q8wb_register: 4258 case ARM::VLD1q16wb_register: 4259 case ARM::VLD1q32wb_register: 4260 case ARM::VLD1q64wb_register: 4261 case ARM::VLD2d8: 4262 case ARM::VLD2d16: 4263 case ARM::VLD2d32: 4264 case ARM::VLD2q8: 4265 case ARM::VLD2q16: 4266 case ARM::VLD2q32: 4267 case ARM::VLD2d8wb_fixed: 4268 case ARM::VLD2d16wb_fixed: 4269 case ARM::VLD2d32wb_fixed: 4270 case ARM::VLD2q8wb_fixed: 4271 case ARM::VLD2q16wb_fixed: 4272 case ARM::VLD2q32wb_fixed: 4273 case ARM::VLD2d8wb_register: 4274 case ARM::VLD2d16wb_register: 4275 case ARM::VLD2d32wb_register: 4276 case ARM::VLD2q8wb_register: 4277 case ARM::VLD2q16wb_register: 4278 case ARM::VLD2q32wb_register: 4279 case ARM::VLD3d8: 4280 case ARM::VLD3d16: 4281 case ARM::VLD3d32: 4282 case ARM::VLD1d64T: 4283 case ARM::VLD3d8_UPD: 4284 case ARM::VLD3d16_UPD: 4285 case ARM::VLD3d32_UPD: 4286 case ARM::VLD1d64Twb_fixed: 4287 case ARM::VLD1d64Twb_register: 4288 case ARM::VLD3q8_UPD: 4289 case ARM::VLD3q16_UPD: 4290 case ARM::VLD3q32_UPD: 4291 case ARM::VLD4d8: 4292 case ARM::VLD4d16: 4293 case ARM::VLD4d32: 4294 case ARM::VLD1d64Q: 4295 case ARM::VLD4d8_UPD: 4296 case ARM::VLD4d16_UPD: 4297 case ARM::VLD4d32_UPD: 4298 case ARM::VLD1d64Qwb_fixed: 4299 case ARM::VLD1d64Qwb_register: 4300 case ARM::VLD4q8_UPD: 4301 case ARM::VLD4q16_UPD: 4302 case ARM::VLD4q32_UPD: 4303 case ARM::VLD1DUPq8: 4304 case ARM::VLD1DUPq16: 4305 case ARM::VLD1DUPq32: 4306 case ARM::VLD1DUPq8wb_fixed: 4307 case ARM::VLD1DUPq16wb_fixed: 4308 case ARM::VLD1DUPq32wb_fixed: 4309 case ARM::VLD1DUPq8wb_register: 4310 case ARM::VLD1DUPq16wb_register: 4311 case 
ARM::VLD1DUPq32wb_register: 4312 case ARM::VLD2DUPd8: 4313 case ARM::VLD2DUPd16: 4314 case ARM::VLD2DUPd32: 4315 case ARM::VLD2DUPd8wb_fixed: 4316 case ARM::VLD2DUPd16wb_fixed: 4317 case ARM::VLD2DUPd32wb_fixed: 4318 case ARM::VLD2DUPd8wb_register: 4319 case ARM::VLD2DUPd16wb_register: 4320 case ARM::VLD2DUPd32wb_register: 4321 case ARM::VLD4DUPd8: 4322 case ARM::VLD4DUPd16: 4323 case ARM::VLD4DUPd32: 4324 case ARM::VLD4DUPd8_UPD: 4325 case ARM::VLD4DUPd16_UPD: 4326 case ARM::VLD4DUPd32_UPD: 4327 case ARM::VLD1LNd8: 4328 case ARM::VLD1LNd16: 4329 case ARM::VLD1LNd32: 4330 case ARM::VLD1LNd8_UPD: 4331 case ARM::VLD1LNd16_UPD: 4332 case ARM::VLD1LNd32_UPD: 4333 case ARM::VLD2LNd8: 4334 case ARM::VLD2LNd16: 4335 case ARM::VLD2LNd32: 4336 case ARM::VLD2LNq16: 4337 case ARM::VLD2LNq32: 4338 case ARM::VLD2LNd8_UPD: 4339 case ARM::VLD2LNd16_UPD: 4340 case ARM::VLD2LNd32_UPD: 4341 case ARM::VLD2LNq16_UPD: 4342 case ARM::VLD2LNq32_UPD: 4343 case ARM::VLD4LNd8: 4344 case ARM::VLD4LNd16: 4345 case ARM::VLD4LNd32: 4346 case ARM::VLD4LNq16: 4347 case ARM::VLD4LNq32: 4348 case ARM::VLD4LNd8_UPD: 4349 case ARM::VLD4LNd16_UPD: 4350 case ARM::VLD4LNd32_UPD: 4351 case ARM::VLD4LNq16_UPD: 4352 case ARM::VLD4LNq32_UPD: 4353 // If the address is not 64-bit aligned, the latencies of these 4354 // instructions increases by one. 4355 ++Adjust; 4356 break; 4357 } 4358 } 4359 return Adjust; 4360 } 4361 4362 int ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, 4363 const MachineInstr &DefMI, 4364 unsigned DefIdx, 4365 const MachineInstr &UseMI, 4366 unsigned UseIdx) const { 4367 // No operand latency. The caller may fall back to getInstrLatency. 4368 if (!ItinData || ItinData->isEmpty()) 4369 return -1; 4370 4371 const MachineOperand &DefMO = DefMI.getOperand(DefIdx); 4372 Register Reg = DefMO.getReg(); 4373 4374 const MachineInstr *ResolvedDefMI = &DefMI; 4375 unsigned DefAdj = 0; 4376 if (DefMI.isBundle()) 4377 ResolvedDefMI = 4378 getBundledDefMI(&getRegisterInfo(), &DefMI, Reg, DefIdx, DefAdj); 4379 if (ResolvedDefMI->isCopyLike() || ResolvedDefMI->isInsertSubreg() || 4380 ResolvedDefMI->isRegSequence() || ResolvedDefMI->isImplicitDef()) { 4381 return 1; 4382 } 4383 4384 const MachineInstr *ResolvedUseMI = &UseMI; 4385 unsigned UseAdj = 0; 4386 if (UseMI.isBundle()) { 4387 ResolvedUseMI = 4388 getBundledUseMI(&getRegisterInfo(), UseMI, Reg, UseIdx, UseAdj); 4389 if (!ResolvedUseMI) 4390 return -1; 4391 } 4392 4393 return getOperandLatencyImpl( 4394 ItinData, *ResolvedDefMI, DefIdx, ResolvedDefMI->getDesc(), DefAdj, DefMO, 4395 Reg, *ResolvedUseMI, UseIdx, ResolvedUseMI->getDesc(), UseAdj); 4396 } 4397 4398 int ARMBaseInstrInfo::getOperandLatencyImpl( 4399 const InstrItineraryData *ItinData, const MachineInstr &DefMI, 4400 unsigned DefIdx, const MCInstrDesc &DefMCID, unsigned DefAdj, 4401 const MachineOperand &DefMO, unsigned Reg, const MachineInstr &UseMI, 4402 unsigned UseIdx, const MCInstrDesc &UseMCID, unsigned UseAdj) const { 4403 if (Reg == ARM::CPSR) { 4404 if (DefMI.getOpcode() == ARM::FMSTAT) { 4405 // fpscr -> cpsr stalls over 20 cycles on A8 (and earlier?) 4406 return Subtarget.isLikeA9() ? 1 : 20; 4407 } 4408 4409 // CPSR set and branch can be paired in the same cycle. 4410 if (UseMI.isBranch()) 4411 return 0; 4412 4413 // Otherwise it takes the instruction latency (generally one). 4414 unsigned Latency = getInstrLatency(ItinData, DefMI); 4415 4416 // For Thumb2 and -Os, prefer scheduling CPSR setting instruction close to 4417 // its uses. 
4417 // its uses.
Instructions which are otherwise scheduled between them may 4418 // incur a code size penalty (not able to use the CPSR setting 16-bit 4419 // instructions). 4420 if (Latency > 0 && Subtarget.isThumb2()) { 4421 const MachineFunction *MF = DefMI.getParent()->getParent(); 4422 // FIXME: Use Function::hasOptSize(). 4423 if (MF->getFunction().hasFnAttribute(Attribute::OptimizeForSize)) 4424 --Latency; 4425 } 4426 return Latency; 4427 } 4428 4429 if (DefMO.isImplicit() || UseMI.getOperand(UseIdx).isImplicit()) 4430 return -1; 4431 4432 unsigned DefAlign = DefMI.hasOneMemOperand() 4433 ? (*DefMI.memoperands_begin())->getAlign().value() 4434 : 0; 4435 unsigned UseAlign = UseMI.hasOneMemOperand() 4436 ? (*UseMI.memoperands_begin())->getAlign().value() 4437 : 0; 4438 4439 // Get the itinerary's latency if possible, and handle variable_ops. 4440 int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign, UseMCID, 4441 UseIdx, UseAlign); 4442 // Unable to find operand latency. The caller may resort to getInstrLatency. 4443 if (Latency < 0) 4444 return Latency; 4445 4446 // Adjust for IT block position. 4447 int Adj = DefAdj + UseAdj; 4448 4449 // Adjust for dynamic def-side opcode variants not captured by the itinerary. 4450 Adj += adjustDefLatency(Subtarget, DefMI, DefMCID, DefAlign); 4451 if (Adj >= 0 || (int)Latency > -Adj) { 4452 return Latency + Adj; 4453 } 4454 // Return the itinerary latency, which may be zero but not less than zero. 4455 return Latency; 4456 } 4457 4458 int 4459 ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, 4460 SDNode *DefNode, unsigned DefIdx, 4461 SDNode *UseNode, unsigned UseIdx) const { 4462 if (!DefNode->isMachineOpcode()) 4463 return 1; 4464 4465 const MCInstrDesc &DefMCID = get(DefNode->getMachineOpcode()); 4466 4467 if (isZeroCost(DefMCID.Opcode)) 4468 return 0; 4469 4470 if (!ItinData || ItinData->isEmpty()) 4471 return DefMCID.mayLoad() ? 3 : 1; 4472 4473 if (!UseNode->isMachineOpcode()) { 4474 int Latency = ItinData->getOperandCycle(DefMCID.getSchedClass(), DefIdx); 4475 int Adj = Subtarget.getPreISelOperandLatencyAdjustment(); 4476 int Threshold = 1 + Adj; 4477 return Latency <= Threshold ? 1 : Latency - Adj; 4478 } 4479 4480 const MCInstrDesc &UseMCID = get(UseNode->getMachineOpcode()); 4481 auto *DefMN = cast<MachineSDNode>(DefNode); 4482 unsigned DefAlign = !DefMN->memoperands_empty() 4483 ? (*DefMN->memoperands_begin())->getAlign().value() 4484 : 0; 4485 auto *UseMN = cast<MachineSDNode>(UseNode); 4486 unsigned UseAlign = !UseMN->memoperands_empty() 4487 ? (*UseMN->memoperands_begin())->getAlign().value() 4488 : 0; 4489 int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign, 4490 UseMCID, UseIdx, UseAlign); 4491 4492 if (Latency > 1 && 4493 (Subtarget.isCortexA8() || Subtarget.isLikeA9() || 4494 Subtarget.isCortexA7())) { 4495 // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2] 4496 // variants are one cycle cheaper. 4497 switch (DefMCID.getOpcode()) { 4498 default: break; 4499 case ARM::LDRrs: 4500 case ARM::LDRBrs: { 4501 unsigned ShOpVal = 4502 cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue(); 4503 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 4504 if (ShImm == 0 || 4505 (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)) 4506 --Latency; 4507 break; 4508 } 4509 case ARM::t2LDRs: 4510 case ARM::t2LDRBs: 4511 case ARM::t2LDRHs: 4512 case ARM::t2LDRSHs: { 4513 // Thumb2 mode: lsl only. 
4514 unsigned ShAmt = 4515 cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue(); 4516 if (ShAmt == 0 || ShAmt == 2) 4517 --Latency; 4518 break; 4519 } 4520 } 4521 } else if (DefIdx == 0 && Latency > 2 && Subtarget.isSwift()) { 4522 // FIXME: Properly handle all of the latency adjustments for address 4523 // writeback. 4524 switch (DefMCID.getOpcode()) { 4525 default: break; 4526 case ARM::LDRrs: 4527 case ARM::LDRBrs: { 4528 unsigned ShOpVal = 4529 cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue(); 4530 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 4531 if (ShImm == 0 || 4532 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 4533 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)) 4534 Latency -= 2; 4535 else if (ShImm == 1 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsr) 4536 --Latency; 4537 break; 4538 } 4539 case ARM::t2LDRs: 4540 case ARM::t2LDRBs: 4541 case ARM::t2LDRHs: 4542 case ARM::t2LDRSHs: 4543 // Thumb2 mode: lsl 0-3 only. 4544 Latency -= 2; 4545 break; 4546 } 4547 } 4548 4549 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment()) 4550 switch (DefMCID.getOpcode()) { 4551 default: break; 4552 case ARM::VLD1q8: 4553 case ARM::VLD1q16: 4554 case ARM::VLD1q32: 4555 case ARM::VLD1q64: 4556 case ARM::VLD1q8wb_register: 4557 case ARM::VLD1q16wb_register: 4558 case ARM::VLD1q32wb_register: 4559 case ARM::VLD1q64wb_register: 4560 case ARM::VLD1q8wb_fixed: 4561 case ARM::VLD1q16wb_fixed: 4562 case ARM::VLD1q32wb_fixed: 4563 case ARM::VLD1q64wb_fixed: 4564 case ARM::VLD2d8: 4565 case ARM::VLD2d16: 4566 case ARM::VLD2d32: 4567 case ARM::VLD2q8Pseudo: 4568 case ARM::VLD2q16Pseudo: 4569 case ARM::VLD2q32Pseudo: 4570 case ARM::VLD2d8wb_fixed: 4571 case ARM::VLD2d16wb_fixed: 4572 case ARM::VLD2d32wb_fixed: 4573 case ARM::VLD2q8PseudoWB_fixed: 4574 case ARM::VLD2q16PseudoWB_fixed: 4575 case ARM::VLD2q32PseudoWB_fixed: 4576 case ARM::VLD2d8wb_register: 4577 case ARM::VLD2d16wb_register: 4578 case ARM::VLD2d32wb_register: 4579 case ARM::VLD2q8PseudoWB_register: 4580 case ARM::VLD2q16PseudoWB_register: 4581 case ARM::VLD2q32PseudoWB_register: 4582 case ARM::VLD3d8Pseudo: 4583 case ARM::VLD3d16Pseudo: 4584 case ARM::VLD3d32Pseudo: 4585 case ARM::VLD1d8TPseudo: 4586 case ARM::VLD1d16TPseudo: 4587 case ARM::VLD1d32TPseudo: 4588 case ARM::VLD1d64TPseudo: 4589 case ARM::VLD1d64TPseudoWB_fixed: 4590 case ARM::VLD1d64TPseudoWB_register: 4591 case ARM::VLD3d8Pseudo_UPD: 4592 case ARM::VLD3d16Pseudo_UPD: 4593 case ARM::VLD3d32Pseudo_UPD: 4594 case ARM::VLD3q8Pseudo_UPD: 4595 case ARM::VLD3q16Pseudo_UPD: 4596 case ARM::VLD3q32Pseudo_UPD: 4597 case ARM::VLD3q8oddPseudo: 4598 case ARM::VLD3q16oddPseudo: 4599 case ARM::VLD3q32oddPseudo: 4600 case ARM::VLD3q8oddPseudo_UPD: 4601 case ARM::VLD3q16oddPseudo_UPD: 4602 case ARM::VLD3q32oddPseudo_UPD: 4603 case ARM::VLD4d8Pseudo: 4604 case ARM::VLD4d16Pseudo: 4605 case ARM::VLD4d32Pseudo: 4606 case ARM::VLD1d8QPseudo: 4607 case ARM::VLD1d16QPseudo: 4608 case ARM::VLD1d32QPseudo: 4609 case ARM::VLD1d64QPseudo: 4610 case ARM::VLD1d64QPseudoWB_fixed: 4611 case ARM::VLD1d64QPseudoWB_register: 4612 case ARM::VLD1q8HighQPseudo: 4613 case ARM::VLD1q8LowQPseudo_UPD: 4614 case ARM::VLD1q8HighTPseudo: 4615 case ARM::VLD1q8LowTPseudo_UPD: 4616 case ARM::VLD1q16HighQPseudo: 4617 case ARM::VLD1q16LowQPseudo_UPD: 4618 case ARM::VLD1q16HighTPseudo: 4619 case ARM::VLD1q16LowTPseudo_UPD: 4620 case ARM::VLD1q32HighQPseudo: 4621 case ARM::VLD1q32LowQPseudo_UPD: 4622 case ARM::VLD1q32HighTPseudo: 4623 case ARM::VLD1q32LowTPseudo_UPD: 4624 case ARM::VLD1q64HighQPseudo: 
4625 case ARM::VLD1q64LowQPseudo_UPD: 4626 case ARM::VLD1q64HighTPseudo: 4627 case ARM::VLD1q64LowTPseudo_UPD: 4628 case ARM::VLD4d8Pseudo_UPD: 4629 case ARM::VLD4d16Pseudo_UPD: 4630 case ARM::VLD4d32Pseudo_UPD: 4631 case ARM::VLD4q8Pseudo_UPD: 4632 case ARM::VLD4q16Pseudo_UPD: 4633 case ARM::VLD4q32Pseudo_UPD: 4634 case ARM::VLD4q8oddPseudo: 4635 case ARM::VLD4q16oddPseudo: 4636 case ARM::VLD4q32oddPseudo: 4637 case ARM::VLD4q8oddPseudo_UPD: 4638 case ARM::VLD4q16oddPseudo_UPD: 4639 case ARM::VLD4q32oddPseudo_UPD: 4640 case ARM::VLD1DUPq8: 4641 case ARM::VLD1DUPq16: 4642 case ARM::VLD1DUPq32: 4643 case ARM::VLD1DUPq8wb_fixed: 4644 case ARM::VLD1DUPq16wb_fixed: 4645 case ARM::VLD1DUPq32wb_fixed: 4646 case ARM::VLD1DUPq8wb_register: 4647 case ARM::VLD1DUPq16wb_register: 4648 case ARM::VLD1DUPq32wb_register: 4649 case ARM::VLD2DUPd8: 4650 case ARM::VLD2DUPd16: 4651 case ARM::VLD2DUPd32: 4652 case ARM::VLD2DUPd8wb_fixed: 4653 case ARM::VLD2DUPd16wb_fixed: 4654 case ARM::VLD2DUPd32wb_fixed: 4655 case ARM::VLD2DUPd8wb_register: 4656 case ARM::VLD2DUPd16wb_register: 4657 case ARM::VLD2DUPd32wb_register: 4658 case ARM::VLD2DUPq8EvenPseudo: 4659 case ARM::VLD2DUPq8OddPseudo: 4660 case ARM::VLD2DUPq16EvenPseudo: 4661 case ARM::VLD2DUPq16OddPseudo: 4662 case ARM::VLD2DUPq32EvenPseudo: 4663 case ARM::VLD2DUPq32OddPseudo: 4664 case ARM::VLD3DUPq8EvenPseudo: 4665 case ARM::VLD3DUPq8OddPseudo: 4666 case ARM::VLD3DUPq16EvenPseudo: 4667 case ARM::VLD3DUPq16OddPseudo: 4668 case ARM::VLD3DUPq32EvenPseudo: 4669 case ARM::VLD3DUPq32OddPseudo: 4670 case ARM::VLD4DUPd8Pseudo: 4671 case ARM::VLD4DUPd16Pseudo: 4672 case ARM::VLD4DUPd32Pseudo: 4673 case ARM::VLD4DUPd8Pseudo_UPD: 4674 case ARM::VLD4DUPd16Pseudo_UPD: 4675 case ARM::VLD4DUPd32Pseudo_UPD: 4676 case ARM::VLD4DUPq8EvenPseudo: 4677 case ARM::VLD4DUPq8OddPseudo: 4678 case ARM::VLD4DUPq16EvenPseudo: 4679 case ARM::VLD4DUPq16OddPseudo: 4680 case ARM::VLD4DUPq32EvenPseudo: 4681 case ARM::VLD4DUPq32OddPseudo: 4682 case ARM::VLD1LNq8Pseudo: 4683 case ARM::VLD1LNq16Pseudo: 4684 case ARM::VLD1LNq32Pseudo: 4685 case ARM::VLD1LNq8Pseudo_UPD: 4686 case ARM::VLD1LNq16Pseudo_UPD: 4687 case ARM::VLD1LNq32Pseudo_UPD: 4688 case ARM::VLD2LNd8Pseudo: 4689 case ARM::VLD2LNd16Pseudo: 4690 case ARM::VLD2LNd32Pseudo: 4691 case ARM::VLD2LNq16Pseudo: 4692 case ARM::VLD2LNq32Pseudo: 4693 case ARM::VLD2LNd8Pseudo_UPD: 4694 case ARM::VLD2LNd16Pseudo_UPD: 4695 case ARM::VLD2LNd32Pseudo_UPD: 4696 case ARM::VLD2LNq16Pseudo_UPD: 4697 case ARM::VLD2LNq32Pseudo_UPD: 4698 case ARM::VLD4LNd8Pseudo: 4699 case ARM::VLD4LNd16Pseudo: 4700 case ARM::VLD4LNd32Pseudo: 4701 case ARM::VLD4LNq16Pseudo: 4702 case ARM::VLD4LNq32Pseudo: 4703 case ARM::VLD4LNd8Pseudo_UPD: 4704 case ARM::VLD4LNd16Pseudo_UPD: 4705 case ARM::VLD4LNd32Pseudo_UPD: 4706 case ARM::VLD4LNq16Pseudo_UPD: 4707 case ARM::VLD4LNq32Pseudo_UPD: 4708 // If the address is not 64-bit aligned, the latencies of these 4709 // instructions increases by one. 
4710 ++Latency; 4711 break; 4712 } 4713 4714 return Latency; 4715 } 4716 4717 unsigned ARMBaseInstrInfo::getPredicationCost(const MachineInstr &MI) const { 4718 if (MI.isCopyLike() || MI.isInsertSubreg() || MI.isRegSequence() || 4719 MI.isImplicitDef()) 4720 return 0; 4721 4722 if (MI.isBundle()) 4723 return 0; 4724 4725 const MCInstrDesc &MCID = MI.getDesc(); 4726 4727 if (MCID.isCall() || (MCID.hasImplicitDefOfPhysReg(ARM::CPSR) && 4728 !Subtarget.cheapPredicableCPSRDef())) { 4729 // When predicated, CPSR is an additional source operand for CPSR updating 4730 // instructions, which apparently increases their latencies. 4731 return 1; 4732 } 4733 return 0; 4734 } 4735 4736 unsigned ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData, 4737 const MachineInstr &MI, 4738 unsigned *PredCost) const { 4739 if (MI.isCopyLike() || MI.isInsertSubreg() || MI.isRegSequence() || 4740 MI.isImplicitDef()) 4741 return 1; 4742 4743 // An instruction scheduler typically runs on unbundled instructions; however, 4744 // other passes may query the latency of a bundled instruction. 4745 if (MI.isBundle()) { 4746 unsigned Latency = 0; 4747 MachineBasicBlock::const_instr_iterator I = MI.getIterator(); 4748 MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end(); 4749 while (++I != E && I->isInsideBundle()) { 4750 if (I->getOpcode() != ARM::t2IT) 4751 Latency += getInstrLatency(ItinData, *I, PredCost); 4752 } 4753 return Latency; 4754 } 4755 4756 const MCInstrDesc &MCID = MI.getDesc(); 4757 if (PredCost && (MCID.isCall() || (MCID.hasImplicitDefOfPhysReg(ARM::CPSR) && 4758 !Subtarget.cheapPredicableCPSRDef()))) { 4759 // When predicated, CPSR is an additional source operand for CPSR updating 4760 // instructions, which apparently increases their latencies. 4761 *PredCost = 1; 4762 } 4763 // Be sure to call getStageLatency for an empty itinerary in case it has a 4764 // valid MinLatency property. 4765 if (!ItinData) 4766 return MI.mayLoad() ? 3 : 1; 4767 4768 unsigned Class = MCID.getSchedClass(); 4769 4770 // For instructions with variable uops, use uops as latency. 4771 if (!ItinData->isEmpty() && ItinData->getNumMicroOps(Class) < 0) 4772 return getNumMicroOps(ItinData, MI); 4773 4774 // For the common case, fall back on the itinerary's latency. 4775 unsigned Latency = ItinData->getStageLatency(Class); 4776 4777 // Adjust for dynamic def-side opcode variants not captured by the itinerary. 4778 unsigned DefAlign = 4779 MI.hasOneMemOperand() ?
(*MI.memoperands_begin())->getAlign().value() : 0; 4780 int Adj = adjustDefLatency(Subtarget, MI, MCID, DefAlign); 4781 if (Adj >= 0 || (int)Latency > -Adj) { 4782 return Latency + Adj; 4783 } 4784 return Latency; 4785 } 4786 4787 int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData, 4788 SDNode *Node) const { 4789 if (!Node->isMachineOpcode()) 4790 return 1; 4791 4792 if (!ItinData || ItinData->isEmpty()) 4793 return 1; 4794 4795 unsigned Opcode = Node->getMachineOpcode(); 4796 switch (Opcode) { 4797 default: 4798 return ItinData->getStageLatency(get(Opcode).getSchedClass()); 4799 case ARM::VLDMQIA: 4800 case ARM::VSTMQIA: 4801 return 2; 4802 } 4803 } 4804 4805 bool ARMBaseInstrInfo::hasHighOperandLatency(const TargetSchedModel &SchedModel, 4806 const MachineRegisterInfo *MRI, 4807 const MachineInstr &DefMI, 4808 unsigned DefIdx, 4809 const MachineInstr &UseMI, 4810 unsigned UseIdx) const { 4811 unsigned DDomain = DefMI.getDesc().TSFlags & ARMII::DomainMask; 4812 unsigned UDomain = UseMI.getDesc().TSFlags & ARMII::DomainMask; 4813 if (Subtarget.nonpipelinedVFP() && 4814 (DDomain == ARMII::DomainVFP || UDomain == ARMII::DomainVFP)) 4815 return true; 4816 4817 // Hoist VFP / NEON instructions with 4 or higher latency. 4818 unsigned Latency = 4819 SchedModel.computeOperandLatency(&DefMI, DefIdx, &UseMI, UseIdx); 4820 if (Latency <= 3) 4821 return false; 4822 return DDomain == ARMII::DomainVFP || DDomain == ARMII::DomainNEON || 4823 UDomain == ARMII::DomainVFP || UDomain == ARMII::DomainNEON; 4824 } 4825 4826 bool ARMBaseInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel, 4827 const MachineInstr &DefMI, 4828 unsigned DefIdx) const { 4829 const InstrItineraryData *ItinData = SchedModel.getInstrItineraries(); 4830 if (!ItinData || ItinData->isEmpty()) 4831 return false; 4832 4833 unsigned DDomain = DefMI.getDesc().TSFlags & ARMII::DomainMask; 4834 if (DDomain == ARMII::DomainGeneral) { 4835 unsigned DefClass = DefMI.getDesc().getSchedClass(); 4836 int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx); 4837 return (DefCycle != -1 && DefCycle <= 2); 4838 } 4839 return false; 4840 } 4841 4842 bool ARMBaseInstrInfo::verifyInstruction(const MachineInstr &MI, 4843 StringRef &ErrInfo) const { 4844 if (convertAddSubFlagsOpcode(MI.getOpcode())) { 4845 ErrInfo = "Pseudo flag setting opcodes only exist in Selection DAG"; 4846 return false; 4847 } 4848 if (MI.getOpcode() == ARM::tMOVr && !Subtarget.hasV6Ops()) { 4849 // Make sure we don't generate a lo-lo mov that isn't supported. 
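// For example: a plain Thumb1 "mov r0, r1" (both registers low, flags
// untouched) only encodes from ARMv6 onwards, while "mov r8, r0" or
// "mov r0, r8" are fine on v5 because a high register is involved.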
4850 if (!ARM::hGPRRegClass.contains(MI.getOperand(0).getReg()) && 4851 !ARM::hGPRRegClass.contains(MI.getOperand(1).getReg())) { 4852 ErrInfo = "Non-flag-setting Thumb1 mov is v6-only"; 4853 return false; 4854 } 4855 } 4856 if (MI.getOpcode() == ARM::tPUSH || 4857 MI.getOpcode() == ARM::tPOP || 4858 MI.getOpcode() == ARM::tPOP_RET) { 4859 for (int i = 2, e = MI.getNumOperands(); i < e; ++i) { 4860 if (MI.getOperand(i).isImplicit() || 4861 !MI.getOperand(i).isReg()) 4862 continue; 4863 Register Reg = MI.getOperand(i).getReg(); 4864 if (Reg < ARM::R0 || Reg > ARM::R7) { 4865 if (!(MI.getOpcode() == ARM::tPUSH && Reg == ARM::LR) && 4866 !(MI.getOpcode() == ARM::tPOP_RET && Reg == ARM::PC)) { 4867 ErrInfo = "Unsupported register in Thumb1 push/pop"; 4868 return false; 4869 } 4870 } 4871 } 4872 } 4873 if (MI.getOpcode() == ARM::MVE_VMOV_q_rr) { 4874 assert(MI.getOperand(4).isImm() && MI.getOperand(5).isImm()); 4875 if ((MI.getOperand(4).getImm() != 2 && MI.getOperand(4).getImm() != 3) || 4876 MI.getOperand(4).getImm() != MI.getOperand(5).getImm() + 2) { 4877 ErrInfo = "Incorrect array index for MVE_VMOV_q_rr"; 4878 return false; 4879 } 4880 } 4881 return true; 4882 } 4883 4884 void ARMBaseInstrInfo::expandLoadStackGuardBase(MachineBasicBlock::iterator MI, 4885 unsigned LoadImmOpc, 4886 unsigned LoadOpc) const { 4887 assert(!Subtarget.isROPI() && !Subtarget.isRWPI() && 4888 "ROPI/RWPI not currently supported with stack guard"); 4889 4890 MachineBasicBlock &MBB = *MI->getParent(); 4891 DebugLoc DL = MI->getDebugLoc(); 4892 Register Reg = MI->getOperand(0).getReg(); 4893 MachineInstrBuilder MIB; 4894 unsigned int Offset = 0; 4895 4896 if (LoadImmOpc == ARM::MRC || LoadImmOpc == ARM::t2MRC) { 4897 assert(Subtarget.isReadTPHard() && 4898 "TLS stack protector requires hardware TLS register"); 4899 4900 BuildMI(MBB, MI, DL, get(LoadImmOpc), Reg) 4901 .addImm(15) 4902 .addImm(0) 4903 .addImm(13) 4904 .addImm(0) 4905 .addImm(3) 4906 .add(predOps(ARMCC::AL)); 4907 4908 Module &M = *MBB.getParent()->getFunction().getParent(); 4909 Offset = M.getStackProtectorGuardOffset(); 4910 if (Offset & ~0xfffU) { 4911 // The offset won't fit in the LDR's 12-bit immediate field, so emit an 4912 // extra ADD to cover the delta. This gives us a guaranteed 8 additional 4913 // bits, resulting in a range of 0 to +1 MiB for the guard offset. 4914 unsigned AddOpc = (LoadImmOpc == ARM::MRC) ? 
ARM::ADDri : ARM::t2ADDri; 4915 BuildMI(MBB, MI, DL, get(AddOpc), Reg) 4916 .addReg(Reg, RegState::Kill) 4917 .addImm(Offset & ~0xfffU) 4918 .add(predOps(ARMCC::AL)) 4919 .addReg(0); 4920 Offset &= 0xfffU; 4921 } 4922 } else { 4923 const GlobalValue *GV = 4924 cast<GlobalValue>((*MI->memoperands_begin())->getValue()); 4925 bool IsIndirect = Subtarget.isGVIndirectSymbol(GV); 4926 4927 unsigned TargetFlags = ARMII::MO_NO_FLAG; 4928 if (Subtarget.isTargetMachO()) { 4929 TargetFlags |= ARMII::MO_NONLAZY; 4930 } else if (Subtarget.isTargetCOFF()) { 4931 if (GV->hasDLLImportStorageClass()) 4932 TargetFlags |= ARMII::MO_DLLIMPORT; 4933 else if (IsIndirect) 4934 TargetFlags |= ARMII::MO_COFFSTUB; 4935 } else if (Subtarget.isGVInGOT(GV)) { 4936 TargetFlags |= ARMII::MO_GOT; 4937 } 4938 4939 BuildMI(MBB, MI, DL, get(LoadImmOpc), Reg) 4940 .addGlobalAddress(GV, 0, TargetFlags); 4941 4942 if (IsIndirect) { 4943 MIB = BuildMI(MBB, MI, DL, get(LoadOpc), Reg); 4944 MIB.addReg(Reg, RegState::Kill).addImm(0); 4945 auto Flags = MachineMemOperand::MOLoad | 4946 MachineMemOperand::MODereferenceable | 4947 MachineMemOperand::MOInvariant; 4948 MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand( 4949 MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 4, Align(4)); 4950 MIB.addMemOperand(MMO).add(predOps(ARMCC::AL)); 4951 } 4952 } 4953 4954 MIB = BuildMI(MBB, MI, DL, get(LoadOpc), Reg); 4955 MIB.addReg(Reg, RegState::Kill) 4956 .addImm(Offset) 4957 .cloneMemRefs(*MI) 4958 .add(predOps(ARMCC::AL)); 4959 } 4960 4961 bool 4962 ARMBaseInstrInfo::isFpMLxInstruction(unsigned Opcode, unsigned &MulOpc, 4963 unsigned &AddSubOpc, 4964 bool &NegAcc, bool &HasLane) const { 4965 DenseMap<unsigned, unsigned>::const_iterator I = MLxEntryMap.find(Opcode); 4966 if (I == MLxEntryMap.end()) 4967 return false; 4968 4969 const ARM_MLxEntry &Entry = ARM_MLxTable[I->second]; 4970 MulOpc = Entry.MulOpc; 4971 AddSubOpc = Entry.AddSubOpc; 4972 NegAcc = Entry.NegAcc; 4973 HasLane = Entry.HasLane; 4974 return true; 4975 } 4976 4977 //===----------------------------------------------------------------------===// 4978 // Execution domains. 4979 //===----------------------------------------------------------------------===// 4980 // 4981 // Some instructions go down the NEON pipeline, some go down the VFP pipeline, 4982 // and some can go down both. The vmov instructions go down the VFP pipeline, 4983 // but they can be changed to vorr equivalents that are executed by the NEON 4984 // pipeline. 4985 // 4986 // We use the following execution domain numbering: 4987 // 4988 enum ARMExeDomain { 4989 ExeGeneric = 0, 4990 ExeVFP = 1, 4991 ExeNEON = 2 4992 }; 4993 4994 // 4995 // Also see ARMInstrFormats.td and Domain* enums in ARMBaseInfo.h 4996 // 4997 std::pair<uint16_t, uint16_t> 4998 ARMBaseInstrInfo::getExecutionDomain(const MachineInstr &MI) const { 4999 // If we don't have access to NEON instructions then we won't be able 5000 // to swizzle anything to the NEON domain. Check to make sure. 5001 if (Subtarget.hasNEON()) { 5002 // VMOVD, VMOVRS and VMOVSR are VFP instructions, but can be changed to NEON 5003 // if they are not predicated. 5004 if (MI.getOpcode() == ARM::VMOVD && !isPredicated(MI)) 5005 return std::make_pair(ExeVFP, (1 << ExeVFP) | (1 << ExeNEON)); 5006 5007 // CortexA9 is particularly picky about mixing the two and wants these 5008 // converted. 
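// Concretely (a sketch, not a guaranteed lowering): a VFP copy like
//   vmov.f32 s0, s2
// can instead be executed as a NEON lane move on the covering D registers
// (a VDUPLN32d, or a VEXTd32 pair), which is what setExecutionDomain below
// materializes when we advertise ExeNEON here.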
5009 if (Subtarget.useNEONForFPMovs() && !isPredicated(MI) && 5010 (MI.getOpcode() == ARM::VMOVRS || MI.getOpcode() == ARM::VMOVSR || 5011 MI.getOpcode() == ARM::VMOVS)) 5012 return std::make_pair(ExeVFP, (1 << ExeVFP) | (1 << ExeNEON)); 5013 } 5014 // No other instructions can be swizzled, so just determine their domain. 5015 unsigned Domain = MI.getDesc().TSFlags & ARMII::DomainMask; 5016 5017 if (Domain & ARMII::DomainNEON) 5018 return std::make_pair(ExeNEON, 0); 5019 5020 // Certain instructions can go either way on Cortex-A8. 5021 // Treat them as NEON instructions. 5022 if ((Domain & ARMII::DomainNEONA8) && Subtarget.isCortexA8()) 5023 return std::make_pair(ExeNEON, 0); 5024 5025 if (Domain & ARMII::DomainVFP) 5026 return std::make_pair(ExeVFP, 0); 5027 5028 return std::make_pair(ExeGeneric, 0); 5029 } 5030 5031 static unsigned getCorrespondingDRegAndLane(const TargetRegisterInfo *TRI, 5032 unsigned SReg, unsigned &Lane) { 5033 unsigned DReg = TRI->getMatchingSuperReg(SReg, ARM::ssub_0, &ARM::DPRRegClass); 5034 Lane = 0; 5035 5036 if (DReg != ARM::NoRegister) 5037 return DReg; 5038 5039 Lane = 1; 5040 DReg = TRI->getMatchingSuperReg(SReg, ARM::ssub_1, &ARM::DPRRegClass); 5041 5042 assert(DReg && "S-register with no D super-register?"); 5043 return DReg; 5044 } 5045 5046 /// getImplicitSPRUseForDPRUse - Given a use of a DPR register and lane, 5047 /// set ImplicitSReg to a register number that must be marked as implicit-use or 5048 /// zero if no register needs to be defined as implicit-use. 5049 /// 5050 /// If the function cannot determine if an SPR should be marked implicit use or 5051 /// not, it returns false. 5052 /// 5053 /// This function handles cases where an instruction is being modified from taking 5054 /// an SPR to a DPR[Lane]. A use of the DPR is being added, which may conflict 5055 /// with an earlier def of an SPR corresponding to DPR[Lane^1] (i.e. the other 5056 /// lane of the DPR). 5057 /// 5058 /// If the other SPR is defined, an implicit-use of it should be added. Else, 5059 /// (including the case where the DPR itself is defined), it should not. 5060 /// 5061 static bool getImplicitSPRUseForDPRUse(const TargetRegisterInfo *TRI, 5062 MachineInstr &MI, unsigned DReg, 5063 unsigned Lane, unsigned &ImplicitSReg) { 5064 // If the DPR is defined or used already, the other SPR lane will be chained 5065 // correctly, so there is nothing to be done. 5066 if (MI.definesRegister(DReg, TRI) || MI.readsRegister(DReg, TRI)) { 5067 ImplicitSReg = 0; 5068 return true; 5069 } 5070 5071 // Otherwise we need to go searching to see if the SPR is set explicitly. 5072 ImplicitSReg = TRI->getSubReg(DReg, 5073 (Lane & 1) ? ARM::ssub_0 : ARM::ssub_1); 5074 MachineBasicBlock::LivenessQueryResult LQR = 5075 MI.getParent()->computeRegisterLiveness(TRI, ImplicitSReg, MI); 5076 5077 if (LQR == MachineBasicBlock::LQR_Live) 5078 return true; 5079 else if (LQR == MachineBasicBlock::LQR_Unknown) 5080 return false; 5081 5082 // If the register is known not to be live, there is no need to add an 5083 // implicit-use. 
5084 ImplicitSReg = 0; 5085 return true; 5086 } 5087 5088 void ARMBaseInstrInfo::setExecutionDomain(MachineInstr &MI, 5089 unsigned Domain) const { 5090 unsigned DstReg, SrcReg, DReg; 5091 unsigned Lane; 5092 MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI); 5093 const TargetRegisterInfo *TRI = &getRegisterInfo(); 5094 switch (MI.getOpcode()) { 5095 default: 5096 llvm_unreachable("cannot handle opcode!"); 5097 break; 5098 case ARM::VMOVD: 5099 if (Domain != ExeNEON) 5100 break; 5101 5102 // Zap the predicate operands. 5103 assert(!isPredicated(MI) && "Cannot predicate a VORRd"); 5104 5105 // Make sure we've got NEON instructions. 5106 assert(Subtarget.hasNEON() && "VORRd requires NEON"); 5107 5108 // Source instruction is %DDst = VMOVD %DSrc, 14, %noreg (; implicits) 5109 DstReg = MI.getOperand(0).getReg(); 5110 SrcReg = MI.getOperand(1).getReg(); 5111 5112 for (unsigned i = MI.getDesc().getNumOperands(); i; --i) 5113 MI.RemoveOperand(i - 1); 5114 5115 // Change to a %DDst = VORRd %DSrc, %DSrc, 14, %noreg (; implicits) 5116 MI.setDesc(get(ARM::VORRd)); 5117 MIB.addReg(DstReg, RegState::Define) 5118 .addReg(SrcReg) 5119 .addReg(SrcReg) 5120 .add(predOps(ARMCC::AL)); 5121 break; 5122 case ARM::VMOVRS: 5123 if (Domain != ExeNEON) 5124 break; 5125 assert(!isPredicated(MI) && "Cannot predicate a VGETLN"); 5126 5127 // Source instruction is %RDst = VMOVRS %SSrc, 14, %noreg (; implicits) 5128 DstReg = MI.getOperand(0).getReg(); 5129 SrcReg = MI.getOperand(1).getReg(); 5130 5131 for (unsigned i = MI.getDesc().getNumOperands(); i; --i) 5132 MI.RemoveOperand(i - 1); 5133 5134 DReg = getCorrespondingDRegAndLane(TRI, SrcReg, Lane); 5135 5136 // Convert to %RDst = VGETLNi32 %DSrc, Lane, 14, %noreg (; imps) 5137 // Note that DSrc has been widened and the other lane may be undef, which 5138 // contaminates the entire register. 5139 MI.setDesc(get(ARM::VGETLNi32)); 5140 MIB.addReg(DstReg, RegState::Define) 5141 .addReg(DReg, RegState::Undef) 5142 .addImm(Lane) 5143 .add(predOps(ARMCC::AL)); 5144 5145 // The old source should be an implicit use, otherwise we might think it 5146 // was dead before here. 5147 MIB.addReg(SrcReg, RegState::Implicit); 5148 break; 5149 case ARM::VMOVSR: { 5150 if (Domain != ExeNEON) 5151 break; 5152 assert(!isPredicated(MI) && "Cannot predicate a VSETLN"); 5153 5154 // Source instruction is %SDst = VMOVSR %RSrc, 14, %noreg (; implicits) 5155 DstReg = MI.getOperand(0).getReg(); 5156 SrcReg = MI.getOperand(1).getReg(); 5157 5158 DReg = getCorrespondingDRegAndLane(TRI, DstReg, Lane); 5159 5160 unsigned ImplicitSReg; 5161 if (!getImplicitSPRUseForDPRUse(TRI, MI, DReg, Lane, ImplicitSReg)) 5162 break; 5163 5164 for (unsigned i = MI.getDesc().getNumOperands(); i; --i) 5165 MI.RemoveOperand(i - 1); 5166 5167 // Convert to %DDst = VSETLNi32 %DDst, %RSrc, Lane, 14, %noreg (; imps) 5168 // Again DDst may be undefined at the beginning of this instruction. 5169 MI.setDesc(get(ARM::VSETLNi32)); 5170 MIB.addReg(DReg, RegState::Define) 5171 .addReg(DReg, getUndefRegState(!MI.readsRegister(DReg, TRI))) 5172 .addReg(SrcReg) 5173 .addImm(Lane) 5174 .add(predOps(ARMCC::AL)); 5175 5176 // The narrower destination must be marked as set to keep previous chains 5177 // in place. 
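    // (E.g. %s1 = VMOVSR %r0 becomes %d0 = VSETLNi32 %d0, %r0, 1; the
    // implicit def of the original S-register added below keeps later
    // readers of that S-register chained to this instruction.)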
5178     MIB.addReg(DstReg, RegState::Define | RegState::Implicit);
5179     if (ImplicitSReg != 0)
5180       MIB.addReg(ImplicitSReg, RegState::Implicit);
5181     break;
5182   }
5183   case ARM::VMOVS: {
5184     if (Domain != ExeNEON)
5185       break;
5186
5187     // Source instruction is %SDst = VMOVS %SSrc, 14, %noreg (; implicits)
5188     DstReg = MI.getOperand(0).getReg();
5189     SrcReg = MI.getOperand(1).getReg();
5190
5191     unsigned DstLane = 0, SrcLane = 0, DDst, DSrc;
5192     DDst = getCorrespondingDRegAndLane(TRI, DstReg, DstLane);
5193     DSrc = getCorrespondingDRegAndLane(TRI, SrcReg, SrcLane);
5194
5195     unsigned ImplicitSReg;
5196     if (!getImplicitSPRUseForDPRUse(TRI, MI, DSrc, SrcLane, ImplicitSReg))
5197       break;
5198
5199     for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
5200       MI.RemoveOperand(i - 1);
5201
5202     if (DSrc == DDst) {
5203       // Destination can be:
5204       //   %DDst = VDUPLN32d %DDst, Lane, 14, %noreg (; implicits)
5205       MI.setDesc(get(ARM::VDUPLN32d));
5206       MIB.addReg(DDst, RegState::Define)
5207           .addReg(DDst, getUndefRegState(!MI.readsRegister(DDst, TRI)))
5208           .addImm(SrcLane)
5209           .add(predOps(ARMCC::AL));
5210
5211       // Neither the source nor the destination is naturally represented any
5212       // more, so add them in manually.
5213       MIB.addReg(DstReg, RegState::Implicit | RegState::Define);
5214       MIB.addReg(SrcReg, RegState::Implicit);
5215       if (ImplicitSReg != 0)
5216         MIB.addReg(ImplicitSReg, RegState::Implicit);
5217       break;
5218     }
5219
5220     // In general there's no single instruction that can perform an S <-> S
5221     // move in NEON space, but a pair of VEXT instructions *can* do the
5222     // job. It turns out that the VEXTs needed will only use DSrc once, with
5223     // the position based purely on the combination of lane-0 and lane-1
5224     // involved. For example
5225     //   vmov s0, s2 -> vext.32 d0, d0, d1, #1  vext.32 d0, d0, d0, #1
5226     //   vmov s1, s3 -> vext.32 d0, d1, d0, #1  vext.32 d0, d0, d0, #1
5227     //   vmov s0, s3 -> vext.32 d0, d0, d0, #1  vext.32 d0, d1, d0, #1
5228     //   vmov s1, s2 -> vext.32 d0, d0, d0, #1  vext.32 d0, d0, d1, #1
5229     //
5230     // Pattern of the MachineInstrs is:
5231     //   %DDst = VEXTd32 %DSrc1, %DSrc2, Lane, 14, %noreg (;implicits)
5232     MachineInstrBuilder NewMIB;
5233     NewMIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(ARM::VEXTd32),
5234                      DDst);
5235
5236     // On the first instruction, both DSrc and DDst may be undef if present.
5237     // Specifically when the original instruction didn't have them as an
5238     // <imp-use>.
5239     unsigned CurReg = SrcLane == 1 && DstLane == 1 ? DSrc : DDst;
5240     bool CurUndef = !MI.readsRegister(CurReg, TRI);
5241     NewMIB.addReg(CurReg, getUndefRegState(CurUndef));
5242
5243     CurReg = SrcLane == 0 && DstLane == 0 ? DSrc : DDst;
5244     CurUndef = !MI.readsRegister(CurReg, TRI);
5245     NewMIB.addReg(CurReg, getUndefRegState(CurUndef))
5246         .addImm(1)
5247         .add(predOps(ARMCC::AL));
5248
5249     if (SrcLane == DstLane)
5250       NewMIB.addReg(SrcReg, RegState::Implicit);
5251
5252     MI.setDesc(get(ARM::VEXTd32));
5253     MIB.addReg(DDst, RegState::Define);
5254
5255     // On the second instruction, DDst has definitely been defined above, so
5256     // it is not undef. DSrc, if present, can be undef as above.
5257     CurReg = SrcLane == 1 && DstLane == 0 ? DSrc : DDst;
5258     CurUndef = CurReg == DSrc && !MI.readsRegister(CurReg, TRI);
5259     MIB.addReg(CurReg, getUndefRegState(CurUndef));
5260
5261     CurReg = SrcLane == 0 && DstLane == 1 ? DSrc : DDst;
5262     CurUndef = CurReg == DSrc && !MI.readsRegister(CurReg, TRI);
5263     MIB.addReg(CurReg, getUndefRegState(CurUndef))
5264         .addImm(1)
5265         .add(predOps(ARMCC::AL));
5266
5267     if (SrcLane != DstLane)
5268       MIB.addReg(SrcReg, RegState::Implicit);
5269
5270     // As before, the original destination is no longer represented; add it
5271     // implicitly.
5272     MIB.addReg(DstReg, RegState::Define | RegState::Implicit);
5273     if (ImplicitSReg != 0)
5274       MIB.addReg(ImplicitSReg, RegState::Implicit);
5275     break;
5276   }
5277   }
5278 }
5279
5280 //===----------------------------------------------------------------------===//
5281 // Partial register updates
5282 //===----------------------------------------------------------------------===//
5283 //
5284 // Swift renames NEON registers with 64-bit granularity. That means any
5285 // instruction writing an S-reg implicitly reads the containing D-reg. The
5286 // problem is mostly avoided by translating f32 operations to v2f32 operations
5287 // on D-registers, but f32 loads are still a problem.
5288 //
5289 // These instructions can load an f32 into a NEON register:
5290 //
5291 // VLDRS - Only writes S, partial D update.
5292 // VLD1LNd32 - Writes all D-regs, explicit partial D update, 2 uops.
5293 // VLD1DUPd32 - Writes all D-regs, no partial reg update, 2 uops.
5294 //
5295 // FCONSTD can be used as a dependency-breaking instruction.
5296 unsigned ARMBaseInstrInfo::getPartialRegUpdateClearance(
5297     const MachineInstr &MI, unsigned OpNum,
5298     const TargetRegisterInfo *TRI) const {
5299   auto PartialUpdateClearance = Subtarget.getPartialUpdateClearance();
5300   if (!PartialUpdateClearance)
5301     return 0;
5302
5303   assert(TRI && "Need TRI instance");
5304
5305   const MachineOperand &MO = MI.getOperand(OpNum);
5306   if (MO.readsReg())
5307     return 0;
5308   Register Reg = MO.getReg();
5309   int UseOp = -1;
5310
5311   switch (MI.getOpcode()) {
5312   // Normal instructions writing only an S-register.
5313   case ARM::VLDRS:
5314   case ARM::FCONSTS:
5315   case ARM::VMOVSR:
5316   case ARM::VMOVv8i8:
5317   case ARM::VMOVv4i16:
5318   case ARM::VMOVv2i32:
5319   case ARM::VMOVv2f32:
5320   case ARM::VMOVv1i64:
5321     UseOp = MI.findRegisterUseOperandIdx(Reg, false, TRI);
5322     break;
5323
5324   // Explicitly reads the dependency.
5325   case ARM::VLD1LNd32:
5326     UseOp = 3;
5327     break;
5328   default:
5329     return 0;
5330   }
5331
5332   // If this instruction actually reads a value from Reg, there is no unwanted
5333   // dependency.
5334   if (UseOp != -1 && MI.getOperand(UseOp).readsReg())
5335     return 0;
5336
5337   // We must be able to clobber the whole D-reg.
5338   if (Register::isVirtualRegister(Reg)) {
5339     // Virtual register must be a def undef foo:ssub_0 operand.
5340     if (!MO.getSubReg() || MI.readsVirtualRegister(Reg))
5341       return 0;
5342   } else if (ARM::SPRRegClass.contains(Reg)) {
5343     // Physical register: MI must define the full D-reg.
5344     unsigned DReg = TRI->getMatchingSuperReg(Reg, ARM::ssub_0,
5345                                              &ARM::DPRRegClass);
5346     if (!DReg || !MI.definesRegister(DReg, TRI))
5347       return 0;
5348   }
5349
5350   // MI has an unwanted D-register dependency.
5351   // Avoid defs in the previous N instructions.
5352   return PartialUpdateClearance;
5353 }
5354
5355 // Break a partial register dependency after getPartialRegUpdateClearance
5356 // returned non-zero.
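// For example, on Swift a VLDRS writing s0 partially updates d0 and so
// depends on the previous writer of d0; materialising a fresh d0 with the
// FCONSTD inserted below removes that false dependency.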
5357 void ARMBaseInstrInfo::breakPartialRegDependency( 5358 MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const { 5359 assert(OpNum < MI.getDesc().getNumDefs() && "OpNum is not a def"); 5360 assert(TRI && "Need TRI instance"); 5361 5362 const MachineOperand &MO = MI.getOperand(OpNum); 5363 Register Reg = MO.getReg(); 5364 assert(Register::isPhysicalRegister(Reg) && 5365 "Can't break virtual register dependencies."); 5366 unsigned DReg = Reg; 5367 5368 // If MI defines an S-reg, find the corresponding D super-register. 5369 if (ARM::SPRRegClass.contains(Reg)) { 5370 DReg = ARM::D0 + (Reg - ARM::S0) / 2; 5371 assert(TRI->isSuperRegister(Reg, DReg) && "Register enums broken"); 5372 } 5373 5374 assert(ARM::DPRRegClass.contains(DReg) && "Can only break D-reg deps"); 5375 assert(MI.definesRegister(DReg, TRI) && "MI doesn't clobber full D-reg"); 5376 5377 // FIXME: In some cases, VLDRS can be changed to a VLD1DUPd32 which defines 5378 // the full D-register by loading the same value to both lanes. The 5379 // instruction is micro-coded with 2 uops, so don't do this until we can 5380 // properly schedule micro-coded instructions. The dispatcher stalls cause 5381 // too big regressions. 5382 5383 // Insert the dependency-breaking FCONSTD before MI. 5384 // 96 is the encoding of 0.5, but the actual value doesn't matter here. 5385 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(ARM::FCONSTD), DReg) 5386 .addImm(96) 5387 .add(predOps(ARMCC::AL)); 5388 MI.addRegisterKilled(DReg, TRI, true); 5389 } 5390 5391 bool ARMBaseInstrInfo::hasNOP() const { 5392 return Subtarget.getFeatureBits()[ARM::HasV6KOps]; 5393 } 5394 5395 bool ARMBaseInstrInfo::isSwiftFastImmShift(const MachineInstr *MI) const { 5396 if (MI->getNumOperands() < 4) 5397 return true; 5398 unsigned ShOpVal = MI->getOperand(3).getImm(); 5399 unsigned ShImm = ARM_AM::getSORegOffset(ShOpVal); 5400 // Swift supports faster shifts for: lsl 2, lsl 1, and lsr 1. 5401 if ((ShImm == 1 && ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsr) || 5402 ((ShImm == 1 || ShImm == 2) && 5403 ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsl)) 5404 return true; 5405 5406 return false; 5407 } 5408 5409 bool ARMBaseInstrInfo::getRegSequenceLikeInputs( 5410 const MachineInstr &MI, unsigned DefIdx, 5411 SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const { 5412 assert(DefIdx < MI.getDesc().getNumDefs() && "Invalid definition index"); 5413 assert(MI.isRegSequenceLike() && "Invalid kind of instruction"); 5414 5415 switch (MI.getOpcode()) { 5416 case ARM::VMOVDRR: 5417 // dX = VMOVDRR rY, rZ 5418 // is the same as: 5419 // dX = REG_SEQUENCE rY, ssub_0, rZ, ssub_1 5420 // Populate the InputRegs accordingly. 
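    // (An operand marked undef carries no live value, which is why each
    // input below is skipped when undef.)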
5421 // rY 5422 const MachineOperand *MOReg = &MI.getOperand(1); 5423 if (!MOReg->isUndef()) 5424 InputRegs.push_back(RegSubRegPairAndIdx(MOReg->getReg(), 5425 MOReg->getSubReg(), ARM::ssub_0)); 5426 // rZ 5427 MOReg = &MI.getOperand(2); 5428 if (!MOReg->isUndef()) 5429 InputRegs.push_back(RegSubRegPairAndIdx(MOReg->getReg(), 5430 MOReg->getSubReg(), ARM::ssub_1)); 5431 return true; 5432 } 5433 llvm_unreachable("Target dependent opcode missing"); 5434 } 5435 5436 bool ARMBaseInstrInfo::getExtractSubregLikeInputs( 5437 const MachineInstr &MI, unsigned DefIdx, 5438 RegSubRegPairAndIdx &InputReg) const { 5439 assert(DefIdx < MI.getDesc().getNumDefs() && "Invalid definition index"); 5440 assert(MI.isExtractSubregLike() && "Invalid kind of instruction"); 5441 5442 switch (MI.getOpcode()) { 5443 case ARM::VMOVRRD: 5444 // rX, rY = VMOVRRD dZ 5445 // is the same as: 5446 // rX = EXTRACT_SUBREG dZ, ssub_0 5447 // rY = EXTRACT_SUBREG dZ, ssub_1 5448 const MachineOperand &MOReg = MI.getOperand(2); 5449 if (MOReg.isUndef()) 5450 return false; 5451 InputReg.Reg = MOReg.getReg(); 5452 InputReg.SubReg = MOReg.getSubReg(); 5453 InputReg.SubIdx = DefIdx == 0 ? ARM::ssub_0 : ARM::ssub_1; 5454 return true; 5455 } 5456 llvm_unreachable("Target dependent opcode missing"); 5457 } 5458 5459 bool ARMBaseInstrInfo::getInsertSubregLikeInputs( 5460 const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, 5461 RegSubRegPairAndIdx &InsertedReg) const { 5462 assert(DefIdx < MI.getDesc().getNumDefs() && "Invalid definition index"); 5463 assert(MI.isInsertSubregLike() && "Invalid kind of instruction"); 5464 5465 switch (MI.getOpcode()) { 5466 case ARM::VSETLNi32: 5467 case ARM::MVE_VMOV_to_lane_32: 5468 // dX = VSETLNi32 dY, rZ, imm 5469 // qX = MVE_VMOV_to_lane_32 qY, rZ, imm 5470 const MachineOperand &MOBaseReg = MI.getOperand(1); 5471 const MachineOperand &MOInsertedReg = MI.getOperand(2); 5472 if (MOInsertedReg.isUndef()) 5473 return false; 5474 const MachineOperand &MOIndex = MI.getOperand(3); 5475 BaseReg.Reg = MOBaseReg.getReg(); 5476 BaseReg.SubReg = MOBaseReg.getSubReg(); 5477 5478 InsertedReg.Reg = MOInsertedReg.getReg(); 5479 InsertedReg.SubReg = MOInsertedReg.getSubReg(); 5480 InsertedReg.SubIdx = ARM::ssub_0 + MOIndex.getImm(); 5481 return true; 5482 } 5483 llvm_unreachable("Target dependent opcode missing"); 5484 } 5485 5486 std::pair<unsigned, unsigned> 5487 ARMBaseInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const { 5488 const unsigned Mask = ARMII::MO_OPTION_MASK; 5489 return std::make_pair(TF & Mask, TF & ~Mask); 5490 } 5491 5492 ArrayRef<std::pair<unsigned, const char *>> 5493 ARMBaseInstrInfo::getSerializableDirectMachineOperandTargetFlags() const { 5494 using namespace ARMII; 5495 5496 static const std::pair<unsigned, const char *> TargetFlags[] = { 5497 {MO_LO16, "arm-lo16"}, {MO_HI16, "arm-hi16"}}; 5498 return makeArrayRef(TargetFlags); 5499 } 5500 5501 ArrayRef<std::pair<unsigned, const char *>> 5502 ARMBaseInstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const { 5503 using namespace ARMII; 5504 5505 static const std::pair<unsigned, const char *> TargetFlags[] = { 5506 {MO_COFFSTUB, "arm-coffstub"}, 5507 {MO_GOT, "arm-got"}, 5508 {MO_SBREL, "arm-sbrel"}, 5509 {MO_DLLIMPORT, "arm-dllimport"}, 5510 {MO_SECREL, "arm-secrel"}, 5511 {MO_NONLAZY, "arm-nonlazy"}}; 5512 return makeArrayRef(TargetFlags); 5513 } 5514 5515 Optional<RegImmPair> ARMBaseInstrInfo::isAddImmediate(const MachineInstr &MI, 5516 Register Reg) const { 5517 int Sign = 1; 5518 unsigned Opcode = 
MI.getOpcode();
5519   int64_t Offset = 0;
5520
5521   // TODO: Handle cases where Reg is a super- or sub-register of the
5522   // destination register.
5523   const MachineOperand &Op0 = MI.getOperand(0);
5524   if (!Op0.isReg() || Reg != Op0.getReg())
5525     return None;
5526
5527   // We describe SUBri or ADDri instructions.
5528   if (Opcode == ARM::SUBri)
5529     Sign = -1;
5530   else if (Opcode != ARM::ADDri)
5531     return None;
5532
5533   // TODO: Third operand can be global address (usually some string). Since
5534   //       strings can be relocated we cannot calculate their offsets for
5535   //       now.
5536   if (!MI.getOperand(1).isReg() || !MI.getOperand(2).isImm())
5537     return None;
5538
5539   Offset = MI.getOperand(2).getImm() * Sign;
5540   return RegImmPair{MI.getOperand(1).getReg(), Offset};
5541 }
5542
5543 bool llvm::registerDefinedBetween(unsigned Reg,
5544                                   MachineBasicBlock::iterator From,
5545                                   MachineBasicBlock::iterator To,
5546                                   const TargetRegisterInfo *TRI) {
5547   for (auto I = From; I != To; ++I)
5548     if (I->modifiesRegister(Reg, TRI))
5549       return true;
5550   return false;
5551 }
5552
5553 MachineInstr *llvm::findCMPToFoldIntoCBZ(MachineInstr *Br,
5554                                          const TargetRegisterInfo *TRI) {
5555   // Search backwards to the instruction that defines CPSR. This may or may
5556   // not be a CMP; we check that after this loop. If we find another
5557   // instruction that reads CPSR, we return nullptr.
5558   MachineBasicBlock::iterator CmpMI = Br;
5559   while (CmpMI != Br->getParent()->begin()) {
5560     --CmpMI;
5561     if (CmpMI->modifiesRegister(ARM::CPSR, TRI))
5562       break;
5563     if (CmpMI->readsRegister(ARM::CPSR, TRI))
5564       break;
5565   }
5566
5567   // Check that this inst is a CMP r[0-7], #0 and that the register
5568   // is not redefined between the cmp and the br.
5569   if (CmpMI->getOpcode() != ARM::tCMPi8 && CmpMI->getOpcode() != ARM::t2CMPri)
5570     return nullptr;
5571   Register Reg = CmpMI->getOperand(0).getReg();
5572   Register PredReg;
5573   ARMCC::CondCodes Pred = getInstrPredicate(*CmpMI, PredReg);
5574   if (Pred != ARMCC::AL || CmpMI->getOperand(1).getImm() != 0)
5575     return nullptr;
5576   if (!isARMLowRegister(Reg))
5577     return nullptr;
5578   if (registerDefinedBetween(Reg, CmpMI->getNextNode(), Br, TRI))
5579     return nullptr;
5580
5581   return &*CmpMI;
5582 }
5583
5584 unsigned llvm::ConstantMaterializationCost(unsigned Val,
5585                                            const ARMSubtarget *Subtarget,
5586                                            bool ForCodesize) {
5587   if (Subtarget->isThumb()) {
5588     if (Val <= 255) // MOV
5589       return ForCodesize ? 2 : 1;
5590     if (Subtarget->hasV6T2Ops() && (Val <= 0xffff ||                // MOV
5591                                     ARM_AM::getT2SOImmVal(Val) != -1 || // MOVW
5592                                     ARM_AM::getT2SOImmVal(~Val) != -1)) // MVN
5593       return ForCodesize ? 4 : 1;
5594     if (Val <= 510) // MOV + ADDi8
5595       return ForCodesize ? 4 : 2;
5596     if (~Val <= 255) // MOV + MVN
5597       return ForCodesize ? 4 : 2;
5598     if (ARM_AM::isThumbImmShiftedVal(Val)) // MOV + LSL
5599       return ForCodesize ? 4 : 2;
5600   } else {
5601     if (ARM_AM::getSOImmVal(Val) != -1) // MOV
5602       return ForCodesize ? 4 : 1;
5603     if (ARM_AM::getSOImmVal(~Val) != -1) // MVN
5604       return ForCodesize ? 4 : 1;
5605     if (Subtarget->hasV6T2Ops() && Val <= 0xffff) // MOVW
5606       return ForCodesize ? 4 : 1;
5607     if (ARM_AM::isSOImmTwoPartVal(Val)) // two instrs
5608       return ForCodesize ? 8 : 2;
5609     if (ARM_AM::isSOImmTwoPartValNeg(Val)) // two instrs
5610       return ForCodesize ? 8 : 2;
5611   }
5612   if (Subtarget->useMovt()) // MOVW + MOVT
5613     return ForCodesize ? 8 : 2;
5614   return ForCodesize ?
8 : 3; // Literal pool load 5615 } 5616 5617 bool llvm::HasLowerConstantMaterializationCost(unsigned Val1, unsigned Val2, 5618 const ARMSubtarget *Subtarget, 5619 bool ForCodesize) { 5620 // Check with ForCodesize 5621 unsigned Cost1 = ConstantMaterializationCost(Val1, Subtarget, ForCodesize); 5622 unsigned Cost2 = ConstantMaterializationCost(Val2, Subtarget, ForCodesize); 5623 if (Cost1 < Cost2) 5624 return true; 5625 if (Cost1 > Cost2) 5626 return false; 5627 5628 // If they are equal, try with !ForCodesize 5629 return ConstantMaterializationCost(Val1, Subtarget, !ForCodesize) < 5630 ConstantMaterializationCost(Val2, Subtarget, !ForCodesize); 5631 } 5632 5633 /// Constants defining how certain sequences should be outlined. 5634 /// This encompasses how an outlined function should be called, and what kind of 5635 /// frame should be emitted for that outlined function. 5636 /// 5637 /// \p MachineOutlinerTailCall implies that the function is being created from 5638 /// a sequence of instructions ending in a return. 5639 /// 5640 /// That is, 5641 /// 5642 /// I1 OUTLINED_FUNCTION: 5643 /// I2 --> B OUTLINED_FUNCTION I1 5644 /// BX LR I2 5645 /// BX LR 5646 /// 5647 /// +-------------------------+--------+-----+ 5648 /// | | Thumb2 | ARM | 5649 /// +-------------------------+--------+-----+ 5650 /// | Call overhead in Bytes | 4 | 4 | 5651 /// | Frame overhead in Bytes | 0 | 0 | 5652 /// | Stack fixup required | No | No | 5653 /// +-------------------------+--------+-----+ 5654 /// 5655 /// \p MachineOutlinerThunk implies that the function is being created from 5656 /// a sequence of instructions ending in a call. The outlined function is 5657 /// called with a BL instruction, and the outlined function tail-calls the 5658 /// original call destination. 5659 /// 5660 /// That is, 5661 /// 5662 /// I1 OUTLINED_FUNCTION: 5663 /// I2 --> BL OUTLINED_FUNCTION I1 5664 /// BL f I2 5665 /// B f 5666 /// 5667 /// +-------------------------+--------+-----+ 5668 /// | | Thumb2 | ARM | 5669 /// +-------------------------+--------+-----+ 5670 /// | Call overhead in Bytes | 4 | 4 | 5671 /// | Frame overhead in Bytes | 0 | 0 | 5672 /// | Stack fixup required | No | No | 5673 /// +-------------------------+--------+-----+ 5674 /// 5675 /// \p MachineOutlinerNoLRSave implies that the function should be called using 5676 /// a BL instruction, but doesn't require LR to be saved and restored. This 5677 /// happens when LR is known to be dead. 5678 /// 5679 /// That is, 5680 /// 5681 /// I1 OUTLINED_FUNCTION: 5682 /// I2 --> BL OUTLINED_FUNCTION I1 5683 /// I3 I2 5684 /// I3 5685 /// BX LR 5686 /// 5687 /// +-------------------------+--------+-----+ 5688 /// | | Thumb2 | ARM | 5689 /// +-------------------------+--------+-----+ 5690 /// | Call overhead in Bytes | 4 | 4 | 5691 /// | Frame overhead in Bytes | 4 | 4 | 5692 /// | Stack fixup required | No | No | 5693 /// +-------------------------+--------+-----+ 5694 /// 5695 /// \p MachineOutlinerRegSave implies that the function should be called with a 5696 /// save and restore of LR to an available register. This allows us to avoid 5697 /// stack fixups. Note that this outlining variant is compatible with the 5698 /// NoLRSave case. 
5699 /// 5700 /// That is, 5701 /// 5702 /// I1 Save LR OUTLINED_FUNCTION: 5703 /// I2 --> BL OUTLINED_FUNCTION I1 5704 /// I3 Restore LR I2 5705 /// I3 5706 /// BX LR 5707 /// 5708 /// +-------------------------+--------+-----+ 5709 /// | | Thumb2 | ARM | 5710 /// +-------------------------+--------+-----+ 5711 /// | Call overhead in Bytes | 8 | 12 | 5712 /// | Frame overhead in Bytes | 2 | 4 | 5713 /// | Stack fixup required | No | No | 5714 /// +-------------------------+--------+-----+ 5715 /// 5716 /// \p MachineOutlinerDefault implies that the function should be called with 5717 /// a save and restore of LR to the stack. 5718 /// 5719 /// That is, 5720 /// 5721 /// I1 Save LR OUTLINED_FUNCTION: 5722 /// I2 --> BL OUTLINED_FUNCTION I1 5723 /// I3 Restore LR I2 5724 /// I3 5725 /// BX LR 5726 /// 5727 /// +-------------------------+--------+-----+ 5728 /// | | Thumb2 | ARM | 5729 /// +-------------------------+--------+-----+ 5730 /// | Call overhead in Bytes | 8 | 12 | 5731 /// | Frame overhead in Bytes | 2 | 4 | 5732 /// | Stack fixup required | Yes | Yes | 5733 /// +-------------------------+--------+-----+ 5734 5735 enum MachineOutlinerClass { 5736 MachineOutlinerTailCall, 5737 MachineOutlinerThunk, 5738 MachineOutlinerNoLRSave, 5739 MachineOutlinerRegSave, 5740 MachineOutlinerDefault 5741 }; 5742 5743 enum MachineOutlinerMBBFlags { 5744 LRUnavailableSomewhere = 0x2, 5745 HasCalls = 0x4, 5746 UnsafeRegsDead = 0x8 5747 }; 5748 5749 struct OutlinerCosts { 5750 const int CallTailCall; 5751 const int FrameTailCall; 5752 const int CallThunk; 5753 const int FrameThunk; 5754 const int CallNoLRSave; 5755 const int FrameNoLRSave; 5756 const int CallRegSave; 5757 const int FrameRegSave; 5758 const int CallDefault; 5759 const int FrameDefault; 5760 const int SaveRestoreLROnStack; 5761 5762 OutlinerCosts(const ARMSubtarget &target) 5763 : CallTailCall(target.isThumb() ? 4 : 4), 5764 FrameTailCall(target.isThumb() ? 0 : 0), 5765 CallThunk(target.isThumb() ? 4 : 4), 5766 FrameThunk(target.isThumb() ? 0 : 0), 5767 CallNoLRSave(target.isThumb() ? 4 : 4), 5768 FrameNoLRSave(target.isThumb() ? 4 : 4), 5769 CallRegSave(target.isThumb() ? 8 : 12), 5770 FrameRegSave(target.isThumb() ? 2 : 4), 5771 CallDefault(target.isThumb() ? 8 : 12), 5772 FrameDefault(target.isThumb() ? 2 : 4), 5773 SaveRestoreLROnStack(target.isThumb() ? 8 : 8) {} 5774 }; 5775 5776 unsigned 5777 ARMBaseInstrInfo::findRegisterToSaveLRTo(const outliner::Candidate &C) const { 5778 assert(C.LRUWasSet && "LRU wasn't set?"); 5779 MachineFunction *MF = C.getMF(); 5780 const ARMBaseRegisterInfo *ARI = static_cast<const ARMBaseRegisterInfo *>( 5781 MF->getSubtarget().getRegisterInfo()); 5782 5783 BitVector regsReserved = ARI->getReservedRegs(*MF); 5784 // Check if there is an available register across the sequence that we can 5785 // use. 5786 for (unsigned Reg : ARM::rGPRRegClass) { 5787 if (!(Reg < regsReserved.size() && regsReserved.test(Reg)) && 5788 Reg != ARM::LR && // LR is not reserved, but don't use it. 5789 Reg != ARM::R12 && // R12 is not guaranteed to be preserved. 5790 C.LRU.available(Reg) && C.UsedInSequence.available(Reg)) 5791 return Reg; 5792 } 5793 5794 // No suitable register. Return 0. 5795 return 0u; 5796 } 5797 5798 // Compute liveness of LR at the point after the interval [I, E), which 5799 // denotes a *backward* iteration through instructions. Used only for return 5800 // basic blocks, which do not end with a tail call. 
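// Note that several return instructions (BX_RET, tBX_RET, ...) consume LR
// without listing it as an operand, so the scan below special-cases their
// opcodes rather than relying on readsRegister() alone.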
5801 static bool isLRAvailable(const TargetRegisterInfo &TRI,
5802                           MachineBasicBlock::reverse_iterator I,
5803                           MachineBasicBlock::reverse_iterator E) {
5804   // At the end of the function LR is dead.
5805   bool Live = false;
5806   for (; I != E; ++I) {
5807     const MachineInstr &MI = *I;
5808
5809     // Check defs of LR.
5810     if (MI.modifiesRegister(ARM::LR, &TRI))
5811       Live = false;
5812
5813     // Check uses of LR.
5814     unsigned Opcode = MI.getOpcode();
5815     if (Opcode == ARM::BX_RET || Opcode == ARM::MOVPCLR ||
5816         Opcode == ARM::SUBS_PC_LR || Opcode == ARM::tBX_RET ||
5817         Opcode == ARM::tBXNS_RET) {
5818       // These instructions use LR, but it's not an (explicit or implicit)
5819       // operand.
5820       Live = true;
5821       continue;
5822     }
5823     if (MI.readsRegister(ARM::LR, &TRI))
5824       Live = true;
5825   }
5826   return !Live;
5827 }
5828
5829 outliner::OutlinedFunction ARMBaseInstrInfo::getOutliningCandidateInfo(
5830     std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
5831   outliner::Candidate &FirstCand = RepeatedSequenceLocs[0];
5832   unsigned SequenceSize =
5833       std::accumulate(FirstCand.front(), std::next(FirstCand.back()), 0,
5834                       [this](unsigned Sum, const MachineInstr &MI) {
5835                         return Sum + getInstSizeInBytes(MI);
5836                       });
5837
5838   // Properties about candidate MBBs that hold for all of them.
5839   unsigned FlagsSetInAll = 0xF;
5840
5841   // Compute liveness information for each candidate, and set FlagsSetInAll.
5842   const TargetRegisterInfo &TRI = getRegisterInfo();
5843   std::for_each(
5844       RepeatedSequenceLocs.begin(), RepeatedSequenceLocs.end(),
5845       [&FlagsSetInAll](outliner::Candidate &C) { FlagsSetInAll &= C.Flags; });
5846
5847   // According to the ARM Procedure Call Standard, the following are
5848   // undefined on entry/exit from a function call:
5849   //
5850   // * Register R12(IP),
5851   // * Condition codes (and thus the CPSR register)
5852   //
5853   // Since we control the instructions which are part of the outlined regions
5854   // we don't need to be fully compliant with the AAPCS, but we have to
5855   // guarantee that if a veneer is inserted at link time the code is still
5856   // correct. Because of this, we can't outline any sequence of instructions
5857   // where one of these registers is live into/across it. Thus, we need to
5858   // delete those candidates.
5859   auto CantGuaranteeValueAcrossCall = [&TRI](outliner::Candidate &C) {
5860     // If the unsafe registers in this block are all dead, then we don't need
5861     // to compute liveness here.
5862     if (C.Flags & UnsafeRegsDead)
5863       return false;
5864     C.initLRU(TRI);
5865     LiveRegUnits LRU = C.LRU;
5866     return (!LRU.available(ARM::R12) || !LRU.available(ARM::CPSR));
5867   };
5868
5869   // Are there any candidates where those registers are live?
5870   if (!(FlagsSetInAll & UnsafeRegsDead)) {
5871     // Erase every candidate that violates the restrictions above. (It could
5872     // be true that we have viable candidates, so it's not worth bailing out
5873     // in the case that, say, 1 out of 20 candidates violate the restrictions.)
5874     llvm::erase_if(RepeatedSequenceLocs, CantGuaranteeValueAcrossCall);
5875
5876     // If the sequence doesn't have enough candidates left, then we're done.
5877     if (RepeatedSequenceLocs.size() < 2)
5878       return outliner::OutlinedFunction();
5879   }
5880
5881   // At this point, we have only "safe" candidates to outline. Figure out
5882   // frame + call instruction information.
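  // Roughly: candidates ending in a terminator become tail calls, candidates
  // ending in a call become thunks, and everything else chooses between
  // NoLRSave, RegSave and Default depending on how LR can be preserved.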
5883 5884 unsigned LastInstrOpcode = RepeatedSequenceLocs[0].back()->getOpcode(); 5885 5886 // Helper lambda which sets call information for every candidate. 5887 auto SetCandidateCallInfo = 5888 [&RepeatedSequenceLocs](unsigned CallID, unsigned NumBytesForCall) { 5889 for (outliner::Candidate &C : RepeatedSequenceLocs) 5890 C.setCallInfo(CallID, NumBytesForCall); 5891 }; 5892 5893 OutlinerCosts Costs(Subtarget); 5894 unsigned FrameID = MachineOutlinerDefault; 5895 unsigned NumBytesToCreateFrame = Costs.FrameDefault; 5896 5897 // If the last instruction in any candidate is a terminator, then we should 5898 // tail call all of the candidates. 5899 if (RepeatedSequenceLocs[0].back()->isTerminator()) { 5900 FrameID = MachineOutlinerTailCall; 5901 NumBytesToCreateFrame = Costs.FrameTailCall; 5902 SetCandidateCallInfo(MachineOutlinerTailCall, Costs.CallTailCall); 5903 } else if (LastInstrOpcode == ARM::BL || LastInstrOpcode == ARM::BLX || 5904 LastInstrOpcode == ARM::BLX_noip || LastInstrOpcode == ARM::tBL || 5905 LastInstrOpcode == ARM::tBLXr || 5906 LastInstrOpcode == ARM::tBLXr_noip || 5907 LastInstrOpcode == ARM::tBLXi) { 5908 FrameID = MachineOutlinerThunk; 5909 NumBytesToCreateFrame = Costs.FrameThunk; 5910 SetCandidateCallInfo(MachineOutlinerThunk, Costs.CallThunk); 5911 } else { 5912 // We need to decide how to emit calls + frames. We can always emit the same 5913 // frame if we don't need to save to the stack. If we have to save to the 5914 // stack, then we need a different frame. 5915 unsigned NumBytesNoStackCalls = 0; 5916 std::vector<outliner::Candidate> CandidatesWithoutStackFixups; 5917 5918 for (outliner::Candidate &C : RepeatedSequenceLocs) { 5919 C.initLRU(TRI); 5920 // LR liveness is overestimated in return blocks, unless they end with a 5921 // tail call. 5922 const auto Last = C.getMBB()->rbegin(); 5923 const bool LRIsAvailable = 5924 C.getMBB()->isReturnBlock() && !Last->isCall() 5925 ? isLRAvailable(TRI, Last, 5926 (MachineBasicBlock::reverse_iterator)C.front()) 5927 : C.LRU.available(ARM::LR); 5928 if (LRIsAvailable) { 5929 FrameID = MachineOutlinerNoLRSave; 5930 NumBytesNoStackCalls += Costs.CallNoLRSave; 5931 C.setCallInfo(MachineOutlinerNoLRSave, Costs.CallNoLRSave); 5932 CandidatesWithoutStackFixups.push_back(C); 5933 } 5934 5935 // Is an unused register available? If so, we won't modify the stack, so 5936 // we can outline with the same frame type as those that don't save LR. 5937 else if (findRegisterToSaveLRTo(C)) { 5938 FrameID = MachineOutlinerRegSave; 5939 NumBytesNoStackCalls += Costs.CallRegSave; 5940 C.setCallInfo(MachineOutlinerRegSave, Costs.CallRegSave); 5941 CandidatesWithoutStackFixups.push_back(C); 5942 } 5943 5944 // Is SP used in the sequence at all? If not, we don't have to modify 5945 // the stack, so we are guaranteed to get the same frame. 5946 else if (C.UsedInSequence.available(ARM::SP)) { 5947 NumBytesNoStackCalls += Costs.CallDefault; 5948 C.setCallInfo(MachineOutlinerDefault, Costs.CallDefault); 5949 CandidatesWithoutStackFixups.push_back(C); 5950 } 5951 5952 // If we outline this, we need to modify the stack. Pretend we don't 5953 // outline this by saving all of its bytes. 5954 else 5955 NumBytesNoStackCalls += SequenceSize; 5956 } 5957 5958 // If there are no places where we have to save LR, then note that we don't 5959 // have to update the stack. 
Otherwise, give every candidate the default
5960     // call type.
5961     if (NumBytesNoStackCalls <=
5962         RepeatedSequenceLocs.size() * Costs.CallDefault) {
5963       RepeatedSequenceLocs = CandidatesWithoutStackFixups;
5964       FrameID = MachineOutlinerNoLRSave;
5965     } else
5966       SetCandidateCallInfo(MachineOutlinerDefault, Costs.CallDefault);
5967   }
5968
5969   // Does every candidate's MBB contain a call? If so, then we might have a
5970   // call in the range.
5971   if (FlagsSetInAll & MachineOutlinerMBBFlags::HasCalls) {
5972     // Check if the range contains a call. These require a save + restore of
5973     // the link register.
5974     if (std::any_of(FirstCand.front(), FirstCand.back(),
5975                     [](const MachineInstr &MI) { return MI.isCall(); }))
5976       NumBytesToCreateFrame += Costs.SaveRestoreLROnStack;
5977
5978     // Handle the last instruction separately. If it is a tail call, then the
5979     // last instruction is a call; we don't want to save + restore in this
5980     // case. However, it could be possible that the last instruction is a
5981     // call without it being valid to tail call this sequence. We should
5982     // consider this as well.
5983     else if (FrameID != MachineOutlinerThunk &&
5984              FrameID != MachineOutlinerTailCall && FirstCand.back()->isCall())
5985       NumBytesToCreateFrame += Costs.SaveRestoreLROnStack;
5986   }
5987
5988   return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
5989                                     NumBytesToCreateFrame, FrameID);
5990 }
5991
5992 bool ARMBaseInstrInfo::checkAndUpdateStackOffset(MachineInstr *MI,
5993                                                  int64_t Fixup,
5994                                                  bool Updt) const {
5995   int SPIdx = MI->findRegisterUseOperandIdx(ARM::SP);
5996   unsigned AddrMode = (MI->getDesc().TSFlags & ARMII::AddrModeMask);
5997   if (SPIdx < 0)
5998     // No SP operand
5999     return true;
6000   else if (SPIdx != 1 && (AddrMode != ARMII::AddrModeT2_i8s4 || SPIdx != 2))
6001     // If SP is not the base register we can't do much
6002     return false;
6003
6004   // Stack might be involved but addressing mode doesn't handle any offset.
6005   // Note: AddrModeT1_[1|2|4] don't operate on SP
6006   if (AddrMode == ARMII::AddrMode1        // Arithmetic instructions
6007       || AddrMode == ARMII::AddrMode4     // Load/Store Multiple
6008       || AddrMode == ARMII::AddrMode6     // Neon Load/Store Multiple
6009       || AddrMode == ARMII::AddrModeT2_so // SP can't be used as base register
6010       || AddrMode == ARMII::AddrModeT2_pc // PCrel access
6011       || AddrMode == ARMII::AddrMode2     // Used by PRE and POST indexed LD/ST
6012       || AddrMode == ARMII::AddrModeT2_i7   // v8.1-M MVE
6013       || AddrMode == ARMII::AddrModeT2_i7s2 // v8.1-M MVE
6014       || AddrMode == ARMII::AddrModeT2_i7s4 // v8.1-M sys regs VLDR/VSTR
6015       || AddrMode == ARMII::AddrModeNone)
6016     return false;
6017
6018   unsigned NumOps = MI->getDesc().getNumOperands();
6019   unsigned ImmIdx = NumOps - 3;
6020
6021   const MachineOperand &Offset = MI->getOperand(ImmIdx);
6022   assert(Offset.isImm() && "Is not an immediate");
6023   int64_t OffVal = Offset.getImm();
6024
6025   if (OffVal < 0)
6026     // Don't override data if they are below SP.
6027     return false;
6028
6029   unsigned NumBits = 0;
6030   unsigned Scale = 1;
6031
6032   switch (AddrMode) {
6033   case ARMII::AddrMode3:
6034     if (ARM_AM::getAM3Op(OffVal) == ARM_AM::sub)
6035       return false;
6036     OffVal = ARM_AM::getAM3Offset(OffVal);
6037     NumBits = 8;
6038     break;
6039   case ARMII::AddrMode5:
6040     if (ARM_AM::getAM5Op(OffVal) == ARM_AM::sub)
6041       return false;
6042     OffVal = ARM_AM::getAM5Offset(OffVal);
6043     NumBits = 8;
6044     Scale = 4;
6045     break;
6046   case ARMII::AddrMode5FP16:
6047     if (ARM_AM::getAM5FP16Op(OffVal) == ARM_AM::sub)
6048       return false;
6049     OffVal = ARM_AM::getAM5FP16Offset(OffVal);
6050     NumBits = 8;
6051     Scale = 2;
6052     break;
6053   case ARMII::AddrModeT2_i8:
6054     NumBits = 8;
6055     break;
6056   case ARMII::AddrModeT2_i8s4:
6057     // FIXME: Values are already scaled in this addressing mode.
6058     assert((Fixup & 3) == 0 && "Can't encode this offset!");
6059     NumBits = 10;
6060     break;
6061   case ARMII::AddrModeT2_ldrex:
6062     NumBits = 8;
6063     Scale = 4;
6064     break;
6065   case ARMII::AddrModeT2_i12:
6066   case ARMII::AddrMode_i12:
6067     NumBits = 12;
6068     break;
6069   case ARMII::AddrModeT1_s: // SP-relative LD/ST
6070     NumBits = 8;
6071     Scale = 4;
6072     break;
6073   default:
6074     llvm_unreachable("Unsupported addressing mode!");
6075   }
6076   // Make sure the offset is encodable for instructions that scale the
6077   // immediate.
6078   assert(((OffVal * Scale + Fixup) & (Scale - 1)) == 0 &&
6079          "Can't encode this offset!");
6080   OffVal += Fixup / Scale;
6081
6082   unsigned Mask = (1 << NumBits) - 1;
6083
6084   if (OffVal <= Mask) {
6085     if (Updt)
6086       MI->getOperand(ImmIdx).setImm(OffVal);
6087     return true;
6088   }
6089
6090   return false;
6091
6092 }
6093
6094 bool ARMBaseInstrInfo::isFunctionSafeToOutlineFrom(
6095     MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
6096   const Function &F = MF.getFunction();
6097
6098   // Can F be deduplicated by the linker? If it can, don't outline from it.
6099   if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
6100     return false;
6101
6102   // Don't outline from functions with section markings; the program could
6103   // expect that all the code is in the named section.
6104   // FIXME: Allow outlining from multiple functions with the same section
6105   // marking.
6106   if (F.hasSection())
6107     return false;
6108
6109   // FIXME: Thumb1 outlining is not handled
6110   if (MF.getInfo<ARMFunctionInfo>()->isThumb1OnlyFunction())
6111     return false;
6112
6113   // It's safe to outline from MF.
6114   return true;
6115 }
6116
6117 bool ARMBaseInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
6118                                               unsigned &Flags) const {
6119   // Check if LR is available through all of the MBB. If it's not, then set
6120   // a flag.
6121   assert(MBB.getParent()->getRegInfo().tracksLiveness() &&
6122          "Suitable Machine Function for outlining must track liveness");
6123
6124   LiveRegUnits LRU(getRegisterInfo());
6125
6126   std::for_each(MBB.rbegin(), MBB.rend(),
6127                 [&LRU](MachineInstr &MI) { LRU.accumulate(MI); });
6128
6129   // Check if each of the unsafe registers is available...
6130   bool R12AvailableInBlock = LRU.available(ARM::R12);
6131   bool CPSRAvailableInBlock = LRU.available(ARM::CPSR);
6132
6133   // If all of these are dead (and not live out), we know we don't have to
6134   // check them later.
6135   if (R12AvailableInBlock && CPSRAvailableInBlock)
6136     Flags |= MachineOutlinerMBBFlags::UnsafeRegsDead;
6137
6138   // Now, add the live outs to the set.
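  // Accumulating over the block above only says whether a register is
  // touched somewhere inside it; folding in the live-outs distinguishes
  // "dead throughout" from "unused here but live out of the block".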
6139   LRU.addLiveOuts(MBB);
6140
6141   // If any of these registers is available in the MBB, but also a live out of
6142   // the block, then we know outlining is unsafe.
6143   if (R12AvailableInBlock && !LRU.available(ARM::R12))
6144     return false;
6145   if (CPSRAvailableInBlock && !LRU.available(ARM::CPSR))
6146     return false;
6147
6148   // Check if there's a call inside this MachineBasicBlock. If there is, then
6149   // set a flag.
6150   if (any_of(MBB, [](MachineInstr &MI) { return MI.isCall(); }))
6151     Flags |= MachineOutlinerMBBFlags::HasCalls;
6152
6153   // LR liveness is overestimated in return blocks.
6154
6155   bool LRIsAvailable =
6156       MBB.isReturnBlock() && !MBB.back().isCall()
6157           ? isLRAvailable(getRegisterInfo(), MBB.rbegin(), MBB.rend())
6158           : LRU.available(ARM::LR);
6159   if (!LRIsAvailable)
6160     Flags |= MachineOutlinerMBBFlags::LRUnavailableSomewhere;
6161
6162   return true;
6163 }
6164
6165 outliner::InstrType
6166 ARMBaseInstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT,
6167                                    unsigned Flags) const {
6168   MachineInstr &MI = *MIT;
6169   const TargetRegisterInfo *TRI = &getRegisterInfo();
6170
6171   // Be conservative with inline ASM
6172   if (MI.isInlineAsm())
6173     return outliner::InstrType::Illegal;
6174
6175   // Don't allow debug values to impact outlining type.
6176   if (MI.isDebugInstr() || MI.isIndirectDebugValue())
6177     return outliner::InstrType::Invisible;
6178
6179   // At this point, KILL or IMPLICIT_DEF instructions don't really tell us much
6180   // so we can go ahead and skip over them.
6181   if (MI.isKill() || MI.isImplicitDef())
6182     return outliner::InstrType::Invisible;
6183
6184   // PIC instructions contain labels, outlining them would break offset
6185   // computing.
6186   unsigned Opc = MI.getOpcode();
6187   if (Opc == ARM::tPICADD || Opc == ARM::PICADD || Opc == ARM::PICSTR ||
6188       Opc == ARM::PICSTRB || Opc == ARM::PICSTRH || Opc == ARM::PICLDR ||
6189       Opc == ARM::PICLDRB || Opc == ARM::PICLDRH || Opc == ARM::PICLDRSB ||
6190       Opc == ARM::PICLDRSH || Opc == ARM::t2LDRpci_pic ||
6191       Opc == ARM::t2MOVi16_ga_pcrel || Opc == ARM::t2MOVTi16_ga_pcrel ||
6192       Opc == ARM::t2MOV_ga_pcrel)
6193     return outliner::InstrType::Illegal;
6194
6195   // Be conservative with ARMv8.1 MVE instructions.
6196   if (Opc == ARM::t2BF_LabelPseudo || Opc == ARM::t2DoLoopStart ||
6197       Opc == ARM::t2DoLoopStartTP || Opc == ARM::t2WhileLoopStart ||
6198       Opc == ARM::t2WhileLoopStartLR || Opc == ARM::t2WhileLoopStartTP ||
6199       Opc == ARM::t2LoopDec || Opc == ARM::t2LoopEnd ||
6200       Opc == ARM::t2LoopEndDec)
6201     return outliner::InstrType::Illegal;
6202
6203   const MCInstrDesc &MCID = MI.getDesc();
6204   uint64_t MIFlags = MCID.TSFlags;
6205   if ((MIFlags & ARMII::DomainMask) == ARMII::DomainMVE)
6206     return outliner::InstrType::Illegal;
6207
6208   // Is this a terminator for a basic block?
6209   if (MI.isTerminator()) {
6210     // Don't outline if the branch is not unconditional.
6211     if (isPredicated(MI))
6212       return outliner::InstrType::Illegal;
6213
6214     // Is this the end of a function?
6215     if (MI.getParent()->succ_empty())
6216       return outliner::InstrType::Legal;
6217
6218     // It's not, so don't outline it.
6219     return outliner::InstrType::Illegal;
6220   }
6221
6222   // Make sure none of the operands are un-outlinable.
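  // Constant-pool indices, jump-table indices, CFI indices, frame indices
  // and target indices are all resolved relative to the original function,
  // so a sequence referencing any of them cannot safely be moved elsewhere.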
6223   for (const MachineOperand &MOP : MI.operands()) {
6224     if (MOP.isCPI() || MOP.isJTI() || MOP.isCFIIndex() || MOP.isFI() ||
6225         MOP.isTargetIndex())
6226       return outliner::InstrType::Illegal;
6227   }
6228
6229   // Don't outline if the link register or program counter value is used.
6230   if (MI.readsRegister(ARM::LR, TRI) || MI.readsRegister(ARM::PC, TRI))
6231     return outliner::InstrType::Illegal;
6232
6233   if (MI.isCall()) {
6234     // Get the function associated with the call. Look at each operand and find
6235     // the one that represents the callee and get its name.
6236     const Function *Callee = nullptr;
6237     for (const MachineOperand &MOP : MI.operands()) {
6238       if (MOP.isGlobal()) {
6239         Callee = dyn_cast<Function>(MOP.getGlobal());
6240         break;
6241       }
6242     }
6243
6244     // Don't outline calls to "mcount"-like functions; in particular, Linux
6245     // kernel function tracing relies on it.
6246     if (Callee &&
6247         (Callee->getName() == "\01__gnu_mcount_nc" ||
6248          Callee->getName() == "\01mcount" || Callee->getName() == "__mcount"))
6249       return outliner::InstrType::Illegal;
6250
6251     // If we don't know anything about the callee, assume it depends on the
6252     // stack layout of the caller. In that case, it's only legal to outline
6253     // as a tail-call. Explicitly list the call instructions we know about so
6254     // we don't get unexpected results with call pseudo-instructions.
6255     auto UnknownCallOutlineType = outliner::InstrType::Illegal;
6256     if (Opc == ARM::BL || Opc == ARM::tBL || Opc == ARM::BLX ||
6257         Opc == ARM::BLX_noip || Opc == ARM::tBLXr || Opc == ARM::tBLXr_noip ||
6258         Opc == ARM::tBLXi)
6259       UnknownCallOutlineType = outliner::InstrType::LegalTerminator;
6260
6261     if (!Callee)
6262       return UnknownCallOutlineType;
6263
6264     // We have a function we have information about. Check if it's something we
6265     // can safely outline.
6266     MachineFunction *MF = MI.getParent()->getParent();
6267     MachineFunction *CalleeMF = MF->getMMI().getMachineFunction(*Callee);
6268
6269     // We don't know what's going on with the callee at all. Don't touch it.
6270     if (!CalleeMF)
6271       return UnknownCallOutlineType;
6272
6273     // Check if we know anything about the callee saves on the function. If we
6274     // don't, then don't touch it, since that implies that we haven't computed
6275     // anything about its stack frame yet.
6276     MachineFrameInfo &MFI = CalleeMF->getFrameInfo();
6277     if (!MFI.isCalleeSavedInfoValid() || MFI.getStackSize() > 0 ||
6278         MFI.getNumObjects() > 0)
6279       return UnknownCallOutlineType;
6280
6281     // At this point, we can say that CalleeMF ought to not pass anything on the
6282     // stack. Therefore, we can outline it.
6283     return outliner::InstrType::Legal;
6284   }
6285
6286   // Since calls are handled, don't touch LR or PC
6287   if (MI.modifiesRegister(ARM::LR, TRI) || MI.modifiesRegister(ARM::PC, TRI))
6288     return outliner::InstrType::Illegal;
6289
6290   // Does this use the stack?
6291   if (MI.modifiesRegister(ARM::SP, TRI) || MI.readsRegister(ARM::SP, TRI)) {
6292     // True if there is no chance that any outlined candidate from this range
6293     // could require stack fixups. That is, both
6294     // * LR is available in the range (No save/restore around call)
6295     // * The range doesn't include calls (No save/restore in outlined frame)
6296     // are true.
6297     // FIXME: This is very restrictive; the flags check the whole block,
6298     // not just the bit we will try to outline.
6299     bool MightNeedStackFixUp =
6300         (Flags & (MachineOutlinerMBBFlags::LRUnavailableSomewhere |
6301                   MachineOutlinerMBBFlags::HasCalls));
6302
6303     if (!MightNeedStackFixUp)
6304       return outliner::InstrType::Legal;
6305
6306     // Any modification of SP will break our code to save/restore LR.
6307     // FIXME: We could handle some instructions which add a constant offset to
6308     // SP, with a bit more work.
6309     if (MI.modifiesRegister(ARM::SP, TRI))
6310       return outliner::InstrType::Illegal;
6311
6312     // At this point, we have a stack instruction that we might need to fix
6313     // up. We'll handle it if it's a load or store.
6314     if (checkAndUpdateStackOffset(&MI, Subtarget.getStackAlignment().value(),
6315                                   false))
6316       return outliner::InstrType::Legal;
6317
6318     // We can't fix it up, so don't outline it.
6319     return outliner::InstrType::Illegal;
6320   }
6321
6322   // Be conservative with IT blocks.
6323   if (MI.readsRegister(ARM::ITSTATE, TRI) ||
6324       MI.modifiesRegister(ARM::ITSTATE, TRI))
6325     return outliner::InstrType::Illegal;
6326
6327   // Don't outline positions.
6328   if (MI.isPosition())
6329     return outliner::InstrType::Illegal;
6330
6331   return outliner::InstrType::Legal;
6332 }
6333
6334 void ARMBaseInstrInfo::fixupPostOutline(MachineBasicBlock &MBB) const {
6335   for (MachineInstr &MI : MBB) {
6336     checkAndUpdateStackOffset(&MI, Subtarget.getStackAlignment().value(), true);
6337   }
6338 }
6339
6340 void ARMBaseInstrInfo::saveLROnStack(MachineBasicBlock &MBB,
6341                                      MachineBasicBlock::iterator It) const {
6342   unsigned Opc = Subtarget.isThumb() ? ARM::t2STR_PRE : ARM::STR_PRE_IMM;
6343   int Align = -Subtarget.getStackAlignment().value();
6344   BuildMI(MBB, It, DebugLoc(), get(Opc), ARM::SP)
6345       .addReg(ARM::LR, RegState::Kill)
6346       .addReg(ARM::SP)
6347       .addImm(Align)
6348       .add(predOps(ARMCC::AL));
6349 }
6350
6351 void ARMBaseInstrInfo::emitCFIForLRSaveOnStack(
6352     MachineBasicBlock &MBB, MachineBasicBlock::iterator It) const {
6353   MachineFunction &MF = *MBB.getParent();
6354   const MCRegisterInfo *MRI = Subtarget.getRegisterInfo();
6355   unsigned DwarfLR = MRI->getDwarfRegNum(ARM::LR, true);
6356   int Align = Subtarget.getStackAlignment().value();
6357   // Add a CFI saying the stack was moved down.
6358   int64_t StackPosEntry =
6359       MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(nullptr, Align));
6360   BuildMI(MBB, It, DebugLoc(), get(ARM::CFI_INSTRUCTION))
6361       .addCFIIndex(StackPosEntry)
6362       .setMIFlags(MachineInstr::FrameSetup);
6363
6364   // Add a CFI saying that the LR that we want to find is now higher than
6365   // before.
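  // (createOffset below records that the saved LR now lives at CFA - Align,
  // i.e. in the slot the pre-indexed store in saveLROnStack pushed it into.)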
6366 int64_t LRPosEntry = 6367 MF.addFrameInst(MCCFIInstruction::createOffset(nullptr, DwarfLR, -Align)); 6368 BuildMI(MBB, It, DebugLoc(), get(ARM::CFI_INSTRUCTION)) 6369 .addCFIIndex(LRPosEntry) 6370 .setMIFlags(MachineInstr::FrameSetup); 6371 } 6372 6373 void ARMBaseInstrInfo::emitCFIForLRSaveToReg(MachineBasicBlock &MBB, 6374 MachineBasicBlock::iterator It, 6375 Register Reg) const { 6376 MachineFunction &MF = *MBB.getParent(); 6377 const MCRegisterInfo *MRI = Subtarget.getRegisterInfo(); 6378 unsigned DwarfLR = MRI->getDwarfRegNum(ARM::LR, true); 6379 unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true); 6380 6381 int64_t LRPosEntry = MF.addFrameInst( 6382 MCCFIInstruction::createRegister(nullptr, DwarfLR, DwarfReg)); 6383 BuildMI(MBB, It, DebugLoc(), get(ARM::CFI_INSTRUCTION)) 6384 .addCFIIndex(LRPosEntry) 6385 .setMIFlags(MachineInstr::FrameSetup); 6386 } 6387 6388 void ARMBaseInstrInfo::restoreLRFromStack( 6389 MachineBasicBlock &MBB, MachineBasicBlock::iterator It) const { 6390 unsigned Opc = Subtarget.isThumb() ? ARM::t2LDR_POST : ARM::LDR_POST_IMM; 6391 MachineInstrBuilder MIB = BuildMI(MBB, It, DebugLoc(), get(Opc), ARM::LR) 6392 .addReg(ARM::SP, RegState::Define) 6393 .addReg(ARM::SP); 6394 if (!Subtarget.isThumb()) 6395 MIB.addReg(0); 6396 MIB.addImm(Subtarget.getStackAlignment().value()).add(predOps(ARMCC::AL)); 6397 } 6398 6399 void ARMBaseInstrInfo::emitCFIForLRRestoreFromStack( 6400 MachineBasicBlock &MBB, MachineBasicBlock::iterator It) const { 6401 // Now stack has moved back up... 6402 MachineFunction &MF = *MBB.getParent(); 6403 const MCRegisterInfo *MRI = Subtarget.getRegisterInfo(); 6404 unsigned DwarfLR = MRI->getDwarfRegNum(ARM::LR, true); 6405 int64_t StackPosEntry = 6406 MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(nullptr, 0)); 6407 BuildMI(MBB, It, DebugLoc(), get(ARM::CFI_INSTRUCTION)) 6408 .addCFIIndex(StackPosEntry) 6409 .setMIFlags(MachineInstr::FrameDestroy); 6410 6411 // ... and we have restored LR. 6412 int64_t LRPosEntry = 6413 MF.addFrameInst(MCCFIInstruction::createRestore(nullptr, DwarfLR)); 6414 BuildMI(MBB, It, DebugLoc(), get(ARM::CFI_INSTRUCTION)) 6415 .addCFIIndex(LRPosEntry) 6416 .setMIFlags(MachineInstr::FrameDestroy); 6417 } 6418 6419 void ARMBaseInstrInfo::emitCFIForLRRestoreFromReg( 6420 MachineBasicBlock &MBB, MachineBasicBlock::iterator It) const { 6421 MachineFunction &MF = *MBB.getParent(); 6422 const MCRegisterInfo *MRI = Subtarget.getRegisterInfo(); 6423 unsigned DwarfLR = MRI->getDwarfRegNum(ARM::LR, true); 6424 6425 int64_t LRPosEntry = 6426 MF.addFrameInst(MCCFIInstruction::createRestore(nullptr, DwarfLR)); 6427 BuildMI(MBB, It, DebugLoc(), get(ARM::CFI_INSTRUCTION)) 6428 .addCFIIndex(LRPosEntry) 6429 .setMIFlags(MachineInstr::FrameDestroy); 6430 } 6431 6432 void ARMBaseInstrInfo::buildOutlinedFrame( 6433 MachineBasicBlock &MBB, MachineFunction &MF, 6434 const outliner::OutlinedFunction &OF) const { 6435 // For thunk outlining, rewrite the last instruction from a call to a 6436 // tail-call. 6437 if (OF.FrameConstructionID == MachineOutlinerThunk) { 6438 MachineInstr *Call = &*--MBB.instr_end(); 6439 bool isThumb = Subtarget.isThumb(); 6440 unsigned FuncOp = isThumb ? 2 : 0; 6441 unsigned Opc = Call->getOperand(FuncOp).isReg() 6442 ? isThumb ? ARM::tTAILJMPr : ARM::TAILJMPr 6443 : isThumb ? Subtarget.isTargetMachO() ? 
ARM::tTAILJMPd
6444                             : ARM::tTAILJMPdND
6445                    : ARM::TAILJMPd;
6446     MachineInstrBuilder MIB = BuildMI(MBB, MBB.end(), DebugLoc(), get(Opc))
6447                                   .add(Call->getOperand(FuncOp));
6448     if (isThumb && !Call->getOperand(FuncOp).isReg())
6449       MIB.add(predOps(ARMCC::AL));
6450     Call->eraseFromParent();
6451   }
6452
6453   // Is there a call in the outlined range?
6454   auto IsNonTailCall = [](MachineInstr &MI) {
6455     return MI.isCall() && !MI.isReturn();
6456   };
6457   if (llvm::any_of(MBB.instrs(), IsNonTailCall)) {
6458     MachineBasicBlock::iterator It = MBB.begin();
6459     MachineBasicBlock::iterator Et = MBB.end();
6460
6461     if (OF.FrameConstructionID == MachineOutlinerTailCall ||
6462         OF.FrameConstructionID == MachineOutlinerThunk)
6463       Et = std::prev(MBB.end());
6464
6465     // We have to save and restore LR, so we need to add it to the liveins if
6466     // it is not already part of the set. This is sufficient since outlined
6467     // functions only have one block.
6468     if (!MBB.isLiveIn(ARM::LR))
6469       MBB.addLiveIn(ARM::LR);
6470
6471     // Insert a save before the outlined region
6472     saveLROnStack(MBB, It);
6473     emitCFIForLRSaveOnStack(MBB, It);
6474
6475     // Fix up the instructions in the range, since we're going to modify the
6476     // stack.
6477     assert(OF.FrameConstructionID != MachineOutlinerDefault &&
6478            "Can only fix up stack references once");
6479     fixupPostOutline(MBB);
6480
6481     // Insert a restore before the terminator for the function. Restore LR.
6482     restoreLRFromStack(MBB, Et);
6483     emitCFIForLRRestoreFromStack(MBB, Et);
6484   }
6485
6486   // If this is a tail call outlined function, then there's already a return.
6487   if (OF.FrameConstructionID == MachineOutlinerTailCall ||
6488       OF.FrameConstructionID == MachineOutlinerThunk)
6489     return;
6490
6491   // Here we have to insert the return ourselves. Get the correct opcode from
6492   // the current feature set.
6493   BuildMI(MBB, MBB.end(), DebugLoc(), get(Subtarget.getReturnOpcode()))
6494       .add(predOps(ARMCC::AL));
6495
6496   // Did we have to modify the stack by saving the link register?
6497   if (OF.FrameConstructionID != MachineOutlinerDefault &&
6498       OF.Candidates[0].CallConstructionID != MachineOutlinerDefault)
6499     return;
6500
6501   // We modified the stack.
6502   // Walk over the basic block and fix up all the stack accesses.
6503   fixupPostOutline(MBB);
6504 }
6505
6506 MachineBasicBlock::iterator ARMBaseInstrInfo::insertOutlinedCall(
6507     Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
6508     MachineFunction &MF, const outliner::Candidate &C) const {
6509   MachineInstrBuilder MIB;
6510   MachineBasicBlock::iterator CallPt;
6511   unsigned Opc;
6512   bool isThumb = Subtarget.isThumb();
6513
6514   // Are we tail calling?
6515   if (C.CallConstructionID == MachineOutlinerTailCall) {
6516     // If yes, then we can just branch to the label.
6517     Opc = isThumb
6518               ? Subtarget.isTargetMachO() ? ARM::tTAILJMPd : ARM::tTAILJMPdND
6519               : ARM::TAILJMPd;
6520     MIB = BuildMI(MF, DebugLoc(), get(Opc))
6521               .addGlobalAddress(M.getNamedValue(MF.getName()));
6522     if (isThumb)
6523       MIB.add(predOps(ARMCC::AL));
6524     It = MBB.insert(It, MIB);
6525     return It;
6526   }
6527
6528   // Create the call instruction.
6529   Opc = isThumb ?
ARM::tBL : ARM::BL; 6530 MachineInstrBuilder CallMIB = BuildMI(MF, DebugLoc(), get(Opc)); 6531 if (isThumb) 6532 CallMIB.add(predOps(ARMCC::AL)); 6533 CallMIB.addGlobalAddress(M.getNamedValue(MF.getName())); 6534 6535 if (C.CallConstructionID == MachineOutlinerNoLRSave || 6536 C.CallConstructionID == MachineOutlinerThunk) { 6537 // No, so just insert the call. 6538 It = MBB.insert(It, CallMIB); 6539 return It; 6540 } 6541 6542 const ARMFunctionInfo &AFI = *C.getMF()->getInfo<ARMFunctionInfo>(); 6543 // Can we save to a register? 6544 if (C.CallConstructionID == MachineOutlinerRegSave) { 6545 unsigned Reg = findRegisterToSaveLRTo(C); 6546 assert(Reg != 0 && "No callee-saved register available?"); 6547 6548 // Save and restore LR from that register. 6549 copyPhysReg(MBB, It, DebugLoc(), Reg, ARM::LR, true); 6550 if (!AFI.isLRSpilled()) 6551 emitCFIForLRSaveToReg(MBB, It, Reg); 6552 CallPt = MBB.insert(It, CallMIB); 6553 copyPhysReg(MBB, It, DebugLoc(), ARM::LR, Reg, true); 6554 if (!AFI.isLRSpilled()) 6555 emitCFIForLRRestoreFromReg(MBB, It); 6556 It--; 6557 return CallPt; 6558 } 6559 // We have the default case. Save and restore from SP. 6560 if (!MBB.isLiveIn(ARM::LR)) 6561 MBB.addLiveIn(ARM::LR); 6562 saveLROnStack(MBB, It); 6563 if (!AFI.isLRSpilled()) 6564 emitCFIForLRSaveOnStack(MBB, It); 6565 CallPt = MBB.insert(It, CallMIB); 6566 restoreLRFromStack(MBB, It); 6567 if (!AFI.isLRSpilled()) 6568 emitCFIForLRRestoreFromStack(MBB, It); 6569 It--; 6570 return CallPt; 6571 } 6572 6573 bool ARMBaseInstrInfo::shouldOutlineFromFunctionByDefault( 6574 MachineFunction &MF) const { 6575 return Subtarget.isMClass() && MF.getFunction().hasMinSize(); 6576 } 6577 6578 bool ARMBaseInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI, 6579 AAResults *AA) const { 6580 // Try hard to rematerialize any VCTPs because if we spill P0, it will block 6581 // the tail predication conversion. This means that the element count 6582 // register has to be live for longer, but that has to be better than 6583 // spill/restore and VPT predication. 6584 return isVCTP(&MI) && !isPredicated(MI); 6585 } 6586 6587 unsigned llvm::getBLXOpcode(const MachineFunction &MF) { 6588 return (MF.getSubtarget<ARMSubtarget>().hardenSlsBlr()) ? ARM::BLX_noip 6589 : ARM::BLX; 6590 } 6591 6592 unsigned llvm::gettBLXrOpcode(const MachineFunction &MF) { 6593 return (MF.getSubtarget<ARMSubtarget>().hardenSlsBlr()) ? ARM::tBLXr_noip 6594 : ARM::tBLXr; 6595 } 6596 6597 unsigned llvm::getBLXpredOpcode(const MachineFunction &MF) { 6598 return (MF.getSubtarget<ARMSubtarget>().hardenSlsBlr()) ? ARM::BLX_pred_noip 6599 : ARM::BLX_pred; 6600 } 6601 6602