//===-- ARMBaseInstrInfo.cpp - ARM Instruction Information ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the Base ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMConstantPoolValue.h"
#include "ARMFeatures.h"
#include "ARMHazardRecognizer.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "MVETailPredUtils.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/MultiHazardRecognizer.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <new>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "arm-instrinfo"

#define GET_INSTRINFO_CTOR_DTOR
#include "ARMGenInstrInfo.inc"

static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
               cl::desc("Enable ARM 2-addr to 3-addr conv"));

/// ARM_MLxEntry - Record information about MLA / MLS instructions.
struct ARM_MLxEntry {
  uint16_t MLxOpc;     // MLA / MLS opcode
  uint16_t MulOpc;     // Expanded multiplication opcode
  uint16_t AddSubOpc;  // Expanded add / sub opcode
  bool NegAcc;         // True if the acc is negated before the add / sub.
  bool HasLane;        // True if instruction has an extra "lane" operand.
};
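
// As an illustrative sketch (not code the compiler emits verbatim): the entry
//   { ARM::VMLAS, ARM::VMULS, ARM::VADDS, false, false }
// records that a fused "vmla.f32 s0, s1, s2" (s0 += s1 * s2) may be split into
//   vmul.f32 s3, s1, s2
//   vadd.f32 s0, s0, s3
// when keeping the accumulate form would cause a pipeline hazard. NegAcc
// entries negate the accumulator before the add / sub (the VNML* forms), and
// HasLane entries carry an extra lane-index operand on the multiply.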

static const ARM_MLxEntry ARM_MLxTable[] = {
  // MLxOpc,        MulOpc,        AddSubOpc,   NegAcc, HasLane
  // fp scalar ops
  { ARM::VMLAS,     ARM::VMULS,    ARM::VADDS,  false,  false },
  { ARM::VMLSS,     ARM::VMULS,    ARM::VSUBS,  false,  false },
  { ARM::VMLAD,     ARM::VMULD,    ARM::VADDD,  false,  false },
  { ARM::VMLSD,     ARM::VMULD,    ARM::VSUBD,  false,  false },
  { ARM::VNMLAS,    ARM::VNMULS,   ARM::VSUBS,  true,   false },
  { ARM::VNMLSS,    ARM::VMULS,    ARM::VSUBS,  true,   false },
  { ARM::VNMLAD,    ARM::VNMULD,   ARM::VSUBD,  true,   false },
  { ARM::VNMLSD,    ARM::VMULD,    ARM::VSUBD,  true,   false },

  // fp SIMD ops
  { ARM::VMLAfd,    ARM::VMULfd,   ARM::VADDfd, false,  false },
  { ARM::VMLSfd,    ARM::VMULfd,   ARM::VSUBfd, false,  false },
  { ARM::VMLAfq,    ARM::VMULfq,   ARM::VADDfq, false,  false },
  { ARM::VMLSfq,    ARM::VMULfq,   ARM::VSUBfq, false,  false },
  { ARM::VMLAslfd,  ARM::VMULslfd, ARM::VADDfd, false,  true  },
  { ARM::VMLSslfd,  ARM::VMULslfd, ARM::VSUBfd, false,  true  },
  { ARM::VMLAslfq,  ARM::VMULslfq, ARM::VADDfq, false,  true  },
  { ARM::VMLSslfq,  ARM::VMULslfq, ARM::VSUBfq, false,  true  },
};

ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
  : ARMGenInstrInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
    Subtarget(STI) {
  for (unsigned i = 0, e = array_lengthof(ARM_MLxTable); i != e; ++i) {
    if (!MLxEntryMap.insert(std::make_pair(ARM_MLxTable[i].MLxOpc, i)).second)
      llvm_unreachable("Duplicated entries?");
    MLxHazardOpcodes.insert(ARM_MLxTable[i].AddSubOpc);
    MLxHazardOpcodes.insert(ARM_MLxTable[i].MulOpc);
  }
}

// Use a ScoreboardHazardRecognizer for prepass ARM scheduling. TargetInstrImpl
// currently defaults to no prepass hazard recognizer.
ScheduleHazardRecognizer *
ARMBaseInstrInfo::CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                                               const ScheduleDAG *DAG) const {
  if (usePreRAHazardRecognizer()) {
    const InstrItineraryData *II =
        static_cast<const ARMSubtarget *>(STI)->getInstrItineraryData();
    return new ScoreboardHazardRecognizer(II, DAG, "pre-RA-sched");
  }
  return TargetInstrInfo::CreateTargetHazardRecognizer(STI, DAG);
}

// Called during:
// - pre-RA scheduling
// - post-RA scheduling when FeatureUseMISched is set
ScheduleHazardRecognizer *ARMBaseInstrInfo::CreateTargetMIHazardRecognizer(
    const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
  MultiHazardRecognizer *MHR = new MultiHazardRecognizer();

  // We would like to restrict this hazard recognizer to only
  // post-RA scheduling; we can tell that we're post-RA because we don't
  // track VRegLiveness.
  // Cortex-M7: TRM indicates that there is a single ITCM bank and two DTCM
  // banks banked on bit 2. Assume that TCMs are in use.
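  // A sketch of the intent (my reading of the 0x4 argument below): two
  // neighbouring memory operations whose addresses agree in bit 2 would hit
  // the same DTCM bank and may stall, so the bank-conflict recognizer is
  // given 0x4 as the address mask to use when deciding whether to keep such
  // accesses apart.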
  if (Subtarget.isCortexM7() && !DAG->hasVRegLiveness())
    MHR->AddHazardRecognizer(
        std::make_unique<ARMBankConflictHazardRecognizer>(DAG, 0x4, true));

  // Not inserting ARMHazardRecognizerFPMLx because that would change
  // legacy behavior

  auto BHR = TargetInstrInfo::CreateTargetMIHazardRecognizer(II, DAG);
  MHR->AddHazardRecognizer(std::unique_ptr<ScheduleHazardRecognizer>(BHR));
  return MHR;
}

// Called during post-RA scheduling when FeatureUseMISched is not set
ScheduleHazardRecognizer *ARMBaseInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  MultiHazardRecognizer *MHR = new MultiHazardRecognizer();

  if (Subtarget.isThumb2() || Subtarget.hasVFP2Base())
    MHR->AddHazardRecognizer(std::make_unique<ARMHazardRecognizerFPMLx>());

  auto BHR = TargetInstrInfo::CreateTargetPostRAHazardRecognizer(II, DAG);
  if (BHR)
    MHR->AddHazardRecognizer(std::unique_ptr<ScheduleHazardRecognizer>(BHR));
  return MHR;
}

MachineInstr *
ARMBaseInstrInfo::convertToThreeAddress(MachineInstr &MI, LiveVariables *LV,
                                        LiveIntervals *LIS) const {
  // FIXME: Thumb2 support.

  if (!EnableARM3Addr)
    return nullptr;

  MachineFunction &MF = *MI.getParent()->getParent();
  uint64_t TSFlags = MI.getDesc().TSFlags;
  bool isPre = false;
  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
  default: return nullptr;
  case ARMII::IndexModePre:
    isPre = true;
    break;
  case ARMII::IndexModePost:
    break;
  }

  // Try splitting an indexed load/store to an un-indexed one plus an add/sub
  // operation.
  unsigned MemOpc = getUnindexedOpcode(MI.getOpcode());
  if (MemOpc == 0)
    return nullptr;

  MachineInstr *UpdateMI = nullptr;
  MachineInstr *MemMI = nullptr;
  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
  const MCInstrDesc &MCID = MI.getDesc();
  unsigned NumOps = MCID.getNumOperands();
  bool isLoad = !MI.mayStore();
  const MachineOperand &WB = isLoad ? MI.getOperand(1) : MI.getOperand(0);
  const MachineOperand &Base = MI.getOperand(2);
  const MachineOperand &Offset = MI.getOperand(NumOps - 3);
  Register WBReg = WB.getReg();
  Register BaseReg = Base.getReg();
  Register OffReg = Offset.getReg();
  unsigned OffImm = MI.getOperand(NumOps - 2).getImm();
  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI.getOperand(NumOps - 1).getImm();
  switch (AddrMode) {
  default: llvm_unreachable("Unknown indexed op!");
  case ARMII::AddrMode2: {
    bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM2Offset(OffImm);
    if (OffReg == 0) {
      if (ARM_AM::getSOImmVal(Amt) == -1)
        // Can't encode it in a so_imm operand. This transformation will
        // add more than 1 instruction. Abandon!
        return nullptr;
      UpdateMI = BuildMI(MF, MI.getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
                     .addReg(BaseReg)
                     .addImm(Amt)
                     .add(predOps(Pred))
                     .add(condCodeOp());
    } else if (Amt != 0) {
      ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
      unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
      UpdateMI = BuildMI(MF, MI.getDebugLoc(),
                         get(isSub ? ARM::SUBrsi : ARM::ADDrsi), WBReg)
                     .addReg(BaseReg)
                     .addReg(OffReg)
                     .addReg(0)
                     .addImm(SOOpc)
                     .add(predOps(Pred))
                     .add(condCodeOp());
    } else
      UpdateMI = BuildMI(MF, MI.getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
                     .addReg(BaseReg)
                     .addReg(OffReg)
                     .add(predOps(Pred))
                     .add(condCodeOp());
    break;
  }
  case ARMII::AddrMode3: {
    bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM3Offset(OffImm);
    if (OffReg == 0)
      // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
      UpdateMI = BuildMI(MF, MI.getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
                     .addReg(BaseReg)
                     .addImm(Amt)
                     .add(predOps(Pred))
                     .add(condCodeOp());
    else
      UpdateMI = BuildMI(MF, MI.getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
                     .addReg(BaseReg)
                     .addReg(OffReg)
                     .add(predOps(Pred))
                     .add(condCodeOp());
    break;
  }
  }

  std::vector<MachineInstr*> NewMIs;
  if (isPre) {
    if (isLoad)
      MemMI =
          BuildMI(MF, MI.getDebugLoc(), get(MemOpc), MI.getOperand(0).getReg())
              .addReg(WBReg)
              .addImm(0)
              .addImm(Pred);
    else
      MemMI = BuildMI(MF, MI.getDebugLoc(), get(MemOpc))
                  .addReg(MI.getOperand(1).getReg())
                  .addReg(WBReg)
                  .addReg(0)
                  .addImm(0)
                  .addImm(Pred);
    NewMIs.push_back(MemMI);
    NewMIs.push_back(UpdateMI);
  } else {
    if (isLoad)
      MemMI =
          BuildMI(MF, MI.getDebugLoc(), get(MemOpc), MI.getOperand(0).getReg())
              .addReg(BaseReg)
              .addImm(0)
              .addImm(Pred);
    else
      MemMI = BuildMI(MF, MI.getDebugLoc(), get(MemOpc))
                  .addReg(MI.getOperand(1).getReg())
                  .addReg(BaseReg)
                  .addReg(0)
                  .addImm(0)
                  .addImm(Pred);
    if (WB.isDead())
      UpdateMI->getOperand(0).setIsDead();
    NewMIs.push_back(UpdateMI);
    NewMIs.push_back(MemMI);
  }

  // Transfer LiveVariables states, kill / dead info.
  if (LV) {
    for (const MachineOperand &MO : MI.operands()) {
      if (MO.isReg() && Register::isVirtualRegister(MO.getReg())) {
        Register Reg = MO.getReg();

        LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
        if (MO.isDef()) {
          MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
          if (MO.isDead())
            LV->addVirtualRegisterDead(Reg, *NewMI);
        }
        if (MO.isUse() && MO.isKill()) {
          for (unsigned j = 0; j < 2; ++j) {
            // Look at the two new MI's in reverse order.
            MachineInstr *NewMI = NewMIs[j];
            if (!NewMI->readsRegister(Reg))
              continue;
            LV->addVirtualRegisterKilled(Reg, *NewMI);
            if (VI.removeKill(MI))
              VI.Kills.push_back(NewMI);
            break;
          }
        }
      }
    }
  }

  MachineBasicBlock &MBB = *MI.getParent();
  MBB.insert(MI, NewMIs[1]);
  MBB.insert(MI, NewMIs[0]);
  return NewMIs[0];
}

// Branch analysis.
// Cond vector output format:
//   0 elements indicates an unconditional branch
//   2 elements indicates a conditional branch; the elements are
//     the condition to check and the CPSR.
//   3 elements indicates a hardware loop end; the elements
//     are the opcode, the operand value to test, and a dummy
//     operand used to pad out to 3 operands.
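//
// For example (a sketch of the 2-element case): a block ending in
//   Bcc %bb.2, 1 /* CC::ne */, $cpsr
//   B   %bb.3
// would be reported with TBB = %bb.2, FBB = %bb.3 and
// Cond = { 1 /* CC::ne */, $cpsr }.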
bool ARMBaseInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  TBB = nullptr;
  FBB = nullptr;

  MachineBasicBlock::instr_iterator I = MBB.instr_end();
  if (I == MBB.instr_begin())
    return false; // Empty blocks are easy.
  --I;

  // Walk backwards from the end of the basic block until the branch is
  // analyzed or we give up.
  while (isPredicated(*I) || I->isTerminator() || I->isDebugValue()) {
    // Flag to be raised on unanalyzeable instructions. This is useful in cases
    // where we want to clean up on the end of the basic block before we bail
    // out.
    bool CantAnalyze = false;

    // Skip over DEBUG values, predicated nonterminators and speculation
    // barrier terminators.
    while (I->isDebugInstr() || !I->isTerminator() ||
           isSpeculationBarrierEndBBOpcode(I->getOpcode()) ||
           I->getOpcode() == ARM::t2DoLoopStartTP) {
      if (I == MBB.instr_begin())
        return false;
      --I;
    }

    if (isIndirectBranchOpcode(I->getOpcode()) ||
        isJumpTableBranchOpcode(I->getOpcode())) {
      // Indirect branches and jump tables can't be analyzed, but we still want
      // to clean up any instructions at the tail of the basic block.
      CantAnalyze = true;
    } else if (isUncondBranchOpcode(I->getOpcode())) {
      TBB = I->getOperand(0).getMBB();
    } else if (isCondBranchOpcode(I->getOpcode())) {
      // Bail out if we encounter multiple conditional branches.
      if (!Cond.empty())
        return true;

      assert(!FBB && "FBB should have been null.");
      FBB = TBB;
      TBB = I->getOperand(0).getMBB();
      Cond.push_back(I->getOperand(1));
      Cond.push_back(I->getOperand(2));
    } else if (I->isReturn()) {
      // Returns can't be analyzed, but we should run cleanup.
      CantAnalyze = true;
    } else if (I->getOpcode() == ARM::t2LoopEnd &&
               MBB.getParent()
                   ->getSubtarget<ARMSubtarget>()
                   .enableMachinePipeliner()) {
      if (!Cond.empty())
        return true;
      FBB = TBB;
      TBB = I->getOperand(1).getMBB();
      Cond.push_back(MachineOperand::CreateImm(I->getOpcode()));
      Cond.push_back(I->getOperand(0));
      Cond.push_back(MachineOperand::CreateImm(0));
    } else {
      // We encountered an unrecognized terminator. Bail out immediately.
      return true;
    }

    // Cleanup code - to be run for unpredicated unconditional branches and
    // returns.
    if (!isPredicated(*I) &&
        (isUncondBranchOpcode(I->getOpcode()) ||
         isIndirectBranchOpcode(I->getOpcode()) ||
         isJumpTableBranchOpcode(I->getOpcode()) ||
         I->isReturn())) {
      // Forget any previous conditional branch information - it no longer
      // applies.
      Cond.clear();
      FBB = nullptr;

      // If we can modify the function, delete everything below this
      // unconditional branch.
      if (AllowModify) {
        MachineBasicBlock::iterator DI = std::next(I);
        while (DI != MBB.instr_end()) {
          MachineInstr &InstToDelete = *DI;
          ++DI;
          // Speculation barriers must not be deleted.
          if (isSpeculationBarrierEndBBOpcode(InstToDelete.getOpcode()))
            continue;
          InstToDelete.eraseFromParent();
        }
      }
    }

    if (CantAnalyze) {
      // We may not be able to analyze the block, but we could still have
      // an unconditional branch as the last instruction in the block, which
      // just branches to layout successor. If this is the case, then just
      // remove it if we're allowed to make modifications.
      if (AllowModify && !isPredicated(MBB.back()) &&
          isUncondBranchOpcode(MBB.back().getOpcode()) &&
          TBB && MBB.isLayoutSuccessor(TBB))
        removeBranch(MBB);
      return true;
    }

    if (I == MBB.instr_begin())
      return false;

    --I;
  }

  // We made it past the terminators without bailing out - we must have
  // analyzed this branch successfully.
  return false;
}

unsigned ARMBaseInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                        int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()) && I->getOpcode() != ARM::t2LoopEnd)
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()) && I->getOpcode() != ARM::t2LoopEnd)
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

unsigned ARMBaseInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                        MachineBasicBlock *TBB,
                                        MachineBasicBlock *FBB,
                                        ArrayRef<MachineOperand> Cond,
                                        const DebugLoc &DL,
                                        int *BytesAdded) const {
  assert(!BytesAdded && "code size not handled");
  ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
  int BOpc = !AFI->isThumbFunction()
    ? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
  int BccOpc = !AFI->isThumbFunction()
    ? ARM::Bcc : (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc);
  bool isThumb = AFI->isThumbFunction() || AFI->isThumb2Function();

  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0 || Cond.size() == 3) &&
         "ARM branch conditions have two or three components!");

  // For conditional branches, we use addOperand to preserve CPSR flags.

  if (!FBB) {
    if (Cond.empty()) { // Unconditional branch?
      if (isThumb)
        BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB).add(predOps(ARMCC::AL));
      else
        BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
    } else if (Cond.size() == 2) {
      BuildMI(&MBB, DL, get(BccOpc))
          .addMBB(TBB)
          .addImm(Cond[0].getImm())
          .add(Cond[1]);
    } else
      BuildMI(&MBB, DL, get(Cond[0].getImm())).add(Cond[1]).addMBB(TBB);
    return 1;
  }

  // Two-way conditional branch.
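  // A sketch of what gets emitted here for Cond = { ARMCC::eq, $cpsr } in
  // ARM mode:
  //   Bcc %TBB, 0 /* CC::eq */, $cpsr
  //   B   %FBB
  // i.e. a conditional branch to TBB followed by an unconditional branch to
  // FBB, which is why this path reports 2 inserted branches.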
  if (Cond.size() == 2)
    BuildMI(&MBB, DL, get(BccOpc))
        .addMBB(TBB)
        .addImm(Cond[0].getImm())
        .add(Cond[1]);
  else if (Cond.size() == 3)
    BuildMI(&MBB, DL, get(Cond[0].getImm())).add(Cond[1]).addMBB(TBB);
  if (isThumb)
    BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB).add(predOps(ARMCC::AL));
  else
    BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
  return 2;
}

bool ARMBaseInstrInfo::
reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond.size() == 2) {
    ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
    Cond[0].setImm(ARMCC::getOppositeCondition(CC));
    return false;
  }
  return true;
}

bool ARMBaseInstrInfo::isPredicated(const MachineInstr &MI) const {
  if (MI.isBundle()) {
    MachineBasicBlock::const_instr_iterator I = MI.getIterator();
    MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
    while (++I != E && I->isInsideBundle()) {
      int PIdx = I->findFirstPredOperandIdx();
      if (PIdx != -1 && I->getOperand(PIdx).getImm() != ARMCC::AL)
        return true;
    }
    return false;
  }

  int PIdx = MI.findFirstPredOperandIdx();
  return PIdx != -1 && MI.getOperand(PIdx).getImm() != ARMCC::AL;
}

std::string ARMBaseInstrInfo::createMIROperandComment(
    const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
    const TargetRegisterInfo *TRI) const {

  // First, let's see if there is a generic comment for this operand
  std::string GenericComment =
      TargetInstrInfo::createMIROperandComment(MI, Op, OpIdx, TRI);
  if (!GenericComment.empty())
    return GenericComment;

  // If not, check if we have an immediate operand.
  if (Op.getType() != MachineOperand::MO_Immediate)
    return std::string();

  // And print its corresponding condition code if the immediate is a
  // predicate.
  int FirstPredOp = MI.findFirstPredOperandIdx();
  if (FirstPredOp != (int) OpIdx)
    return std::string();

  std::string CC = "CC::";
  CC += ARMCondCodeToString((ARMCC::CondCodes)Op.getImm());
  return CC;
}

bool ARMBaseInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  unsigned Opc = MI.getOpcode();
  if (isUncondBranchOpcode(Opc)) {
    MI.setDesc(get(getMatchingCondBranchOpcode(Opc)));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
        .addImm(Pred[0].getImm())
        .addReg(Pred[1].getReg());
    return true;
  }

  int PIdx = MI.findFirstPredOperandIdx();
  if (PIdx != -1) {
    MachineOperand &PMO = MI.getOperand(PIdx);
    PMO.setImm(Pred[0].getImm());
    MI.getOperand(PIdx+1).setReg(Pred[1].getReg());

    // Thumb 1 arithmetic instructions do not set CPSR when executed inside an
    // IT block. This affects how they are printed.
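    // (For instance, a 16-bit "adds r0, r0, #1" predicated inside an IT block
    // assembles and prints as "add r0, r0, #1" - no flag update - so the
    // optional CPSR def operand is cleared to NoRegister below.)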
    const MCInstrDesc &MCID = MI.getDesc();
    if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
      assert(MCID.OpInfo[1].isOptionalDef() &&
             "CPSR def isn't expected operand");
      assert((MI.getOperand(1).isDead() ||
              MI.getOperand(1).getReg() != ARM::CPSR) &&
             "if conversion tried to stop defining used CPSR");
      MI.getOperand(1).setReg(ARM::NoRegister);
    }

    return true;
  }
  return false;
}

bool ARMBaseInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
                                         ArrayRef<MachineOperand> Pred2) const {
  if (Pred1.size() > 2 || Pred2.size() > 2)
    return false;

  ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
  ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
  if (CC1 == CC2)
    return true;
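  // Pred1 subsumes Pred2 when every state that satisfies Pred2 also satisfies
  // Pred1. For example, HS (unsigned >=) holds whenever HI (unsigned >) does,
  // and LS (unsigned <=) holds whenever LO (unsigned <) or EQ does.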
  switch (CC1) {
  default:
    return false;
  case ARMCC::AL:
    return true;
  case ARMCC::HS:
    return CC2 == ARMCC::HI;
  case ARMCC::LS:
    return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
  case ARMCC::GE:
    return CC2 == ARMCC::GT;
  case ARMCC::LE:
    return CC2 == ARMCC::LT;
  }
}

bool ARMBaseInstrInfo::ClobbersPredicate(MachineInstr &MI,
                                         std::vector<MachineOperand> &Pred,
                                         bool SkipDead) const {
  bool Found = false;
  for (const MachineOperand &MO : MI.operands()) {
    bool ClobbersCPSR = MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR);
    bool IsCPSR = MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR;
    if (ClobbersCPSR || IsCPSR) {

      // Filter out T1 instructions that have a dead CPSR,
      // allowing IT blocks to be generated containing T1 instructions
      const MCInstrDesc &MCID = MI.getDesc();
      if (MCID.TSFlags & ARMII::ThumbArithFlagSetting && MO.isDead() &&
          SkipDead)
        continue;

      Pred.push_back(MO);
      Found = true;
    }
  }

  return Found;
}

bool ARMBaseInstrInfo::isCPSRDefined(const MachineInstr &MI) {
  for (const auto &MO : MI.operands())
    if (MO.isReg() && MO.getReg() == ARM::CPSR && MO.isDef() && !MO.isDead())
      return true;
  return false;
}

static bool isEligibleForITBlock(const MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default: return true;
  case ARM::tADC:   // ADC (register) T1
  case ARM::tADDi3: // ADD (immediate) T1
  case ARM::tADDi8: // ADD (immediate) T2
  case ARM::tADDrr: // ADD (register) T1
  case ARM::tAND:   // AND (register) T1
  case ARM::tASRri: // ASR (immediate) T1
  case ARM::tASRrr: // ASR (register) T1
  case ARM::tBIC:   // BIC (register) T1
  case ARM::tEOR:   // EOR (register) T1
  case ARM::tLSLri: // LSL (immediate) T1
  case ARM::tLSLrr: // LSL (register) T1
  case ARM::tLSRri: // LSR (immediate) T1
  case ARM::tLSRrr: // LSR (register) T1
  case ARM::tMUL:   // MUL T1
  case ARM::tMVN:   // MVN (register) T1
  case ARM::tORR:   // ORR (register) T1
  case ARM::tROR:   // ROR (register) T1
  case ARM::tRSB:   // RSB (immediate) T1
  case ARM::tSBC:   // SBC (register) T1
  case ARM::tSUBi3: // SUB (immediate) T1
  case ARM::tSUBi8: // SUB (immediate) T2
  case ARM::tSUBrr: // SUB (register) T1
    return !ARMBaseInstrInfo::isCPSRDefined(*MI);
  }
}

/// isPredicable - Return true if the specified instruction can be predicated.
/// By default, this returns true for every instruction with a
/// PredicateOperand.
bool ARMBaseInstrInfo::isPredicable(const MachineInstr &MI) const {
  if (!MI.isPredicable())
    return false;

  if (MI.isBundle())
    return false;

  if (!isEligibleForITBlock(&MI))
    return false;

  const MachineFunction *MF = MI.getParent()->getParent();
  const ARMFunctionInfo *AFI =
      MF->getInfo<ARMFunctionInfo>();

  // Neon instructions in Thumb2 IT blocks are deprecated, see ARMARM.
  // In their ARM encoding, they can't be encoded in a conditional form.
  if ((MI.getDesc().TSFlags & ARMII::DomainMask) == ARMII::DomainNEON)
    return false;

  // Make indirect control flow changes unpredicable when SLS mitigation is
  // enabled.
  const ARMSubtarget &ST = MF->getSubtarget<ARMSubtarget>();
  if (ST.hardenSlsRetBr() && isIndirectControlFlowNotComingBack(MI))
    return false;
  if (ST.hardenSlsBlr() && isIndirectCall(MI))
    return false;

  if (AFI->isThumb2Function()) {
    if (getSubtarget().restrictIT())
      return isV8EligibleForIT(&MI);
  }

  return true;
}

namespace llvm {

template <> bool IsCPSRDead<MachineInstr>(const MachineInstr *MI) {
  for (const MachineOperand &MO : MI->operands()) {
    if (!MO.isReg() || MO.isUndef() || MO.isUse())
      continue;
    if (MO.getReg() != ARM::CPSR)
      continue;
    if (!MO.isDead())
      return false;
  }
  // all definitions of CPSR are dead
  return true;
}

} // end namespace llvm

/// GetInstSize - Return the size of the specified MachineInstr.
///
unsigned ARMBaseInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  const MachineBasicBlock &MBB = *MI.getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  const MCInstrDesc &MCID = MI.getDesc();

  switch (MI.getOpcode()) {
  default:
    // Return the size specified in .td file. If there's none, return 0, as we
    // can't define a default size (Thumb1 instructions are 2 bytes, Thumb2
    // instructions are 2-4 bytes, and ARM instructions are 4 bytes), in
    // contrast to AArch64 instructions which have a default size of 4 bytes
    // for example.
    return MCID.getSize();
  case TargetOpcode::BUNDLE:
    return getInstBundleLength(MI);
  case ARM::CONSTPOOL_ENTRY:
  case ARM::JUMPTABLE_INSTS:
  case ARM::JUMPTABLE_ADDRS:
  case ARM::JUMPTABLE_TBB:
  case ARM::JUMPTABLE_TBH:
    // If this machine instr is a constant pool entry, its size is recorded as
    // operand #2.
    return MI.getOperand(2).getImm();
  case ARM::SPACE:
    return MI.getOperand(1).getImm();
  case ARM::INLINEASM:
  case ARM::INLINEASM_BR: {
    // If this machine instr is an inline asm, measure it.
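    // (getInlineAsmLength gives a conservative upper bound - roughly the
    // statement count times the target's maximum instruction length - and in
    // ARM mode the result is rounded up to a multiple of 4 below, since every
    // ARM instruction is 4 bytes.)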
    unsigned Size = getInlineAsmLength(MI.getOperand(0).getSymbolName(), *MAI);
    if (!MF->getInfo<ARMFunctionInfo>()->isThumbFunction())
      Size = alignTo(Size, 4);
    return Size;
  }
  }
}

unsigned ARMBaseInstrInfo::getInstBundleLength(const MachineInstr &MI) const {
  unsigned Size = 0;
  MachineBasicBlock::const_instr_iterator I = MI.getIterator();
  MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
    Size += getInstSizeInBytes(*I);
  }
  return Size;
}

void ARMBaseInstrInfo::copyFromCPSR(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg, bool KillSrc,
                                    const ARMSubtarget &Subtarget) const {
  unsigned Opc = Subtarget.isThumb()
                     ? (Subtarget.isMClass() ? ARM::t2MRS_M : ARM::t2MRS_AR)
                     : ARM::MRS;

  MachineInstrBuilder MIB =
      BuildMI(MBB, I, I->getDebugLoc(), get(Opc), DestReg);

  // There is only 1 A/R class MRS instruction, and it always refers to
  // APSR. However, there are lots of other possibilities on M-class cores.
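  // (The 0x800 immediate below is, on my reading of the M-class
  // special-register operand encoding, the selector for APSR with the nzcvq
  // mask bits - i.e. "apsr_nzcvq". Treat that interpretation as an
  // assumption; the operand value itself comes straight from this code.)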
  if (Subtarget.isMClass())
    MIB.addImm(0x800);

  MIB.add(predOps(ARMCC::AL))
      .addReg(ARM::CPSR, RegState::Implicit | getKillRegState(KillSrc));
}

void ARMBaseInstrInfo::copyToCPSR(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I,
                                  unsigned SrcReg, bool KillSrc,
                                  const ARMSubtarget &Subtarget) const {
  unsigned Opc = Subtarget.isThumb()
                     ? (Subtarget.isMClass() ? ARM::t2MSR_M : ARM::t2MSR_AR)
                     : ARM::MSR;

  MachineInstrBuilder MIB = BuildMI(MBB, I, I->getDebugLoc(), get(Opc));

  if (Subtarget.isMClass())
    MIB.addImm(0x800);
  else
    MIB.addImm(8);

  MIB.addReg(SrcReg, getKillRegState(KillSrc))
      .add(predOps(ARMCC::AL))
      .addReg(ARM::CPSR, RegState::Implicit | RegState::Define);
}

void llvm::addUnpredicatedMveVpredNOp(MachineInstrBuilder &MIB) {
  MIB.addImm(ARMVCC::None);
  MIB.addReg(0);
  MIB.addReg(0); // tp_reg
}

void llvm::addUnpredicatedMveVpredROp(MachineInstrBuilder &MIB,
                                      Register DestReg) {
  addUnpredicatedMveVpredNOp(MIB);
  MIB.addReg(DestReg, RegState::Undef);
}

void llvm::addPredicatedMveVpredNOp(MachineInstrBuilder &MIB, unsigned Cond) {
  MIB.addImm(Cond);
  MIB.addReg(ARM::VPR, RegState::Implicit);
  MIB.addReg(0); // tp_reg
}

void llvm::addPredicatedMveVpredROp(MachineInstrBuilder &MIB,
                                    unsigned Cond, unsigned Inactive) {
  addPredicatedMveVpredNOp(MIB, Cond);
  MIB.addReg(Inactive);
}

void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I,
                                   const DebugLoc &DL, MCRegister DestReg,
                                   MCRegister SrcReg, bool KillSrc) const {
  bool GPRDest = ARM::GPRRegClass.contains(DestReg);
  bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);

  if (GPRDest && GPRSrc) {
    BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());
    return;
  }

  bool SPRDest = ARM::SPRRegClass.contains(DestReg);
  bool SPRSrc = ARM::SPRRegClass.contains(SrcReg);

  unsigned Opc = 0;
  if (SPRDest && SPRSrc)
    Opc = ARM::VMOVS;
  else if (GPRDest && SPRSrc)
    Opc = ARM::VMOVRS;
  else if (SPRDest && GPRSrc)
    Opc = ARM::VMOVSR;
  else if (ARM::DPRRegClass.contains(DestReg, SrcReg) && Subtarget.hasFP64())
    Opc = ARM::VMOVD;
  else if (ARM::QPRRegClass.contains(DestReg, SrcReg))
    Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MQPRCopy;

  if (Opc) {
    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc), DestReg);
    MIB.addReg(SrcReg, getKillRegState(KillSrc));
    if (Opc == ARM::VORRq || Opc == ARM::MVE_VORR)
      MIB.addReg(SrcReg, getKillRegState(KillSrc));
    if (Opc == ARM::MVE_VORR)
      addUnpredicatedMveVpredROp(MIB, DestReg);
    else if (Opc != ARM::MQPRCopy)
      MIB.add(predOps(ARMCC::AL));
    return;
  }

  // Handle register classes that require multiple instructions.
  unsigned BeginIdx = 0;
  unsigned SubRegs = 0;
  int Spacing = 1;

  // Use VORRq when possible.
  if (ARM::QQPRRegClass.contains(DestReg, SrcReg)) {
    Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
    BeginIdx = ARM::qsub_0;
    SubRegs = 2;
  } else if (ARM::QQQQPRRegClass.contains(DestReg, SrcReg)) {
    Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
    BeginIdx = ARM::qsub_0;
    SubRegs = 4;
    // Fall back to VMOVD.
  } else if (ARM::DPairRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 2;
  } else if (ARM::DTripleRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 3;
  } else if (ARM::DQuadRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 4;
  } else if (ARM::GPRPairRegClass.contains(DestReg, SrcReg)) {
    Opc = Subtarget.isThumb2() ? ARM::tMOVr : ARM::MOVr;
    BeginIdx = ARM::gsub_0;
    SubRegs = 2;
  } else if (ARM::DPairSpcRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 2;
    Spacing = 2;
  } else if (ARM::DTripleSpcRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 3;
    Spacing = 2;
  } else if (ARM::DQuadSpcRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 4;
    Spacing = 2;
  } else if (ARM::DPRRegClass.contains(DestReg, SrcReg) &&
             !Subtarget.hasFP64()) {
    Opc = ARM::VMOVS;
    BeginIdx = ARM::ssub_0;
    SubRegs = 2;
  } else if (SrcReg == ARM::CPSR) {
    copyFromCPSR(MBB, I, DestReg, KillSrc, Subtarget);
    return;
  } else if (DestReg == ARM::CPSR) {
    copyToCPSR(MBB, I, SrcReg, KillSrc, Subtarget);
    return;
  } else if (DestReg == ARM::VPR) {
    assert(ARM::GPRRegClass.contains(SrcReg));
    BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMSR_P0), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .add(predOps(ARMCC::AL));
    return;
  } else if (SrcReg == ARM::VPR) {
    assert(ARM::GPRRegClass.contains(DestReg));
    BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMRS_P0), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .add(predOps(ARMCC::AL));
    return;
  } else if (DestReg == ARM::FPSCR_NZCV) {
    assert(ARM::GPRRegClass.contains(SrcReg));
    BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMSR_FPSCR_NZCVQC), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .add(predOps(ARMCC::AL));
    return;
  } else if (SrcReg == ARM::FPSCR_NZCV) {
    assert(ARM::GPRRegClass.contains(DestReg));
    BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VMRS_FPSCR_NZCVQC), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .add(predOps(ARMCC::AL));
    return;
  }

  assert(Opc && "Impossible reg-to-reg copy");

  const TargetRegisterInfo *TRI = &getRegisterInfo();
  MachineInstrBuilder Mov;

  // Copy register tuples backward when the first Dest reg overlaps with
  // SrcReg.
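  // (Example of why: for a hypothetical copy d1_d2 = COPY d0_d1, copying
  // forward would overwrite d1 before it is read as the second half of the
  // source; iterating from the last sub-register instead - d2 = d1, then
  // d1 = d0 - preserves both halves.)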
  if (TRI->regsOverlap(SrcReg, TRI->getSubReg(DestReg, BeginIdx))) {
    BeginIdx = BeginIdx + ((SubRegs - 1) * Spacing);
    Spacing = -Spacing;
  }
#ifndef NDEBUG
  SmallSet<unsigned, 4> DstRegs;
#endif
  for (unsigned i = 0; i != SubRegs; ++i) {
    Register Dst = TRI->getSubReg(DestReg, BeginIdx + i * Spacing);
    Register Src = TRI->getSubReg(SrcReg, BeginIdx + i * Spacing);
    assert(Dst && Src && "Bad sub-register");
#ifndef NDEBUG
    assert(!DstRegs.count(Src) && "destructive vector copy");
    DstRegs.insert(Dst);
#endif
    Mov = BuildMI(MBB, I, I->getDebugLoc(), get(Opc), Dst).addReg(Src);
    // VORR (NEON or MVE) takes two source operands.
    if (Opc == ARM::VORRq || Opc == ARM::MVE_VORR) {
      Mov.addReg(Src);
    }
    // MVE VORR takes predicate operands in place of an ordinary condition.
    if (Opc == ARM::MVE_VORR)
      addUnpredicatedMveVpredROp(Mov, Dst);
    else
      Mov = Mov.add(predOps(ARMCC::AL));
    // MOVr can set CC.
    if (Opc == ARM::MOVr)
      Mov = Mov.add(condCodeOp());
  }
  // Add implicit super-register defs and kills to the last instruction.
  Mov->addRegisterDefined(DestReg, TRI);
  if (KillSrc)
    Mov->addRegisterKilled(SrcReg, TRI);
}

Optional<DestSourcePair>
ARMBaseInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
  // VMOVRRD is also a copy instruction, but it needs special handling: it is
  // a more complex form of copy, so we do not consider it here. To recognize
  // such instructions, the isExtractSubregLike MI interface function could be
  // used.
  // VORRq is considered a move only if its two inputs are the same register.
  if (!MI.isMoveReg() ||
      (MI.getOpcode() == ARM::VORRq &&
       MI.getOperand(1).getReg() != MI.getOperand(2).getReg()))
    return None;
  return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
}

Optional<ParamLoadedValue>
ARMBaseInstrInfo::describeLoadedValue(const MachineInstr &MI,
                                      Register Reg) const {
  if (auto DstSrcPair = isCopyInstrImpl(MI)) {
    Register DstReg = DstSrcPair->Destination->getReg();

    // TODO: We don't handle cases where the forwarding reg is narrower/wider
    // than the copy registers. Consider for example:
    //
    //   s16 = VMOVS s0
    //   s17 = VMOVS s1
    //   call @callee(d0)
    //
    // We'd like to describe the call site value of d0 as d8, but this requires
    // gathering and merging the descriptions for the two VMOVS instructions.
    //
    // We also don't handle the reverse situation, where the forwarding reg is
    // narrower than the copy destination:
    //
    //   d8 = VMOVD d0
    //   call @callee(s1)
    //
    // We need to produce a fragment description (the call site value of s1 is
    // /not/ just d8).
    if (DstReg != Reg)
      return None;
  }
  return TargetInstrInfo::describeLoadedValue(MI, Reg);
}

const MachineInstrBuilder &
ARMBaseInstrInfo::AddDReg(MachineInstrBuilder &MIB, unsigned Reg,
                          unsigned SubIdx, unsigned State,
                          const TargetRegisterInfo *TRI) const {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (Register::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}

void ARMBaseInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    Register SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC,
                    const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  Align Alignment = MFI.getObjectAlign(FI);

  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
      MFI.getObjectSize(FI), Alignment);

  switch (TRI->getSpillSize(*RC)) {
  case 2:
    if (ARM::HPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DebugLoc(), get(ARM::VSTRH))
          .addReg(SrcReg, getKillRegState(isKill))
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 4:
    if (ARM::GPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DebugLoc(), get(ARM::STRi12))
          .addReg(SrcReg, getKillRegState(isKill))
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DebugLoc(), get(ARM::VSTRS))
          .addReg(SrcReg, getKillRegState(isKill))
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DebugLoc(), get(ARM::VSTR_P0_off))
          .addReg(SrcReg, getKillRegState(isKill))
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 8:
    if (ARM::DPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DebugLoc(), get(ARM::VSTRD))
          .addReg(SrcReg, getKillRegState(isKill))
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
      if (Subtarget.hasV5TEOps()) {
        MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::STRD));
        AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
        AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
        MIB.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO)
           .add(predOps(ARMCC::AL));
      } else {
        // Fallback to STM instruction, which has existed since the dawn of
        // time.
        MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::STMIA))
                                      .addFrameIndex(FI)
                                      .addMemOperand(MMO)
                                      .add(predOps(ARMCC::AL));
        AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
        AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 16:
    if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
      // Use aligned spills if the stack can be realigned.
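      // (In the aligned case below, the "16" immediate on VST1q64 is the
      // access alignment in bytes carried by the NEON store's address-mode
      // operand; VSTMQIA is the fallback with no alignment guarantee.)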
      if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF)) {
        BuildMI(MBB, I, DebugLoc(), get(ARM::VST1q64))
            .addFrameIndex(FI)
            .addImm(16)
            .addReg(SrcReg, getKillRegState(isKill))
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      } else {
        BuildMI(MBB, I, DebugLoc(), get(ARM::VSTMQIA))
            .addReg(SrcReg, getKillRegState(isKill))
            .addFrameIndex(FI)
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      }
    } else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
               Subtarget.hasMVEIntegerOps()) {
      auto MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::MVE_VSTRWU32));
      MIB.addReg(SrcReg, getKillRegState(isKill))
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO);
      addUnpredicatedMveVpredNOp(MIB);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 24:
    if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
      // Use aligned spills if the stack can be realigned.
      if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) &&
          Subtarget.hasNEON()) {
        BuildMI(MBB, I, DebugLoc(), get(ARM::VST1d64TPseudo))
            .addFrameIndex(FI)
            .addImm(16)
            .addReg(SrcReg, getKillRegState(isKill))
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      } else {
        MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(),
                                          get(ARM::VSTMDIA))
                                      .addFrameIndex(FI)
                                      .add(predOps(ARMCC::AL))
                                      .addMemOperand(MMO);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
        AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 32:
    if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
        ARM::MQQPRRegClass.hasSubClassEq(RC) ||
        ARM::DQuadRegClass.hasSubClassEq(RC)) {
      if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) &&
          Subtarget.hasNEON()) {
        // FIXME: It's possible to only store part of the QQ register if the
        // spilled def has a sub-register index.
        BuildMI(MBB, I, DebugLoc(), get(ARM::VST1d64QPseudo))
            .addFrameIndex(FI)
            .addImm(16)
            .addReg(SrcReg, getKillRegState(isKill))
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      } else if (Subtarget.hasMVEIntegerOps()) {
        BuildMI(MBB, I, DebugLoc(), get(ARM::MQQPRStore))
            .addReg(SrcReg, getKillRegState(isKill))
            .addFrameIndex(FI)
            .addMemOperand(MMO);
      } else {
        MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(),
                                          get(ARM::VSTMDIA))
                                      .addFrameIndex(FI)
                                      .add(predOps(ARMCC::AL))
                                      .addMemOperand(MMO);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
        AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 64:
    if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
        Subtarget.hasMVEIntegerOps()) {
      BuildMI(MBB, I, DebugLoc(), get(ARM::MQQQQPRStore))
          .addReg(SrcReg, getKillRegState(isKill))
          .addFrameIndex(FI)
          .addMemOperand(MMO);
    } else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
      MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::VSTMDIA))
                                    .addFrameIndex(FI)
                                    .add(predOps(ARMCC::AL))
                                    .addMemOperand(MMO);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_4, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_5, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_6, 0, TRI);
      AddDReg(MIB, SrcReg, ARM::dsub_7, 0, TRI);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  default:
    llvm_unreachable("Unknown reg class!");
  }
}

unsigned ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                              int &FrameIndex) const {
  switch (MI.getOpcode()) {
  default: break;
  case ARM::STRrs:
  case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isReg() &&
        MI.getOperand(3).isImm() && MI.getOperand(2).getReg() == 0 &&
        MI.getOperand(3).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  case ARM::STRi12:
  case ARM::t2STRi12:
  case ARM::tSTRspi:
  case ARM::VSTRD:
  case ARM::VSTRS:
  case ARM::VSTR_P0_off:
  case ARM::MVE_VSTRWU32:
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  case ARM::VST1q64:
  case ARM::VST1d64TPseudo:
  case ARM::VST1d64QPseudo:
    if (MI.getOperand(0).isFI() && MI.getOperand(2).getSubReg() == 0) {
      FrameIndex = MI.getOperand(0).getIndex();
      return MI.getOperand(2).getReg();
    }
    break;
  case ARM::VSTMQIA:
    if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  case ARM::MQQPRStore:
  case ARM::MQQQQPRStore:
    if (MI.getOperand(1).isFI()) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned ARMBaseInstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
                                                    int &FrameIndex) const {
  SmallVector<const MachineMemOperand *, 1> Accesses;
  if (MI.mayStore() && hasStoreToStackSlot(MI, Accesses) &&
      Accesses.size() == 1) {
    FrameIndex =
        cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
            ->getFrameIndex();
    return true;
  }
  return false;
}

void ARMBaseInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     Register DestReg, int FI,
                     const TargetRegisterClass *RC,
                     const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const Align Alignment = MFI.getObjectAlign(FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
      MFI.getObjectSize(FI), Alignment);

  switch (TRI->getSpillSize(*RC)) {
  case 2:
    if (ARM::HPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DL, get(ARM::VLDRH), DestReg)
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 4:
    if (ARM::GPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DL, get(ARM::LDRi12), DestReg)
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg)
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DL, get(ARM::VLDR_P0_off), DestReg)
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 8:
    if (ARM::DPRRegClass.hasSubClassEq(RC)) {
      BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg)
          .addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO)
          .add(predOps(ARMCC::AL));
    } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
      MachineInstrBuilder MIB;

      if (Subtarget.hasV5TEOps()) {
        MIB = BuildMI(MBB, I, DL, get(ARM::LDRD));
        AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
        AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
        MIB.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO)
           .add(predOps(ARMCC::AL));
      } else {
        // Fallback to LDM instruction, which has existed since the dawn of
        // time.
        MIB = BuildMI(MBB, I, DL, get(ARM::LDMIA))
                  .addFrameIndex(FI)
                  .addMemOperand(MMO)
                  .add(predOps(ARMCC::AL));
        MIB = AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
      }

      if (Register::isPhysicalRegister(DestReg))
        MIB.addReg(DestReg, RegState::ImplicitDefine);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 16:
    if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
      if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF)) {
        BuildMI(MBB, I, DL, get(ARM::VLD1q64), DestReg)
            .addFrameIndex(FI)
            .addImm(16)
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      } else {
        BuildMI(MBB, I, DL, get(ARM::VLDMQIA), DestReg)
            .addFrameIndex(FI)
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      }
    } else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
               Subtarget.hasMVEIntegerOps()) {
      auto MIB = BuildMI(MBB, I, DL, get(ARM::MVE_VLDRWU32), DestReg);
      MIB.addFrameIndex(FI)
          .addImm(0)
          .addMemOperand(MMO);
      addUnpredicatedMveVpredNOp(MIB);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 24:
    if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
      if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) &&
          Subtarget.hasNEON()) {
        BuildMI(MBB, I, DL, get(ARM::VLD1d64TPseudo), DestReg)
            .addFrameIndex(FI)
            .addImm(16)
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      } else {
        MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                                      .addFrameIndex(FI)
                                      .addMemOperand(MMO)
                                      .add(predOps(ARMCC::AL));
        MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
        if (Register::isPhysicalRegister(DestReg))
          MIB.addReg(DestReg, RegState::ImplicitDefine);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 32:
    if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
        ARM::MQQPRRegClass.hasSubClassEq(RC) ||
        ARM::DQuadRegClass.hasSubClassEq(RC)) {
      if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) &&
          Subtarget.hasNEON()) {
        BuildMI(MBB, I, DL, get(ARM::VLD1d64QPseudo), DestReg)
            .addFrameIndex(FI)
            .addImm(16)
            .addMemOperand(MMO)
            .add(predOps(ARMCC::AL));
      } else if (Subtarget.hasMVEIntegerOps()) {
        BuildMI(MBB, I, DL, get(ARM::MQQPRLoad), DestReg)
            .addFrameIndex(FI)
            .addMemOperand(MMO);
      } else {
        MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                                      .addFrameIndex(FI)
                                      .add(predOps(ARMCC::AL))
                                      .addMemOperand(MMO);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
        if (Register::isPhysicalRegister(DestReg))
          MIB.addReg(DestReg, RegState::ImplicitDefine);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 64:
    if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
        Subtarget.hasMVEIntegerOps()) {
      BuildMI(MBB, I, DL, get(ARM::MQQQQPRLoad), DestReg)
          .addFrameIndex(FI)
          .addMemOperand(MMO);
    } else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
      MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                                    .addFrameIndex(FI)
                                    .add(predOps(ARMCC::AL))
                                    .addMemOperand(MMO);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_7, RegState::DefineNoRead, TRI);
      if (Register::isPhysicalRegister(DestReg))
        MIB.addReg(DestReg, RegState::ImplicitDefine);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  default:
    llvm_unreachable("Unknown regclass!");
  }
}

unsigned ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                               int &FrameIndex) const {
  switch (MI.getOpcode()) {
  default: break;
  case ARM::LDRrs:
  case ARM::t2LDRs: // FIXME: don't use t2LDRs to access frame.
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isReg() &&
        MI.getOperand(3).isImm() && MI.getOperand(2).getReg() == 0 &&
        MI.getOperand(3).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  case ARM::LDRi12:
  case ARM::t2LDRi12:
  case ARM::tLDRspi:
  case ARM::VLDRD:
  case ARM::VLDRS:
  case ARM::VLDR_P0_off:
  case ARM::MVE_VLDRWU32:
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  case ARM::VLD1q64:
  case ARM::VLD1d8TPseudo:
  case ARM::VLD1d16TPseudo:
  case ARM::VLD1d32TPseudo:
  case ARM::VLD1d64TPseudo:
  case ARM::VLD1d8QPseudo:
  case ARM::VLD1d16QPseudo:
  case ARM::VLD1d32QPseudo:
  case ARM::VLD1d64QPseudo:
    if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  case ARM::VLDMQIA:
    if (MI.getOperand(1).isFI() && MI.getOperand(0).getSubReg() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  case ARM::MQQPRLoad:
  case ARM::MQQQQPRLoad:
    if (MI.getOperand(1).isFI()) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned ARMBaseInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
                                                     int &FrameIndex) const {
  SmallVector<const MachineMemOperand *, 1> Accesses;
  if (MI.mayLoad() && hasLoadFromStackSlot(MI, Accesses) &&
      Accesses.size() == 1) {
    FrameIndex =
        cast<FixedStackPseudoSourceValue>(Accesses.front()->getPseudoValue())
            ->getFrameIndex();
    return true;
  }
  return false;
}

/// Expands MEMCPY to either LDMIA/STMIA or LDMIA_UPD/STMIA_UPD
/// depending on whether the result is used.
void ARMBaseInstrInfo::expandMEMCPY(MachineBasicBlock::iterator MI) const {
  bool isThumb1 = Subtarget.isThumb1Only();
  bool isThumb2 = Subtarget.isThumb2();
  const ARMBaseInstrInfo *TII = Subtarget.getInstrInfo();

  DebugLoc dl = MI->getDebugLoc();
  MachineBasicBlock *BB = MI->getParent();

  MachineInstrBuilder LDM, STM;
  if (isThumb1 || !MI->getOperand(1).isDead()) {
    MachineOperand LDWb(MI->getOperand(1));
    LDM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2LDMIA_UPD
                                        : isThumb1 ? ARM::tLDMIA_UPD
                                                   : ARM::LDMIA_UPD))
              .add(LDWb);
  } else {
    LDM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2LDMIA : ARM::LDMIA));
  }

  if (isThumb1 || !MI->getOperand(0).isDead()) {
    MachineOperand STWb(MI->getOperand(0));
    STM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2STMIA_UPD
                                        : isThumb1 ? ARM::tSTMIA_UPD
                                                   : ARM::STMIA_UPD))
              .add(STWb);
  } else {
    STM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2STMIA : ARM::STMIA));
  }

  MachineOperand LDBase(MI->getOperand(3));
  LDM.add(LDBase).add(predOps(ARMCC::AL));

  MachineOperand STBase(MI->getOperand(2));
  STM.add(STBase).add(predOps(ARMCC::AL));

  // Sort the scratch registers into ascending order.
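  // (LDM/STM encode their register list as a bitmask and always transfer
  // registers in ascending encoding order, so sorting keeps the operand order
  // on the MachineInstr consistent with what the instruction actually does.)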
  const TargetRegisterInfo &TRI = getRegisterInfo();
  SmallVector<unsigned, 6> ScratchRegs;
  for (unsigned I = 5; I < MI->getNumOperands(); ++I)
    ScratchRegs.push_back(MI->getOperand(I).getReg());
  llvm::sort(ScratchRegs,
             [&TRI](const unsigned &Reg1, const unsigned &Reg2) -> bool {
               return TRI.getEncodingValue(Reg1) <
                      TRI.getEncodingValue(Reg2);
             });

  for (const auto &Reg : ScratchRegs) {
    LDM.addReg(Reg, RegState::Define);
    STM.addReg(Reg, RegState::Kill);
  }

  BB->erase(MI);
}

bool ARMBaseInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  if (MI.getOpcode() == TargetOpcode::LOAD_STACK_GUARD) {
    expandLoadStackGuard(MI);
    MI.getParent()->erase(MI);
    return true;
  }

  if (MI.getOpcode() == ARM::MEMCPY) {
    expandMEMCPY(MI);
    return true;
  }

  // This hook gets to expand COPY instructions before they become
  // copyPhysReg() calls. Look for VMOVS instructions that can legally be
  // widened to VMOVD. We prefer the VMOVD when possible because it may be
  // changed into a VORR that can go down the NEON pipeline.
  if (!MI.isCopy() || Subtarget.dontWidenVMOVS() || !Subtarget.hasFP64())
    return false;

  // Look for a copy between even S-registers. That is where we keep floats
  // when using NEON v2f32 instructions for f32 arithmetic.
  Register DstRegS = MI.getOperand(0).getReg();
  Register SrcRegS = MI.getOperand(1).getReg();
  if (!ARM::SPRRegClass.contains(DstRegS, SrcRegS))
    return false;

  const TargetRegisterInfo *TRI = &getRegisterInfo();
  unsigned DstRegD = TRI->getMatchingSuperReg(DstRegS, ARM::ssub_0,
                                              &ARM::DPRRegClass);
  unsigned SrcRegD = TRI->getMatchingSuperReg(SrcRegS, ARM::ssub_0,
                                              &ARM::DPRRegClass);
  if (!DstRegD || !SrcRegD)
    return false;

  // We want to widen this into a DstRegD = VMOVD SrcRegD copy. This is only
  // legal if the COPY already defines the full DstRegD, and it isn't a
  // sub-register insertion.
  if (!MI.definesRegister(DstRegD, TRI) || MI.readsRegister(DstRegD, TRI))
    return false;

  // A dead copy shouldn't show up here, but reject it just in case.
  if (MI.getOperand(0).isDead())
    return false;

  // All clear, widen the COPY.
  LLVM_DEBUG(dbgs() << "widening: " << MI);
  MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);

  // Get rid of the old implicit-def of DstRegD. Leave it if it defines a Q-reg
  // or some other super-register.
  int ImpDefIdx = MI.findRegisterDefOperandIdx(DstRegD);
  if (ImpDefIdx != -1)
    MI.removeOperand(ImpDefIdx);

  // Change the opcode and operands.
  MI.setDesc(get(ARM::VMOVD));
  MI.getOperand(0).setReg(DstRegD);
  MI.getOperand(1).setReg(SrcRegD);
  MIB.add(predOps(ARMCC::AL));

  // We are now reading SrcRegD instead of SrcRegS. This may upset the
  // register scavenger and machine verifier, so we need to indicate that we
  // are reading an undefined value from SrcRegD, but a proper value from
  // SrcRegS.
  MI.getOperand(1).setIsUndef();
  MIB.addReg(SrcRegS, RegState::Implicit);

  // SrcRegD may actually contain an unrelated value in the ssub_1
  // sub-register. Don't kill it. Only kill the ssub_0 sub-register.
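  // e.g. after widening 'VMOVS s0, s2' to 'VMOVD d0, d1', s3 (the ssub_1
  // half of d1) may still hold a live, unrelated value that must stay live.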
  if (MI.getOperand(1).isKill()) {
    MI.getOperand(1).setIsKill(false);
    MI.addRegisterKilled(SrcRegS, TRI, true);
  }

  LLVM_DEBUG(dbgs() << "replaced by: " << MI);
  return true;
}

/// Create a copy of a const pool value. Update CPI to the new index and return
/// the label UID.
static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
  MachineConstantPool *MCP = MF.getConstantPool();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPI];
  assert(MCPE.isMachineConstantPoolEntry() &&
         "Expecting a machine constantpool entry!");
  ARMConstantPoolValue *ACPV =
      static_cast<ARMConstantPoolValue *>(MCPE.Val.MachineCPVal);

  unsigned PCLabelId = AFI->createPICLabelUId();
  ARMConstantPoolValue *NewCPV = nullptr;

  // FIXME: The below assumes PIC relocation model and that the function
  // is Thumb mode (t1 or t2). PCAdjustment would be 8 for ARM mode PIC, and
  // zero for non-PIC in ARM or Thumb. The callers are all Thumb LDR
  // instructions, so that's probably OK, but is PIC always correct when
  // we get here?
  if (ACPV->isGlobalValue())
    NewCPV = ARMConstantPoolConstant::Create(
        cast<ARMConstantPoolConstant>(ACPV)->getGV(), PCLabelId,
        ARMCP::CPValue, 4, ACPV->getModifier(),
        ACPV->mustAddCurrentAddress());
  else if (ACPV->isExtSymbol())
    NewCPV = ARMConstantPoolSymbol::Create(
        MF.getFunction().getContext(),
        cast<ARMConstantPoolSymbol>(ACPV)->getSymbol(), PCLabelId, 4);
  else if (ACPV->isBlockAddress())
    NewCPV = ARMConstantPoolConstant::Create(
        cast<ARMConstantPoolConstant>(ACPV)->getBlockAddress(), PCLabelId,
        ARMCP::CPBlockAddress, 4);
  else if (ACPV->isLSDA())
    NewCPV = ARMConstantPoolConstant::Create(&MF.getFunction(), PCLabelId,
                                             ARMCP::CPLSDA, 4);
  else if (ACPV->isMachineBasicBlock())
    NewCPV = ARMConstantPoolMBB::Create(
        MF.getFunction().getContext(),
        cast<ARMConstantPoolMBB>(ACPV)->getMBB(), PCLabelId, 4);
  else
    llvm_unreachable("Unexpected ARM constantpool value type!!");
  CPI = MCP->getConstantPoolIndex(NewCPV, MCPE.getAlign());
  return PCLabelId;
}

void ARMBaseInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
                                     Register DestReg, unsigned SubIdx,
                                     const MachineInstr &Orig,
                                     const TargetRegisterInfo &TRI) const {
  unsigned Opcode = Orig.getOpcode();
  switch (Opcode) {
  default: {
    MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
    MI->substituteRegister(Orig.getOperand(0).getReg(), DestReg, SubIdx, TRI);
    MBB.insert(I, MI);
    break;
  }
  case ARM::tLDRpci_pic:
  case ARM::t2LDRpci_pic: {
    MachineFunction &MF = *MBB.getParent();
    unsigned CPI = Orig.getOperand(1).getIndex();
    unsigned PCLabelId = duplicateCPV(MF, CPI);
    BuildMI(MBB, I, Orig.getDebugLoc(), get(Opcode), DestReg)
        .addConstantPoolIndex(CPI)
        .addImm(PCLabelId)
        .cloneMemRefs(Orig);
    break;
  }
  }
}

MachineInstr &
ARMBaseInstrInfo::duplicate(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator InsertBefore,
                            const MachineInstr &Orig) const {
  MachineInstr &Cloned = TargetInstrInfo::duplicate(MBB, InsertBefore, Orig);
  MachineBasicBlock::instr_iterator I = Cloned.getIterator();
  for (;;) {
    switch (I->getOpcode()) {
    case ARM::tLDRpci_pic:
    case ARM::t2LDRpci_pic: {
      MachineFunction &MF = *MBB.getParent();
      unsigned CPI = I->getOperand(1).getIndex();
      unsigned PCLabelId = duplicateCPV(MF, CPI);
      I->getOperand(1).setIndex(CPI);
      I->getOperand(2).setImm(PCLabelId);
      break;
    }
    }
    if (!I->isBundledWithSucc())
      break;
    ++I;
  }
  return Cloned;
}

bool ARMBaseInstrInfo::produceSameValue(const MachineInstr &MI0,
                                        const MachineInstr &MI1,
                                        const MachineRegisterInfo *MRI) const {
  unsigned Opcode = MI0.getOpcode();
  if (Opcode == ARM::t2LDRpci || Opcode == ARM::t2LDRpci_pic ||
      Opcode == ARM::tLDRpci || Opcode == ARM::tLDRpci_pic ||
      Opcode == ARM::LDRLIT_ga_pcrel || Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
      Opcode == ARM::tLDRLIT_ga_pcrel || Opcode == ARM::t2LDRLIT_ga_pcrel ||
      Opcode == ARM::MOV_ga_pcrel || Opcode == ARM::MOV_ga_pcrel_ldr ||
      Opcode == ARM::t2MOV_ga_pcrel) {
    if (MI1.getOpcode() != Opcode)
      return false;
    if (MI0.getNumOperands() != MI1.getNumOperands())
      return false;

    const MachineOperand &MO0 = MI0.getOperand(1);
    const MachineOperand &MO1 = MI1.getOperand(1);
    if (MO0.getOffset() != MO1.getOffset())
      return false;

    if (Opcode == ARM::LDRLIT_ga_pcrel || Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
        Opcode == ARM::tLDRLIT_ga_pcrel || Opcode == ARM::t2LDRLIT_ga_pcrel ||
        Opcode == ARM::MOV_ga_pcrel || Opcode == ARM::MOV_ga_pcrel_ldr ||
        Opcode == ARM::t2MOV_ga_pcrel)
      // Ignore the PC labels.
      return MO0.getGlobal() == MO1.getGlobal();

    const MachineFunction *MF = MI0.getParent()->getParent();
    const MachineConstantPool *MCP = MF->getConstantPool();
    int CPI0 = MO0.getIndex();
    int CPI1 = MO1.getIndex();
    const MachineConstantPoolEntry &MCPE0 = MCP->getConstants()[CPI0];
    const MachineConstantPoolEntry &MCPE1 = MCP->getConstants()[CPI1];
    bool isARMCP0 = MCPE0.isMachineConstantPoolEntry();
    bool isARMCP1 = MCPE1.isMachineConstantPoolEntry();
    if (isARMCP0 && isARMCP1) {
      ARMConstantPoolValue *ACPV0 =
          static_cast<ARMConstantPoolValue *>(MCPE0.Val.MachineCPVal);
      ARMConstantPoolValue *ACPV1 =
          static_cast<ARMConstantPoolValue *>(MCPE1.Val.MachineCPVal);
      return ACPV0->hasSameValue(ACPV1);
    } else if (!isARMCP0 && !isARMCP1) {
      return MCPE0.Val.ConstVal == MCPE1.Val.ConstVal;
    }
    return false;
  } else if (Opcode == ARM::PICLDR) {
    if (MI1.getOpcode() != Opcode)
      return false;
    if (MI0.getNumOperands() != MI1.getNumOperands())
      return false;

    Register Addr0 = MI0.getOperand(1).getReg();
    Register Addr1 = MI1.getOperand(1).getReg();
    if (Addr0 != Addr1) {
      if (!MRI || !Register::isVirtualRegister(Addr0) ||
          !Register::isVirtualRegister(Addr1))
        return false;

      // This assumes SSA form.
      MachineInstr *Def0 = MRI->getVRegDef(Addr0);
      MachineInstr *Def1 = MRI->getVRegDef(Addr1);
      // Check if the loaded values, e.g. a constantpool of a global address,
      // are the same.
      if (!produceSameValue(*Def0, *Def1, MRI))
        return false;
    }

    for (unsigned i = 3, e = MI0.getNumOperands(); i != e; ++i) {
      // %12 = PICLDR %11, 0, 14, %noreg
      const MachineOperand &MO0 = MI0.getOperand(i);
      const MachineOperand &MO1 = MI1.getOperand(i);
      if (!MO0.isIdenticalTo(MO1))
        return false;
    }
    return true;
  }

  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

/// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to
/// determine if two loads are loading from the same base address. It should
/// only return true if the base pointers are the same and the only difference
/// between the two addresses is the offset. It also returns the offsets by
/// reference.
///
/// FIXME: remove this in favor of the MachineInstr interface once pre-RA-sched
/// is permanently disabled.
bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                               int64_t &Offset1,
                                               int64_t &Offset2) const {
  // Don't worry about Thumb: just ARM and Thumb2.
  if (Subtarget.isThumb1Only()) return false;

  if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
    return false;

  switch (Load1->getMachineOpcode()) {
  default:
    return false;
  case ARM::LDRi12:
  case ARM::LDRBi12:
  case ARM::LDRD:
  case ARM::LDRH:
  case ARM::LDRSB:
  case ARM::LDRSH:
  case ARM::VLDRD:
  case ARM::VLDRS:
  case ARM::t2LDRi8:
  case ARM::t2LDRBi8:
  case ARM::t2LDRDi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRi12:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:
    break;
  }

  switch (Load2->getMachineOpcode()) {
  default:
    return false;
  case ARM::LDRi12:
  case ARM::LDRBi12:
  case ARM::LDRD:
  case ARM::LDRH:
  case ARM::LDRSB:
  case ARM::LDRSH:
  case ARM::VLDRD:
  case ARM::VLDRS:
  case ARM::t2LDRi8:
  case ARM::t2LDRBi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRi12:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:
    break;
  }

  // Check if base addresses and chain operands match.
  if (Load1->getOperand(0) != Load2->getOperand(0) ||
      Load1->getOperand(4) != Load2->getOperand(4))
    return false;

  // Index should be Reg0.
  if (Load1->getOperand(3) != Load2->getOperand(3))
    return false;

  // Determine the offsets.
  if (isa<ConstantSDNode>(Load1->getOperand(1)) &&
      isa<ConstantSDNode>(Load2->getOperand(1))) {
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(1))->getSExtValue();
    Offset2 = cast<ConstantSDNode>(Load2->getOperand(1))->getSExtValue();
    return true;
  }

  return false;
}

/// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
/// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
/// be scheduled together. On some targets if two loads are loading from
/// addresses in the same cache line, it's better if they are scheduled
/// together. This function takes two integers that represent the load offsets
/// from the common base address. It returns true if it decides it's desirable
/// to schedule the two loads together. "NumLoads" is the number of loads that
/// have already been scheduled after Load1.
///
/// FIXME: remove this in favor of the MachineInstr interface once pre-RA-sched
/// is permanently disabled.
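///
/// For example, two LDRi12 loads from the same base at offsets 0 and 4 are
/// likely to share a cache line, so they are good candidates to cluster,
/// provided fewer than four loads have already been grouped together.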
bool ARMBaseInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                               int64_t Offset1, int64_t Offset2,
                                               unsigned NumLoads) const {
  // Don't worry about Thumb: just ARM and Thumb2.
  if (Subtarget.isThumb1Only()) return false;

  assert(Offset2 > Offset1);

  if ((Offset2 - Offset1) / 8 > 64)
    return false;

  // If the machine opcodes differ, we consider the loads to have different
  // base addresses, EXCEPT for Thumb2 byte loads where one is t2LDRBi8 and
  // the other t2LDRBi12. Those are treated as the same because they are just
  // different encoding forms of the same basic instruction.
  if ((Load1->getMachineOpcode() != Load2->getMachineOpcode()) &&
      !((Load1->getMachineOpcode() == ARM::t2LDRBi8 &&
         Load2->getMachineOpcode() == ARM::t2LDRBi12) ||
        (Load1->getMachineOpcode() == ARM::t2LDRBi12 &&
         Load2->getMachineOpcode() == ARM::t2LDRBi8)))
    return false; // FIXME: overly conservative?

  // Four loads in a row should be sufficient.
  if (NumLoads >= 3)
    return false;

  return true;
}

bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                            const MachineBasicBlock *MBB,
                                            const MachineFunction &MF) const {
  // Debug info is never a scheduling boundary. It's necessary to be explicit
  // due to the special treatment of IT instructions below, otherwise a
  // dbg_value followed by an IT will result in the IT instruction being
  // considered a scheduling hazard, which is wrong. It should be the actual
  // instruction preceding the dbg_value instruction(s), just like it is
  // when debug info is not present.
  if (MI.isDebugInstr())
    return false;

  // Terminators and labels can't be scheduled around.
  if (MI.isTerminator() || MI.isPosition())
    return true;

  // INLINEASM_BR can jump to another block.
  if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
    return true;

  // Treat the start of the IT block as a scheduling boundary, but schedule
  // t2IT along with all instructions following it.
  // FIXME: This is a big hammer. But the alternative is to add all potential
  // true and anti dependencies to IT block instructions as implicit operands
  // to the t2IT instruction. The added compile time and complexity does not
  // seem worth it.
  MachineBasicBlock::const_iterator I = MI;
  // Make sure to skip any debug instructions.
  while (++I != MBB->end() && I->isDebugInstr())
    ;
  if (I != MBB->end() && I->getOpcode() == ARM::t2IT)
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  // Calls don't actually change the stack pointer, even if they have imp-defs.
  // No ARM calling conventions change the stack pointer. (X86 calling
  // conventions sometimes do).
  if (!MI.isCall() && MI.definesRegister(ARM::SP))
    return true;

  return false;
}

bool ARMBaseInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &MBB,
                    unsigned NumCycles, unsigned ExtraPredCycles,
                    BranchProbability Probability) const {
  if (!NumCycles)
    return false;

  // If we are optimizing for size, see if the branch in the predecessor can be
  // lowered to cbn?z by the constant island lowering pass, and return false if
  // so. This results in a shorter instruction sequence.
  if (MBB.getParent()->getFunction().hasOptSize()) {
    MachineBasicBlock *Pred = *MBB.pred_begin();
    if (!Pred->empty()) {
      MachineInstr *LastMI = &*Pred->rbegin();
      if (LastMI->getOpcode() == ARM::t2Bcc) {
        const TargetRegisterInfo *TRI = &getRegisterInfo();
        MachineInstr *CmpMI = findCMPToFoldIntoCBZ(LastMI, TRI);
        if (CmpMI)
          return false;
      }
    }
  }
  return isProfitableToIfCvt(MBB, NumCycles, ExtraPredCycles,
                             MBB, 0, 0, Probability);
}

bool ARMBaseInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &TBB,
                    unsigned TCycles, unsigned TExtra,
                    MachineBasicBlock &FBB,
                    unsigned FCycles, unsigned FExtra,
                    BranchProbability Probability) const {
  if (!TCycles)
    return false;

  // In Thumb code we often end up trading one branch for an IT block, and
  // if we are cloning, that can increase code size. Prevent blocks with
  // multiple predecessors from being ifcvted to prevent this cloning.
  if (Subtarget.isThumb2() && TBB.getParent()->getFunction().hasMinSize()) {
    if (TBB.pred_size() != 1 || FBB.pred_size() != 1)
      return false;
  }

  // Attempt to estimate the relative costs of predication versus branching.
  // Here we scale up each component of UnpredCost to avoid precision issues
  // when scaling TCycles/FCycles by Probability.
  const unsigned ScalingUpFactor = 1024;

  unsigned PredCost = (TCycles + FCycles + TExtra + FExtra) * ScalingUpFactor;
  unsigned UnpredCost;
  if (!Subtarget.hasBranchPredictor()) {
    // When we don't have a branch predictor it's always cheaper to not take a
    // branch than take it, so we have to take that into account.
    unsigned NotTakenBranchCost = 1;
    unsigned TakenBranchCost = Subtarget.getMispredictionPenalty();
    unsigned TUnpredCycles, FUnpredCycles;
    if (!FCycles) {
      // Triangle: TBB is the fallthrough
      TUnpredCycles = TCycles + NotTakenBranchCost;
      FUnpredCycles = TakenBranchCost;
    } else {
      // Diamond: TBB is the block that is branched to, FBB is the fallthrough
      TUnpredCycles = TCycles + TakenBranchCost;
      FUnpredCycles = FCycles + NotTakenBranchCost;
      // The branch at the end of FBB will disappear when it's predicated, so
      // discount it from PredCost.
      PredCost -= 1 * ScalingUpFactor;
    }
    // The total cost is the cost of each path scaled by their probabilities.
    unsigned TUnpredCost = Probability.scale(TUnpredCycles * ScalingUpFactor);
    unsigned FUnpredCost =
        Probability.getCompl().scale(FUnpredCycles * ScalingUpFactor);
    UnpredCost = TUnpredCost + FUnpredCost;
    // When predicating, assume that the first IT can be folded away but later
    // ones cost one cycle each.
    if (Subtarget.isThumb2() && TCycles + FCycles > 4) {
      PredCost += ((TCycles + FCycles - 4) / 4) * ScalingUpFactor;
    }
  } else {
    unsigned TUnpredCost = Probability.scale(TCycles * ScalingUpFactor);
    unsigned FUnpredCost =
        Probability.getCompl().scale(FCycles * ScalingUpFactor);
    UnpredCost = TUnpredCost + FUnpredCost;
    UnpredCost += 1 * ScalingUpFactor; // The branch itself
    UnpredCost += Subtarget.getMispredictionPenalty() * ScalingUpFactor / 10;
  }

  return PredCost <= UnpredCost;
}

unsigned
ARMBaseInstrInfo::extraSizeToPredicateInstructions(const MachineFunction &MF,
                                                   unsigned NumInsts) const {
  // Thumb2 needs a 2-byte IT instruction to predicate up to 4 instructions.
  // ARM has a condition code field in every predicable instruction, using it
  // doesn't change code size.
  if (!Subtarget.isThumb2())
    return 0;

  // It's possible that the size of the IT is restricted to a single block.
  unsigned MaxInsts = Subtarget.restrictIT() ? 1 : 4;
  return divideCeil(NumInsts, MaxInsts) * 2;
}

unsigned
ARMBaseInstrInfo::predictBranchSizeForIfCvt(MachineInstr &MI) const {
  // If this branch is likely to be folded into the comparison to form a
  // CB(N)Z, then removing it won't reduce code size at all, because that will
  // just replace the CB(N)Z with a CMP.
  if (MI.getOpcode() == ARM::t2Bcc &&
      findCMPToFoldIntoCBZ(&MI, &getRegisterInfo()))
    return 0;

  unsigned Size = getInstSizeInBytes(MI);

  // For Thumb2, all branches are 32-bit instructions during the if conversion
  // pass, but may be replaced with 16-bit instructions during size reduction.
  // Since the branches considered by if conversion tend to be forward branches
  // over small basic blocks, they are very likely to be in range for the
  // narrow instructions, so we assume the final code size will be half what it
  // currently is.
  if (Subtarget.isThumb2())
    Size /= 2;

  return Size;
}

bool
ARMBaseInstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                            MachineBasicBlock &FMBB) const {
  // Reduce false anti-dependencies to let the target's out-of-order execution
  // engine do its thing.
  return Subtarget.isProfitableToUnpredicate();
}

/// getInstrPredicate - If instruction is predicated, returns its predicate
/// condition, otherwise returns AL. It also returns the condition code
/// register by reference.
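///
/// e.g. for a predicated add ('addeq r0, r1, r2') this returns ARMCC::EQ and
/// sets PredReg to CPSR; for an unpredicated instruction it returns ARMCC::AL
/// and sets PredReg to 0.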
ARMCC::CondCodes llvm::getInstrPredicate(const MachineInstr &MI,
                                         Register &PredReg) {
  int PIdx = MI.findFirstPredOperandIdx();
  if (PIdx == -1) {
    PredReg = 0;
    return ARMCC::AL;
  }

  PredReg = MI.getOperand(PIdx+1).getReg();
  return (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
}

unsigned llvm::getMatchingCondBranchOpcode(unsigned Opc) {
  if (Opc == ARM::B)
    return ARM::Bcc;
  if (Opc == ARM::tB)
    return ARM::tBcc;
  if (Opc == ARM::t2B)
    return ARM::t2Bcc;

  llvm_unreachable("Unknown unconditional branch opcode!");
}

MachineInstr *ARMBaseInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                       bool NewMI,
                                                       unsigned OpIdx1,
                                                       unsigned OpIdx2) const {
  switch (MI.getOpcode()) {
  case ARM::MOVCCr:
  case ARM::t2MOVCCr: {
    // MOVCC can be commuted by inverting the condition.
    Register PredReg;
    ARMCC::CondCodes CC = getInstrPredicate(MI, PredReg);
    // MOVCC AL can't be inverted. Shouldn't happen.
    if (CC == ARMCC::AL || PredReg != ARM::CPSR)
      return nullptr;
    MachineInstr *CommutedMI =
        TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
    if (!CommutedMI)
      return nullptr;
    // After swapping the MOVCC operands, also invert the condition.
    CommutedMI->getOperand(CommutedMI->findFirstPredOperandIdx())
        .setImm(ARMCC::getOppositeCondition(CC));
    return CommutedMI;
  }
  }
  return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

/// Identify instructions that can be folded into a MOVCC instruction, and
/// return the defining instruction.
MachineInstr *
ARMBaseInstrInfo::canFoldIntoMOVCC(Register Reg, const MachineRegisterInfo &MRI,
                                   const TargetInstrInfo *TII) const {
  if (!Reg.isVirtual())
    return nullptr;
  if (!MRI.hasOneNonDBGUse(Reg))
    return nullptr;
  MachineInstr *MI = MRI.getVRegDef(Reg);
  if (!MI)
    return nullptr;
  // Check if MI can be predicated and folded into the MOVCC.
  if (!isPredicable(*MI))
    return nullptr;
  // Check if MI has any non-dead defs or physreg uses. This also detects
  // predicated instructions which will be reading CPSR.
  for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 1)) {
    // Reject frame index operands, PEI can't handle the predicated pseudos.
    if (MO.isFI() || MO.isCPI() || MO.isJTI())
      return nullptr;
    if (!MO.isReg())
      continue;
    // MI can't have any tied operands, that would conflict with predication.
    if (MO.isTied())
      return nullptr;
    if (Register::isPhysicalRegister(MO.getReg()))
      return nullptr;
    if (MO.isDef() && !MO.isDead())
      return nullptr;
  }
  bool DontMoveAcrossStores = true;
  if (!MI->isSafeToMove(/* AliasAnalysis = */ nullptr, DontMoveAcrossStores))
    return nullptr;
  return MI;
}

bool ARMBaseInstrInfo::analyzeSelect(const MachineInstr &MI,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     unsigned &TrueOp, unsigned &FalseOp,
                                     bool &Optimizable) const {
  assert((MI.getOpcode() == ARM::MOVCCr || MI.getOpcode() == ARM::t2MOVCCr) &&
         "Unknown select instruction");
  // MOVCC operands:
  // 0: Def.
  // 1: True use.
  // 2: False use.
  // 3: Condition code.
  // 4: CPSR use.
  TrueOp = 1;
  FalseOp = 2;
  Cond.push_back(MI.getOperand(3));
  Cond.push_back(MI.getOperand(4));
  // We can always fold a def.
  Optimizable = true;
  return false;
}

MachineInstr *
ARMBaseInstrInfo::optimizeSelect(MachineInstr &MI,
                                 SmallPtrSetImpl<MachineInstr *> &SeenMIs,
                                 bool PreferFalse) const {
  assert((MI.getOpcode() == ARM::MOVCCr || MI.getOpcode() == ARM::t2MOVCCr) &&
         "Unknown select instruction");
  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  MachineInstr *DefMI = canFoldIntoMOVCC(MI.getOperand(2).getReg(), MRI, this);
  bool Invert = !DefMI;
  if (!DefMI)
    DefMI = canFoldIntoMOVCC(MI.getOperand(1).getReg(), MRI, this);
  if (!DefMI)
    return nullptr;

  // Find new register class to use.
  MachineOperand FalseReg = MI.getOperand(Invert ? 2 : 1);
  MachineOperand TrueReg = MI.getOperand(Invert ? 1 : 2);
  Register DestReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *FalseClass = MRI.getRegClass(FalseReg.getReg());
  const TargetRegisterClass *TrueClass = MRI.getRegClass(TrueReg.getReg());
  if (!MRI.constrainRegClass(DestReg, FalseClass))
    return nullptr;
  if (!MRI.constrainRegClass(DestReg, TrueClass))
    return nullptr;

  // Create a new predicated version of DefMI.
  // Rfalse is the first use.
  MachineInstrBuilder NewMI =
      BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), DefMI->getDesc(), DestReg);

  // Copy all the DefMI operands, excluding its (null) predicate.
  const MCInstrDesc &DefDesc = DefMI->getDesc();
  for (unsigned i = 1, e = DefDesc.getNumOperands();
       i != e && !DefDesc.OpInfo[i].isPredicate(); ++i)
    NewMI.add(DefMI->getOperand(i));

  unsigned CondCode = MI.getOperand(3).getImm();
  if (Invert)
    NewMI.addImm(ARMCC::getOppositeCondition(ARMCC::CondCodes(CondCode)));
  else
    NewMI.addImm(CondCode);
  NewMI.add(MI.getOperand(4));

  // DefMI is not the -S version that sets CPSR, so add an optional %noreg.
  if (NewMI->hasOptionalDef())
    NewMI.add(condCodeOp());

  // The output register value when the predicate is false is an implicit
  // register operand tied to the first def.
  // The tie makes the register allocator ensure the FalseReg is allocated the
  // same register as operand 0.
  FalseReg.setImplicit();
  NewMI.add(FalseReg);
  NewMI->tieOperands(0, NewMI->getNumOperands() - 1);

  // Update SeenMIs set: register newly created MI and erase removed DefMI.
  SeenMIs.insert(NewMI);
  SeenMIs.erase(DefMI);

  // If MI is inside a loop, and DefMI is outside the loop, then kill flags on
  // DefMI would be invalid when transferred inside the loop. Checking for a
  // loop is expensive, but at least remove kill flags if they are in different
  // BBs.
  if (DefMI->getParent() != MI.getParent())
    NewMI->clearKillInfo();

  // The caller will erase MI, but not DefMI.
  DefMI->eraseFromParent();
  return NewMI;
}

/// Map pseudo instructions that imply an 'S' bit onto real opcodes. Whether the
/// instruction is encoded with an 'S' bit is determined by the optional CPSR
/// def operand.
///
/// This will go away once we can teach tblgen how to set the optional CPSR def
/// operand itself.
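///
/// For example, the ADDSri pseudo maps to ADDri, with the 'S' behaviour
/// expressed by placing CPSR in the real instruction's optional-def operand.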
struct AddSubFlagsOpcodePair {
  uint16_t PseudoOpc;
  uint16_t MachineOpc;
};

static const AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[] = {
    {ARM::ADDSri, ARM::ADDri},
    {ARM::ADDSrr, ARM::ADDrr},
    {ARM::ADDSrsi, ARM::ADDrsi},
    {ARM::ADDSrsr, ARM::ADDrsr},

    {ARM::SUBSri, ARM::SUBri},
    {ARM::SUBSrr, ARM::SUBrr},
    {ARM::SUBSrsi, ARM::SUBrsi},
    {ARM::SUBSrsr, ARM::SUBrsr},

    {ARM::RSBSri, ARM::RSBri},
    {ARM::RSBSrsi, ARM::RSBrsi},
    {ARM::RSBSrsr, ARM::RSBrsr},

    {ARM::tADDSi3, ARM::tADDi3},
    {ARM::tADDSi8, ARM::tADDi8},
    {ARM::tADDSrr, ARM::tADDrr},
    {ARM::tADCS, ARM::tADC},

    {ARM::tSUBSi3, ARM::tSUBi3},
    {ARM::tSUBSi8, ARM::tSUBi8},
    {ARM::tSUBSrr, ARM::tSUBrr},
    {ARM::tSBCS, ARM::tSBC},
    {ARM::tRSBS, ARM::tRSB},
    {ARM::tLSLSri, ARM::tLSLri},

    {ARM::t2ADDSri, ARM::t2ADDri},
    {ARM::t2ADDSrr, ARM::t2ADDrr},
    {ARM::t2ADDSrs, ARM::t2ADDrs},

    {ARM::t2SUBSri, ARM::t2SUBri},
    {ARM::t2SUBSrr, ARM::t2SUBrr},
    {ARM::t2SUBSrs, ARM::t2SUBrs},

    {ARM::t2RSBSri, ARM::t2RSBri},
    {ARM::t2RSBSrs, ARM::t2RSBrs},
};

unsigned llvm::convertAddSubFlagsOpcode(unsigned OldOpc) {
  for (unsigned i = 0, e = array_lengthof(AddSubFlagsOpcodeMap); i != e; ++i)
    if (OldOpc == AddSubFlagsOpcodeMap[i].PseudoOpc)
      return AddSubFlagsOpcodeMap[i].MachineOpc;
  return 0;
}

void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator &MBBI,
                                   const DebugLoc &dl, Register DestReg,
                                   Register BaseReg, int NumBytes,
                                   ARMCC::CondCodes Pred, Register PredReg,
                                   const ARMBaseInstrInfo &TII,
                                   unsigned MIFlags) {
  if (NumBytes == 0 && DestReg != BaseReg) {
    BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), DestReg)
        .addReg(BaseReg, RegState::Kill)
        .add(predOps(Pred, PredReg))
        .add(condCodeOp())
        .setMIFlags(MIFlags);
    return;
  }

  bool isSub = NumBytes < 0;
  if (isSub) NumBytes = -NumBytes;

  while (NumBytes) {
    unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes);
    unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt);
    assert(ThisVal && "Didn't extract field correctly");

    // We will handle these bits from offset, clear them.
    NumBytes &= ~ThisVal;

    assert(ARM_AM::getSOImmVal(ThisVal) != -1 && "Bit extraction didn't work?");

    // Build the new ADD / SUB.
    unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
    BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
        .addReg(BaseReg, RegState::Kill)
        .addImm(ThisVal)
        .add(predOps(Pred, PredReg))
        .add(condCodeOp())
        .setMIFlags(MIFlags);
    BaseReg = DestReg;
  }
}

bool llvm::tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget,
                                      MachineFunction &MF, MachineInstr *MI,
                                      unsigned NumBytes) {
  // This optimisation potentially adds lots of load and store
  // micro-operations, so it's really only a benefit to code size.
  if (!Subtarget.hasMinSize())
    return false;

  // If only one register is pushed/popped, LLVM can use an LDR/STR
  // instead. We can't modify those so make sure we're dealing with an
  // instruction we understand.
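  // Illustrative example (Thumb, minsize): a 'sub sp, #8' adjacent to
  // 'push {r4, lr}' can be folded away by pushing two extra scratch
  // registers instead, e.g. 'push {r2, r3, r4, lr}'.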
  bool IsPop = isPopOpcode(MI->getOpcode());
  bool IsPush = isPushOpcode(MI->getOpcode());
  if (!IsPush && !IsPop)
    return false;

  bool IsVFPPushPop = MI->getOpcode() == ARM::VSTMDDB_UPD ||
                      MI->getOpcode() == ARM::VLDMDIA_UPD;
  bool IsT1PushPop = MI->getOpcode() == ARM::tPUSH ||
                     MI->getOpcode() == ARM::tPOP ||
                     MI->getOpcode() == ARM::tPOP_RET;

  assert((IsT1PushPop || (MI->getOperand(0).getReg() == ARM::SP &&
                          MI->getOperand(1).getReg() == ARM::SP)) &&
         "trying to fold sp update into non-sp-updating push/pop");

  // The VFP push & pop act on D-registers, so we can only fold in an
  // adjustment by a multiple of 8 bytes correctly. Similarly rN is 4 bytes.
  // Don't try if this is violated.
  if (NumBytes % (IsVFPPushPop ? 8 : 4) != 0)
    return false;

  // ARM and Thumb2 push/pop insts have explicit "sp, sp" operands (+
  // pred) so the list starts at 4. Thumb1 starts after the predicate.
  int RegListIdx = IsT1PushPop ? 2 : 4;

  // Calculate the space we'll need in terms of registers.
  unsigned RegsNeeded;
  const TargetRegisterClass *RegClass;
  if (IsVFPPushPop) {
    RegsNeeded = NumBytes / 8;
    RegClass = &ARM::DPRRegClass;
  } else {
    RegsNeeded = NumBytes / 4;
    RegClass = &ARM::GPRRegClass;
  }

  // We're going to have to strip all list operands off before
  // re-adding them since the order matters, so save the existing ones
  // for later.
  SmallVector<MachineOperand, 4> RegList;

  // We're also going to need the first register transferred by this
  // instruction, which won't necessarily be the first register in the list.
  unsigned FirstRegEnc = -1;

  const TargetRegisterInfo *TRI = MF.getRegInfo().getTargetRegisterInfo();
  for (int i = MI->getNumOperands() - 1; i >= RegListIdx; --i) {
    MachineOperand &MO = MI->getOperand(i);
    RegList.push_back(MO);

    if (MO.isReg() && !MO.isImplicit() &&
        TRI->getEncodingValue(MO.getReg()) < FirstRegEnc)
      FirstRegEnc = TRI->getEncodingValue(MO.getReg());
  }

  const MCPhysReg *CSRegs = TRI->getCalleeSavedRegs(&MF);

  // Now try to find enough space in the reglist to allocate NumBytes.
  for (int CurRegEnc = FirstRegEnc - 1; CurRegEnc >= 0 && RegsNeeded;
       --CurRegEnc) {
    unsigned CurReg = RegClass->getRegister(CurRegEnc);
    if (IsT1PushPop && CurRegEnc > TRI->getEncodingValue(ARM::R7))
      continue;
    if (!IsPop) {
      // Pushing any register is completely harmless, mark the register
      // involved as undef since we don't care about its value and must not
      // restore it during stack unwinding.
      RegList.push_back(MachineOperand::CreateReg(CurReg, false, false,
                                                  false, false, true));
      --RegsNeeded;
      continue;
    }

    // However, we can only pop an extra register if it's not live. For
    // registers live within the function we might clobber a return value
    // register; the other way a register can be live here is if it's
    // callee-saved.
    if (isCalleeSavedRegister(CurReg, CSRegs) ||
        MI->getParent()->computeRegisterLiveness(TRI, CurReg, MI) !=
            MachineBasicBlock::LQR_Dead) {
      // VFP pops don't allow holes in the register list, so any skip is fatal
      // for our transformation. GPR pops do, so we should just keep looking.
      if (IsVFPPushPop)
        return false;
      else
        continue;
    }

    // Mark the unimportant registers as <def,dead> in the POP.
    RegList.push_back(MachineOperand::CreateReg(CurReg, true, false, false,
                                                true));
    --RegsNeeded;
  }

  if (RegsNeeded > 0)
    return false;

  // Finally we know we can profitably perform the optimisation so go
  // ahead: strip all existing registers off and add them back again
  // in the right order.
  for (int i = MI->getNumOperands() - 1; i >= RegListIdx; --i)
    MI->removeOperand(i);

  // Add the complete list back in.
  MachineInstrBuilder MIB(MF, &*MI);
  for (const MachineOperand &MO : llvm::reverse(RegList))
    MIB.add(MO);

  return true;
}

bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                Register FrameReg, int &Offset,
                                const ARMBaseInstrInfo &TII) {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  bool isSub = false;

  // Memory operands in inline assembly always use AddrMode2.
  if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR)
    AddrMode = ARMII::AddrMode2;

  if (Opcode == ARM::ADDri) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();
    if (Offset == 0) {
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::MOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.removeOperand(FrameRegIdx+1);
      Offset = 0;
      return true;
    } else if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
      MI.setDesc(TII.get(ARM::SUBri));
    }

    // Common case: small offset, fits into instruction.
    if (ARM_AM::getSOImmVal(Offset) != -1) {
      // Replace the FrameIndex with sp / fp
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      Offset = 0;
      return true;
    }

    // Otherwise, pull as much of the immediate into this ADDri/SUBri
    // as possible.
    unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
    unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt);

    // We will handle these bits from offset, clear them.
    Offset &= ~ThisImmVal;

    // Get the properly encoded SOImmVal field.
    assert(ARM_AM::getSOImmVal(ThisImmVal) != -1 &&
           "Bit extraction didn't work?");
    MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
  } else {
    unsigned ImmIdx = 0;
    int InstrOffs = 0;
    unsigned NumBits = 0;
    unsigned Scale = 1;
    switch (AddrMode) {
    case ARMII::AddrMode_i12:
      ImmIdx = FrameRegIdx + 1;
      InstrOffs = MI.getOperand(ImmIdx).getImm();
      NumBits = 12;
      break;
    case ARMII::AddrMode2:
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 12;
      break;
    case ARMII::AddrMode3:
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      break;
    case ARMII::AddrMode4:
    case ARMII::AddrMode6:
      // Can't fold any offset even if it's zero.
      return false;
    case ARMII::AddrMode5:
      ImmIdx = FrameRegIdx+1;
      InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 4;
      break;
    case ARMII::AddrMode5FP16:
      ImmIdx = FrameRegIdx+1;
      InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 2;
      break;
    case ARMII::AddrModeT2_i7:
    case ARMII::AddrModeT2_i7s2:
    case ARMII::AddrModeT2_i7s4:
      ImmIdx = FrameRegIdx+1;
      InstrOffs = MI.getOperand(ImmIdx).getImm();
      NumBits = 7;
      Scale = (AddrMode == ARMII::AddrModeT2_i7s2 ? 2 :
               AddrMode == ARMII::AddrModeT2_i7s4 ? 4 : 1);
      break;
    default:
      llvm_unreachable("Unsupported addressing mode!");
    }

    Offset += InstrOffs * Scale;
    assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
    if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
    }

    // Attempt to fold the address computation if the opcode has offset bits.
    if (NumBits > 0) {
      // Common case: small offset, fits into instruction.
      MachineOperand &ImmOp = MI.getOperand(ImmIdx);
      int ImmedOffset = Offset / Scale;
      unsigned Mask = (1 << NumBits) - 1;
      if ((unsigned)Offset <= Mask * Scale) {
        // Replace the FrameIndex with sp
        MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
        // FIXME: When addrmode2 goes away, this will simplify (like the
        // T2 version), as the LDR.i12 versions don't need the encoding
        // tricks for the offset value.
        if (isSub) {
          if (AddrMode == ARMII::AddrMode_i12)
            ImmedOffset = -ImmedOffset;
          else
            ImmedOffset |= 1 << NumBits;
        }
        ImmOp.ChangeToImmediate(ImmedOffset);
        Offset = 0;
        return true;
      }

      // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
      ImmedOffset = ImmedOffset & Mask;
      if (isSub) {
        if (AddrMode == ARMII::AddrMode_i12)
          ImmedOffset = -ImmedOffset;
        else
          ImmedOffset |= 1 << NumBits;
      }
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset &= ~(Mask*Scale);
    }
  }

  Offset = (isSub) ? -Offset : Offset;
  return Offset == 0;
}

/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2 if it has two register operands, and the value it
/// compares against in CmpValue. Return true if the comparison instruction
/// can be analyzed.
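///
/// e.g. 'CMPri r0, #42' yields SrcReg = r0, SrcReg2 = 0, CmpValue = 42,
/// while 'CMPrr r0, r1' yields SrcReg = r0, SrcReg2 = r1, CmpValue = 0.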
bool ARMBaseInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                                      Register &SrcReg2, int64_t &CmpMask,
                                      int64_t &CmpValue) const {
  switch (MI.getOpcode()) {
  default: break;
  case ARM::CMPri:
  case ARM::t2CMPri:
  case ARM::tCMPi8:
    SrcReg = MI.getOperand(0).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    CmpValue = MI.getOperand(1).getImm();
    return true;
  case ARM::CMPrr:
  case ARM::t2CMPrr:
  case ARM::tCMPr:
    SrcReg = MI.getOperand(0).getReg();
    SrcReg2 = MI.getOperand(1).getReg();
    CmpMask = ~0;
    CmpValue = 0;
    return true;
  case ARM::TSTri:
  case ARM::t2TSTri:
    SrcReg = MI.getOperand(0).getReg();
    SrcReg2 = 0;
    CmpMask = MI.getOperand(1).getImm();
    CmpValue = 0;
    return true;
  }

  return false;
}

/// isSuitableForMask - Identify a suitable 'and' instruction that
/// operates on the given source register and applies the same mask
/// as a 'tst' instruction. Provide a limited look-through for copies.
/// When successful, MI will hold the found instruction.
static bool isSuitableForMask(MachineInstr *&MI, Register SrcReg,
                              int CmpMask, bool CommonUse) {
  switch (MI->getOpcode()) {
  case ARM::ANDri:
  case ARM::t2ANDri:
    if (CmpMask != MI->getOperand(2).getImm())
      return false;
    if (SrcReg == MI->getOperand(CommonUse ? 1 : 0).getReg())
      return true;
    break;
  }

  return false;
}

/// getCmpToAddCondition - assume the flags are set by CMP(a,b), return
/// the condition code if we modify the instructions such that flags are
/// set by ADD(a,b,X).
inline static ARMCC::CondCodes getCmpToAddCondition(ARMCC::CondCodes CC) {
  switch (CC) {
  default: return ARMCC::AL;
  case ARMCC::HS: return ARMCC::LO;
  case ARMCC::LO: return ARMCC::HS;
  case ARMCC::VS: return ARMCC::VS;
  case ARMCC::VC: return ARMCC::VC;
  }
}

/// isRedundantFlagInstr - check whether the first instruction, whose only
/// purpose is to update flags, can be made redundant.
/// CMPrr can be made redundant by SUBrr if the operands are the same.
/// CMPri can be made redundant by SUBri if the operands are the same.
/// CMPrr(r0, r1) can be made redundant by ADDr[ri](r0, r1, X).
/// This function can be extended later on.
inline static bool isRedundantFlagInstr(const MachineInstr *CmpI,
                                        Register SrcReg, Register SrcReg2,
                                        int64_t ImmValue,
                                        const MachineInstr *OI,
                                        bool &IsThumb1) {
  if ((CmpI->getOpcode() == ARM::CMPrr || CmpI->getOpcode() == ARM::t2CMPrr) &&
      (OI->getOpcode() == ARM::SUBrr || OI->getOpcode() == ARM::t2SUBrr) &&
      ((OI->getOperand(1).getReg() == SrcReg &&
        OI->getOperand(2).getReg() == SrcReg2) ||
       (OI->getOperand(1).getReg() == SrcReg2 &&
        OI->getOperand(2).getReg() == SrcReg))) {
    IsThumb1 = false;
    return true;
  }

  if (CmpI->getOpcode() == ARM::tCMPr && OI->getOpcode() == ARM::tSUBrr &&
      ((OI->getOperand(2).getReg() == SrcReg &&
        OI->getOperand(3).getReg() == SrcReg2) ||
       (OI->getOperand(2).getReg() == SrcReg2 &&
        OI->getOperand(3).getReg() == SrcReg))) {
    IsThumb1 = true;
    return true;
  }

  if ((CmpI->getOpcode() == ARM::CMPri || CmpI->getOpcode() == ARM::t2CMPri) &&
      (OI->getOpcode() == ARM::SUBri || OI->getOpcode() == ARM::t2SUBri) &&
      OI->getOperand(1).getReg() == SrcReg &&
      OI->getOperand(2).getImm() == ImmValue) {
    IsThumb1 = false;
    return true;
  }

  if (CmpI->getOpcode() == ARM::tCMPi8 &&
      (OI->getOpcode() == ARM::tSUBi8 || OI->getOpcode() == ARM::tSUBi3) &&
      OI->getOperand(2).getReg() == SrcReg &&
      OI->getOperand(3).getImm() == ImmValue) {
    IsThumb1 = true;
    return true;
  }

  if ((CmpI->getOpcode() == ARM::CMPrr || CmpI->getOpcode() == ARM::t2CMPrr) &&
      (OI->getOpcode() == ARM::ADDrr || OI->getOpcode() == ARM::t2ADDrr ||
       OI->getOpcode() == ARM::ADDri || OI->getOpcode() == ARM::t2ADDri) &&
      OI->getOperand(0).isReg() && OI->getOperand(1).isReg() &&
      OI->getOperand(0).getReg() == SrcReg &&
      OI->getOperand(1).getReg() == SrcReg2) {
    IsThumb1 = false;
    return true;
  }

  if (CmpI->getOpcode() == ARM::tCMPr &&
      (OI->getOpcode() == ARM::tADDi3 || OI->getOpcode() == ARM::tADDi8 ||
       OI->getOpcode() == ARM::tADDrr) &&
      OI->getOperand(0).getReg() == SrcReg &&
      OI->getOperand(2).getReg() == SrcReg2) {
    IsThumb1 = true;
    return true;
  }

  return false;
}

static bool isOptimizeCompareCandidate(MachineInstr *MI, bool &IsThumb1) {
  switch (MI->getOpcode()) {
  default: return false;
  case ARM::tLSLri:
  case ARM::tLSRri:
  case ARM::tLSLrr:
  case ARM::tLSRrr:
  case ARM::tSUBrr:
  case ARM::tADDrr:
  case ARM::tADDi3:
  case ARM::tADDi8:
  case ARM::tSUBi3:
  case ARM::tSUBi8:
  case ARM::tMUL:
  case ARM::tADC:
  case ARM::tSBC:
  case ARM::tRSB:
  case ARM::tAND:
  case ARM::tORR:
  case ARM::tEOR:
  case ARM::tBIC:
  case ARM::tMVN:
  case ARM::tASRri:
  case ARM::tASRrr:
  case ARM::tROR:
    IsThumb1 = true;
    LLVM_FALLTHROUGH;
  case ARM::RSBrr:
  case ARM::RSBri:
  case ARM::RSCrr:
  case ARM::RSCri:
  case ARM::ADDrr:
  case ARM::ADDri:
  case ARM::ADCrr:
  case ARM::ADCri:
  case ARM::SUBrr:
  case ARM::SUBri:
  case ARM::SBCrr:
  case ARM::SBCri:
  case ARM::t2RSBri:
  case ARM::t2ADDrr:
  case ARM::t2ADDri:
  case ARM::t2ADCrr:
  case ARM::t2ADCri:
  case ARM::t2SUBrr:
  case ARM::t2SUBri:
  case ARM::t2SBCrr:
  case ARM::t2SBCri:
  case ARM::ANDrr:
  case ARM::ANDri:
  case ARM::t2ANDrr:
  case ARM::t2ANDri:
  case ARM::ORRrr:
  case ARM::ORRri:
  case ARM::t2ORRrr:
  case ARM::t2ORRri:
  case ARM::EORrr:
  case ARM::EORri:
  case ARM::t2EORrr:
  case ARM::t2EORri:
  case ARM::t2LSRri:
  case ARM::t2LSRrr:
  case ARM::t2LSLri:
  case ARM::t2LSLrr:
    return true;
  }
}

/// optimizeCompareInstr - Convert the instruction supplying the argument to the
/// comparison into one that sets the zero bit in the flags register;
/// Remove a redundant Compare instruction if an earlier instruction can set the
/// flags in the same way as Compare.
/// E.g. SUBrr(r1,r2) and CMPrr(r1,r2). We also handle the case where two
/// operands are swapped: SUBrr(r1,r2) and CMPrr(r2,r1), by updating the
/// condition code of instructions which use the flags.
bool ARMBaseInstrInfo::optimizeCompareInstr(
    MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask,
    int64_t CmpValue, const MachineRegisterInfo *MRI) const {
  // Get the unique definition of SrcReg.
  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
  if (!MI) return false;

  // Masked compares sometimes use the same register as the corresponding
  // 'and'.
  if (CmpMask != ~0) {
    if (!isSuitableForMask(MI, SrcReg, CmpMask, false) || isPredicated(*MI)) {
      MI = nullptr;
      for (MachineRegisterInfo::use_instr_iterator
               UI = MRI->use_instr_begin(SrcReg), UE = MRI->use_instr_end();
           UI != UE; ++UI) {
        if (UI->getParent() != CmpInstr.getParent())
          continue;
        MachineInstr *PotentialAND = &*UI;
        if (!isSuitableForMask(PotentialAND, SrcReg, CmpMask, true) ||
            isPredicated(*PotentialAND))
          continue;
        MI = PotentialAND;
        break;
      }
      if (!MI) return false;
    }
  }

  // Get ready to iterate backward from CmpInstr.
  MachineBasicBlock::iterator I = CmpInstr, E = MI,
                              B = CmpInstr.getParent()->begin();

  // Early exit if CmpInstr is at the beginning of the BB.
  if (I == B) return false;

  // There are two possible candidates which can be changed to set CPSR:
  // One is MI, the other is a SUB or ADD instruction.
  // For CMPrr(r1,r2), we are looking for SUB(r1,r2), SUB(r2,r1), or
  // ADDr[ri](r1, r2, X).
  // For CMPri(r1, CmpValue), we are looking for SUBri(r1, CmpValue).
  MachineInstr *SubAdd = nullptr;
  if (SrcReg2 != 0)
    // MI is not a candidate for CMPrr.
    MI = nullptr;
  else if (MI->getParent() != CmpInstr.getParent() || CmpValue != 0) {
    // Conservatively refuse to convert an instruction which isn't in the same
    // BB as the comparison.
    // For CMPri w/ CmpValue != 0, a SubAdd may still be a candidate.
    // Thus we cannot return here.
    if (CmpInstr.getOpcode() == ARM::CMPri ||
        CmpInstr.getOpcode() == ARM::t2CMPri ||
        CmpInstr.getOpcode() == ARM::tCMPi8)
      MI = nullptr;
    else
      return false;
  }

  bool IsThumb1 = false;
  if (MI && !isOptimizeCompareCandidate(MI, IsThumb1))
    return false;

  // We also want to do this peephole for cases like this: if (a*b == 0),
  // and optimise away the CMP instruction from the generated code sequence:
  // MULS, MOVS, MOVS, CMP. Here the MOVS instructions load the boolean values
  // resulting from the select instruction, but these MOVS instructions for
  // Thumb1 (V6M) are flag setting and are thus preventing this optimisation.
  // However, if we only have MOVS instructions in between the CMP and the
  // other instruction (the MULS in this example), then the CPSR is dead so we
  // can safely reorder the sequence into: MOVS, MOVS, MULS, CMP. We do this
  // reordering and then continue the analysis hoping we can eliminate the
  // CMP. This peephole works on the vregs, so is still in SSA form. As a
  // consequence, the movs won't redefine/kill the MUL operands which would
  // make this reordering illegal.
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  if (MI && IsThumb1) {
    --I;
    if (I != E && !MI->readsRegister(ARM::CPSR, TRI)) {
      bool CanReorder = true;
      for (; I != E; --I) {
        if (I->getOpcode() != ARM::tMOVi8) {
          CanReorder = false;
          break;
        }
      }
      if (CanReorder) {
        MI = MI->removeFromParent();
        E = CmpInstr;
        CmpInstr.getParent()->insert(E, MI);
      }
    }
    I = CmpInstr;
    E = MI;
  }

  // Check that CPSR isn't set between the comparison instruction and the one
  // we want to change. At the same time, search for SubAdd.
  bool SubAddIsThumb1 = false;
  do {
    const MachineInstr &Instr = *--I;

    // Check whether CmpInstr can be made redundant by the current instruction.
    if (isRedundantFlagInstr(&CmpInstr, SrcReg, SrcReg2, CmpValue, &Instr,
                             SubAddIsThumb1)) {
      SubAdd = &*I;
      break;
    }

    // Allow E (which was initially MI) to be SubAdd but do not search before E.
    if (I == E)
      break;

    if (Instr.modifiesRegister(ARM::CPSR, TRI) ||
        Instr.readsRegister(ARM::CPSR, TRI))
      // This instruction modifies or uses CPSR after the one we want to
      // change. We can't do this transformation.
      return false;

    if (I == B) {
      // In some cases, we scan the use-list of an instruction for an AND;
      // that AND is in the same BB, but may not be scheduled before the
      // corresponding TST. In that case, bail out.
      //
      // FIXME: We could try to reschedule the AND.
      return false;
    }
  } while (true);

  // Return false if no candidates exist.
  if (!MI && !SubAdd)
    return false;

  // If we found a SubAdd, use it as it will be closer to the CMP.
  if (SubAdd) {
    MI = SubAdd;
    IsThumb1 = SubAddIsThumb1;
  }

  // We can't use a predicated instruction - it doesn't always write the flags.
  if (isPredicated(*MI))
    return false;

  // Scan forward for the use of CPSR.
  // When checking against MI: if it's a condition code that requires
  // checking of the V bit or C bit, then this is not safe to do.
  // It is safe to remove CmpInstr if CPSR is redefined or killed.
  // If we are done with the basic block, we need to check whether CPSR is
  // live-out.
  SmallVector<std::pair<MachineOperand*, ARMCC::CondCodes>, 4>
      OperandsToUpdate;
  bool isSafe = false;
  I = CmpInstr;
  E = CmpInstr.getParent()->end();
  while (!isSafe && ++I != E) {
    const MachineInstr &Instr = *I;
    for (unsigned IO = 0, EO = Instr.getNumOperands();
         !isSafe && IO != EO; ++IO) {
      const MachineOperand &MO = Instr.getOperand(IO);
      if (MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) {
        isSafe = true;
        break;
      }
      if (!MO.isReg() || MO.getReg() != ARM::CPSR)
        continue;
      if (MO.isDef()) {
        isSafe = true;
        break;
      }
      // Condition code is after the operand before CPSR except for VSELs.
      ARMCC::CondCodes CC;
      bool IsInstrVSel = true;
      switch (Instr.getOpcode()) {
      default:
        IsInstrVSel = false;
        CC = (ARMCC::CondCodes)Instr.getOperand(IO - 1).getImm();
        break;
      case ARM::VSELEQD:
      case ARM::VSELEQS:
      case ARM::VSELEQH:
        CC = ARMCC::EQ;
        break;
      case ARM::VSELGTD:
      case ARM::VSELGTS:
      case ARM::VSELGTH:
        CC = ARMCC::GT;
        break;
      case ARM::VSELGED:
      case ARM::VSELGES:
      case ARM::VSELGEH:
        CC = ARMCC::GE;
        break;
      case ARM::VSELVSD:
      case ARM::VSELVSS:
      case ARM::VSELVSH:
        CC = ARMCC::VS;
        break;
      }

      if (SubAdd) {
        // If we have SUB(r1, r2) and CMP(r2, r1), the condition code based
        // on CMP needs to be updated to be based on SUB.
        // If we have ADD(r1, r2, X) and CMP(r1, r2), the condition code also
        // needs to be modified.
        // Push the condition code operands to OperandsToUpdate.
        // If it is safe to remove CmpInstr, the condition code of these
        // operands will be modified.
        unsigned Opc = SubAdd->getOpcode();
        bool IsSub = Opc == ARM::SUBrr || Opc == ARM::t2SUBrr ||
                     Opc == ARM::SUBri || Opc == ARM::t2SUBri ||
                     Opc == ARM::tSUBrr || Opc == ARM::tSUBi3 ||
                     Opc == ARM::tSUBi8;
        unsigned OpI = Opc != ARM::tSUBrr ? 1 : 2;
        if (!IsSub ||
            (SrcReg2 != 0 && SubAdd->getOperand(OpI).getReg() == SrcReg2 &&
             SubAdd->getOperand(OpI + 1).getReg() == SrcReg)) {
          // VSel doesn't support condition code update.
          if (IsInstrVSel)
            return false;
          // Ensure we can swap the condition.
          ARMCC::CondCodes NewCC =
              (IsSub ? getSwappedCondition(CC) : getCmpToAddCondition(CC));
          if (NewCC == ARMCC::AL)
            return false;
          OperandsToUpdate.push_back(
              std::make_pair(&((*I).getOperand(IO - 1)), NewCC));
        }
      } else {
        // No SubAdd, so this is x = <op> y, z; cmp x, 0.
        switch (CC) {
        case ARMCC::EQ: // Z
        case ARMCC::NE: // Z
        case ARMCC::MI: // N
        case ARMCC::PL: // N
        case ARMCC::AL: // none
          // CPSR can be used multiple times, we should continue.
          break;
        case ARMCC::HS: // C
        case ARMCC::LO: // C
        case ARMCC::VS: // V
        case ARMCC::VC: // V
        case ARMCC::HI: // C Z
        case ARMCC::LS: // C Z
        case ARMCC::GE: // N V
        case ARMCC::LT: // N V
        case ARMCC::GT: // Z N V
        case ARMCC::LE: // Z N V
          // The instruction uses the V bit or C bit which is not safe.
          return false;
        }
      }
    }
  }

  // If CPSR is not killed nor re-defined, we should check whether it is
  // live-out. If it is live-out, do not optimize.
  if (!isSafe) {
    MachineBasicBlock *MBB = CmpInstr.getParent();
    for (MachineBasicBlock *Succ : MBB->successors())
      if (Succ->isLiveIn(ARM::CPSR))
        return false;
  }

  // Toggle the optional operand to CPSR (if it exists - in Thumb1 we always
  // set CPSR so this is represented as an explicit output).
  if (!IsThumb1) {
    MI->getOperand(5).setReg(ARM::CPSR);
    MI->getOperand(5).setIsDef(true);
  }
  assert(!isPredicated(*MI) && "Can't use flags from predicated instruction");
  CmpInstr.eraseFromParent();

  // Modify the condition code of operands in OperandsToUpdate.
  // Since we have SUB(r1, r2) and CMP(r2, r1), the condition code needs to
  // be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
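  // e.g. GT (based on the CMP) becomes LT (based on the SUB), GE becomes LE,
  // and HS becomes LO, via getSwappedCondition above.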
3273 for (unsigned i = 0, e = OperandsToUpdate.size(); i < e; i++) 3274 OperandsToUpdate[i].first->setImm(OperandsToUpdate[i].second); 3275 3276 MI->clearRegisterDeads(ARM::CPSR); 3277 3278 return true; 3279 } 3280 3281 bool ARMBaseInstrInfo::shouldSink(const MachineInstr &MI) const { 3282 // Do not sink MI if it might be used to optimize a redundant compare. 3283 // We heuristically only look at the instruction immediately following MI to 3284 // avoid potentially searching the entire basic block. 3285 if (isPredicated(MI)) 3286 return true; 3287 MachineBasicBlock::const_iterator Next = &MI; 3288 ++Next; 3289 Register SrcReg, SrcReg2; 3290 int64_t CmpMask, CmpValue; 3291 bool IsThumb1; 3292 if (Next != MI.getParent()->end() && 3293 analyzeCompare(*Next, SrcReg, SrcReg2, CmpMask, CmpValue) && 3294 isRedundantFlagInstr(&*Next, SrcReg, SrcReg2, CmpValue, &MI, IsThumb1)) 3295 return false; 3296 return true; 3297 } 3298 3299 bool ARMBaseInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, 3300 Register Reg, 3301 MachineRegisterInfo *MRI) const { 3302 // Fold large immediates into add, sub, or, xor. 3303 unsigned DefOpc = DefMI.getOpcode(); 3304 if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm) 3305 return false; 3306 if (!DefMI.getOperand(1).isImm()) 3307 // Could be t2MOVi32imm @xx 3308 return false; 3309 3310 if (!MRI->hasOneNonDBGUse(Reg)) 3311 return false; 3312 3313 const MCInstrDesc &DefMCID = DefMI.getDesc(); 3314 if (DefMCID.hasOptionalDef()) { 3315 unsigned NumOps = DefMCID.getNumOperands(); 3316 const MachineOperand &MO = DefMI.getOperand(NumOps - 1); 3317 if (MO.getReg() == ARM::CPSR && !MO.isDead()) 3318 // If DefMI defines CPSR and it is not dead, it's obviously not safe 3319 // to delete DefMI. 3320 return false; 3321 } 3322 3323 const MCInstrDesc &UseMCID = UseMI.getDesc(); 3324 if (UseMCID.hasOptionalDef()) { 3325 unsigned NumOps = UseMCID.getNumOperands(); 3326 if (UseMI.getOperand(NumOps - 1).getReg() == ARM::CPSR) 3327 // If the instruction sets the flag, do not attempt this optimization 3328 // since it may change the semantics of the code. 3329 return false; 3330 } 3331 3332 unsigned UseOpc = UseMI.getOpcode(); 3333 unsigned NewUseOpc = 0; 3334 uint32_t ImmVal = (uint32_t)DefMI.getOperand(1).getImm(); 3335 uint32_t SOImmValV1 = 0, SOImmValV2 = 0; 3336 bool Commute = false; 3337 switch (UseOpc) { 3338 default: return false; 3339 case ARM::SUBrr: 3340 case ARM::ADDrr: 3341 case ARM::ORRrr: 3342 case ARM::EORrr: 3343 case ARM::t2SUBrr: 3344 case ARM::t2ADDrr: 3345 case ARM::t2ORRrr: 3346 case ARM::t2EORrr: { 3347 Commute = UseMI.getOperand(2).getReg() != Reg; 3348 switch (UseOpc) { 3349 default: break; 3350 case ARM::ADDrr: 3351 case ARM::SUBrr: 3352 if (UseOpc == ARM::SUBrr && Commute) 3353 return false; 3354 3355 // ADD/SUB are special because they're essentially the same operation, so 3356 // we can handle a larger range of immediates. 3357 if (ARM_AM::isSOImmTwoPartVal(ImmVal)) 3358 NewUseOpc = UseOpc == ARM::ADDrr ? ARM::ADDri : ARM::SUBri; 3359 else if (ARM_AM::isSOImmTwoPartVal(-ImmVal)) { 3360 ImmVal = -ImmVal; 3361 NewUseOpc = UseOpc == ARM::ADDrr ? 
ARM::SUBri : ARM::ADDri;
3362 } else
3363 return false;
3364 SOImmValV1 = (uint32_t)ARM_AM::getSOImmTwoPartFirst(ImmVal);
3365 SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal);
3366 break;
3367 case ARM::ORRrr:
3368 case ARM::EORrr:
3369 if (!ARM_AM::isSOImmTwoPartVal(ImmVal))
3370 return false;
3371 SOImmValV1 = (uint32_t)ARM_AM::getSOImmTwoPartFirst(ImmVal);
3372 SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal);
3373 switch (UseOpc) {
3374 default: break;
3375 case ARM::ORRrr: NewUseOpc = ARM::ORRri; break;
3376 case ARM::EORrr: NewUseOpc = ARM::EORri; break;
3377 }
3378 break;
3379 case ARM::t2ADDrr:
3380 case ARM::t2SUBrr: {
3381 if (UseOpc == ARM::t2SUBrr && Commute)
3382 return false;
3383
3384 // ADD/SUB are special because they're essentially the same operation, so
3385 // we can handle a larger range of immediates.
3386 const bool ToSP = DefMI.getOperand(0).getReg() == ARM::SP;
3387 const unsigned t2ADD = ToSP ? ARM::t2ADDspImm : ARM::t2ADDri;
3388 const unsigned t2SUB = ToSP ? ARM::t2SUBspImm : ARM::t2SUBri;
3389 if (ARM_AM::isT2SOImmTwoPartVal(ImmVal))
3390 NewUseOpc = UseOpc == ARM::t2ADDrr ? t2ADD : t2SUB;
3391 else if (ARM_AM::isT2SOImmTwoPartVal(-ImmVal)) {
3392 ImmVal = -ImmVal;
3393 NewUseOpc = UseOpc == ARM::t2ADDrr ? t2SUB : t2ADD;
3394 } else
3395 return false;
3396 SOImmValV1 = (uint32_t)ARM_AM::getT2SOImmTwoPartFirst(ImmVal);
3397 SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal);
3398 break;
3399 }
3400 case ARM::t2ORRrr:
3401 case ARM::t2EORrr:
3402 if (!ARM_AM::isT2SOImmTwoPartVal(ImmVal))
3403 return false;
3404 SOImmValV1 = (uint32_t)ARM_AM::getT2SOImmTwoPartFirst(ImmVal);
3405 SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal);
3406 switch (UseOpc) {
3407 default: break;
3408 case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri; break;
3409 case ARM::t2EORrr: NewUseOpc = ARM::t2EORri; break;
3410 }
3411 break;
3412 }
3413 }
3414 }
3415
3416 unsigned OpIdx = Commute ? 2 : 1;
3417 Register Reg1 = UseMI.getOperand(OpIdx).getReg();
3418 bool isKill = UseMI.getOperand(OpIdx).isKill();
3419 const TargetRegisterClass *TRC = MRI->getRegClass(Reg);
3420 Register NewReg = MRI->createVirtualRegister(TRC);
3421 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), get(NewUseOpc),
3422 NewReg)
3423 .addReg(Reg1, getKillRegState(isKill))
3424 .addImm(SOImmValV1)
3425 .add(predOps(ARMCC::AL))
3426 .add(condCodeOp());
3427 UseMI.setDesc(get(NewUseOpc));
3428 UseMI.getOperand(1).setReg(NewReg);
3429 UseMI.getOperand(1).setIsKill();
3430 UseMI.getOperand(2).ChangeToImmediate(SOImmValV2);
3431 DefMI.eraseFromParent();
3432 // FIXME: t2ADDrr should be split, as different rules apply when writing to SP.
3433 // Just as t2ADDri, which was split into [t2ADDri, t2ADDspImm].
3434 // Then the below code will not be needed, as the input/output register
3435 // classes will be rgpr or gprSP.
3436 // For now, we fix the UseMI operand explicitly here: 3437 switch(NewUseOpc){ 3438 case ARM::t2ADDspImm: 3439 case ARM::t2SUBspImm: 3440 case ARM::t2ADDri: 3441 case ARM::t2SUBri: 3442 MRI->constrainRegClass(UseMI.getOperand(0).getReg(), TRC); 3443 } 3444 return true; 3445 } 3446 3447 static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData, 3448 const MachineInstr &MI) { 3449 switch (MI.getOpcode()) { 3450 default: { 3451 const MCInstrDesc &Desc = MI.getDesc(); 3452 int UOps = ItinData->getNumMicroOps(Desc.getSchedClass()); 3453 assert(UOps >= 0 && "bad # UOps"); 3454 return UOps; 3455 } 3456 3457 case ARM::LDRrs: 3458 case ARM::LDRBrs: 3459 case ARM::STRrs: 3460 case ARM::STRBrs: { 3461 unsigned ShOpVal = MI.getOperand(3).getImm(); 3462 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 3463 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 3464 if (!isSub && 3465 (ShImm == 0 || 3466 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 3467 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 3468 return 1; 3469 return 2; 3470 } 3471 3472 case ARM::LDRH: 3473 case ARM::STRH: { 3474 if (!MI.getOperand(2).getReg()) 3475 return 1; 3476 3477 unsigned ShOpVal = MI.getOperand(3).getImm(); 3478 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 3479 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 3480 if (!isSub && 3481 (ShImm == 0 || 3482 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 3483 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 3484 return 1; 3485 return 2; 3486 } 3487 3488 case ARM::LDRSB: 3489 case ARM::LDRSH: 3490 return (ARM_AM::getAM3Op(MI.getOperand(3).getImm()) == ARM_AM::sub) ? 3 : 2; 3491 3492 case ARM::LDRSB_POST: 3493 case ARM::LDRSH_POST: { 3494 Register Rt = MI.getOperand(0).getReg(); 3495 Register Rm = MI.getOperand(3).getReg(); 3496 return (Rt == Rm) ? 4 : 3; 3497 } 3498 3499 case ARM::LDR_PRE_REG: 3500 case ARM::LDRB_PRE_REG: { 3501 Register Rt = MI.getOperand(0).getReg(); 3502 Register Rm = MI.getOperand(3).getReg(); 3503 if (Rt == Rm) 3504 return 3; 3505 unsigned ShOpVal = MI.getOperand(4).getImm(); 3506 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 3507 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 3508 if (!isSub && 3509 (ShImm == 0 || 3510 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 3511 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 3512 return 2; 3513 return 3; 3514 } 3515 3516 case ARM::STR_PRE_REG: 3517 case ARM::STRB_PRE_REG: { 3518 unsigned ShOpVal = MI.getOperand(4).getImm(); 3519 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 3520 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 3521 if (!isSub && 3522 (ShImm == 0 || 3523 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 3524 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 3525 return 2; 3526 return 3; 3527 } 3528 3529 case ARM::LDRH_PRE: 3530 case ARM::STRH_PRE: { 3531 Register Rt = MI.getOperand(0).getReg(); 3532 Register Rm = MI.getOperand(3).getReg(); 3533 if (!Rm) 3534 return 2; 3535 if (Rt == Rm) 3536 return 3; 3537 return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 3 : 2; 3538 } 3539 3540 case ARM::LDR_POST_REG: 3541 case ARM::LDRB_POST_REG: 3542 case ARM::LDRH_POST: { 3543 Register Rt = MI.getOperand(0).getReg(); 3544 Register Rm = MI.getOperand(3).getReg(); 3545 return (Rt == Rm) ? 
3 : 2; 3546 } 3547 3548 case ARM::LDR_PRE_IMM: 3549 case ARM::LDRB_PRE_IMM: 3550 case ARM::LDR_POST_IMM: 3551 case ARM::LDRB_POST_IMM: 3552 case ARM::STRB_POST_IMM: 3553 case ARM::STRB_POST_REG: 3554 case ARM::STRB_PRE_IMM: 3555 case ARM::STRH_POST: 3556 case ARM::STR_POST_IMM: 3557 case ARM::STR_POST_REG: 3558 case ARM::STR_PRE_IMM: 3559 return 2; 3560 3561 case ARM::LDRSB_PRE: 3562 case ARM::LDRSH_PRE: { 3563 Register Rm = MI.getOperand(3).getReg(); 3564 if (Rm == 0) 3565 return 3; 3566 Register Rt = MI.getOperand(0).getReg(); 3567 if (Rt == Rm) 3568 return 4; 3569 unsigned ShOpVal = MI.getOperand(4).getImm(); 3570 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 3571 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 3572 if (!isSub && 3573 (ShImm == 0 || 3574 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 3575 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 3576 return 3; 3577 return 4; 3578 } 3579 3580 case ARM::LDRD: { 3581 Register Rt = MI.getOperand(0).getReg(); 3582 Register Rn = MI.getOperand(2).getReg(); 3583 Register Rm = MI.getOperand(3).getReg(); 3584 if (Rm) 3585 return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 4 3586 : 3; 3587 return (Rt == Rn) ? 3 : 2; 3588 } 3589 3590 case ARM::STRD: { 3591 Register Rm = MI.getOperand(3).getReg(); 3592 if (Rm) 3593 return (ARM_AM::getAM3Op(MI.getOperand(4).getImm()) == ARM_AM::sub) ? 4 3594 : 3; 3595 return 2; 3596 } 3597 3598 case ARM::LDRD_POST: 3599 case ARM::t2LDRD_POST: 3600 return 3; 3601 3602 case ARM::STRD_POST: 3603 case ARM::t2STRD_POST: 3604 return 4; 3605 3606 case ARM::LDRD_PRE: { 3607 Register Rt = MI.getOperand(0).getReg(); 3608 Register Rn = MI.getOperand(3).getReg(); 3609 Register Rm = MI.getOperand(4).getReg(); 3610 if (Rm) 3611 return (ARM_AM::getAM3Op(MI.getOperand(5).getImm()) == ARM_AM::sub) ? 5 3612 : 4; 3613 return (Rt == Rn) ? 4 : 3; 3614 } 3615 3616 case ARM::t2LDRD_PRE: { 3617 Register Rt = MI.getOperand(0).getReg(); 3618 Register Rn = MI.getOperand(3).getReg(); 3619 return (Rt == Rn) ? 4 : 3; 3620 } 3621 3622 case ARM::STRD_PRE: { 3623 Register Rm = MI.getOperand(4).getReg(); 3624 if (Rm) 3625 return (ARM_AM::getAM3Op(MI.getOperand(5).getImm()) == ARM_AM::sub) ? 5 3626 : 4; 3627 return 3; 3628 } 3629 3630 case ARM::t2STRD_PRE: 3631 return 3; 3632 3633 case ARM::t2LDR_POST: 3634 case ARM::t2LDRB_POST: 3635 case ARM::t2LDRB_PRE: 3636 case ARM::t2LDRSBi12: 3637 case ARM::t2LDRSBi8: 3638 case ARM::t2LDRSBpci: 3639 case ARM::t2LDRSBs: 3640 case ARM::t2LDRH_POST: 3641 case ARM::t2LDRH_PRE: 3642 case ARM::t2LDRSBT: 3643 case ARM::t2LDRSB_POST: 3644 case ARM::t2LDRSB_PRE: 3645 case ARM::t2LDRSH_POST: 3646 case ARM::t2LDRSH_PRE: 3647 case ARM::t2LDRSHi12: 3648 case ARM::t2LDRSHi8: 3649 case ARM::t2LDRSHpci: 3650 case ARM::t2LDRSHs: 3651 return 2; 3652 3653 case ARM::t2LDRDi8: { 3654 Register Rt = MI.getOperand(0).getReg(); 3655 Register Rn = MI.getOperand(2).getReg(); 3656 return (Rt == Rn) ? 3 : 2; 3657 } 3658 3659 case ARM::t2STRB_POST: 3660 case ARM::t2STRB_PRE: 3661 case ARM::t2STRBs: 3662 case ARM::t2STRDi8: 3663 case ARM::t2STRH_POST: 3664 case ARM::t2STRH_PRE: 3665 case ARM::t2STRHs: 3666 case ARM::t2STR_POST: 3667 case ARM::t2STR_PRE: 3668 case ARM::t2STRs: 3669 return 2; 3670 } 3671 } 3672 3673 // Return the number of 32-bit words loaded by LDM or stored by STM. If this 3674 // can't be easily determined return 0 (missing MachineMemOperand). 
3675 // 3676 // FIXME: The current MachineInstr design does not support relying on machine 3677 // mem operands to determine the width of a memory access. Instead, we expect 3678 // the target to provide this information based on the instruction opcode and 3679 // operands. However, using MachineMemOperand is the best solution now for 3680 // two reasons: 3681 // 3682 // 1) getNumMicroOps tries to infer LDM memory width from the total number of MI 3683 // operands. This is much more dangerous than using the MachineMemOperand 3684 // sizes because CodeGen passes can insert/remove optional machine operands. In 3685 // fact, it's totally incorrect for preRA passes and appears to be wrong for 3686 // postRA passes as well. 3687 // 3688 // 2) getNumLDMAddresses is only used by the scheduling machine model and any 3689 // machine model that calls this should handle the unknown (zero size) case. 3690 // 3691 // Long term, we should require a target hook that verifies MachineMemOperand 3692 // sizes during MC lowering. That target hook should be local to MC lowering 3693 // because we can't ensure that it is aware of other MI forms. Doing this will 3694 // ensure that MachineMemOperands are correctly propagated through all passes. 3695 unsigned ARMBaseInstrInfo::getNumLDMAddresses(const MachineInstr &MI) const { 3696 unsigned Size = 0; 3697 for (MachineInstr::mmo_iterator I = MI.memoperands_begin(), 3698 E = MI.memoperands_end(); 3699 I != E; ++I) { 3700 Size += (*I)->getSize(); 3701 } 3702 // FIXME: The scheduler currently can't handle values larger than 16. But 3703 // the values can actually go up to 32 for floating-point load/store 3704 // multiple (VLDMIA etc.). Also, the way this code is reasoning about memory 3705 // operations isn't right; we could end up with "extra" memory operands for 3706 // various reasons, like tail merge merging two memory operations. 3707 return std::min(Size / 4, 16U); 3708 } 3709 3710 static unsigned getNumMicroOpsSingleIssuePlusExtras(unsigned Opc, 3711 unsigned NumRegs) { 3712 unsigned UOps = 1 + NumRegs; // 1 for address computation. 3713 switch (Opc) { 3714 default: 3715 break; 3716 case ARM::VLDMDIA_UPD: 3717 case ARM::VLDMDDB_UPD: 3718 case ARM::VLDMSIA_UPD: 3719 case ARM::VLDMSDB_UPD: 3720 case ARM::VSTMDIA_UPD: 3721 case ARM::VSTMDDB_UPD: 3722 case ARM::VSTMSIA_UPD: 3723 case ARM::VSTMSDB_UPD: 3724 case ARM::LDMIA_UPD: 3725 case ARM::LDMDA_UPD: 3726 case ARM::LDMDB_UPD: 3727 case ARM::LDMIB_UPD: 3728 case ARM::STMIA_UPD: 3729 case ARM::STMDA_UPD: 3730 case ARM::STMDB_UPD: 3731 case ARM::STMIB_UPD: 3732 case ARM::tLDMIA_UPD: 3733 case ARM::tSTMIA_UPD: 3734 case ARM::t2LDMIA_UPD: 3735 case ARM::t2LDMDB_UPD: 3736 case ARM::t2STMIA_UPD: 3737 case ARM::t2STMDB_UPD: 3738 ++UOps; // One for base register writeback. 3739 break; 3740 case ARM::LDMIA_RET: 3741 case ARM::tPOP_RET: 3742 case ARM::t2LDMIA_RET: 3743 UOps += 2; // One for base reg wb, one for write to pc. 
3744 break;
3745 }
3746 return UOps;
3747 }
3748
3749 unsigned ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
3750 const MachineInstr &MI) const {
3751 if (!ItinData || ItinData->isEmpty())
3752 return 1;
3753
3754 const MCInstrDesc &Desc = MI.getDesc();
3755 unsigned Class = Desc.getSchedClass();
3756 int ItinUOps = ItinData->getNumMicroOps(Class);
3757 if (ItinUOps >= 0) {
3758 if (Subtarget.isSwift() && (Desc.mayLoad() || Desc.mayStore()))
3759 return getNumMicroOpsSwiftLdSt(ItinData, MI);
3760
3761 return ItinUOps;
3762 }
3763
3764 unsigned Opc = MI.getOpcode();
3765 switch (Opc) {
3766 default:
3767 llvm_unreachable("Unexpected multi-uops instruction!");
3768 case ARM::VLDMQIA:
3769 case ARM::VSTMQIA:
3770 return 2;
3771
3772 // The number of uOps for load / store multiple is determined by the number
3773 // of registers.
3774 //
3775 // On Cortex-A8, each pair of register loads / stores can be scheduled on the
3776 // same cycle. The scheduling for the first load / store must be done
3777 // separately by assuming the address is not 64-bit aligned.
3778 //
3779 // On Cortex-A9, the formula is simply (#reg / 2) + (#reg % 2). If the address
3780 // is not 64-bit aligned, then AGU would take an extra cycle. For VFP / NEON
3781 // load / store multiple, the formula is (#reg / 2) + (#reg % 2) + 1.
3782 case ARM::VLDMDIA:
3783 case ARM::VLDMDIA_UPD:
3784 case ARM::VLDMDDB_UPD:
3785 case ARM::VLDMSIA:
3786 case ARM::VLDMSIA_UPD:
3787 case ARM::VLDMSDB_UPD:
3788 case ARM::VSTMDIA:
3789 case ARM::VSTMDIA_UPD:
3790 case ARM::VSTMDDB_UPD:
3791 case ARM::VSTMSIA:
3792 case ARM::VSTMSIA_UPD:
3793 case ARM::VSTMSDB_UPD: {
3794 unsigned NumRegs = MI.getNumOperands() - Desc.getNumOperands();
3795 return (NumRegs / 2) + (NumRegs % 2) + 1;
3796 }
3797
3798 case ARM::LDMIA_RET:
3799 case ARM::LDMIA:
3800 case ARM::LDMDA:
3801 case ARM::LDMDB:
3802 case ARM::LDMIB:
3803 case ARM::LDMIA_UPD:
3804 case ARM::LDMDA_UPD:
3805 case ARM::LDMDB_UPD:
3806 case ARM::LDMIB_UPD:
3807 case ARM::STMIA:
3808 case ARM::STMDA:
3809 case ARM::STMDB:
3810 case ARM::STMIB:
3811 case ARM::STMIA_UPD:
3812 case ARM::STMDA_UPD:
3813 case ARM::STMDB_UPD:
3814 case ARM::STMIB_UPD:
3815 case ARM::tLDMIA:
3816 case ARM::tLDMIA_UPD:
3817 case ARM::tSTMIA_UPD:
3818 case ARM::tPOP_RET:
3819 case ARM::tPOP:
3820 case ARM::tPUSH:
3821 case ARM::t2LDMIA_RET:
3822 case ARM::t2LDMIA:
3823 case ARM::t2LDMDB:
3824 case ARM::t2LDMIA_UPD:
3825 case ARM::t2LDMDB_UPD:
3826 case ARM::t2STMIA:
3827 case ARM::t2STMDB:
3828 case ARM::t2STMIA_UPD:
3829 case ARM::t2STMDB_UPD: {
3830 unsigned NumRegs = MI.getNumOperands() - Desc.getNumOperands() + 1;
3831 switch (Subtarget.getLdStMultipleTiming()) {
3832 case ARMSubtarget::SingleIssuePlusExtras:
3833 return getNumMicroOpsSingleIssuePlusExtras(Opc, NumRegs);
3834 case ARMSubtarget::SingleIssue:
3835 // Assume the worst.
3836 return NumRegs;
3837 case ARMSubtarget::DoubleIssue: {
3838 if (NumRegs < 4)
3839 return 2;
3840 // 4 registers would be issued: 2, 2.
3841 // 5 registers would be issued: 2, 2, 1.
3842 unsigned UOps = (NumRegs / 2);
3843 if (NumRegs % 2)
3844 ++UOps;
3845 return UOps;
3846 }
3847 case ARMSubtarget::DoubleIssueCheckUnalignedAccess: {
3848 unsigned UOps = (NumRegs / 2);
3849 // If there is an odd number of registers or if it's not 64-bit aligned,
3850 // then it takes an extra AGU (Address Generation Unit) cycle.
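// For example (illustrative numbers): a 5-register LDMIA costs 5/2 = 2 uops
// plus the extra AGU uop for the odd register, i.e. 3; a 4-register LDMIA
// whose single memory operand is only 4-byte aligned likewise pays the
// extra uop, 2 + 1 = 3.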
3851 if ((NumRegs % 2) || !MI.hasOneMemOperand() ||
3852 (*MI.memoperands_begin())->getAlign() < Align(8))
3853 ++UOps;
3854 return UOps;
3855 }
3856 }
3857 }
3858 }
3859 llvm_unreachable("Didn't find the number of microops");
3860 }
3861
3862 int
3863 ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData,
3864 const MCInstrDesc &DefMCID,
3865 unsigned DefClass,
3866 unsigned DefIdx, unsigned DefAlign) const {
3867 int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
3868 if (RegNo <= 0)
3869 // Def is the address writeback.
3870 return ItinData->getOperandCycle(DefClass, DefIdx);
3871
3872 int DefCycle;
3873 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3874 // (regno / 2) + (regno % 2) + 1
3875 DefCycle = RegNo / 2 + 1;
3876 if (RegNo % 2)
3877 ++DefCycle;
3878 } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3879 DefCycle = RegNo;
3880 bool isSLoad = false;
3881
3882 switch (DefMCID.getOpcode()) {
3883 default: break;
3884 case ARM::VLDMSIA:
3885 case ARM::VLDMSIA_UPD:
3886 case ARM::VLDMSDB_UPD:
3887 isSLoad = true;
3888 break;
3889 }
3890
3891 // If there is an odd number of 'S' registers or if it's not 64-bit aligned,
3892 // then it takes an extra cycle.
3893 if ((isSLoad && (RegNo % 2)) || DefAlign < 8)
3894 ++DefCycle;
3895 } else {
3896 // Assume the worst.
3897 DefCycle = RegNo + 2;
3898 }
3899
3900 return DefCycle;
3901 }
3902
3903 int
3904 ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData,
3905 const MCInstrDesc &DefMCID,
3906 unsigned DefClass,
3907 unsigned DefIdx, unsigned DefAlign) const {
3908 int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
3909 if (RegNo <= 0)
3910 // Def is the address writeback.
3911 return ItinData->getOperandCycle(DefClass, DefIdx);
3912
3913 int DefCycle;
3914 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3915 // 4 registers would be issued: 1, 2, 1.
3916 // 5 registers would be issued: 1, 2, 2.
3917 DefCycle = RegNo / 2;
3918 if (DefCycle < 1)
3919 DefCycle = 1;
3920 // Result latency is issue cycle + 2: E2.
3921 DefCycle += 2;
3922 } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3923 DefCycle = (RegNo / 2);
3924 // If there is an odd number of registers or if it's not 64-bit aligned,
3925 // then it takes an extra AGU (Address Generation Unit) cycle.
3926 if ((RegNo % 2) || DefAlign < 8)
3927 ++DefCycle;
3928 // Result latency is AGU cycles + 2.
3929 DefCycle += 2;
3930 } else {
3931 // Assume the worst.
3932 DefCycle = RegNo + 2;
3933 }
3934
3935 return DefCycle;
3936 }
3937
3938 int
3939 ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData,
3940 const MCInstrDesc &UseMCID,
3941 unsigned UseClass,
3942 unsigned UseIdx, unsigned UseAlign) const {
3943 int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
3944 if (RegNo <= 0)
3945 return ItinData->getOperandCycle(UseClass, UseIdx);
3946
3947 int UseCycle;
3948 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3949 // (regno / 2) + (regno % 2) + 1
3950 UseCycle = RegNo / 2 + 1;
3951 if (RegNo % 2)
3952 ++UseCycle;
3953 } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3954 UseCycle = RegNo;
3955 bool isSStore = false;
3956
3957 switch (UseMCID.getOpcode()) {
3958 default: break;
3959 case ARM::VSTMSIA:
3960 case ARM::VSTMSIA_UPD:
3961 case ARM::VSTMSDB_UPD:
3962 isSStore = true;
3963 break;
3964 }
3965
3966 // If there is an odd number of 'S' registers or if it's not 64-bit aligned,
3967 // then it takes an extra cycle.
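// E.g. (illustrative): a VSTMSIA whose third 'S' register is queried
// (RegNo == 3, odd) yields UseCycle = 3 + 1 = 4 here, while a 64-bit
// aligned D-register store multiple never takes this branch because
// isSStore stays false and UseAlign >= 8.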
3968 if ((isSStore && (RegNo % 2)) || UseAlign < 8)
3969 ++UseCycle;
3970 } else {
3971 // Assume the worst.
3972 UseCycle = RegNo + 2;
3973 }
3974
3975 return UseCycle;
3976 }
3977
3978 int
3979 ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData,
3980 const MCInstrDesc &UseMCID,
3981 unsigned UseClass,
3982 unsigned UseIdx, unsigned UseAlign) const {
3983 int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
3984 if (RegNo <= 0)
3985 return ItinData->getOperandCycle(UseClass, UseIdx);
3986
3987 int UseCycle;
3988 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3989 UseCycle = RegNo / 2;
3990 if (UseCycle < 2)
3991 UseCycle = 2;
3992 // Read in E3.
3993 UseCycle += 2;
3994 } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3995 UseCycle = (RegNo / 2);
3996 // If there is an odd number of registers or if it's not 64-bit aligned,
3997 // then it takes an extra AGU (Address Generation Unit) cycle.
3998 if ((RegNo % 2) || UseAlign < 8)
3999 ++UseCycle;
4000 } else {
4001 // Assume the worst.
4002 UseCycle = 1;
4003 }
4004 return UseCycle;
4005 }
4006
4007 int
4008 ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
4009 const MCInstrDesc &DefMCID,
4010 unsigned DefIdx, unsigned DefAlign,
4011 const MCInstrDesc &UseMCID,
4012 unsigned UseIdx, unsigned UseAlign) const {
4013 unsigned DefClass = DefMCID.getSchedClass();
4014 unsigned UseClass = UseMCID.getSchedClass();
4015
4016 if (DefIdx < DefMCID.getNumDefs() && UseIdx < UseMCID.getNumOperands())
4017 return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
4018
4019 // This may be a def / use of a variable_ops instruction; the operand
4020 // latency might be determinable dynamically. Let the target try to
4021 // figure it out.
4022 int DefCycle = -1;
4023 bool LdmBypass = false;
4024 switch (DefMCID.getOpcode()) {
4025 default:
4026 DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
4027 break;
4028
4029 case ARM::VLDMDIA:
4030 case ARM::VLDMDIA_UPD:
4031 case ARM::VLDMDDB_UPD:
4032 case ARM::VLDMSIA:
4033 case ARM::VLDMSIA_UPD:
4034 case ARM::VLDMSDB_UPD:
4035 DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
4036 break;
4037
4038 case ARM::LDMIA_RET:
4039 case ARM::LDMIA:
4040 case ARM::LDMDA:
4041 case ARM::LDMDB:
4042 case ARM::LDMIB:
4043 case ARM::LDMIA_UPD:
4044 case ARM::LDMDA_UPD:
4045 case ARM::LDMDB_UPD:
4046 case ARM::LDMIB_UPD:
4047 case ARM::tLDMIA:
4048 case ARM::tLDMIA_UPD:
4049 case ARM::tPUSH:
4050 case ARM::t2LDMIA_RET:
4051 case ARM::t2LDMIA:
4052 case ARM::t2LDMDB:
4053 case ARM::t2LDMIA_UPD:
4054 case ARM::t2LDMDB_UPD:
4055 LdmBypass = true;
4056 DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
4057 break;
4058 }
4059
4060 if (DefCycle == -1)
4061 // We can't seem to determine the result latency of the def, assume it's 2.
4062 DefCycle = 2; 4063 4064 int UseCycle = -1; 4065 switch (UseMCID.getOpcode()) { 4066 default: 4067 UseCycle = ItinData->getOperandCycle(UseClass, UseIdx); 4068 break; 4069 4070 case ARM::VSTMDIA: 4071 case ARM::VSTMDIA_UPD: 4072 case ARM::VSTMDDB_UPD: 4073 case ARM::VSTMSIA: 4074 case ARM::VSTMSIA_UPD: 4075 case ARM::VSTMSDB_UPD: 4076 UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign); 4077 break; 4078 4079 case ARM::STMIA: 4080 case ARM::STMDA: 4081 case ARM::STMDB: 4082 case ARM::STMIB: 4083 case ARM::STMIA_UPD: 4084 case ARM::STMDA_UPD: 4085 case ARM::STMDB_UPD: 4086 case ARM::STMIB_UPD: 4087 case ARM::tSTMIA_UPD: 4088 case ARM::tPOP_RET: 4089 case ARM::tPOP: 4090 case ARM::t2STMIA: 4091 case ARM::t2STMDB: 4092 case ARM::t2STMIA_UPD: 4093 case ARM::t2STMDB_UPD: 4094 UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign); 4095 break; 4096 } 4097 4098 if (UseCycle == -1) 4099 // Assume it's read in the first stage. 4100 UseCycle = 1; 4101 4102 UseCycle = DefCycle - UseCycle + 1; 4103 if (UseCycle > 0) { 4104 if (LdmBypass) { 4105 // It's a variable_ops instruction so we can't use DefIdx here. Just use 4106 // first def operand. 4107 if (ItinData->hasPipelineForwarding(DefClass, DefMCID.getNumOperands()-1, 4108 UseClass, UseIdx)) 4109 --UseCycle; 4110 } else if (ItinData->hasPipelineForwarding(DefClass, DefIdx, 4111 UseClass, UseIdx)) { 4112 --UseCycle; 4113 } 4114 } 4115 4116 return UseCycle; 4117 } 4118 4119 static const MachineInstr *getBundledDefMI(const TargetRegisterInfo *TRI, 4120 const MachineInstr *MI, unsigned Reg, 4121 unsigned &DefIdx, unsigned &Dist) { 4122 Dist = 0; 4123 4124 MachineBasicBlock::const_iterator I = MI; ++I; 4125 MachineBasicBlock::const_instr_iterator II = std::prev(I.getInstrIterator()); 4126 assert(II->isInsideBundle() && "Empty bundle?"); 4127 4128 int Idx = -1; 4129 while (II->isInsideBundle()) { 4130 Idx = II->findRegisterDefOperandIdx(Reg, false, true, TRI); 4131 if (Idx != -1) 4132 break; 4133 --II; 4134 ++Dist; 4135 } 4136 4137 assert(Idx != -1 && "Cannot find bundled definition!"); 4138 DefIdx = Idx; 4139 return &*II; 4140 } 4141 4142 static const MachineInstr *getBundledUseMI(const TargetRegisterInfo *TRI, 4143 const MachineInstr &MI, unsigned Reg, 4144 unsigned &UseIdx, unsigned &Dist) { 4145 Dist = 0; 4146 4147 MachineBasicBlock::const_instr_iterator II = ++MI.getIterator(); 4148 assert(II->isInsideBundle() && "Empty bundle?"); 4149 MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end(); 4150 4151 // FIXME: This doesn't properly handle multiple uses. 4152 int Idx = -1; 4153 while (II != E && II->isInsideBundle()) { 4154 Idx = II->findRegisterUseOperandIdx(Reg, false, TRI); 4155 if (Idx != -1) 4156 break; 4157 if (II->getOpcode() != ARM::t2IT) 4158 ++Dist; 4159 ++II; 4160 } 4161 4162 if (Idx == -1) { 4163 Dist = 0; 4164 return nullptr; 4165 } 4166 4167 UseIdx = Idx; 4168 return &*II; 4169 } 4170 4171 /// Return the number of cycles to add to (or subtract from) the static 4172 /// itinerary based on the def opcode and alignment. The caller will ensure that 4173 /// adjusted latency is at least one cycle. 4174 static int adjustDefLatency(const ARMSubtarget &Subtarget, 4175 const MachineInstr &DefMI, 4176 const MCInstrDesc &DefMCID, unsigned DefAlign) { 4177 int Adjust = 0; 4178 if (Subtarget.isCortexA8() || Subtarget.isLikeA9() || Subtarget.isCortexA7()) { 4179 // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2] 4180 // variants are one cycle cheaper. 
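// Illustrative addressing modes (assembly sketch):
//   ldr r0, [r1, r2]         ; no shift      -> one cycle cheaper
//   ldr r0, [r1, r2, lsl #2] ; lsl #2        -> one cycle cheaper
//   ldr r0, [r1, r2, lsl #1] ; other shifts  -> full itinerary latency
// which is what the ShImm tests below encode.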
4181 switch (DefMCID.getOpcode()) { 4182 default: break; 4183 case ARM::LDRrs: 4184 case ARM::LDRBrs: { 4185 unsigned ShOpVal = DefMI.getOperand(3).getImm(); 4186 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 4187 if (ShImm == 0 || 4188 (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)) 4189 --Adjust; 4190 break; 4191 } 4192 case ARM::t2LDRs: 4193 case ARM::t2LDRBs: 4194 case ARM::t2LDRHs: 4195 case ARM::t2LDRSHs: { 4196 // Thumb2 mode: lsl only. 4197 unsigned ShAmt = DefMI.getOperand(3).getImm(); 4198 if (ShAmt == 0 || ShAmt == 2) 4199 --Adjust; 4200 break; 4201 } 4202 } 4203 } else if (Subtarget.isSwift()) { 4204 // FIXME: Properly handle all of the latency adjustments for address 4205 // writeback. 4206 switch (DefMCID.getOpcode()) { 4207 default: break; 4208 case ARM::LDRrs: 4209 case ARM::LDRBrs: { 4210 unsigned ShOpVal = DefMI.getOperand(3).getImm(); 4211 bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; 4212 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 4213 if (!isSub && 4214 (ShImm == 0 || 4215 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 4216 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) 4217 Adjust -= 2; 4218 else if (!isSub && 4219 ShImm == 1 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsr) 4220 --Adjust; 4221 break; 4222 } 4223 case ARM::t2LDRs: 4224 case ARM::t2LDRBs: 4225 case ARM::t2LDRHs: 4226 case ARM::t2LDRSHs: { 4227 // Thumb2 mode: lsl only. 4228 unsigned ShAmt = DefMI.getOperand(3).getImm(); 4229 if (ShAmt == 0 || ShAmt == 1 || ShAmt == 2 || ShAmt == 3) 4230 Adjust -= 2; 4231 break; 4232 } 4233 } 4234 } 4235 4236 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment()) { 4237 switch (DefMCID.getOpcode()) { 4238 default: break; 4239 case ARM::VLD1q8: 4240 case ARM::VLD1q16: 4241 case ARM::VLD1q32: 4242 case ARM::VLD1q64: 4243 case ARM::VLD1q8wb_fixed: 4244 case ARM::VLD1q16wb_fixed: 4245 case ARM::VLD1q32wb_fixed: 4246 case ARM::VLD1q64wb_fixed: 4247 case ARM::VLD1q8wb_register: 4248 case ARM::VLD1q16wb_register: 4249 case ARM::VLD1q32wb_register: 4250 case ARM::VLD1q64wb_register: 4251 case ARM::VLD2d8: 4252 case ARM::VLD2d16: 4253 case ARM::VLD2d32: 4254 case ARM::VLD2q8: 4255 case ARM::VLD2q16: 4256 case ARM::VLD2q32: 4257 case ARM::VLD2d8wb_fixed: 4258 case ARM::VLD2d16wb_fixed: 4259 case ARM::VLD2d32wb_fixed: 4260 case ARM::VLD2q8wb_fixed: 4261 case ARM::VLD2q16wb_fixed: 4262 case ARM::VLD2q32wb_fixed: 4263 case ARM::VLD2d8wb_register: 4264 case ARM::VLD2d16wb_register: 4265 case ARM::VLD2d32wb_register: 4266 case ARM::VLD2q8wb_register: 4267 case ARM::VLD2q16wb_register: 4268 case ARM::VLD2q32wb_register: 4269 case ARM::VLD3d8: 4270 case ARM::VLD3d16: 4271 case ARM::VLD3d32: 4272 case ARM::VLD1d64T: 4273 case ARM::VLD3d8_UPD: 4274 case ARM::VLD3d16_UPD: 4275 case ARM::VLD3d32_UPD: 4276 case ARM::VLD1d64Twb_fixed: 4277 case ARM::VLD1d64Twb_register: 4278 case ARM::VLD3q8_UPD: 4279 case ARM::VLD3q16_UPD: 4280 case ARM::VLD3q32_UPD: 4281 case ARM::VLD4d8: 4282 case ARM::VLD4d16: 4283 case ARM::VLD4d32: 4284 case ARM::VLD1d64Q: 4285 case ARM::VLD4d8_UPD: 4286 case ARM::VLD4d16_UPD: 4287 case ARM::VLD4d32_UPD: 4288 case ARM::VLD1d64Qwb_fixed: 4289 case ARM::VLD1d64Qwb_register: 4290 case ARM::VLD4q8_UPD: 4291 case ARM::VLD4q16_UPD: 4292 case ARM::VLD4q32_UPD: 4293 case ARM::VLD1DUPq8: 4294 case ARM::VLD1DUPq16: 4295 case ARM::VLD1DUPq32: 4296 case ARM::VLD1DUPq8wb_fixed: 4297 case ARM::VLD1DUPq16wb_fixed: 4298 case ARM::VLD1DUPq32wb_fixed: 4299 case ARM::VLD1DUPq8wb_register: 4300 case ARM::VLD1DUPq16wb_register: 4301 case 
ARM::VLD1DUPq32wb_register:
4302 case ARM::VLD2DUPd8:
4303 case ARM::VLD2DUPd16:
4304 case ARM::VLD2DUPd32:
4305 case ARM::VLD2DUPd8wb_fixed:
4306 case ARM::VLD2DUPd16wb_fixed:
4307 case ARM::VLD2DUPd32wb_fixed:
4308 case ARM::VLD2DUPd8wb_register:
4309 case ARM::VLD2DUPd16wb_register:
4310 case ARM::VLD2DUPd32wb_register:
4311 case ARM::VLD4DUPd8:
4312 case ARM::VLD4DUPd16:
4313 case ARM::VLD4DUPd32:
4314 case ARM::VLD4DUPd8_UPD:
4315 case ARM::VLD4DUPd16_UPD:
4316 case ARM::VLD4DUPd32_UPD:
4317 case ARM::VLD1LNd8:
4318 case ARM::VLD1LNd16:
4319 case ARM::VLD1LNd32:
4320 case ARM::VLD1LNd8_UPD:
4321 case ARM::VLD1LNd16_UPD:
4322 case ARM::VLD1LNd32_UPD:
4323 case ARM::VLD2LNd8:
4324 case ARM::VLD2LNd16:
4325 case ARM::VLD2LNd32:
4326 case ARM::VLD2LNq16:
4327 case ARM::VLD2LNq32:
4328 case ARM::VLD2LNd8_UPD:
4329 case ARM::VLD2LNd16_UPD:
4330 case ARM::VLD2LNd32_UPD:
4331 case ARM::VLD2LNq16_UPD:
4332 case ARM::VLD2LNq32_UPD:
4333 case ARM::VLD4LNd8:
4334 case ARM::VLD4LNd16:
4335 case ARM::VLD4LNd32:
4336 case ARM::VLD4LNq16:
4337 case ARM::VLD4LNq32:
4338 case ARM::VLD4LNd8_UPD:
4339 case ARM::VLD4LNd16_UPD:
4340 case ARM::VLD4LNd32_UPD:
4341 case ARM::VLD4LNq16_UPD:
4342 case ARM::VLD4LNq32_UPD:
4343 // If the address is not 64-bit aligned, the latencies of these
4344 // instructions increase by one.
4345 ++Adjust;
4346 break;
4347 }
4348 }
4349 return Adjust;
4350 }
4351
4352 int ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
4353 const MachineInstr &DefMI,
4354 unsigned DefIdx,
4355 const MachineInstr &UseMI,
4356 unsigned UseIdx) const {
4357 // No operand latency. The caller may fall back to getInstrLatency.
4358 if (!ItinData || ItinData->isEmpty())
4359 return -1;
4360
4361 const MachineOperand &DefMO = DefMI.getOperand(DefIdx);
4362 Register Reg = DefMO.getReg();
4363
4364 const MachineInstr *ResolvedDefMI = &DefMI;
4365 unsigned DefAdj = 0;
4366 if (DefMI.isBundle())
4367 ResolvedDefMI =
4368 getBundledDefMI(&getRegisterInfo(), &DefMI, Reg, DefIdx, DefAdj);
4369 if (ResolvedDefMI->isCopyLike() || ResolvedDefMI->isInsertSubreg() ||
4370 ResolvedDefMI->isRegSequence() || ResolvedDefMI->isImplicitDef()) {
4371 return 1;
4372 }
4373
4374 const MachineInstr *ResolvedUseMI = &UseMI;
4375 unsigned UseAdj = 0;
4376 if (UseMI.isBundle()) {
4377 ResolvedUseMI =
4378 getBundledUseMI(&getRegisterInfo(), UseMI, Reg, UseIdx, UseAdj);
4379 if (!ResolvedUseMI)
4380 return -1;
4381 }
4382
4383 return getOperandLatencyImpl(
4384 ItinData, *ResolvedDefMI, DefIdx, ResolvedDefMI->getDesc(), DefAdj, DefMO,
4385 Reg, *ResolvedUseMI, UseIdx, ResolvedUseMI->getDesc(), UseAdj);
4386 }
4387
4388 int ARMBaseInstrInfo::getOperandLatencyImpl(
4389 const InstrItineraryData *ItinData, const MachineInstr &DefMI,
4390 unsigned DefIdx, const MCInstrDesc &DefMCID, unsigned DefAdj,
4391 const MachineOperand &DefMO, unsigned Reg, const MachineInstr &UseMI,
4392 unsigned UseIdx, const MCInstrDesc &UseMCID, unsigned UseAdj) const {
4393 if (Reg == ARM::CPSR) {
4394 if (DefMI.getOpcode() == ARM::FMSTAT) {
4395 // fpscr -> cpsr stalls over 20 cycles on A8 (and earlier?)
4396 return Subtarget.isLikeA9() ? 1 : 20;
4397 }
4398
4399 // CPSR set and branch can be paired in the same cycle.
4400 if (UseMI.isBranch())
4401 return 0;
4402
4403 // Otherwise it takes the instruction latency (generally one).
4404 unsigned Latency = getInstrLatency(ItinData, DefMI);
4405
4406 // For Thumb2 and -Os, prefer scheduling a CPSR-setting instruction close to
4407 // its uses.
Instructions which are otherwise scheduled between them may 4408 // incur a code size penalty (not able to use the CPSR setting 16-bit 4409 // instructions). 4410 if (Latency > 0 && Subtarget.isThumb2()) { 4411 const MachineFunction *MF = DefMI.getParent()->getParent(); 4412 // FIXME: Use Function::hasOptSize(). 4413 if (MF->getFunction().hasFnAttribute(Attribute::OptimizeForSize)) 4414 --Latency; 4415 } 4416 return Latency; 4417 } 4418 4419 if (DefMO.isImplicit() || UseMI.getOperand(UseIdx).isImplicit()) 4420 return -1; 4421 4422 unsigned DefAlign = DefMI.hasOneMemOperand() 4423 ? (*DefMI.memoperands_begin())->getAlign().value() 4424 : 0; 4425 unsigned UseAlign = UseMI.hasOneMemOperand() 4426 ? (*UseMI.memoperands_begin())->getAlign().value() 4427 : 0; 4428 4429 // Get the itinerary's latency if possible, and handle variable_ops. 4430 int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign, UseMCID, 4431 UseIdx, UseAlign); 4432 // Unable to find operand latency. The caller may resort to getInstrLatency. 4433 if (Latency < 0) 4434 return Latency; 4435 4436 // Adjust for IT block position. 4437 int Adj = DefAdj + UseAdj; 4438 4439 // Adjust for dynamic def-side opcode variants not captured by the itinerary. 4440 Adj += adjustDefLatency(Subtarget, DefMI, DefMCID, DefAlign); 4441 if (Adj >= 0 || (int)Latency > -Adj) { 4442 return Latency + Adj; 4443 } 4444 // Return the itinerary latency, which may be zero but not less than zero. 4445 return Latency; 4446 } 4447 4448 int 4449 ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, 4450 SDNode *DefNode, unsigned DefIdx, 4451 SDNode *UseNode, unsigned UseIdx) const { 4452 if (!DefNode->isMachineOpcode()) 4453 return 1; 4454 4455 const MCInstrDesc &DefMCID = get(DefNode->getMachineOpcode()); 4456 4457 if (isZeroCost(DefMCID.Opcode)) 4458 return 0; 4459 4460 if (!ItinData || ItinData->isEmpty()) 4461 return DefMCID.mayLoad() ? 3 : 1; 4462 4463 if (!UseNode->isMachineOpcode()) { 4464 int Latency = ItinData->getOperandCycle(DefMCID.getSchedClass(), DefIdx); 4465 int Adj = Subtarget.getPreISelOperandLatencyAdjustment(); 4466 int Threshold = 1 + Adj; 4467 return Latency <= Threshold ? 1 : Latency - Adj; 4468 } 4469 4470 const MCInstrDesc &UseMCID = get(UseNode->getMachineOpcode()); 4471 auto *DefMN = cast<MachineSDNode>(DefNode); 4472 unsigned DefAlign = !DefMN->memoperands_empty() 4473 ? (*DefMN->memoperands_begin())->getAlign().value() 4474 : 0; 4475 auto *UseMN = cast<MachineSDNode>(UseNode); 4476 unsigned UseAlign = !UseMN->memoperands_empty() 4477 ? (*UseMN->memoperands_begin())->getAlign().value() 4478 : 0; 4479 int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign, 4480 UseMCID, UseIdx, UseAlign); 4481 4482 if (Latency > 1 && 4483 (Subtarget.isCortexA8() || Subtarget.isLikeA9() || 4484 Subtarget.isCortexA7())) { 4485 // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2] 4486 // variants are one cycle cheaper. 4487 switch (DefMCID.getOpcode()) { 4488 default: break; 4489 case ARM::LDRrs: 4490 case ARM::LDRBrs: { 4491 unsigned ShOpVal = 4492 cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue(); 4493 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 4494 if (ShImm == 0 || 4495 (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)) 4496 --Latency; 4497 break; 4498 } 4499 case ARM::t2LDRs: 4500 case ARM::t2LDRBs: 4501 case ARM::t2LDRHs: 4502 case ARM::t2LDRSHs: { 4503 // Thumb2 mode: lsl only. 
4504 unsigned ShAmt = 4505 cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue(); 4506 if (ShAmt == 0 || ShAmt == 2) 4507 --Latency; 4508 break; 4509 } 4510 } 4511 } else if (DefIdx == 0 && Latency > 2 && Subtarget.isSwift()) { 4512 // FIXME: Properly handle all of the latency adjustments for address 4513 // writeback. 4514 switch (DefMCID.getOpcode()) { 4515 default: break; 4516 case ARM::LDRrs: 4517 case ARM::LDRBrs: { 4518 unsigned ShOpVal = 4519 cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue(); 4520 unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); 4521 if (ShImm == 0 || 4522 ((ShImm == 1 || ShImm == 2 || ShImm == 3) && 4523 ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)) 4524 Latency -= 2; 4525 else if (ShImm == 1 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsr) 4526 --Latency; 4527 break; 4528 } 4529 case ARM::t2LDRs: 4530 case ARM::t2LDRBs: 4531 case ARM::t2LDRHs: 4532 case ARM::t2LDRSHs: 4533 // Thumb2 mode: lsl 0-3 only. 4534 Latency -= 2; 4535 break; 4536 } 4537 } 4538 4539 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment()) 4540 switch (DefMCID.getOpcode()) { 4541 default: break; 4542 case ARM::VLD1q8: 4543 case ARM::VLD1q16: 4544 case ARM::VLD1q32: 4545 case ARM::VLD1q64: 4546 case ARM::VLD1q8wb_register: 4547 case ARM::VLD1q16wb_register: 4548 case ARM::VLD1q32wb_register: 4549 case ARM::VLD1q64wb_register: 4550 case ARM::VLD1q8wb_fixed: 4551 case ARM::VLD1q16wb_fixed: 4552 case ARM::VLD1q32wb_fixed: 4553 case ARM::VLD1q64wb_fixed: 4554 case ARM::VLD2d8: 4555 case ARM::VLD2d16: 4556 case ARM::VLD2d32: 4557 case ARM::VLD2q8Pseudo: 4558 case ARM::VLD2q16Pseudo: 4559 case ARM::VLD2q32Pseudo: 4560 case ARM::VLD2d8wb_fixed: 4561 case ARM::VLD2d16wb_fixed: 4562 case ARM::VLD2d32wb_fixed: 4563 case ARM::VLD2q8PseudoWB_fixed: 4564 case ARM::VLD2q16PseudoWB_fixed: 4565 case ARM::VLD2q32PseudoWB_fixed: 4566 case ARM::VLD2d8wb_register: 4567 case ARM::VLD2d16wb_register: 4568 case ARM::VLD2d32wb_register: 4569 case ARM::VLD2q8PseudoWB_register: 4570 case ARM::VLD2q16PseudoWB_register: 4571 case ARM::VLD2q32PseudoWB_register: 4572 case ARM::VLD3d8Pseudo: 4573 case ARM::VLD3d16Pseudo: 4574 case ARM::VLD3d32Pseudo: 4575 case ARM::VLD1d8TPseudo: 4576 case ARM::VLD1d16TPseudo: 4577 case ARM::VLD1d32TPseudo: 4578 case ARM::VLD1d64TPseudo: 4579 case ARM::VLD1d64TPseudoWB_fixed: 4580 case ARM::VLD1d64TPseudoWB_register: 4581 case ARM::VLD3d8Pseudo_UPD: 4582 case ARM::VLD3d16Pseudo_UPD: 4583 case ARM::VLD3d32Pseudo_UPD: 4584 case ARM::VLD3q8Pseudo_UPD: 4585 case ARM::VLD3q16Pseudo_UPD: 4586 case ARM::VLD3q32Pseudo_UPD: 4587 case ARM::VLD3q8oddPseudo: 4588 case ARM::VLD3q16oddPseudo: 4589 case ARM::VLD3q32oddPseudo: 4590 case ARM::VLD3q8oddPseudo_UPD: 4591 case ARM::VLD3q16oddPseudo_UPD: 4592 case ARM::VLD3q32oddPseudo_UPD: 4593 case ARM::VLD4d8Pseudo: 4594 case ARM::VLD4d16Pseudo: 4595 case ARM::VLD4d32Pseudo: 4596 case ARM::VLD1d8QPseudo: 4597 case ARM::VLD1d16QPseudo: 4598 case ARM::VLD1d32QPseudo: 4599 case ARM::VLD1d64QPseudo: 4600 case ARM::VLD1d64QPseudoWB_fixed: 4601 case ARM::VLD1d64QPseudoWB_register: 4602 case ARM::VLD1q8HighQPseudo: 4603 case ARM::VLD1q8LowQPseudo_UPD: 4604 case ARM::VLD1q8HighTPseudo: 4605 case ARM::VLD1q8LowTPseudo_UPD: 4606 case ARM::VLD1q16HighQPseudo: 4607 case ARM::VLD1q16LowQPseudo_UPD: 4608 case ARM::VLD1q16HighTPseudo: 4609 case ARM::VLD1q16LowTPseudo_UPD: 4610 case ARM::VLD1q32HighQPseudo: 4611 case ARM::VLD1q32LowQPseudo_UPD: 4612 case ARM::VLD1q32HighTPseudo: 4613 case ARM::VLD1q32LowTPseudo_UPD: 4614 case ARM::VLD1q64HighQPseudo: 
4615 case ARM::VLD1q64LowQPseudo_UPD:
4616 case ARM::VLD1q64HighTPseudo:
4617 case ARM::VLD1q64LowTPseudo_UPD:
4618 case ARM::VLD4d8Pseudo_UPD:
4619 case ARM::VLD4d16Pseudo_UPD:
4620 case ARM::VLD4d32Pseudo_UPD:
4621 case ARM::VLD4q8Pseudo_UPD:
4622 case ARM::VLD4q16Pseudo_UPD:
4623 case ARM::VLD4q32Pseudo_UPD:
4624 case ARM::VLD4q8oddPseudo:
4625 case ARM::VLD4q16oddPseudo:
4626 case ARM::VLD4q32oddPseudo:
4627 case ARM::VLD4q8oddPseudo_UPD:
4628 case ARM::VLD4q16oddPseudo_UPD:
4629 case ARM::VLD4q32oddPseudo_UPD:
4630 case ARM::VLD1DUPq8:
4631 case ARM::VLD1DUPq16:
4632 case ARM::VLD1DUPq32:
4633 case ARM::VLD1DUPq8wb_fixed:
4634 case ARM::VLD1DUPq16wb_fixed:
4635 case ARM::VLD1DUPq32wb_fixed:
4636 case ARM::VLD1DUPq8wb_register:
4637 case ARM::VLD1DUPq16wb_register:
4638 case ARM::VLD1DUPq32wb_register:
4639 case ARM::VLD2DUPd8:
4640 case ARM::VLD2DUPd16:
4641 case ARM::VLD2DUPd32:
4642 case ARM::VLD2DUPd8wb_fixed:
4643 case ARM::VLD2DUPd16wb_fixed:
4644 case ARM::VLD2DUPd32wb_fixed:
4645 case ARM::VLD2DUPd8wb_register:
4646 case ARM::VLD2DUPd16wb_register:
4647 case ARM::VLD2DUPd32wb_register:
4648 case ARM::VLD2DUPq8EvenPseudo:
4649 case ARM::VLD2DUPq8OddPseudo:
4650 case ARM::VLD2DUPq16EvenPseudo:
4651 case ARM::VLD2DUPq16OddPseudo:
4652 case ARM::VLD2DUPq32EvenPseudo:
4653 case ARM::VLD2DUPq32OddPseudo:
4654 case ARM::VLD3DUPq8EvenPseudo:
4655 case ARM::VLD3DUPq8OddPseudo:
4656 case ARM::VLD3DUPq16EvenPseudo:
4657 case ARM::VLD3DUPq16OddPseudo:
4658 case ARM::VLD3DUPq32EvenPseudo:
4659 case ARM::VLD3DUPq32OddPseudo:
4660 case ARM::VLD4DUPd8Pseudo:
4661 case ARM::VLD4DUPd16Pseudo:
4662 case ARM::VLD4DUPd32Pseudo:
4663 case ARM::VLD4DUPd8Pseudo_UPD:
4664 case ARM::VLD4DUPd16Pseudo_UPD:
4665 case ARM::VLD4DUPd32Pseudo_UPD:
4666 case ARM::VLD4DUPq8EvenPseudo:
4667 case ARM::VLD4DUPq8OddPseudo:
4668 case ARM::VLD4DUPq16EvenPseudo:
4669 case ARM::VLD4DUPq16OddPseudo:
4670 case ARM::VLD4DUPq32EvenPseudo:
4671 case ARM::VLD4DUPq32OddPseudo:
4672 case ARM::VLD1LNq8Pseudo:
4673 case ARM::VLD1LNq16Pseudo:
4674 case ARM::VLD1LNq32Pseudo:
4675 case ARM::VLD1LNq8Pseudo_UPD:
4676 case ARM::VLD1LNq16Pseudo_UPD:
4677 case ARM::VLD1LNq32Pseudo_UPD:
4678 case ARM::VLD2LNd8Pseudo:
4679 case ARM::VLD2LNd16Pseudo:
4680 case ARM::VLD2LNd32Pseudo:
4681 case ARM::VLD2LNq16Pseudo:
4682 case ARM::VLD2LNq32Pseudo:
4683 case ARM::VLD2LNd8Pseudo_UPD:
4684 case ARM::VLD2LNd16Pseudo_UPD:
4685 case ARM::VLD2LNd32Pseudo_UPD:
4686 case ARM::VLD2LNq16Pseudo_UPD:
4687 case ARM::VLD2LNq32Pseudo_UPD:
4688 case ARM::VLD4LNd8Pseudo:
4689 case ARM::VLD4LNd16Pseudo:
4690 case ARM::VLD4LNd32Pseudo:
4691 case ARM::VLD4LNq16Pseudo:
4692 case ARM::VLD4LNq32Pseudo:
4693 case ARM::VLD4LNd8Pseudo_UPD:
4694 case ARM::VLD4LNd16Pseudo_UPD:
4695 case ARM::VLD4LNd32Pseudo_UPD:
4696 case ARM::VLD4LNq16Pseudo_UPD:
4697 case ARM::VLD4LNq32Pseudo_UPD:
4698 // If the address is not 64-bit aligned, the latencies of these
4699 // instructions increase by one.
4700 ++Latency; 4701 break; 4702 } 4703 4704 return Latency; 4705 } 4706 4707 unsigned ARMBaseInstrInfo::getPredicationCost(const MachineInstr &MI) const { 4708 if (MI.isCopyLike() || MI.isInsertSubreg() || MI.isRegSequence() || 4709 MI.isImplicitDef()) 4710 return 0; 4711 4712 if (MI.isBundle()) 4713 return 0; 4714 4715 const MCInstrDesc &MCID = MI.getDesc(); 4716 4717 if (MCID.isCall() || (MCID.hasImplicitDefOfPhysReg(ARM::CPSR) && 4718 !Subtarget.cheapPredicableCPSRDef())) { 4719 // When predicated, CPSR is an additional source operand for CPSR updating 4720 // instructions, this apparently increases their latencies. 4721 return 1; 4722 } 4723 return 0; 4724 } 4725 4726 unsigned ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData, 4727 const MachineInstr &MI, 4728 unsigned *PredCost) const { 4729 if (MI.isCopyLike() || MI.isInsertSubreg() || MI.isRegSequence() || 4730 MI.isImplicitDef()) 4731 return 1; 4732 4733 // An instruction scheduler typically runs on unbundled instructions, however 4734 // other passes may query the latency of a bundled instruction. 4735 if (MI.isBundle()) { 4736 unsigned Latency = 0; 4737 MachineBasicBlock::const_instr_iterator I = MI.getIterator(); 4738 MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end(); 4739 while (++I != E && I->isInsideBundle()) { 4740 if (I->getOpcode() != ARM::t2IT) 4741 Latency += getInstrLatency(ItinData, *I, PredCost); 4742 } 4743 return Latency; 4744 } 4745 4746 const MCInstrDesc &MCID = MI.getDesc(); 4747 if (PredCost && (MCID.isCall() || (MCID.hasImplicitDefOfPhysReg(ARM::CPSR) && 4748 !Subtarget.cheapPredicableCPSRDef()))) { 4749 // When predicated, CPSR is an additional source operand for CPSR updating 4750 // instructions, this apparently increases their latencies. 4751 *PredCost = 1; 4752 } 4753 // Be sure to call getStageLatency for an empty itinerary in case it has a 4754 // valid MinLatency property. 4755 if (!ItinData) 4756 return MI.mayLoad() ? 3 : 1; 4757 4758 unsigned Class = MCID.getSchedClass(); 4759 4760 // For instructions with variable uops, use uops as latency. 4761 if (!ItinData->isEmpty() && ItinData->getNumMicroOps(Class) < 0) 4762 return getNumMicroOps(ItinData, MI); 4763 4764 // For the common case, fall back on the itinerary's latency. 4765 unsigned Latency = ItinData->getStageLatency(Class); 4766 4767 // Adjust for dynamic def-side opcode variants not captured by the itinerary. 4768 unsigned DefAlign = 4769 MI.hasOneMemOperand() ? 
(*MI.memoperands_begin())->getAlign().value() : 0; 4770 int Adj = adjustDefLatency(Subtarget, MI, MCID, DefAlign); 4771 if (Adj >= 0 || (int)Latency > -Adj) { 4772 return Latency + Adj; 4773 } 4774 return Latency; 4775 } 4776 4777 int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData, 4778 SDNode *Node) const { 4779 if (!Node->isMachineOpcode()) 4780 return 1; 4781 4782 if (!ItinData || ItinData->isEmpty()) 4783 return 1; 4784 4785 unsigned Opcode = Node->getMachineOpcode(); 4786 switch (Opcode) { 4787 default: 4788 return ItinData->getStageLatency(get(Opcode).getSchedClass()); 4789 case ARM::VLDMQIA: 4790 case ARM::VSTMQIA: 4791 return 2; 4792 } 4793 } 4794 4795 bool ARMBaseInstrInfo::hasHighOperandLatency(const TargetSchedModel &SchedModel, 4796 const MachineRegisterInfo *MRI, 4797 const MachineInstr &DefMI, 4798 unsigned DefIdx, 4799 const MachineInstr &UseMI, 4800 unsigned UseIdx) const { 4801 unsigned DDomain = DefMI.getDesc().TSFlags & ARMII::DomainMask; 4802 unsigned UDomain = UseMI.getDesc().TSFlags & ARMII::DomainMask; 4803 if (Subtarget.nonpipelinedVFP() && 4804 (DDomain == ARMII::DomainVFP || UDomain == ARMII::DomainVFP)) 4805 return true; 4806 4807 // Hoist VFP / NEON instructions with 4 or higher latency. 4808 unsigned Latency = 4809 SchedModel.computeOperandLatency(&DefMI, DefIdx, &UseMI, UseIdx); 4810 if (Latency <= 3) 4811 return false; 4812 return DDomain == ARMII::DomainVFP || DDomain == ARMII::DomainNEON || 4813 UDomain == ARMII::DomainVFP || UDomain == ARMII::DomainNEON; 4814 } 4815 4816 bool ARMBaseInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel, 4817 const MachineInstr &DefMI, 4818 unsigned DefIdx) const { 4819 const InstrItineraryData *ItinData = SchedModel.getInstrItineraries(); 4820 if (!ItinData || ItinData->isEmpty()) 4821 return false; 4822 4823 unsigned DDomain = DefMI.getDesc().TSFlags & ARMII::DomainMask; 4824 if (DDomain == ARMII::DomainGeneral) { 4825 unsigned DefClass = DefMI.getDesc().getSchedClass(); 4826 int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx); 4827 return (DefCycle != -1 && DefCycle <= 2); 4828 } 4829 return false; 4830 } 4831 4832 bool ARMBaseInstrInfo::verifyInstruction(const MachineInstr &MI, 4833 StringRef &ErrInfo) const { 4834 if (convertAddSubFlagsOpcode(MI.getOpcode())) { 4835 ErrInfo = "Pseudo flag setting opcodes only exist in Selection DAG"; 4836 return false; 4837 } 4838 if (MI.getOpcode() == ARM::tMOVr && !Subtarget.hasV6Ops()) { 4839 // Make sure we don't generate a lo-lo mov that isn't supported. 
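    // Illustrative check (hypothetical operands): before ARMv6 there is no
    // flag-preserving encoding of "mov" with both registers in r0-r7, so
    //   tMOVr r2, r3   -> rejected below (both operands are lo registers)
    //   tMOVr r2, r9   -> accepted (one operand is a hi register)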
4840 if (!ARM::hGPRRegClass.contains(MI.getOperand(0).getReg()) && 4841 !ARM::hGPRRegClass.contains(MI.getOperand(1).getReg())) { 4842 ErrInfo = "Non-flag-setting Thumb1 mov is v6-only"; 4843 return false; 4844 } 4845 } 4846 if (MI.getOpcode() == ARM::tPUSH || 4847 MI.getOpcode() == ARM::tPOP || 4848 MI.getOpcode() == ARM::tPOP_RET) { 4849 for (const MachineOperand &MO : llvm::drop_begin(MI.operands(), 2)) { 4850 if (MO.isImplicit() || !MO.isReg()) 4851 continue; 4852 Register Reg = MO.getReg(); 4853 if (Reg < ARM::R0 || Reg > ARM::R7) { 4854 if (!(MI.getOpcode() == ARM::tPUSH && Reg == ARM::LR) && 4855 !(MI.getOpcode() == ARM::tPOP_RET && Reg == ARM::PC)) { 4856 ErrInfo = "Unsupported register in Thumb1 push/pop"; 4857 return false; 4858 } 4859 } 4860 } 4861 } 4862 if (MI.getOpcode() == ARM::MVE_VMOV_q_rr) { 4863 assert(MI.getOperand(4).isImm() && MI.getOperand(5).isImm()); 4864 if ((MI.getOperand(4).getImm() != 2 && MI.getOperand(4).getImm() != 3) || 4865 MI.getOperand(4).getImm() != MI.getOperand(5).getImm() + 2) { 4866 ErrInfo = "Incorrect array index for MVE_VMOV_q_rr"; 4867 return false; 4868 } 4869 } 4870 4871 // Check the address model by taking the first Imm operand and checking it is 4872 // legal for that addressing mode. 4873 ARMII::AddrMode AddrMode = 4874 (ARMII::AddrMode)(MI.getDesc().TSFlags & ARMII::AddrModeMask); 4875 switch (AddrMode) { 4876 default: 4877 break; 4878 case ARMII::AddrModeT2_i7: 4879 case ARMII::AddrModeT2_i7s2: 4880 case ARMII::AddrModeT2_i7s4: 4881 case ARMII::AddrModeT2_i8: 4882 case ARMII::AddrModeT2_i8pos: 4883 case ARMII::AddrModeT2_i8neg: 4884 case ARMII::AddrModeT2_i8s4: 4885 case ARMII::AddrModeT2_i12: { 4886 uint32_t Imm = 0; 4887 for (auto Op : MI.operands()) { 4888 if (Op.isImm()) { 4889 Imm = Op.getImm(); 4890 break; 4891 } 4892 } 4893 if (!isLegalAddressImm(MI.getOpcode(), Imm, this)) { 4894 ErrInfo = "Incorrect AddrMode Imm for instruction"; 4895 return false; 4896 } 4897 break; 4898 } 4899 } 4900 return true; 4901 } 4902 4903 void ARMBaseInstrInfo::expandLoadStackGuardBase(MachineBasicBlock::iterator MI, 4904 unsigned LoadImmOpc, 4905 unsigned LoadOpc) const { 4906 assert(!Subtarget.isROPI() && !Subtarget.isRWPI() && 4907 "ROPI/RWPI not currently supported with stack guard"); 4908 4909 MachineBasicBlock &MBB = *MI->getParent(); 4910 DebugLoc DL = MI->getDebugLoc(); 4911 Register Reg = MI->getOperand(0).getReg(); 4912 MachineInstrBuilder MIB; 4913 unsigned int Offset = 0; 4914 4915 if (LoadImmOpc == ARM::MRC || LoadImmOpc == ARM::t2MRC) { 4916 assert(Subtarget.isReadTPHard() && 4917 "TLS stack protector requires hardware TLS register"); 4918 4919 BuildMI(MBB, MI, DL, get(LoadImmOpc), Reg) 4920 .addImm(15) 4921 .addImm(0) 4922 .addImm(13) 4923 .addImm(0) 4924 .addImm(3) 4925 .add(predOps(ARMCC::AL)); 4926 4927 Module &M = *MBB.getParent()->getFunction().getParent(); 4928 Offset = M.getStackProtectorGuardOffset(); 4929 if (Offset & ~0xfffU) { 4930 // The offset won't fit in the LDR's 12-bit immediate field, so emit an 4931 // extra ADD to cover the delta. This gives us a guaranteed 8 additional 4932 // bits, resulting in a range of 0 to +1 MiB for the guard offset. 4933 unsigned AddOpc = (LoadImmOpc == ARM::MRC) ? 
ARM::ADDri : ARM::t2ADDri; 4934 BuildMI(MBB, MI, DL, get(AddOpc), Reg) 4935 .addReg(Reg, RegState::Kill) 4936 .addImm(Offset & ~0xfffU) 4937 .add(predOps(ARMCC::AL)) 4938 .addReg(0); 4939 Offset &= 0xfffU; 4940 } 4941 } else { 4942 const GlobalValue *GV = 4943 cast<GlobalValue>((*MI->memoperands_begin())->getValue()); 4944 bool IsIndirect = Subtarget.isGVIndirectSymbol(GV); 4945 4946 unsigned TargetFlags = ARMII::MO_NO_FLAG; 4947 if (Subtarget.isTargetMachO()) { 4948 TargetFlags |= ARMII::MO_NONLAZY; 4949 } else if (Subtarget.isTargetCOFF()) { 4950 if (GV->hasDLLImportStorageClass()) 4951 TargetFlags |= ARMII::MO_DLLIMPORT; 4952 else if (IsIndirect) 4953 TargetFlags |= ARMII::MO_COFFSTUB; 4954 } else if (Subtarget.isGVInGOT(GV)) { 4955 TargetFlags |= ARMII::MO_GOT; 4956 } 4957 4958 BuildMI(MBB, MI, DL, get(LoadImmOpc), Reg) 4959 .addGlobalAddress(GV, 0, TargetFlags); 4960 4961 if (IsIndirect) { 4962 MIB = BuildMI(MBB, MI, DL, get(LoadOpc), Reg); 4963 MIB.addReg(Reg, RegState::Kill).addImm(0); 4964 auto Flags = MachineMemOperand::MOLoad | 4965 MachineMemOperand::MODereferenceable | 4966 MachineMemOperand::MOInvariant; 4967 MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand( 4968 MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 4, Align(4)); 4969 MIB.addMemOperand(MMO).add(predOps(ARMCC::AL)); 4970 } 4971 } 4972 4973 MIB = BuildMI(MBB, MI, DL, get(LoadOpc), Reg); 4974 MIB.addReg(Reg, RegState::Kill) 4975 .addImm(Offset) 4976 .cloneMemRefs(*MI) 4977 .add(predOps(ARMCC::AL)); 4978 } 4979 4980 bool 4981 ARMBaseInstrInfo::isFpMLxInstruction(unsigned Opcode, unsigned &MulOpc, 4982 unsigned &AddSubOpc, 4983 bool &NegAcc, bool &HasLane) const { 4984 DenseMap<unsigned, unsigned>::const_iterator I = MLxEntryMap.find(Opcode); 4985 if (I == MLxEntryMap.end()) 4986 return false; 4987 4988 const ARM_MLxEntry &Entry = ARM_MLxTable[I->second]; 4989 MulOpc = Entry.MulOpc; 4990 AddSubOpc = Entry.AddSubOpc; 4991 NegAcc = Entry.NegAcc; 4992 HasLane = Entry.HasLane; 4993 return true; 4994 } 4995 4996 //===----------------------------------------------------------------------===// 4997 // Execution domains. 4998 //===----------------------------------------------------------------------===// 4999 // 5000 // Some instructions go down the NEON pipeline, some go down the VFP pipeline, 5001 // and some can go down both. The vmov instructions go down the VFP pipeline, 5002 // but they can be changed to vorr equivalents that are executed by the NEON 5003 // pipeline. 5004 // 5005 // We use the following execution domain numbering: 5006 // 5007 enum ARMExeDomain { 5008 ExeGeneric = 0, 5009 ExeVFP = 1, 5010 ExeNEON = 2 5011 }; 5012 5013 // 5014 // Also see ARMInstrFormats.td and Domain* enums in ARMBaseInfo.h 5015 // 5016 std::pair<uint16_t, uint16_t> 5017 ARMBaseInstrInfo::getExecutionDomain(const MachineInstr &MI) const { 5018 // If we don't have access to NEON instructions then we won't be able 5019 // to swizzle anything to the NEON domain. Check to make sure. 5020 if (Subtarget.hasNEON()) { 5021 // VMOVD, VMOVRS and VMOVSR are VFP instructions, but can be changed to NEON 5022 // if they are not predicated. 5023 if (MI.getOpcode() == ARM::VMOVD && !isPredicated(MI)) 5024 return std::make_pair(ExeVFP, (1 << ExeVFP) | (1 << ExeNEON)); 5025 5026 // CortexA9 is particularly picky about mixing the two and wants these 5027 // converted. 
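    // E.g. (illustrative): a VFP scalar copy such as
    //   vmov.f32 s0, s1
    // is reported as swizzlable here, so setExecutionDomain can later rewrite
    // it into a NEON lane move and avoid a cross-domain stall between a NEON
    // producer and a VFP consumer.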
5028 if (Subtarget.useNEONForFPMovs() && !isPredicated(MI) && 5029 (MI.getOpcode() == ARM::VMOVRS || MI.getOpcode() == ARM::VMOVSR || 5030 MI.getOpcode() == ARM::VMOVS)) 5031 return std::make_pair(ExeVFP, (1 << ExeVFP) | (1 << ExeNEON)); 5032 } 5033 // No other instructions can be swizzled, so just determine their domain. 5034 unsigned Domain = MI.getDesc().TSFlags & ARMII::DomainMask; 5035 5036 if (Domain & ARMII::DomainNEON) 5037 return std::make_pair(ExeNEON, 0); 5038 5039 // Certain instructions can go either way on Cortex-A8. 5040 // Treat them as NEON instructions. 5041 if ((Domain & ARMII::DomainNEONA8) && Subtarget.isCortexA8()) 5042 return std::make_pair(ExeNEON, 0); 5043 5044 if (Domain & ARMII::DomainVFP) 5045 return std::make_pair(ExeVFP, 0); 5046 5047 return std::make_pair(ExeGeneric, 0); 5048 } 5049 5050 static unsigned getCorrespondingDRegAndLane(const TargetRegisterInfo *TRI, 5051 unsigned SReg, unsigned &Lane) { 5052 unsigned DReg = TRI->getMatchingSuperReg(SReg, ARM::ssub_0, &ARM::DPRRegClass); 5053 Lane = 0; 5054 5055 if (DReg != ARM::NoRegister) 5056 return DReg; 5057 5058 Lane = 1; 5059 DReg = TRI->getMatchingSuperReg(SReg, ARM::ssub_1, &ARM::DPRRegClass); 5060 5061 assert(DReg && "S-register with no D super-register?"); 5062 return DReg; 5063 } 5064 5065 /// getImplicitSPRUseForDPRUse - Given a use of a DPR register and lane, 5066 /// set ImplicitSReg to a register number that must be marked as implicit-use or 5067 /// zero if no register needs to be defined as implicit-use. 5068 /// 5069 /// If the function cannot determine if an SPR should be marked implicit use or 5070 /// not, it returns false. 5071 /// 5072 /// This function handles cases where an instruction is being modified from taking 5073 /// an SPR to a DPR[Lane]. A use of the DPR is being added, which may conflict 5074 /// with an earlier def of an SPR corresponding to DPR[Lane^1] (i.e. the other 5075 /// lane of the DPR). 5076 /// 5077 /// If the other SPR is defined, an implicit-use of it should be added. Else, 5078 /// (including the case where the DPR itself is defined), it should not. 5079 /// 5080 static bool getImplicitSPRUseForDPRUse(const TargetRegisterInfo *TRI, 5081 MachineInstr &MI, unsigned DReg, 5082 unsigned Lane, unsigned &ImplicitSReg) { 5083 // If the DPR is defined or used already, the other SPR lane will be chained 5084 // correctly, so there is nothing to be done. 5085 if (MI.definesRegister(DReg, TRI) || MI.readsRegister(DReg, TRI)) { 5086 ImplicitSReg = 0; 5087 return true; 5088 } 5089 5090 // Otherwise we need to go searching to see if the SPR is set explicitly. 5091 ImplicitSReg = TRI->getSubReg(DReg, 5092 (Lane & 1) ? ARM::ssub_0 : ARM::ssub_1); 5093 MachineBasicBlock::LivenessQueryResult LQR = 5094 MI.getParent()->computeRegisterLiveness(TRI, ImplicitSReg, MI); 5095 5096 if (LQR == MachineBasicBlock::LQR_Live) 5097 return true; 5098 else if (LQR == MachineBasicBlock::LQR_Unknown) 5099 return false; 5100 5101 // If the register is known not to be live, there is no need to add an 5102 // implicit-use. 
5103 ImplicitSReg = 0; 5104 return true; 5105 } 5106 5107 void ARMBaseInstrInfo::setExecutionDomain(MachineInstr &MI, 5108 unsigned Domain) const { 5109 unsigned DstReg, SrcReg, DReg; 5110 unsigned Lane; 5111 MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI); 5112 const TargetRegisterInfo *TRI = &getRegisterInfo(); 5113 switch (MI.getOpcode()) { 5114 default: 5115 llvm_unreachable("cannot handle opcode!"); 5116 break; 5117 case ARM::VMOVD: 5118 if (Domain != ExeNEON) 5119 break; 5120 5121 // Zap the predicate operands. 5122 assert(!isPredicated(MI) && "Cannot predicate a VORRd"); 5123 5124 // Make sure we've got NEON instructions. 5125 assert(Subtarget.hasNEON() && "VORRd requires NEON"); 5126 5127 // Source instruction is %DDst = VMOVD %DSrc, 14, %noreg (; implicits) 5128 DstReg = MI.getOperand(0).getReg(); 5129 SrcReg = MI.getOperand(1).getReg(); 5130 5131 for (unsigned i = MI.getDesc().getNumOperands(); i; --i) 5132 MI.removeOperand(i - 1); 5133 5134 // Change to a %DDst = VORRd %DSrc, %DSrc, 14, %noreg (; implicits) 5135 MI.setDesc(get(ARM::VORRd)); 5136 MIB.addReg(DstReg, RegState::Define) 5137 .addReg(SrcReg) 5138 .addReg(SrcReg) 5139 .add(predOps(ARMCC::AL)); 5140 break; 5141 case ARM::VMOVRS: 5142 if (Domain != ExeNEON) 5143 break; 5144 assert(!isPredicated(MI) && "Cannot predicate a VGETLN"); 5145 5146 // Source instruction is %RDst = VMOVRS %SSrc, 14, %noreg (; implicits) 5147 DstReg = MI.getOperand(0).getReg(); 5148 SrcReg = MI.getOperand(1).getReg(); 5149 5150 for (unsigned i = MI.getDesc().getNumOperands(); i; --i) 5151 MI.removeOperand(i - 1); 5152 5153 DReg = getCorrespondingDRegAndLane(TRI, SrcReg, Lane); 5154 5155 // Convert to %RDst = VGETLNi32 %DSrc, Lane, 14, %noreg (; imps) 5156 // Note that DSrc has been widened and the other lane may be undef, which 5157 // contaminates the entire register. 5158 MI.setDesc(get(ARM::VGETLNi32)); 5159 MIB.addReg(DstReg, RegState::Define) 5160 .addReg(DReg, RegState::Undef) 5161 .addImm(Lane) 5162 .add(predOps(ARMCC::AL)); 5163 5164 // The old source should be an implicit use, otherwise we might think it 5165 // was dead before here. 5166 MIB.addReg(SrcReg, RegState::Implicit); 5167 break; 5168 case ARM::VMOVSR: { 5169 if (Domain != ExeNEON) 5170 break; 5171 assert(!isPredicated(MI) && "Cannot predicate a VSETLN"); 5172 5173 // Source instruction is %SDst = VMOVSR %RSrc, 14, %noreg (; implicits) 5174 DstReg = MI.getOperand(0).getReg(); 5175 SrcReg = MI.getOperand(1).getReg(); 5176 5177 DReg = getCorrespondingDRegAndLane(TRI, DstReg, Lane); 5178 5179 unsigned ImplicitSReg; 5180 if (!getImplicitSPRUseForDPRUse(TRI, MI, DReg, Lane, ImplicitSReg)) 5181 break; 5182 5183 for (unsigned i = MI.getDesc().getNumOperands(); i; --i) 5184 MI.removeOperand(i - 1); 5185 5186 // Convert to %DDst = VSETLNi32 %DDst, %RSrc, Lane, 14, %noreg (; imps) 5187 // Again DDst may be undefined at the beginning of this instruction. 5188 MI.setDesc(get(ARM::VSETLNi32)); 5189 MIB.addReg(DReg, RegState::Define) 5190 .addReg(DReg, getUndefRegState(!MI.readsRegister(DReg, TRI))) 5191 .addReg(SrcReg) 5192 .addImm(Lane) 5193 .add(predOps(ARMCC::AL)); 5194 5195 // The narrower destination must be marked as set to keep previous chains 5196 // in place. 
5197       MIB.addReg(DstReg, RegState::Define | RegState::Implicit);
5198       if (ImplicitSReg != 0)
5199         MIB.addReg(ImplicitSReg, RegState::Implicit);
5200       break;
5201     }
5202     case ARM::VMOVS: {
5203       if (Domain != ExeNEON)
5204         break;
5205
5206       // Source instruction is %SDst = VMOVS %SSrc, 14, %noreg (; implicits)
5207       DstReg = MI.getOperand(0).getReg();
5208       SrcReg = MI.getOperand(1).getReg();
5209
5210       unsigned DstLane = 0, SrcLane = 0, DDst, DSrc;
5211       DDst = getCorrespondingDRegAndLane(TRI, DstReg, DstLane);
5212       DSrc = getCorrespondingDRegAndLane(TRI, SrcReg, SrcLane);
5213
5214       unsigned ImplicitSReg;
5215       if (!getImplicitSPRUseForDPRUse(TRI, MI, DSrc, SrcLane, ImplicitSReg))
5216         break;
5217
5218       for (unsigned i = MI.getDesc().getNumOperands(); i; --i)
5219         MI.removeOperand(i - 1);
5220
5221       if (DSrc == DDst) {
5222         // Destination can be:
5223         //     %DDst = VDUPLN32d %DDst, Lane, 14, %noreg (; implicits)
5224         MI.setDesc(get(ARM::VDUPLN32d));
5225         MIB.addReg(DDst, RegState::Define)
5226             .addReg(DDst, getUndefRegState(!MI.readsRegister(DDst, TRI)))
5227             .addImm(SrcLane)
5228             .add(predOps(ARMCC::AL));
5229
5230         // Neither the source nor the destination is naturally represented
5231         // any more, so add them in manually.
5232         MIB.addReg(DstReg, RegState::Implicit | RegState::Define);
5233         MIB.addReg(SrcReg, RegState::Implicit);
5234         if (ImplicitSReg != 0)
5235           MIB.addReg(ImplicitSReg, RegState::Implicit);
5236         break;
5237       }
5238
5239       // In general there's no single instruction that can perform an S <-> S
5240       // move in NEON space, but a pair of VEXT instructions *can* do the
5241       // job. It turns out that the VEXTs needed will only use DSrc once, with
5242       // the position based purely on the combination of lane-0 and lane-1
5243       // involved. For example
5244       //     vmov s0, s2 -> vext.32 d0, d0, d1, #1  vext.32 d0, d0, d0, #1
5245       //     vmov s1, s3 -> vext.32 d0, d1, d0, #1  vext.32 d0, d0, d0, #1
5246       //     vmov s0, s3 -> vext.32 d0, d0, d0, #1  vext.32 d0, d1, d0, #1
5247       //     vmov s1, s2 -> vext.32 d0, d0, d0, #1  vext.32 d0, d0, d1, #1
5248       //
5249       // Pattern of the MachineInstrs is:
5250       //     %DDst = VEXTd32 %DSrc1, %DSrc2, Lane, 14, %noreg (;implicits)
5251       MachineInstrBuilder NewMIB;
5252       NewMIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(ARM::VEXTd32),
5253                        DDst);
5254
5255       // On the first instruction, both DSrc and DDst may be undef if present.
5256       // Specifically when the original instruction didn't have them as an
5257       // <imp-use>.
5258       unsigned CurReg = SrcLane == 1 && DstLane == 1 ? DSrc : DDst;
5259       bool CurUndef = !MI.readsRegister(CurReg, TRI);
5260       NewMIB.addReg(CurReg, getUndefRegState(CurUndef));
5261
5262       CurReg = SrcLane == 0 && DstLane == 0 ? DSrc : DDst;
5263       CurUndef = !MI.readsRegister(CurReg, TRI);
5264       NewMIB.addReg(CurReg, getUndefRegState(CurUndef))
5265           .addImm(1)
5266           .add(predOps(ARMCC::AL));
5267
5268       if (SrcLane == DstLane)
5269         NewMIB.addReg(SrcReg, RegState::Implicit);
5270
5271       MI.setDesc(get(ARM::VEXTd32));
5272       MIB.addReg(DDst, RegState::Define);
5273
5274       // On the second instruction, DDst has definitely been defined above, so
5275       // it is not undef. DSrc, if present, can be undef as above.
5276       CurReg = SrcLane == 1 && DstLane == 0 ? DSrc : DDst;
5277       CurUndef = CurReg == DSrc && !MI.readsRegister(CurReg, TRI);
5278       MIB.addReg(CurReg, getUndefRegState(CurUndef));
5279
5280       CurReg = SrcLane == 0 && DstLane == 1 ? DSrc : DDst;
5281       CurUndef = CurReg == DSrc && !MI.readsRegister(CurReg, TRI);
5282       MIB.addReg(CurReg, getUndefRegState(CurUndef))
5283           .addImm(1)
5284           .add(predOps(ARMCC::AL));
5285
5286       if (SrcLane != DstLane)
5287         MIB.addReg(SrcReg, RegState::Implicit);
5288
5289       // As before, the original destination is no longer represented, so add
5290       // it implicitly.
5291       MIB.addReg(DstReg, RegState::Define | RegState::Implicit);
5292       if (ImplicitSReg != 0)
5293         MIB.addReg(ImplicitSReg, RegState::Implicit);
5294       break;
5295     }
5296   }
5297 }
5298
5299 //===----------------------------------------------------------------------===//
5300 // Partial register updates
5301 //===----------------------------------------------------------------------===//
5302 //
5303 // Swift renames NEON registers with 64-bit granularity. That means any
5304 // instruction writing an S-reg implicitly reads the containing D-reg. The
5305 // problem is mostly avoided by translating f32 operations to v2f32 operations
5306 // on D-registers, but f32 loads are still a problem.
5307 //
5308 // These instructions can load an f32 into a NEON register:
5309 //
5310 // VLDRS - Only writes S, partial D update.
5311 // VLD1LNd32 - Writes all D-regs, explicit partial D update, 2 uops.
5312 // VLD1DUPd32 - Writes all D-regs, no partial reg update, 2 uops.
5313 //
5314 // FCONSTD can be used as a dependency-breaking instruction.
5315 unsigned ARMBaseInstrInfo::getPartialRegUpdateClearance(
5316     const MachineInstr &MI, unsigned OpNum,
5317     const TargetRegisterInfo *TRI) const {
5318   auto PartialUpdateClearance = Subtarget.getPartialUpdateClearance();
5319   if (!PartialUpdateClearance)
5320     return 0;
5321
5322   assert(TRI && "Need TRI instance");
5323
5324   const MachineOperand &MO = MI.getOperand(OpNum);
5325   if (MO.readsReg())
5326     return 0;
5327   Register Reg = MO.getReg();
5328   int UseOp = -1;
5329
5330   switch (MI.getOpcode()) {
5331   // Normal instructions writing only an S-register.
5332   case ARM::VLDRS:
5333   case ARM::FCONSTS:
5334   case ARM::VMOVSR:
5335   case ARM::VMOVv8i8:
5336   case ARM::VMOVv4i16:
5337   case ARM::VMOVv2i32:
5338   case ARM::VMOVv2f32:
5339   case ARM::VMOVv1i64:
5340     UseOp = MI.findRegisterUseOperandIdx(Reg, false, TRI);
5341     break;
5342
5343   // Explicitly reads the dependency.
5344   case ARM::VLD1LNd32:
5345     UseOp = 3;
5346     break;
5347   default:
5348     return 0;
5349   }
5350
5351   // If this instruction actually reads a value from Reg, there is no unwanted
5352   // dependency.
5353   if (UseOp != -1 && MI.getOperand(UseOp).readsReg())
5354     return 0;
5355
5356   // We must be able to clobber the whole D-reg.
5357   if (Register::isVirtualRegister(Reg)) {
5358     // Virtual register must be a def undef foo:ssub_0 operand.
5359     if (!MO.getSubReg() || MI.readsVirtualRegister(Reg))
5360       return 0;
5361   } else if (ARM::SPRRegClass.contains(Reg)) {
5362     // Physical register: MI must define the full D-reg.
5363     unsigned DReg = TRI->getMatchingSuperReg(Reg, ARM::ssub_0,
5364                                              &ARM::DPRRegClass);
5365     if (!DReg || !MI.definesRegister(DReg, TRI))
5366       return 0;
5367   }
5368
5369   // MI has an unwanted D-register dependency.
5370   // Avoid defs in the previous N instructions.
5371   return PartialUpdateClearance;
5372 }
5373
5374 // Break a partial register dependency after getPartialRegUpdateClearance
5375 // returned non-zero.
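// For example (a sketch; exact registers depend on the allocation), a VLDRS
// that only writes S0 leaves a partial-update dependency on D0:
//     %S0 = VLDRS ...
// breakPartialRegDependency clears it by fully redefining D0 first:
//     %D0 = FCONSTD 96   ; 96 encodes 0.5, but the value is irrelevant
//     %S0 = VLDRS ...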
5376 void ARMBaseInstrInfo::breakPartialRegDependency( 5377 MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const { 5378 assert(OpNum < MI.getDesc().getNumDefs() && "OpNum is not a def"); 5379 assert(TRI && "Need TRI instance"); 5380 5381 const MachineOperand &MO = MI.getOperand(OpNum); 5382 Register Reg = MO.getReg(); 5383 assert(Register::isPhysicalRegister(Reg) && 5384 "Can't break virtual register dependencies."); 5385 unsigned DReg = Reg; 5386 5387 // If MI defines an S-reg, find the corresponding D super-register. 5388 if (ARM::SPRRegClass.contains(Reg)) { 5389 DReg = ARM::D0 + (Reg - ARM::S0) / 2; 5390 assert(TRI->isSuperRegister(Reg, DReg) && "Register enums broken"); 5391 } 5392 5393 assert(ARM::DPRRegClass.contains(DReg) && "Can only break D-reg deps"); 5394 assert(MI.definesRegister(DReg, TRI) && "MI doesn't clobber full D-reg"); 5395 5396 // FIXME: In some cases, VLDRS can be changed to a VLD1DUPd32 which defines 5397 // the full D-register by loading the same value to both lanes. The 5398 // instruction is micro-coded with 2 uops, so don't do this until we can 5399 // properly schedule micro-coded instructions. The dispatcher stalls cause 5400 // too big regressions. 5401 5402 // Insert the dependency-breaking FCONSTD before MI. 5403 // 96 is the encoding of 0.5, but the actual value doesn't matter here. 5404 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(ARM::FCONSTD), DReg) 5405 .addImm(96) 5406 .add(predOps(ARMCC::AL)); 5407 MI.addRegisterKilled(DReg, TRI, true); 5408 } 5409 5410 bool ARMBaseInstrInfo::hasNOP() const { 5411 return Subtarget.getFeatureBits()[ARM::HasV6KOps]; 5412 } 5413 5414 bool ARMBaseInstrInfo::isSwiftFastImmShift(const MachineInstr *MI) const { 5415 if (MI->getNumOperands() < 4) 5416 return true; 5417 unsigned ShOpVal = MI->getOperand(3).getImm(); 5418 unsigned ShImm = ARM_AM::getSORegOffset(ShOpVal); 5419 // Swift supports faster shifts for: lsl 2, lsl 1, and lsr 1. 5420 if ((ShImm == 1 && ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsr) || 5421 ((ShImm == 1 || ShImm == 2) && 5422 ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsl)) 5423 return true; 5424 5425 return false; 5426 } 5427 5428 bool ARMBaseInstrInfo::getRegSequenceLikeInputs( 5429 const MachineInstr &MI, unsigned DefIdx, 5430 SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const { 5431 assert(DefIdx < MI.getDesc().getNumDefs() && "Invalid definition index"); 5432 assert(MI.isRegSequenceLike() && "Invalid kind of instruction"); 5433 5434 switch (MI.getOpcode()) { 5435 case ARM::VMOVDRR: 5436 // dX = VMOVDRR rY, rZ 5437 // is the same as: 5438 // dX = REG_SEQUENCE rY, ssub_0, rZ, ssub_1 5439 // Populate the InputRegs accordingly. 
5440 // rY 5441 const MachineOperand *MOReg = &MI.getOperand(1); 5442 if (!MOReg->isUndef()) 5443 InputRegs.push_back(RegSubRegPairAndIdx(MOReg->getReg(), 5444 MOReg->getSubReg(), ARM::ssub_0)); 5445 // rZ 5446 MOReg = &MI.getOperand(2); 5447 if (!MOReg->isUndef()) 5448 InputRegs.push_back(RegSubRegPairAndIdx(MOReg->getReg(), 5449 MOReg->getSubReg(), ARM::ssub_1)); 5450 return true; 5451 } 5452 llvm_unreachable("Target dependent opcode missing"); 5453 } 5454 5455 bool ARMBaseInstrInfo::getExtractSubregLikeInputs( 5456 const MachineInstr &MI, unsigned DefIdx, 5457 RegSubRegPairAndIdx &InputReg) const { 5458 assert(DefIdx < MI.getDesc().getNumDefs() && "Invalid definition index"); 5459 assert(MI.isExtractSubregLike() && "Invalid kind of instruction"); 5460 5461 switch (MI.getOpcode()) { 5462 case ARM::VMOVRRD: 5463 // rX, rY = VMOVRRD dZ 5464 // is the same as: 5465 // rX = EXTRACT_SUBREG dZ, ssub_0 5466 // rY = EXTRACT_SUBREG dZ, ssub_1 5467 const MachineOperand &MOReg = MI.getOperand(2); 5468 if (MOReg.isUndef()) 5469 return false; 5470 InputReg.Reg = MOReg.getReg(); 5471 InputReg.SubReg = MOReg.getSubReg(); 5472 InputReg.SubIdx = DefIdx == 0 ? ARM::ssub_0 : ARM::ssub_1; 5473 return true; 5474 } 5475 llvm_unreachable("Target dependent opcode missing"); 5476 } 5477 5478 bool ARMBaseInstrInfo::getInsertSubregLikeInputs( 5479 const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, 5480 RegSubRegPairAndIdx &InsertedReg) const { 5481 assert(DefIdx < MI.getDesc().getNumDefs() && "Invalid definition index"); 5482 assert(MI.isInsertSubregLike() && "Invalid kind of instruction"); 5483 5484 switch (MI.getOpcode()) { 5485 case ARM::VSETLNi32: 5486 case ARM::MVE_VMOV_to_lane_32: 5487 // dX = VSETLNi32 dY, rZ, imm 5488 // qX = MVE_VMOV_to_lane_32 qY, rZ, imm 5489 const MachineOperand &MOBaseReg = MI.getOperand(1); 5490 const MachineOperand &MOInsertedReg = MI.getOperand(2); 5491 if (MOInsertedReg.isUndef()) 5492 return false; 5493 const MachineOperand &MOIndex = MI.getOperand(3); 5494 BaseReg.Reg = MOBaseReg.getReg(); 5495 BaseReg.SubReg = MOBaseReg.getSubReg(); 5496 5497 InsertedReg.Reg = MOInsertedReg.getReg(); 5498 InsertedReg.SubReg = MOInsertedReg.getSubReg(); 5499 InsertedReg.SubIdx = ARM::ssub_0 + MOIndex.getImm(); 5500 return true; 5501 } 5502 llvm_unreachable("Target dependent opcode missing"); 5503 } 5504 5505 std::pair<unsigned, unsigned> 5506 ARMBaseInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const { 5507 const unsigned Mask = ARMII::MO_OPTION_MASK; 5508 return std::make_pair(TF & Mask, TF & ~Mask); 5509 } 5510 5511 ArrayRef<std::pair<unsigned, const char *>> 5512 ARMBaseInstrInfo::getSerializableDirectMachineOperandTargetFlags() const { 5513 using namespace ARMII; 5514 5515 static const std::pair<unsigned, const char *> TargetFlags[] = { 5516 {MO_LO16, "arm-lo16"}, {MO_HI16, "arm-hi16"}}; 5517 return makeArrayRef(TargetFlags); 5518 } 5519 5520 ArrayRef<std::pair<unsigned, const char *>> 5521 ARMBaseInstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const { 5522 using namespace ARMII; 5523 5524 static const std::pair<unsigned, const char *> TargetFlags[] = { 5525 {MO_COFFSTUB, "arm-coffstub"}, 5526 {MO_GOT, "arm-got"}, 5527 {MO_SBREL, "arm-sbrel"}, 5528 {MO_DLLIMPORT, "arm-dllimport"}, 5529 {MO_SECREL, "arm-secrel"}, 5530 {MO_NONLAZY, "arm-nonlazy"}}; 5531 return makeArrayRef(TargetFlags); 5532 } 5533 5534 Optional<RegImmPair> ARMBaseInstrInfo::isAddImmediate(const MachineInstr &MI, 5535 Register Reg) const { 5536 int Sign = 1; 5537 unsigned Opcode = 
MI.getOpcode();
5538   int64_t Offset = 0;
5539
5540   // TODO: Handle cases where Reg is a super- or sub-register of the
5541   // destination register.
5542   const MachineOperand &Op0 = MI.getOperand(0);
5543   if (!Op0.isReg() || Reg != Op0.getReg())
5544     return None;
5545
5546   // We describe SUBri or ADDri instructions.
5547   if (Opcode == ARM::SUBri)
5548     Sign = -1;
5549   else if (Opcode != ARM::ADDri)
5550     return None;
5551
5552   // TODO: Third operand can be a global address (usually some string). Since
5553   // strings can be relocated, we cannot calculate their offsets for
5554   // now.
5555   if (!MI.getOperand(1).isReg() || !MI.getOperand(2).isImm())
5556     return None;
5557
5558   Offset = MI.getOperand(2).getImm() * Sign;
5559   return RegImmPair{MI.getOperand(1).getReg(), Offset};
5560 }
5561
5562 bool llvm::registerDefinedBetween(unsigned Reg,
5563                                   MachineBasicBlock::iterator From,
5564                                   MachineBasicBlock::iterator To,
5565                                   const TargetRegisterInfo *TRI) {
5566   for (auto I = From; I != To; ++I)
5567     if (I->modifiesRegister(Reg, TRI))
5568       return true;
5569   return false;
5570 }
5571
5572 MachineInstr *llvm::findCMPToFoldIntoCBZ(MachineInstr *Br,
5573                                          const TargetRegisterInfo *TRI) {
5574   // Search backwards to the instruction that defines CPSR. This may or may
5575   // not be a CMP; we check that after this loop. If we find another
5576   // instruction that reads CPSR, we return nullptr.
5577   MachineBasicBlock::iterator CmpMI = Br;
5578   while (CmpMI != Br->getParent()->begin()) {
5579     --CmpMI;
5580     if (CmpMI->modifiesRegister(ARM::CPSR, TRI))
5581       break;
5582     if (CmpMI->readsRegister(ARM::CPSR, TRI))
5583       break;
5584   }
5585
5586   // Check that this inst is a CMP r[0-7], #0 and that the register
5587   // is not redefined between the cmp and the br.
5588   if (CmpMI->getOpcode() != ARM::tCMPi8 && CmpMI->getOpcode() != ARM::t2CMPri)
5589     return nullptr;
5590   Register Reg = CmpMI->getOperand(0).getReg();
5591   Register PredReg;
5592   ARMCC::CondCodes Pred = getInstrPredicate(*CmpMI, PredReg);
5593   if (Pred != ARMCC::AL || CmpMI->getOperand(1).getImm() != 0)
5594     return nullptr;
5595   if (!isARMLowRegister(Reg))
5596     return nullptr;
5597   if (registerDefinedBetween(Reg, CmpMI->getNextNode(), Br, TRI))
5598     return nullptr;
5599
5600   return &*CmpMI;
5601 }
5602
5603 unsigned llvm::ConstantMaterializationCost(unsigned Val,
5604                                            const ARMSubtarget *Subtarget,
5605                                            bool ForCodesize) {
5606   if (Subtarget->isThumb()) {
5607     if (Val <= 255) // MOV
5608       return ForCodesize ? 2 : 1;
5609     if (Subtarget->hasV6T2Ops() && (Val <= 0xffff ||                // MOV
5610                                     ARM_AM::getT2SOImmVal(Val) != -1 || // MOVW
5611                                     ARM_AM::getT2SOImmVal(~Val) != -1)) // MVN
5612       return ForCodesize ? 4 : 1;
5613     if (Val <= 510) // MOV + ADDi8
5614       return ForCodesize ? 4 : 2;
5615     if (~Val <= 255) // MOV + MVN
5616       return ForCodesize ? 4 : 2;
5617     if (ARM_AM::isThumbImmShiftedVal(Val)) // MOV + LSL
5618       return ForCodesize ? 4 : 2;
5619   } else {
5620     if (ARM_AM::getSOImmVal(Val) != -1) // MOV
5621       return ForCodesize ? 4 : 1;
5622     if (ARM_AM::getSOImmVal(~Val) != -1) // MVN
5623       return ForCodesize ? 4 : 1;
5624     if (Subtarget->hasV6T2Ops() && Val <= 0xffff) // MOVW
5625       return ForCodesize ? 4 : 1;
5626     if (ARM_AM::isSOImmTwoPartVal(Val)) // two instrs
5627       return ForCodesize ? 8 : 2;
5628     if (ARM_AM::isSOImmTwoPartValNeg(Val)) // two instrs
5629       return ForCodesize ? 8 : 2;
5630   }
5631   if (Subtarget->useMovt()) // MOVW + MOVT
5632     return ForCodesize ? 8 : 2;
5633   return ForCodesize ?
8 : 3; // Literal pool load 5634 } 5635 5636 bool llvm::HasLowerConstantMaterializationCost(unsigned Val1, unsigned Val2, 5637 const ARMSubtarget *Subtarget, 5638 bool ForCodesize) { 5639 // Check with ForCodesize 5640 unsigned Cost1 = ConstantMaterializationCost(Val1, Subtarget, ForCodesize); 5641 unsigned Cost2 = ConstantMaterializationCost(Val2, Subtarget, ForCodesize); 5642 if (Cost1 < Cost2) 5643 return true; 5644 if (Cost1 > Cost2) 5645 return false; 5646 5647 // If they are equal, try with !ForCodesize 5648 return ConstantMaterializationCost(Val1, Subtarget, !ForCodesize) < 5649 ConstantMaterializationCost(Val2, Subtarget, !ForCodesize); 5650 } 5651 5652 /// Constants defining how certain sequences should be outlined. 5653 /// This encompasses how an outlined function should be called, and what kind of 5654 /// frame should be emitted for that outlined function. 5655 /// 5656 /// \p MachineOutlinerTailCall implies that the function is being created from 5657 /// a sequence of instructions ending in a return. 5658 /// 5659 /// That is, 5660 /// 5661 /// I1 OUTLINED_FUNCTION: 5662 /// I2 --> B OUTLINED_FUNCTION I1 5663 /// BX LR I2 5664 /// BX LR 5665 /// 5666 /// +-------------------------+--------+-----+ 5667 /// | | Thumb2 | ARM | 5668 /// +-------------------------+--------+-----+ 5669 /// | Call overhead in Bytes | 4 | 4 | 5670 /// | Frame overhead in Bytes | 0 | 0 | 5671 /// | Stack fixup required | No | No | 5672 /// +-------------------------+--------+-----+ 5673 /// 5674 /// \p MachineOutlinerThunk implies that the function is being created from 5675 /// a sequence of instructions ending in a call. The outlined function is 5676 /// called with a BL instruction, and the outlined function tail-calls the 5677 /// original call destination. 5678 /// 5679 /// That is, 5680 /// 5681 /// I1 OUTLINED_FUNCTION: 5682 /// I2 --> BL OUTLINED_FUNCTION I1 5683 /// BL f I2 5684 /// B f 5685 /// 5686 /// +-------------------------+--------+-----+ 5687 /// | | Thumb2 | ARM | 5688 /// +-------------------------+--------+-----+ 5689 /// | Call overhead in Bytes | 4 | 4 | 5690 /// | Frame overhead in Bytes | 0 | 0 | 5691 /// | Stack fixup required | No | No | 5692 /// +-------------------------+--------+-----+ 5693 /// 5694 /// \p MachineOutlinerNoLRSave implies that the function should be called using 5695 /// a BL instruction, but doesn't require LR to be saved and restored. This 5696 /// happens when LR is known to be dead. 5697 /// 5698 /// That is, 5699 /// 5700 /// I1 OUTLINED_FUNCTION: 5701 /// I2 --> BL OUTLINED_FUNCTION I1 5702 /// I3 I2 5703 /// I3 5704 /// BX LR 5705 /// 5706 /// +-------------------------+--------+-----+ 5707 /// | | Thumb2 | ARM | 5708 /// +-------------------------+--------+-----+ 5709 /// | Call overhead in Bytes | 4 | 4 | 5710 /// | Frame overhead in Bytes | 2 | 4 | 5711 /// | Stack fixup required | No | No | 5712 /// +-------------------------+--------+-----+ 5713 /// 5714 /// \p MachineOutlinerRegSave implies that the function should be called with a 5715 /// save and restore of LR to an available register. This allows us to avoid 5716 /// stack fixups. Note that this outlining variant is compatible with the 5717 /// NoLRSave case. 
5718 /// 5719 /// That is, 5720 /// 5721 /// I1 Save LR OUTLINED_FUNCTION: 5722 /// I2 --> BL OUTLINED_FUNCTION I1 5723 /// I3 Restore LR I2 5724 /// I3 5725 /// BX LR 5726 /// 5727 /// +-------------------------+--------+-----+ 5728 /// | | Thumb2 | ARM | 5729 /// +-------------------------+--------+-----+ 5730 /// | Call overhead in Bytes | 8 | 12 | 5731 /// | Frame overhead in Bytes | 2 | 4 | 5732 /// | Stack fixup required | No | No | 5733 /// +-------------------------+--------+-----+ 5734 /// 5735 /// \p MachineOutlinerDefault implies that the function should be called with 5736 /// a save and restore of LR to the stack. 5737 /// 5738 /// That is, 5739 /// 5740 /// I1 Save LR OUTLINED_FUNCTION: 5741 /// I2 --> BL OUTLINED_FUNCTION I1 5742 /// I3 Restore LR I2 5743 /// I3 5744 /// BX LR 5745 /// 5746 /// +-------------------------+--------+-----+ 5747 /// | | Thumb2 | ARM | 5748 /// +-------------------------+--------+-----+ 5749 /// | Call overhead in Bytes | 8 | 12 | 5750 /// | Frame overhead in Bytes | 2 | 4 | 5751 /// | Stack fixup required | Yes | Yes | 5752 /// +-------------------------+--------+-----+ 5753 5754 enum MachineOutlinerClass { 5755 MachineOutlinerTailCall, 5756 MachineOutlinerThunk, 5757 MachineOutlinerNoLRSave, 5758 MachineOutlinerRegSave, 5759 MachineOutlinerDefault 5760 }; 5761 5762 enum MachineOutlinerMBBFlags { 5763 LRUnavailableSomewhere = 0x2, 5764 HasCalls = 0x4, 5765 UnsafeRegsDead = 0x8 5766 }; 5767 5768 struct OutlinerCosts { 5769 int CallTailCall; 5770 int FrameTailCall; 5771 int CallThunk; 5772 int FrameThunk; 5773 int CallNoLRSave; 5774 int FrameNoLRSave; 5775 int CallRegSave; 5776 int FrameRegSave; 5777 int CallDefault; 5778 int FrameDefault; 5779 int SaveRestoreLROnStack; 5780 5781 OutlinerCosts(const ARMSubtarget &target) 5782 : CallTailCall(target.isThumb() ? 4 : 4), 5783 FrameTailCall(target.isThumb() ? 0 : 0), 5784 CallThunk(target.isThumb() ? 4 : 4), 5785 FrameThunk(target.isThumb() ? 0 : 0), 5786 CallNoLRSave(target.isThumb() ? 4 : 4), 5787 FrameNoLRSave(target.isThumb() ? 2 : 4), 5788 CallRegSave(target.isThumb() ? 8 : 12), 5789 FrameRegSave(target.isThumb() ? 2 : 4), 5790 CallDefault(target.isThumb() ? 8 : 12), 5791 FrameDefault(target.isThumb() ? 2 : 4), 5792 SaveRestoreLROnStack(target.isThumb() ? 8 : 8) {} 5793 }; 5794 5795 Register 5796 ARMBaseInstrInfo::findRegisterToSaveLRTo(outliner::Candidate &C) const { 5797 MachineFunction *MF = C.getMF(); 5798 const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo(); 5799 const ARMBaseRegisterInfo *ARI = 5800 static_cast<const ARMBaseRegisterInfo *>(&TRI); 5801 5802 BitVector regsReserved = ARI->getReservedRegs(*MF); 5803 // Check if there is an available register across the sequence that we can 5804 // use. 5805 for (Register Reg : ARM::rGPRRegClass) { 5806 if (!(Reg < regsReserved.size() && regsReserved.test(Reg)) && 5807 Reg != ARM::LR && // LR is not reserved, but don't use it. 5808 Reg != ARM::R12 && // R12 is not guaranteed to be preserved. 5809 C.isAvailableAcrossAndOutOfSeq(Reg, TRI) && 5810 C.isAvailableInsideSeq(Reg, TRI)) 5811 return Reg; 5812 } 5813 return Register(); 5814 } 5815 5816 // Compute liveness of LR at the point after the interval [I, E), which 5817 // denotes a *backward* iteration through instructions. Used only for return 5818 // basic blocks, which do not end with a tail call. 
5819 static bool isLRAvailable(const TargetRegisterInfo &TRI,
5820                           MachineBasicBlock::reverse_iterator I,
5821                           MachineBasicBlock::reverse_iterator E) {
5822   // At the end of the function, LR is dead.
5823   bool Live = false;
5824   for (; I != E; ++I) {
5825     const MachineInstr &MI = *I;
5826
5827     // Check defs of LR.
5828     if (MI.modifiesRegister(ARM::LR, &TRI))
5829       Live = false;
5830
5831     // Check uses of LR.
5832     unsigned Opcode = MI.getOpcode();
5833     if (Opcode == ARM::BX_RET || Opcode == ARM::MOVPCLR ||
5834         Opcode == ARM::SUBS_PC_LR || Opcode == ARM::tBX_RET ||
5835         Opcode == ARM::tBXNS_RET) {
5836       // These instructions use LR, but it's not an (explicit or implicit)
5837       // operand.
5838       Live = true;
5839       continue;
5840     }
5841     if (MI.readsRegister(ARM::LR, &TRI))
5842       Live = true;
5843   }
5844   return !Live;
5845 }
5846
5847 outliner::OutlinedFunction ARMBaseInstrInfo::getOutliningCandidateInfo(
5848     std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
5849   outliner::Candidate &FirstCand = RepeatedSequenceLocs[0];
5850   unsigned SequenceSize =
5851       std::accumulate(FirstCand.front(), std::next(FirstCand.back()), 0,
5852                       [this](unsigned Sum, const MachineInstr &MI) {
5853                         return Sum + getInstSizeInBytes(MI);
5854                       });
5855
5856   // Properties about candidate MBBs that hold for all of them.
5857   unsigned FlagsSetInAll = 0xF;
5858
5859   // Compute liveness information for each candidate, and set FlagsSetInAll.
5860   const TargetRegisterInfo &TRI = getRegisterInfo();
5861   std::for_each(
5862       RepeatedSequenceLocs.begin(), RepeatedSequenceLocs.end(),
5863       [&FlagsSetInAll](outliner::Candidate &C) { FlagsSetInAll &= C.Flags; });
5864
5865   // According to the ARM Procedure Call Standard, the following are
5866   // undefined on entry/exit from a function call:
5867   //
5868   // * Register R12(IP),
5869   // * Condition codes (and thus the CPSR register)
5870   //
5871   // Since we control the instructions which are part of the outlined regions
5872   // we don't need to be fully compliant with the AAPCS, but we have to
5873   // guarantee that if a veneer is inserted at link time the code is still
5874   // correct. Because of this, we can't outline any sequence of instructions
5875   // where one of these registers is live into/across it. Thus, we need to
5876   // delete those candidates.
5877   auto CantGuaranteeValueAcrossCall = [&TRI](outliner::Candidate &C) {
5878     // If the unsafe registers in this block are all dead, then we don't need
5879     // to compute liveness here.
5880     if (C.Flags & UnsafeRegsDead)
5881       return false;
5882     return C.isAnyUnavailableAcrossOrOutOfSeq({ARM::R12, ARM::CPSR}, TRI);
5883   };
5884
5885   // Are there any candidates where those registers are live?
5886   if (!(FlagsSetInAll & UnsafeRegsDead)) {
5887     // Erase every candidate that violates the restrictions above. (It could be
5888     // true that we have viable candidates, so it's not worth bailing out in
5889     // the case that, say, 1 out of 20 candidates violates the restrictions.)
5890     llvm::erase_if(RepeatedSequenceLocs, CantGuaranteeValueAcrossCall);
5891
5892     // If the sequence doesn't have enough candidates left, then we're done.
5893     if (RepeatedSequenceLocs.size() < 2)
5894       return outliner::OutlinedFunction();
5895   }
5896
5897   // We expect the majority of the outlining candidates to be in consensus with
5898   // regard to return address signing and authentication, and branch target
5899   // enforcement; in other words, partitioning according to all four possible
5900   // combinations of PAC-RET and BTI is going to yield one big subset and
5901   // three small (likely empty) subsets. That allows us to cull incompatible
5902   // candidates separately for PAC-RET and BTI.
5903
5904   // Partition the candidates into two sets: one with BTI enabled and one with
5905   // BTI disabled. Remove the candidates from the smaller set. If both sets
5906   // are the same size, prefer the non-BTI ones for outlining, since they have
5907   // lower overhead.
5908   auto NoBTI =
5909       llvm::partition(RepeatedSequenceLocs, [](const outliner::Candidate &C) {
5910         const ARMFunctionInfo &AFI = *C.getMF()->getInfo<ARMFunctionInfo>();
5911         return AFI.branchTargetEnforcement();
5912       });
5913   if (std::distance(RepeatedSequenceLocs.begin(), NoBTI) >
5914       std::distance(NoBTI, RepeatedSequenceLocs.end()))
5915     RepeatedSequenceLocs.erase(NoBTI, RepeatedSequenceLocs.end());
5916   else
5917     RepeatedSequenceLocs.erase(RepeatedSequenceLocs.begin(), NoBTI);
5918
5919   if (RepeatedSequenceLocs.size() < 2)
5920     return outliner::OutlinedFunction();
5921
5922   // Likewise, partition the candidates according to PAC-RET enablement.
5923   auto NoPAC =
5924       llvm::partition(RepeatedSequenceLocs, [](const outliner::Candidate &C) {
5925         const ARMFunctionInfo &AFI = *C.getMF()->getInfo<ARMFunctionInfo>();
5926         // If the function happens to not spill the LR, do not disqualify it
5927         // from the outlining.
5928         return AFI.shouldSignReturnAddress(true);
5929       });
5930   if (std::distance(RepeatedSequenceLocs.begin(), NoPAC) >
5931       std::distance(NoPAC, RepeatedSequenceLocs.end()))
5932     RepeatedSequenceLocs.erase(NoPAC, RepeatedSequenceLocs.end());
5933   else
5934     RepeatedSequenceLocs.erase(RepeatedSequenceLocs.begin(), NoPAC);
5935
5936   if (RepeatedSequenceLocs.size() < 2)
5937     return outliner::OutlinedFunction();
5938
5939   // At this point, we have only "safe" candidates to outline. Figure out
5940   // frame + call instruction information.
5941
5942   unsigned LastInstrOpcode = RepeatedSequenceLocs[0].back()->getOpcode();
5943
5944   // Helper lambda which sets call information for every candidate.
5945   auto SetCandidateCallInfo =
5946       [&RepeatedSequenceLocs](unsigned CallID, unsigned NumBytesForCall) {
5947         for (outliner::Candidate &C : RepeatedSequenceLocs)
5948           C.setCallInfo(CallID, NumBytesForCall);
5949       };
5950
5951   OutlinerCosts Costs(Subtarget);
5952
5953   const auto &SomeMFI =
5954       *RepeatedSequenceLocs.front().getMF()->getInfo<ARMFunctionInfo>();
5955   // Adjust costs to account for the BTI instructions.
5956   if (SomeMFI.branchTargetEnforcement()) {
5957     Costs.FrameDefault += 4;
5958     Costs.FrameNoLRSave += 4;
5959     Costs.FrameRegSave += 4;
5960     Costs.FrameTailCall += 4;
5961     Costs.FrameThunk += 4;
5962   }
5963
5964   // Adjust costs to account for sign and authentication instructions.
5965   if (SomeMFI.shouldSignReturnAddress(true)) {
5966     Costs.CallDefault += 8;          // +PAC instr, +AUT instr
5967     Costs.SaveRestoreLROnStack += 8; // +PAC instr, +AUT instr
5968   }
5969
5970   unsigned FrameID = MachineOutlinerDefault;
5971   unsigned NumBytesToCreateFrame = Costs.FrameDefault;
5972
5973   // If the last instruction in any candidate is a terminator, then we should
5974   // tail call all of the candidates.
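  // E.g. (a sketch) a candidate of the form
  //     I1; I2; BX_RET
  // becomes
  //     B OUTLINED_FUNCTION
  // at every call site, with the return folded into the outlined body.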
5975 if (RepeatedSequenceLocs[0].back()->isTerminator()) { 5976 FrameID = MachineOutlinerTailCall; 5977 NumBytesToCreateFrame = Costs.FrameTailCall; 5978 SetCandidateCallInfo(MachineOutlinerTailCall, Costs.CallTailCall); 5979 } else if (LastInstrOpcode == ARM::BL || LastInstrOpcode == ARM::BLX || 5980 LastInstrOpcode == ARM::BLX_noip || LastInstrOpcode == ARM::tBL || 5981 LastInstrOpcode == ARM::tBLXr || 5982 LastInstrOpcode == ARM::tBLXr_noip || 5983 LastInstrOpcode == ARM::tBLXi) { 5984 FrameID = MachineOutlinerThunk; 5985 NumBytesToCreateFrame = Costs.FrameThunk; 5986 SetCandidateCallInfo(MachineOutlinerThunk, Costs.CallThunk); 5987 } else { 5988 // We need to decide how to emit calls + frames. We can always emit the same 5989 // frame if we don't need to save to the stack. If we have to save to the 5990 // stack, then we need a different frame. 5991 unsigned NumBytesNoStackCalls = 0; 5992 std::vector<outliner::Candidate> CandidatesWithoutStackFixups; 5993 5994 for (outliner::Candidate &C : RepeatedSequenceLocs) { 5995 // LR liveness is overestimated in return blocks, unless they end with a 5996 // tail call. 5997 const auto Last = C.getMBB()->rbegin(); 5998 const bool LRIsAvailable = 5999 C.getMBB()->isReturnBlock() && !Last->isCall() 6000 ? isLRAvailable(TRI, Last, 6001 (MachineBasicBlock::reverse_iterator)C.front()) 6002 : C.isAvailableAcrossAndOutOfSeq(ARM::LR, TRI); 6003 if (LRIsAvailable) { 6004 FrameID = MachineOutlinerNoLRSave; 6005 NumBytesNoStackCalls += Costs.CallNoLRSave; 6006 C.setCallInfo(MachineOutlinerNoLRSave, Costs.CallNoLRSave); 6007 CandidatesWithoutStackFixups.push_back(C); 6008 } 6009 6010 // Is an unused register available? If so, we won't modify the stack, so 6011 // we can outline with the same frame type as those that don't save LR. 6012 else if (findRegisterToSaveLRTo(C)) { 6013 FrameID = MachineOutlinerRegSave; 6014 NumBytesNoStackCalls += Costs.CallRegSave; 6015 C.setCallInfo(MachineOutlinerRegSave, Costs.CallRegSave); 6016 CandidatesWithoutStackFixups.push_back(C); 6017 } 6018 6019 // Is SP used in the sequence at all? If not, we don't have to modify 6020 // the stack, so we are guaranteed to get the same frame. 6021 else if (C.isAvailableInsideSeq(ARM::SP, TRI)) { 6022 NumBytesNoStackCalls += Costs.CallDefault; 6023 C.setCallInfo(MachineOutlinerDefault, Costs.CallDefault); 6024 CandidatesWithoutStackFixups.push_back(C); 6025 } 6026 6027 // If we outline this, we need to modify the stack. Pretend we don't 6028 // outline this by saving all of its bytes. 6029 else 6030 NumBytesNoStackCalls += SequenceSize; 6031 } 6032 6033 // If there are no places where we have to save LR, then note that we don't 6034 // have to update the stack. Otherwise, give every candidate the default 6035 // call type 6036 if (NumBytesNoStackCalls <= 6037 RepeatedSequenceLocs.size() * Costs.CallDefault) { 6038 RepeatedSequenceLocs = CandidatesWithoutStackFixups; 6039 FrameID = MachineOutlinerNoLRSave; 6040 } else 6041 SetCandidateCallInfo(MachineOutlinerDefault, Costs.CallDefault); 6042 } 6043 6044 // Does every candidate's MBB contain a call? If so, then we might have a 6045 // call in the range. 6046 if (FlagsSetInAll & MachineOutlinerMBBFlags::HasCalls) { 6047 // check if the range contains a call. These require a save + restore of 6048 // the link register. 6049 if (std::any_of(FirstCand.front(), FirstCand.back(), 6050 [](const MachineInstr &MI) { return MI.isCall(); })) 6051 NumBytesToCreateFrame += Costs.SaveRestoreLROnStack; 6052 6053 // Handle the last instruction separately. 
If it is a tail call, then the
6054     // last instruction is a call, so we don't want to save + restore in this
6055     // case. However, it could be possible that the last instruction is a
6056     // call without it being valid to tail call this sequence. We should
6057     // consider this as well.
6058     else if (FrameID != MachineOutlinerThunk &&
6059              FrameID != MachineOutlinerTailCall && FirstCand.back()->isCall())
6060       NumBytesToCreateFrame += Costs.SaveRestoreLROnStack;
6061   }
6062
6063   return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
6064                                     NumBytesToCreateFrame, FrameID);
6065 }
6066
6067 bool ARMBaseInstrInfo::checkAndUpdateStackOffset(MachineInstr *MI,
6068                                                  int64_t Fixup,
6069                                                  bool Updt) const {
6070   int SPIdx = MI->findRegisterUseOperandIdx(ARM::SP);
6071   unsigned AddrMode = (MI->getDesc().TSFlags & ARMII::AddrModeMask);
6072   if (SPIdx < 0)
6073     // No SP operand
6074     return true;
6075   else if (SPIdx != 1 && (AddrMode != ARMII::AddrModeT2_i8s4 || SPIdx != 2))
6076     // If SP is not the base register we can't do much
6077     return false;
6078
6079   // Stack might be involved but addressing mode doesn't handle any offset.
6080   // Note: AddrModeT1_[1|2|4] don't operate on SP
6081   if (AddrMode == ARMII::AddrMode1 ||       // Arithmetic instructions
6082       AddrMode == ARMII::AddrMode4 ||       // Load/Store Multiple
6083       AddrMode == ARMII::AddrMode6 ||       // Neon Load/Store Multiple
6084       AddrMode == ARMII::AddrModeT2_so ||   // SP can't be used as base register
6085       AddrMode == ARMII::AddrModeT2_pc ||   // PCrel access
6086       AddrMode == ARMII::AddrMode2 ||       // Used by PRE and POST indexed LD/ST
6087       AddrMode == ARMII::AddrModeT2_i7 ||   // v8.1-M MVE
6088       AddrMode == ARMII::AddrModeT2_i7s2 || // v8.1-M MVE
6089       AddrMode == ARMII::AddrModeT2_i7s4 || // v8.1-M sys regs VLDR/VSTR
6090       AddrMode == ARMII::AddrModeNone ||
6091       AddrMode == ARMII::AddrModeT2_i8 ||   // Pre/Post inc instructions
6092       AddrMode == ARMII::AddrModeT2_i8neg)  // Always negative imm
6093     return false;
6094
6095   unsigned NumOps = MI->getDesc().getNumOperands();
6096   unsigned ImmIdx = NumOps - 3;
6097
6098   const MachineOperand &Offset = MI->getOperand(ImmIdx);
6099   assert(Offset.isImm() && "Is not an immediate");
6100   int64_t OffVal = Offset.getImm();
6101
6102   if (OffVal < 0)
6103     // Don't override data if they are below SP.
6104     return false;
6105
6106   unsigned NumBits = 0;
6107   unsigned Scale = 1;
6108
6109   switch (AddrMode) {
6110   case ARMII::AddrMode3:
6111     if (ARM_AM::getAM3Op(OffVal) == ARM_AM::sub)
6112       return false;
6113     OffVal = ARM_AM::getAM3Offset(OffVal);
6114     NumBits = 8;
6115     break;
6116   case ARMII::AddrMode5:
6117     if (ARM_AM::getAM5Op(OffVal) == ARM_AM::sub)
6118       return false;
6119     OffVal = ARM_AM::getAM5Offset(OffVal);
6120     NumBits = 8;
6121     Scale = 4;
6122     break;
6123   case ARMII::AddrMode5FP16:
6124     if (ARM_AM::getAM5FP16Op(OffVal) == ARM_AM::sub)
6125       return false;
6126     OffVal = ARM_AM::getAM5FP16Offset(OffVal);
6127     NumBits = 8;
6128     Scale = 2;
6129     break;
6130   case ARMII::AddrModeT2_i8pos:
6131     NumBits = 8;
6132     break;
6133   case ARMII::AddrModeT2_i8s4:
6134     // FIXME: Values are already scaled in this addressing mode.
6135 assert((Fixup & 3) == 0 && "Can't encode this offset!"); 6136 NumBits = 10; 6137 break; 6138 case ARMII::AddrModeT2_ldrex: 6139 NumBits = 8; 6140 Scale = 4; 6141 break; 6142 case ARMII::AddrModeT2_i12: 6143 case ARMII::AddrMode_i12: 6144 NumBits = 12; 6145 break; 6146 case ARMII::AddrModeT1_s: // SP-relative LD/ST 6147 NumBits = 8; 6148 Scale = 4; 6149 break; 6150 default: 6151 llvm_unreachable("Unsupported addressing mode!"); 6152 } 6153 // Make sure the offset is encodable for instructions that scale the 6154 // immediate. 6155 assert(((OffVal * Scale + Fixup) & (Scale - 1)) == 0 && 6156 "Can't encode this offset!"); 6157 OffVal += Fixup / Scale; 6158 6159 unsigned Mask = (1 << NumBits) - 1; 6160 6161 if (OffVal <= Mask) { 6162 if (Updt) 6163 MI->getOperand(ImmIdx).setImm(OffVal); 6164 return true; 6165 } 6166 6167 return false; 6168 } 6169 6170 void ARMBaseInstrInfo::mergeOutliningCandidateAttributes( 6171 Function &F, std::vector<outliner::Candidate> &Candidates) const { 6172 outliner::Candidate &C = Candidates.front(); 6173 // branch-target-enforcement is guaranteed to be consistent between all 6174 // candidates, so we only need to look at one. 6175 const Function &CFn = C.getMF()->getFunction(); 6176 if (CFn.hasFnAttribute("branch-target-enforcement")) 6177 F.addFnAttr(CFn.getFnAttribute("branch-target-enforcement")); 6178 6179 ARMGenInstrInfo::mergeOutliningCandidateAttributes(F, Candidates); 6180 } 6181 6182 bool ARMBaseInstrInfo::isFunctionSafeToOutlineFrom( 6183 MachineFunction &MF, bool OutlineFromLinkOnceODRs) const { 6184 const Function &F = MF.getFunction(); 6185 6186 // Can F be deduplicated by the linker? If it can, don't outline from it. 6187 if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage()) 6188 return false; 6189 6190 // Don't outline from functions with section markings; the program could 6191 // expect that all the code is in the named section. 6192 // FIXME: Allow outlining from multiple functions with the same section 6193 // marking. 6194 if (F.hasSection()) 6195 return false; 6196 6197 // FIXME: Thumb1 outlining is not handled 6198 if (MF.getInfo<ARMFunctionInfo>()->isThumb1OnlyFunction()) 6199 return false; 6200 6201 // It's safe to outline from MF. 6202 return true; 6203 } 6204 6205 bool ARMBaseInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, 6206 unsigned &Flags) const { 6207 // Check if LR is available through all of the MBB. If it's not, then set 6208 // a flag. 6209 assert(MBB.getParent()->getRegInfo().tracksLiveness() && 6210 "Suitable Machine Function for outlining must track liveness"); 6211 6212 LiveRegUnits LRU(getRegisterInfo()); 6213 6214 std::for_each(MBB.rbegin(), MBB.rend(), 6215 [&LRU](MachineInstr &MI) { LRU.accumulate(MI); }); 6216 6217 // Check if each of the unsafe registers are available... 6218 bool R12AvailableInBlock = LRU.available(ARM::R12); 6219 bool CPSRAvailableInBlock = LRU.available(ARM::CPSR); 6220 6221 // If all of these are dead (and not live out), we know we don't have to check 6222 // them later. 6223 if (R12AvailableInBlock && CPSRAvailableInBlock) 6224 Flags |= MachineOutlinerMBBFlags::UnsafeRegsDead; 6225 6226 // Now, add the live outs to the set. 6227 LRU.addLiveOuts(MBB); 6228 6229 // If any of these registers is available in the MBB, but also a live out of 6230 // the block, then we know outlining is unsafe. 
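  // (A register that no instruction in the block touches, but that is still
  // live out, must be live *through* the block; a call to an outlined
  // function, or a linker veneer on the way there, could clobber it, since
  // e.g. R12(IP) is fair game for veneers under the AAPCS.)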
6231   if (R12AvailableInBlock && !LRU.available(ARM::R12))
6232     return false;
6233   if (CPSRAvailableInBlock && !LRU.available(ARM::CPSR))
6234     return false;
6235
6236   // Check if there's a call inside this MachineBasicBlock. If there is, then
6237   // set a flag.
6238   if (any_of(MBB, [](MachineInstr &MI) { return MI.isCall(); }))
6239     Flags |= MachineOutlinerMBBFlags::HasCalls;
6240
6241   // LR liveness is overestimated in return blocks.
6242
6243   bool LRIsAvailable =
6244       MBB.isReturnBlock() && !MBB.back().isCall()
6245           ? isLRAvailable(getRegisterInfo(), MBB.rbegin(), MBB.rend())
6246           : LRU.available(ARM::LR);
6247   if (!LRIsAvailable)
6248     Flags |= MachineOutlinerMBBFlags::LRUnavailableSomewhere;
6249
6250   return true;
6251 }
6252
6253 outliner::InstrType
6254 ARMBaseInstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT,
6255                                    unsigned Flags) const {
6256   MachineInstr &MI = *MIT;
6257   const TargetRegisterInfo *TRI = &getRegisterInfo();
6258
6259   // Be conservative with inline ASM
6260   if (MI.isInlineAsm())
6261     return outliner::InstrType::Illegal;
6262
6263   // Don't allow debug values to impact outlining type.
6264   if (MI.isDebugInstr() || MI.isIndirectDebugValue())
6265     return outliner::InstrType::Invisible;
6266
6267   // At this point, KILL or IMPLICIT_DEF instructions don't really tell us much
6268   // so we can go ahead and skip over them.
6269   if (MI.isKill() || MI.isImplicitDef())
6270     return outliner::InstrType::Invisible;
6271
6272   // PIC instructions contain labels; outlining them would break offset
6273   // computation.
6274   unsigned Opc = MI.getOpcode();
6275   if (Opc == ARM::tPICADD || Opc == ARM::PICADD || Opc == ARM::PICSTR ||
6276       Opc == ARM::PICSTRB || Opc == ARM::PICSTRH || Opc == ARM::PICLDR ||
6277       Opc == ARM::PICLDRB || Opc == ARM::PICLDRH || Opc == ARM::PICLDRSB ||
6278       Opc == ARM::PICLDRSH || Opc == ARM::t2LDRpci_pic ||
6279       Opc == ARM::t2MOVi16_ga_pcrel || Opc == ARM::t2MOVTi16_ga_pcrel ||
6280       Opc == ARM::t2MOV_ga_pcrel)
6281     return outliner::InstrType::Illegal;
6282
6283   // Be conservative with ARMv8.1 MVE instructions.
6284   if (Opc == ARM::t2BF_LabelPseudo || Opc == ARM::t2DoLoopStart ||
6285       Opc == ARM::t2DoLoopStartTP || Opc == ARM::t2WhileLoopStart ||
6286       Opc == ARM::t2WhileLoopStartLR || Opc == ARM::t2WhileLoopStartTP ||
6287       Opc == ARM::t2LoopDec || Opc == ARM::t2LoopEnd ||
6288       Opc == ARM::t2LoopEndDec)
6289     return outliner::InstrType::Illegal;
6290
6291   const MCInstrDesc &MCID = MI.getDesc();
6292   uint64_t MIFlags = MCID.TSFlags;
6293   if ((MIFlags & ARMII::DomainMask) == ARMII::DomainMVE)
6294     return outliner::InstrType::Illegal;
6295
6296   // Is this a terminator for a basic block?
6297   if (MI.isTerminator()) {
6298     // Don't outline if the branch is not unconditional.
6299     if (isPredicated(MI))
6300       return outliner::InstrType::Illegal;
6301
6302     // Is this the end of a function?
6303     if (MI.getParent()->succ_empty())
6304       return outliner::InstrType::Legal;
6305
6306     // It's not, so don't outline it.
6307     return outliner::InstrType::Illegal;
6308   }
6309
6310   // Make sure none of the operands are un-outlinable.
6311   for (const MachineOperand &MOP : MI.operands()) {
6312     if (MOP.isCPI() || MOP.isJTI() || MOP.isCFIIndex() || MOP.isFI() ||
6313         MOP.isTargetIndex())
6314       return outliner::InstrType::Illegal;
6315   }
6316
6317   // Don't outline if the link register or program counter value is used.
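  // (Both are position- or return-address-sensitive: e.g. an
  //     add r0, pc, #8
  // computes a label address that changes once the instruction moves into an
  // outlined function, and inside that function LR holds the outliner's
  // return address, not the caller's.)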
6318   if (MI.readsRegister(ARM::LR, TRI) || MI.readsRegister(ARM::PC, TRI))
6319     return outliner::InstrType::Illegal;
6320
6321   if (MI.isCall()) {
6322     // Get the function associated with the call. Look at each operand and find
6323     // the one that represents the callee and get its name.
6324     const Function *Callee = nullptr;
6325     for (const MachineOperand &MOP : MI.operands()) {
6326       if (MOP.isGlobal()) {
6327         Callee = dyn_cast<Function>(MOP.getGlobal());
6328         break;
6329       }
6330     }
6331
6332     // Don't outline calls to "mcount"-like functions; in particular, Linux
6333     // kernel function tracing relies on them.
6334     if (Callee &&
6335         (Callee->getName() == "\01__gnu_mcount_nc" ||
6336          Callee->getName() == "\01mcount" || Callee->getName() == "__mcount"))
6337       return outliner::InstrType::Illegal;
6338
6339     // If we don't know anything about the callee, assume it depends on the
6340     // stack layout of the caller. In that case, it's only legal to outline
6341     // as a tail-call. Explicitly list the call instructions we know about so
6342     // we don't get unexpected results with call pseudo-instructions.
6343     auto UnknownCallOutlineType = outliner::InstrType::Illegal;
6344     if (Opc == ARM::BL || Opc == ARM::tBL || Opc == ARM::BLX ||
6345         Opc == ARM::BLX_noip || Opc == ARM::tBLXr || Opc == ARM::tBLXr_noip ||
6346         Opc == ARM::tBLXi)
6347       UnknownCallOutlineType = outliner::InstrType::LegalTerminator;
6348
6349     if (!Callee)
6350       return UnknownCallOutlineType;
6351
6352     // We have a function we have information about. Check if it's something we
6353     // can safely outline.
6354     MachineFunction *MF = MI.getParent()->getParent();
6355     MachineFunction *CalleeMF = MF->getMMI().getMachineFunction(*Callee);
6356
6357     // We don't know what's going on with the callee at all. Don't touch it.
6358     if (!CalleeMF)
6359       return UnknownCallOutlineType;
6360
6361     // Check if we know anything about the callee saves on the function. If we
6362     // don't, then don't touch it, since that implies that we haven't computed
6363     // anything about its stack frame yet.
6364     MachineFrameInfo &MFI = CalleeMF->getFrameInfo();
6365     if (!MFI.isCalleeSavedInfoValid() || MFI.getStackSize() > 0 ||
6366         MFI.getNumObjects() > 0)
6367       return UnknownCallOutlineType;
6368
6369     // At this point, we can say that CalleeMF ought to not pass anything on the
6370     // stack. Therefore, we can outline it.
6371     return outliner::InstrType::Legal;
6372   }
6373
6374   // Since calls are handled, don't touch LR or PC
6375   if (MI.modifiesRegister(ARM::LR, TRI) || MI.modifiesRegister(ARM::PC, TRI))
6376     return outliner::InstrType::Illegal;
6377
6378   // Does this use the stack?
6379   if (MI.modifiesRegister(ARM::SP, TRI) || MI.readsRegister(ARM::SP, TRI)) {
6380     // True if there is no chance that any outlined candidate from this range
6381     // could require stack fixups. That is, both
6382     // * LR is available in the range (No save/restore around call)
6383     // * The range doesn't include calls (No save/restore in outlined frame)
6384     // are true.
6385     // These conditions also ensure correctness of the return address
6386     // authentication - we insert sign and authentication instructions only if
6387     // we save/restore LR on stack, but then this condition ensures that the
6388     // outlined range does not modify the SP, therefore the SP value used for
6389     // signing is the same as the one used for authentication.
6390     // FIXME: This is very restrictive; the flags check the whole block,
6391     // not just the bit we will try to outline.
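    // For example (a sketch, assuming an 8-byte stack alignment): once the
    // outlined frame saves LR, an SP-relative access such as
    //     %r0 = t2LDRi12 %sp, 4
    // must be rewritten to
    //     %r0 = t2LDRi12 %sp, 12
    // to reach the same caller slot; checkAndUpdateStackOffset below verifies
    // (and, during frame construction, applies) exactly this adjustment.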
6392     bool MightNeedStackFixUp =
6393         (Flags & (MachineOutlinerMBBFlags::LRUnavailableSomewhere |
6394                   MachineOutlinerMBBFlags::HasCalls));
6395
6396     if (!MightNeedStackFixUp)
6397       return outliner::InstrType::Legal;
6398
6399     // Any modification of SP will break our code to save/restore LR.
6400     // FIXME: We could handle some instructions which add a constant offset to
6401     // SP, with a bit more work.
6402     if (MI.modifiesRegister(ARM::SP, TRI))
6403       return outliner::InstrType::Illegal;
6404
6405     // At this point, we have a stack instruction that we might need to fix
6406     // up. We'll handle it if it's a load or store.
6407     if (checkAndUpdateStackOffset(&MI, Subtarget.getStackAlignment().value(),
6408                                   false))
6409       return outliner::InstrType::Legal;
6410
6411     // We can't fix it up, so don't outline it.
6412     return outliner::InstrType::Illegal;
6413   }
6414
6415   // Be conservative with IT blocks.
6416   if (MI.readsRegister(ARM::ITSTATE, TRI) ||
6417       MI.modifiesRegister(ARM::ITSTATE, TRI))
6418     return outliner::InstrType::Illegal;
6419
6420   // Don't outline positions.
6421   if (MI.isPosition())
6422     return outliner::InstrType::Illegal;
6423
6424   return outliner::InstrType::Legal;
6425 }
6426
6427 void ARMBaseInstrInfo::fixupPostOutline(MachineBasicBlock &MBB) const {
6428   for (MachineInstr &MI : MBB) {
6429     checkAndUpdateStackOffset(&MI, Subtarget.getStackAlignment().value(), true);
6430   }
6431 }
6432
6433 void ARMBaseInstrInfo::saveLROnStack(MachineBasicBlock &MBB,
6434                                      MachineBasicBlock::iterator It, bool CFI,
6435                                      bool Auth) const {
6436   int Align = std::max(Subtarget.getStackAlignment().value(), uint64_t(8));
6437   assert(Align >= 8 && Align <= 256);
6438   if (Auth) {
6439     assert(Subtarget.isThumb2());
6440     // Compute PAC in R12. Outlining ensures R12 is dead across the outlined
6441     // sequence.
6442     BuildMI(MBB, It, DebugLoc(), get(ARM::t2PAC))
6443         .setMIFlags(MachineInstr::FrameSetup);
6444     BuildMI(MBB, It, DebugLoc(), get(ARM::t2STRD_PRE), ARM::SP)
6445         .addReg(ARM::R12, RegState::Kill)
6446         .addReg(ARM::LR, RegState::Kill)
6447         .addReg(ARM::SP)
6448         .addImm(-Align)
6449         .add(predOps(ARMCC::AL))
6450         .setMIFlags(MachineInstr::FrameSetup);
6451   } else {
6452     unsigned Opc = Subtarget.isThumb() ? ARM::t2STR_PRE : ARM::STR_PRE_IMM;
6453     BuildMI(MBB, It, DebugLoc(), get(Opc), ARM::SP)
6454         .addReg(ARM::LR, RegState::Kill)
6455         .addReg(ARM::SP)
6456         .addImm(-Align)
6457         .add(predOps(ARMCC::AL))
6458         .setMIFlags(MachineInstr::FrameSetup);
6459   }
6460
6461   if (!CFI)
6462     return;
6463
6464   MachineFunction &MF = *MBB.getParent();
6465
6466   // Add a CFI, saying CFA is offset by Align bytes from SP.
6467   int64_t StackPosEntry =
6468       MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(nullptr, Align));
6469   BuildMI(MBB, It, DebugLoc(), get(ARM::CFI_INSTRUCTION))
6470       .addCFIIndex(StackPosEntry)
6471       .setMIFlags(MachineInstr::FrameSetup);
6472
6473   // Add a CFI saying that the LR that we want to find is now higher than
6474   // before.
6475   int LROffset = Auth ? Align - 4 : Align;
6476   const MCRegisterInfo *MRI = Subtarget.getRegisterInfo();
6477   unsigned DwarfLR = MRI->getDwarfRegNum(ARM::LR, true);
6478   int64_t LRPosEntry = MF.addFrameInst(
6479       MCCFIInstruction::createOffset(nullptr, DwarfLR, -LROffset));
6480   BuildMI(MBB, It, DebugLoc(), get(ARM::CFI_INSTRUCTION))
6481       .addCFIIndex(LRPosEntry)
6482       .setMIFlags(MachineInstr::FrameSetup);
6483   if (Auth) {
6484     // Add a CFI for the location of the return address PAC.
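    // (With the t2STRD_PRE above, the PAC word is stored at the lowest
    // address of the new frame, i.e. at CFA - Align, with LR one word above
    // it at CFA - (Align - 4); that is where the -Align and -(Align - 4)
    // offsets in these CFI entries come from.)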
6485 unsigned DwarfRAC = MRI->getDwarfRegNum(ARM::RA_AUTH_CODE, true); 6486 int64_t RACPosEntry = MF.addFrameInst( 6487 MCCFIInstruction::createOffset(nullptr, DwarfRAC, -Align)); 6488 BuildMI(MBB, It, DebugLoc(), get(ARM::CFI_INSTRUCTION)) 6489 .addCFIIndex(RACPosEntry) 6490 .setMIFlags(MachineInstr::FrameSetup); 6491 } 6492 } 6493 6494 void ARMBaseInstrInfo::emitCFIForLRSaveToReg(MachineBasicBlock &MBB, 6495 MachineBasicBlock::iterator It, 6496 Register Reg) const { 6497 MachineFunction &MF = *MBB.getParent(); 6498 const MCRegisterInfo *MRI = Subtarget.getRegisterInfo(); 6499 unsigned DwarfLR = MRI->getDwarfRegNum(ARM::LR, true); 6500 unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true); 6501 6502 int64_t LRPosEntry = MF.addFrameInst( 6503 MCCFIInstruction::createRegister(nullptr, DwarfLR, DwarfReg)); 6504 BuildMI(MBB, It, DebugLoc(), get(ARM::CFI_INSTRUCTION)) 6505 .addCFIIndex(LRPosEntry) 6506 .setMIFlags(MachineInstr::FrameSetup); 6507 } 6508 6509 void ARMBaseInstrInfo::restoreLRFromStack(MachineBasicBlock &MBB, 6510 MachineBasicBlock::iterator It, 6511 bool CFI, bool Auth) const { 6512 int Align = Subtarget.getStackAlignment().value(); 6513 if (Auth) { 6514 assert(Subtarget.isThumb2()); 6515 // Restore return address PAC and LR. 6516 BuildMI(MBB, It, DebugLoc(), get(ARM::t2LDRD_POST)) 6517 .addReg(ARM::R12, RegState::Define) 6518 .addReg(ARM::LR, RegState::Define) 6519 .addReg(ARM::SP, RegState::Define) 6520 .addReg(ARM::SP) 6521 .addImm(Align) 6522 .add(predOps(ARMCC::AL)) 6523 .setMIFlags(MachineInstr::FrameDestroy); 6524 // LR authentication is after the CFI instructions, below. 6525 } else { 6526 unsigned Opc = Subtarget.isThumb() ? ARM::t2LDR_POST : ARM::LDR_POST_IMM; 6527 MachineInstrBuilder MIB = BuildMI(MBB, It, DebugLoc(), get(Opc), ARM::LR) 6528 .addReg(ARM::SP, RegState::Define) 6529 .addReg(ARM::SP); 6530 if (!Subtarget.isThumb()) 6531 MIB.addReg(0); 6532 MIB.addImm(Subtarget.getStackAlignment().value()) 6533 .add(predOps(ARMCC::AL)) 6534 .setMIFlags(MachineInstr::FrameDestroy); 6535 } 6536 6537 if (CFI) { 6538 // Now stack has moved back up... 6539 MachineFunction &MF = *MBB.getParent(); 6540 const MCRegisterInfo *MRI = Subtarget.getRegisterInfo(); 6541 unsigned DwarfLR = MRI->getDwarfRegNum(ARM::LR, true); 6542 int64_t StackPosEntry = 6543 MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(nullptr, 0)); 6544 BuildMI(MBB, It, DebugLoc(), get(ARM::CFI_INSTRUCTION)) 6545 .addCFIIndex(StackPosEntry) 6546 .setMIFlags(MachineInstr::FrameDestroy); 6547 6548 // ... and we have restored LR. 
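    // In the emitted assembly this pair of directives looks roughly like
    // (a sketch; exact register naming depends on the assembler):
    //     .cfi_def_cfa_offset 0
    //     .cfi_restore lr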

void ARMBaseInstrInfo::emitCFIForLRRestoreFromReg(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator It) const {
  MachineFunction &MF = *MBB.getParent();
  const MCRegisterInfo *MRI = Subtarget.getRegisterInfo();
  unsigned DwarfLR = MRI->getDwarfRegNum(ARM::LR, true);

  int64_t LRPosEntry =
      MF.addFrameInst(MCCFIInstruction::createRestore(nullptr, DwarfLR));
  BuildMI(MBB, It, DebugLoc(), get(ARM::CFI_INSTRUCTION))
      .addCFIIndex(LRPosEntry)
      .setMIFlags(MachineInstr::FrameDestroy);
}

void ARMBaseInstrInfo::buildOutlinedFrame(
    MachineBasicBlock &MBB, MachineFunction &MF,
    const outliner::OutlinedFunction &OF) const {
  // For thunk outlining, rewrite the last instruction from a call to a
  // tail-call.
  if (OF.FrameConstructionID == MachineOutlinerThunk) {
    MachineInstr *Call = &*--MBB.instr_end();
    bool isThumb = Subtarget.isThumb();
    unsigned FuncOp = isThumb ? 2 : 0;
    unsigned Opc = Call->getOperand(FuncOp).isReg()
                       ? isThumb ? ARM::tTAILJMPr : ARM::TAILJMPr
                       : isThumb ? Subtarget.isTargetMachO() ? ARM::tTAILJMPd
                                                             : ARM::tTAILJMPdND
                                 : ARM::TAILJMPd;
    MachineInstrBuilder MIB = BuildMI(MBB, MBB.end(), DebugLoc(), get(Opc))
                                  .add(Call->getOperand(FuncOp));
    if (isThumb && !Call->getOperand(FuncOp).isReg())
      MIB.add(predOps(ARMCC::AL));
    Call->eraseFromParent();
  }

  // Is there a call in the outlined range?
  auto IsNonTailCall = [](MachineInstr &MI) {
    return MI.isCall() && !MI.isReturn();
  };
  if (llvm::any_of(MBB.instrs(), IsNonTailCall)) {
    MachineBasicBlock::iterator It = MBB.begin();
    MachineBasicBlock::iterator Et = MBB.end();

    if (OF.FrameConstructionID == MachineOutlinerTailCall ||
        OF.FrameConstructionID == MachineOutlinerThunk)
      Et = std::prev(MBB.end());

    // We have to save and restore LR, so we need to add it to the liveins if
    // it is not already part of the set. This is sufficient since outlined
    // functions only have one block.
    if (!MBB.isLiveIn(ARM::LR))
      MBB.addLiveIn(ARM::LR);

    // Insert a save before the outlined region.
    bool Auth = OF.Candidates.front()
                    .getMF()
                    ->getInfo<ARMFunctionInfo>()
                    ->shouldSignReturnAddress(true);
    saveLROnStack(MBB, It, true, Auth);

    // Fix up the instructions in the range, since we're going to modify the
    // stack.
    assert(OF.FrameConstructionID != MachineOutlinerDefault &&
           "Can only fix up stack references once");
    fixupPostOutline(MBB);

    // Insert a restore of LR before the terminator of the function.
    restoreLRFromStack(MBB, Et, true, Auth);
  }

  // If this is a tail-call outlined function, then there's already a return.
  if (OF.FrameConstructionID == MachineOutlinerTailCall ||
      OF.FrameConstructionID == MachineOutlinerThunk)
    return;

  // Here we have to insert the return ourselves. Get the correct opcode from
  // the current feature set.
  BuildMI(MBB, MBB.end(), DebugLoc(), get(Subtarget.getReturnOpcode()))
      .add(predOps(ARMCC::AL));

  // Did we have to modify the stack by saving the link register?
  if (OF.FrameConstructionID != MachineOutlinerDefault &&
      OF.Candidates[0].CallConstructionID != MachineOutlinerDefault)
    return;

  // We modified the stack.
  // Walk over the basic block and fix up all the stack accesses.
  fixupPostOutline(MBB);
}
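
// How insertOutlinedCall (below) materializes the call depends on the
// candidate's CallConstructionID. Roughly (illustrative Thumb2 syntax;
// register choice and function name are hypothetical):
//   MachineOutlinerTailCall: b       OUTLINED_FUNCTION_N
//   MachineOutlinerNoLRSave: bl      OUTLINED_FUNCTION_N
//   MachineOutlinerThunk:    bl      OUTLINED_FUNCTION_N
//   MachineOutlinerRegSave:  mov r5, lr; bl OUTLINED_FUNCTION_N; mov lr, r5
//   MachineOutlinerDefault:  save LR on the stack, bl, then restore LR
//                            (see saveLROnStack/restoreLRFromStack above).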

MachineBasicBlock::iterator ARMBaseInstrInfo::insertOutlinedCall(
    Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
    MachineFunction &MF, outliner::Candidate &C) const {
  MachineInstrBuilder MIB;
  MachineBasicBlock::iterator CallPt;
  unsigned Opc;
  bool isThumb = Subtarget.isThumb();

  // Are we tail calling?
  if (C.CallConstructionID == MachineOutlinerTailCall) {
    // If yes, then we can just branch to the label.
    Opc = isThumb
              ? Subtarget.isTargetMachO() ? ARM::tTAILJMPd : ARM::tTAILJMPdND
              : ARM::TAILJMPd;
    MIB = BuildMI(MF, DebugLoc(), get(Opc))
              .addGlobalAddress(M.getNamedValue(MF.getName()));
    if (isThumb)
      MIB.add(predOps(ARMCC::AL));
    It = MBB.insert(It, MIB);
    return It;
  }

  // Create the call instruction.
  Opc = isThumb ? ARM::tBL : ARM::BL;
  MachineInstrBuilder CallMIB = BuildMI(MF, DebugLoc(), get(Opc));
  if (isThumb)
    CallMIB.add(predOps(ARMCC::AL));
  CallMIB.addGlobalAddress(M.getNamedValue(MF.getName()));

  if (C.CallConstructionID == MachineOutlinerNoLRSave ||
      C.CallConstructionID == MachineOutlinerThunk) {
    // No LR save is needed, so just insert the call.
    It = MBB.insert(It, CallMIB);
    return It;
  }

  const ARMFunctionInfo &AFI = *C.getMF()->getInfo<ARMFunctionInfo>();
  // Can we save to a register?
  if (C.CallConstructionID == MachineOutlinerRegSave) {
    unsigned Reg = findRegisterToSaveLRTo(C);
    assert(Reg != 0 && "No callee-saved register available?");

    // Save and restore LR from that register.
    copyPhysReg(MBB, It, DebugLoc(), Reg, ARM::LR, true);
    if (!AFI.isLRSpilled())
      emitCFIForLRSaveToReg(MBB, It, Reg);
    CallPt = MBB.insert(It, CallMIB);
    copyPhysReg(MBB, It, DebugLoc(), ARM::LR, Reg, true);
    if (!AFI.isLRSpilled())
      emitCFIForLRRestoreFromReg(MBB, It);
    It--;
    return CallPt;
  }
  // We have the default case. Save and restore from SP.
  if (!MBB.isLiveIn(ARM::LR))
    MBB.addLiveIn(ARM::LR);
  bool Auth = !AFI.isLRSpilled() && AFI.shouldSignReturnAddress(true);
  saveLROnStack(MBB, It, !AFI.isLRSpilled(), Auth);
  CallPt = MBB.insert(It, CallMIB);
  restoreLRFromStack(MBB, It, !AFI.isLRSpilled(), Auth);
  It--;
  return CallPt;
}
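
// Putting the default case together, a call site ends up looking something
// like this (illustrative; assumes an 8-byte stack alignment, no
// return-address signing, and a hypothetical outlined function name):
//   str lr, [sp, #-8]!            @ saveLROnStack
//   .cfi_def_cfa_offset 8
//   .cfi_offset lr, -8
//   bl  OUTLINED_FUNCTION_0
//   ldr lr, [sp], #8              @ restoreLRFromStack
//   .cfi_def_cfa_offset 0
//   .cfi_restore lr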

bool ARMBaseInstrInfo::shouldOutlineFromFunctionByDefault(
    MachineFunction &MF) const {
  return Subtarget.isMClass() && MF.getFunction().hasMinSize();
}

bool ARMBaseInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
                                                         AAResults *AA) const {
  // Try hard to rematerialize any VCTPs because if we spill P0, it will block
  // the tail predication conversion. This means that the element count
  // register has to be live for longer, but that has to be better than
  // spill/restore and VPT predication.
  return isVCTP(&MI) && !isPredicated(MI);
}

unsigned llvm::getBLXOpcode(const MachineFunction &MF) {
  return (MF.getSubtarget<ARMSubtarget>().hardenSlsBlr()) ? ARM::BLX_noip
                                                          : ARM::BLX;
}

unsigned llvm::gettBLXrOpcode(const MachineFunction &MF) {
  return (MF.getSubtarget<ARMSubtarget>().hardenSlsBlr()) ? ARM::tBLXr_noip
                                                          : ARM::tBLXr;
}

unsigned llvm::getBLXpredOpcode(const MachineFunction &MF) {
  return (MF.getSubtarget<ARMSubtarget>().hardenSlsBlr()) ? ARM::BLX_pred_noip
                                                          : ARM::BLX_pred;
}

namespace {
class ARMPipelinerLoopInfo : public TargetInstrInfo::PipelinerLoopInfo {
  MachineInstr *EndLoop, *LoopCount;
  MachineFunction *MF;
  const TargetInstrInfo *TII;

  // Meaning of EndLoop and LoopCount for each supported loop type:
  // t2Bcc:
  //   EndLoop = branch at end of original BB that will become a kernel
  //   LoopCount = CC setter live into branch
  // t2LoopEnd:
  //   EndLoop = branch at end of original BB
  //   LoopCount = t2LoopDec
public:
  ARMPipelinerLoopInfo(MachineInstr *EndLoop, MachineInstr *LoopCount)
      : EndLoop(EndLoop), LoopCount(LoopCount),
        MF(EndLoop->getParent()->getParent()),
        TII(MF->getSubtarget().getInstrInfo()) {}

  bool shouldIgnoreForPipelining(const MachineInstr *MI) const override {
    // Only ignore the terminator.
    return MI == EndLoop || MI == LoopCount;
  }

  Optional<bool> createTripCountGreaterCondition(
      int TC, MachineBasicBlock &MBB,
      SmallVectorImpl<MachineOperand> &Cond) override {

    if (isCondBranchOpcode(EndLoop->getOpcode())) {
      Cond.push_back(EndLoop->getOperand(1));
      Cond.push_back(EndLoop->getOperand(2));
      if (EndLoop->getOperand(0).getMBB() == EndLoop->getParent()) {
        TII->reverseBranchCondition(Cond);
      }
      return {};
    } else if (EndLoop->getOpcode() == ARM::t2LoopEnd) {
      // The general case just lets the unrolled t2LoopDec do the subtraction
      // and therefore just needs to check if zero has been reached.
      MachineInstr *LoopDec = nullptr;
      for (auto &I : MBB.instrs())
        if (I.getOpcode() == ARM::t2LoopDec)
          LoopDec = &I;
      assert(LoopDec && "Unable to find copied LoopDec");
      // Check if we're done with the loop.
      BuildMI(&MBB, LoopDec->getDebugLoc(), TII->get(ARM::t2CMPri))
          .addReg(LoopDec->getOperand(0).getReg())
          .addImm(0)
          .addImm(ARMCC::AL)
          .addReg(ARM::NoRegister);
      Cond.push_back(MachineOperand::CreateImm(ARMCC::EQ));
      Cond.push_back(MachineOperand::CreateReg(ARM::CPSR, false));
      return {};
    } else
      llvm_unreachable("Unknown EndLoop");
  }

  void setPreheader(MachineBasicBlock *NewPreheader) override {}

  void adjustTripCount(int TripCountAdjust) override {}

  void disposed() override {}
};
} // namespace
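
// analyzeLoopForPipelining (below) recognizes two loop-closing idioms. The
// t2LoopEnd form is documented in the comment inside the function; a
// t2Bcc-style loop looks roughly like this (illustrative sketch; the subs is
// the CPSR "CC setter" that the code searches for):
//   loop:
//     ...
//     subs r0, r0, #1
//     bne  loop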

std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
ARMBaseInstrInfo::analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const {
  MachineBasicBlock::iterator I = LoopBB->getFirstTerminator();
  MachineBasicBlock *Preheader = *LoopBB->pred_begin();
  if (Preheader == LoopBB)
    Preheader = *std::next(LoopBB->pred_begin());

  if (I != LoopBB->end() && I->getOpcode() == ARM::t2Bcc) {
    // If the branch is a Bcc, then the CPSR should be set somewhere within the
    // block. We need to determine the reaching definition of CPSR so that
    // it can be marked as non-pipelineable, allowing the pipeliner to force
    // it into stage 0 or give up if it cannot or will not do so.
    MachineInstr *CCSetter = nullptr;
    for (auto &L : LoopBB->instrs()) {
      if (L.isCall())
        return nullptr;
      if (isCPSRDefined(L))
        CCSetter = &L;
    }
    if (CCSetter)
      return std::make_unique<ARMPipelinerLoopInfo>(&*I, CCSetter);
    else
      return nullptr; // Unable to find the CC setter, so unable to guarantee
                      // that the pipeline will work.
  }

  // Recognize:
  //   preheader:
  //     %1 = t2DoLoopStart %0
  //   loop:
  //     %2 = phi %1, <not loop>, %..., %loop
  //     %3 = t2LoopDec %2, <imm>
  //     t2LoopEnd %3, %loop

  if (I != LoopBB->end() && I->getOpcode() == ARM::t2LoopEnd) {
    for (auto &L : LoopBB->instrs())
      if (L.isCall())
        return nullptr;
      else if (isVCTP(&L))
        return nullptr;
    Register LoopDecResult = I->getOperand(0).getReg();
    MachineRegisterInfo &MRI = LoopBB->getParent()->getRegInfo();
    MachineInstr *LoopDec = MRI.getUniqueVRegDef(LoopDecResult);
    if (!LoopDec || LoopDec->getOpcode() != ARM::t2LoopDec)
      return nullptr;
    MachineInstr *LoopStart = nullptr;
    for (auto &J : Preheader->instrs())
      if (J.getOpcode() == ARM::t2DoLoopStart)
        LoopStart = &J;
    if (!LoopStart)
      return nullptr;
    return std::make_unique<ARMPipelinerLoopInfo>(&*I, LoopDec);
  }
  return nullptr;
}