//===-- ARMTargetMachine.cpp - Define TargetMachine for ARM ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM target machine subclasses (ARM/Thumb, little and
// big endian), target ABI / data-layout computation, and the per-function
// subtarget lookup used by the code generator.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMCallLowering.h"
#include "ARMInstructionSelector.h"
#include "ARMLegalizerInfo.h"
#include "ARMRegisterBankInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "ARMTargetTransformInfo.h"
#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/GISelAccessor.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"
#include <cassert>
#include <memory>
#include <string>

using namespace llvm;

// Debug/testing knob: suppress the Cortex-A15 S->D register access
// optimization pass (see addPreRegAlloc below).
static cl::opt<bool>
DisableA15SDOptimization("disable-a15-sd-optimization", cl::Hidden,
                   cl::desc("Inhibit optimization of S->D register accesses on A15"),
                   cl::init(false));

// Debug/testing knob: controls the post-atomic-expansion SimplifyCFG run
// added in addIRPasses. On by default.
static cl::opt<bool>
EnableAtomicTidy("arm-atomic-cfg-tidy", cl::Hidden,
                 cl::desc("Run SimplifyCFG after expanding atomic operations"
                          " to make use of cmpxchg flow-based information"),
                 cl::init(true));

// Debug/testing knob: controls both the pre- and post-register-allocation
// ARM load/store optimization passes. On by default.
static cl::opt<bool>
EnableARMLoadStoreOpt("arm-load-store-opt", cl::Hidden,
                      cl::desc("Enable ARM load/store optimization pass"),
                      cl::init(true));

// FIXME: Unify control over GlobalMerge.
// Tri-state (unset/true/false); the default behavior when unset depends on
// the optimization level — see addPreISel.
static cl::opt<cl::boolOrDefault>
EnableGlobalMerge("arm-global-merge", cl::Hidden,
                  cl::desc("Enable the global merge pass"));

/// Register all four ARM target machine variants and initialize the passes
/// this backend contributes to the global pass registry. Called by the
/// target registration machinery at startup.
extern "C" void LLVMInitializeARMTarget() {
  // Register the target.
  RegisterTargetMachine<ARMLETargetMachine> X(getTheARMLETarget());
  RegisterTargetMachine<ARMBETargetMachine> Y(getTheARMBETarget());
  RegisterTargetMachine<ThumbLETargetMachine> A(getTheThumbLETarget());
  RegisterTargetMachine<ThumbBETargetMachine> B(getTheThumbBETarget());

  PassRegistry &Registry = *PassRegistry::getPassRegistry();
  initializeGlobalISel(Registry);
  initializeARMLoadStoreOptPass(Registry);
  initializeARMPreAllocLoadStoreOptPass(Registry);
}

/// Pick the lowering-object-file implementation matching the triple's
/// object format: Mach-O, COFF (Windows), or the ARM ELF variant otherwise.
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return llvm::make_unique<TargetLoweringObjectFileMachO>();
  if (TT.isOSWindows())
    return llvm::make_unique<TargetLoweringObjectFileCOFF>();
  return llvm::make_unique<ARMElfTargetObjectFile>();
}

/// Determine the target ABI. An explicit -target-abi option (via MCOptions)
/// wins outright; otherwise the ABI is derived from the triple's OS,
/// environment, and the architecture profile parsed from the CPU name.
static ARMBaseTargetMachine::ARMABI
computeTargetABI(const Triple &TT, StringRef CPU,
                 const TargetOptions &Options) {
  if (Options.MCOptions.getABIName() == "aapcs16")
    return ARMBaseTargetMachine::ARM_ABI_AAPCS16;
  else if (Options.MCOptions.getABIName().startswith("aapcs"))
    return ARMBaseTargetMachine::ARM_ABI_AAPCS;
  else if (Options.MCOptions.getABIName().startswith("apcs"))
    return ARMBaseTargetMachine::ARM_ABI_APCS;

  assert(Options.MCOptions.getABIName().empty() &&
         "Unknown target-abi option!");

  ARMBaseTargetMachine::ARMABI TargetABI =
      ARMBaseTargetMachine::ARM_ABI_UNKNOWN;

  unsigned ArchKind = ARM::parseCPUArch(CPU);
  StringRef ArchName = ARM::getArchName(ArchKind);
  // FIXME: This is duplicated code from the front end and should be unified.
  if (TT.isOSBinFormatMachO()) {
    if (TT.getEnvironment() == Triple::EABI ||
        (TT.getOS() == Triple::UnknownOS && TT.isOSBinFormatMachO()) ||
        ARM::parseArchProfile(ArchName) == ARM::PK_M) {
      TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
    } else if (TT.isWatchABI()) {
      TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS16;
    } else {
      TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
    }
  } else if (TT.isOSWindows()) {
    // FIXME: this is invalid for WindowsCE
    TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
  } else {
    // Select the default based on the platform.
    switch (TT.getEnvironment()) {
    case Triple::Android:
    case Triple::GNUEABI:
    case Triple::GNUEABIHF:
    case Triple::MuslEABI:
    case Triple::MuslEABIHF:
    case Triple::EABIHF:
    case Triple::EABI:
      TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
      break;
    case Triple::GNU:
      TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
      break;
    default:
      if (TT.isOSNetBSD())
        TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
      else
        TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
      break;
    }
  }

  return TargetABI;
}

/// Build the LLVM data-layout string for this target. Endianness, pointer
/// size, and the ABI-dependent alignments of i64/f64/vectors/aggregates and
/// the stack are encoded piece by piece; component order follows the
/// DataLayout string convention.
static std::string computeDataLayout(const Triple &TT, StringRef CPU,
                                     const TargetOptions &Options,
                                     bool isLittle) {
  auto ABI = computeTargetABI(TT, CPU, Options);
  std::string Ret;

  if (isLittle)
    // Little endian.
    Ret += "e";
  else
    // Big endian.
    Ret += "E";

  Ret += DataLayout::getManglingComponent(TT);

  // Pointers are 32 bits and aligned to 32 bits.
  Ret += "-p:32:32";

  // ABIs other than APCS have 64 bit integers with natural alignment.
  if (ABI != ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-i64:64";

  // We have 64 bits floats. The APCS ABI requires them to be aligned to 32
  // bits, others to 64 bits. We always try to align to 64 bits.
  if (ABI == ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-f64:32:64";

  // We have 128 and 64 bit vectors. The APCS ABI aligns them to 32 bits, others
  // to 64. We always ty to give them natural alignment.
  if (ABI == ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-v64:32:64-v128:32:128";
  else if (ABI != ARMBaseTargetMachine::ARM_ABI_AAPCS16)
    Ret += "-v128:64:128";

  // Try to align aggregates to 32 bits (the default is 64 bits, which has no
  // particular hardware support on 32-bit ARM).
  Ret += "-a:0:32";

  // Integer registers are 32 bits.
  Ret += "-n32";

  // The stack is 128 bit aligned on NaCl, 64 bit aligned on AAPCS and 32 bit
  // aligned everywhere else.
  if (TT.isOSNaCl() || ABI == ARMBaseTargetMachine::ARM_ABI_AAPCS16)
    Ret += "-S128";
  else if (ABI == ARMBaseTargetMachine::ARM_ABI_AAPCS)
    Ret += "-S64";
  else
    Ret += "-S32";

  return Ret;
}

/// Normalize the user-requested relocation model: supply a triple-dependent
/// default when none was given, sanity-check ROPI/RWPI (ELF-only), and demote
/// DynamicNoPIC to Static on non-Darwin targets.
static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           Optional<Reloc::Model> RM) {
  if (!RM.hasValue())
    // Default relocation model on Darwin is PIC.
    return TT.isOSBinFormatMachO() ? Reloc::PIC_ : Reloc::Static;

  if (*RM == Reloc::ROPI || *RM == Reloc::RWPI || *RM == Reloc::ROPI_RWPI)
    assert(TT.isOSBinFormatELF() &&
           "ROPI/RWPI currently only supported for ELF");

  // DynamicNoPIC is only used on darwin.
  if (*RM == Reloc::DynamicNoPIC && !TT.isOSDarwin())
    return Reloc::Static;

  return *RM;
}

/// Create an ARM architecture model.
///
/// Computes data layout / ABI / relocation model before delegating to the
/// LLVMTargetMachine base, then applies triple-appropriate defaults for the
/// float ABI and EABI version when the caller left them unset.
ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Optional<Reloc::Model> RM,
                                           CodeModel::Model CM,
                                           CodeGenOpt::Level OL, bool isLittle)
    : LLVMTargetMachine(T, computeDataLayout(TT, CPU, Options, isLittle), TT,
                        CPU, FS, Options, getEffectiveRelocModel(TT, RM), CM,
                        OL),
      TargetABI(computeTargetABI(TT, CPU, Options)),
      TLOF(createTLOF(getTargetTriple())),
      Subtarget(TT, CPU, FS, *this, isLittle), isLittle(isLittle) {

  // Default to triple-appropriate float ABI
  if (Options.FloatABIType == FloatABI::Default)
    this->Options.FloatABIType =
        Subtarget.isTargetHardFloat() ? FloatABI::Hard : FloatABI::Soft;

  // Default to triple-appropriate EABI
  if (Options.EABIVersion == EABI::Default ||
      Options.EABIVersion == EABI::Unknown) {
    // musl is compatible with glibc with regard to EABI version
    if (Subtarget.isTargetGNUAEABI() || Subtarget.isTargetMuslAEABI())
      this->Options.EABIVersion = EABI::GNU;
    else
      this->Options.EABIVersion = EABI::EABI5;
  }
}

ARMBaseTargetMachine::~ARMBaseTargetMachine() = default;

#ifdef LLVM_BUILD_GLOBAL_ISEL
namespace {

/// Bundles the four GlobalISel components (call lowering, instruction
/// selector, legalizer, register-bank info) behind the GISelAccessor
/// interface so a subtarget can expose them when GlobalISel is built in.
struct ARMGISelActualAccessor : public GISelAccessor {
  std::unique_ptr<CallLowering> CallLoweringInfo;
  std::unique_ptr<InstructionSelector> InstSelector;
  std::unique_ptr<LegalizerInfo> Legalizer;
  std::unique_ptr<RegisterBankInfo> RegBankInfo;

  const CallLowering *getCallLowering() const override {
    return CallLoweringInfo.get();
  }

  const InstructionSelector *getInstructionSelector() const override {
    return InstSelector.get();
  }

  const LegalizerInfo *getLegalizerInfo() const override {
    return Legalizer.get();
  }

  const RegisterBankInfo *getRegBankInfo() const override {
    return RegBankInfo.get();
  }
};

} // end anonymous namespace
#endif

/// Return (creating and caching on first use) the subtarget for function \p F.
/// The cache key is CPU+features, including the function's "use-soft-float"
/// attribute, since that can be the only difference between two functions.
const ARMSubtarget *
ARMBaseTargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  // Per-function attributes override the module-level TargetCPU/TargetFS.
  std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
                        ? CPUAttr.getValueAsString().str()
                        : TargetCPU;
  std::string FS = !FSAttr.hasAttribute(Attribute::None)
                       ? FSAttr.getValueAsString().str()
                       : TargetFS;

  // FIXME: This is related to the code below to reset the target options,
  // we need to know whether or not the soft float flag is set on the
  // function before we can generate a subtarget. We also need to use
  // it as a key for the subtarget since that can be the only difference
  // between two functions.
  bool SoftFloat =
      F.getFnAttribute("use-soft-float").getValueAsString() == "true";
  // If the soft float attribute is set on the function turn on the soft float
  // subtarget feature.
  if (SoftFloat)
    FS += FS.empty() ? "+soft-float" : ",+soft-float";

  auto &I = SubtargetMap[CPU + FS];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<ARMSubtarget>(TargetTriple, CPU, FS, *this, isLittle);

#ifndef LLVM_BUILD_GLOBAL_ISEL
    // Without GlobalISel, install a stub accessor that returns null for
    // every component.
    GISelAccessor *GISel = new GISelAccessor();
#else
    ARMGISelActualAccessor *GISel = new ARMGISelActualAccessor();
    GISel->CallLoweringInfo.reset(new ARMCallLowering(*I->getTargetLowering()));
    GISel->Legalizer.reset(new ARMLegalizerInfo());

    auto *RBI = new ARMRegisterBankInfo(*I->getRegisterInfo());

    // FIXME: At this point, we can't rely on Subtarget having RBI.
    // It's awkward to mix passing RBI and the Subtarget; should we pass
    // TII/TRI as well?
    GISel->InstSelector.reset(new ARMInstructionSelector(*I, *RBI));

    GISel->RegBankInfo.reset(RBI);
#endif
    I->setGISelAccessor(*GISel);
  }
  return I.get();
}

/// Expose the ARM cost model (ARMTTIImpl) to IR-level analyses, built
/// per-function so it reflects the function's subtarget.
TargetIRAnalysis ARMBaseTargetMachine::getTargetIRAnalysis() {
  return TargetIRAnalysis([this](const Function &F) {
    return TargetTransformInfo(ARMTTIImpl(this, F));
  });
}

void ARMTargetMachine::anchor() {}

/// ARM (A32) target machine. Fatal-errors if the selected CPU cannot
/// execute ARM-mode instructions at all.
ARMTargetMachine::ARMTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   const TargetOptions &Options,
                                   Optional<Reloc::Model> RM,
                                   CodeModel::Model CM, CodeGenOpt::Level OL,
                                   bool isLittle)
    : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, isLittle) {
  initAsmInfo();
  if (!Subtarget.hasARMOps())
    report_fatal_error("CPU: '" + Subtarget.getCPUString() + "' does not "
                       "support ARM mode execution!");
}

void ARMLETargetMachine::anchor() {}

/// Little-endian ARM target machine.
ARMLETargetMachine::ARMLETargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Optional<Reloc::Model> RM,
                                       CodeModel::Model CM,
                                       CodeGenOpt::Level OL)
    : ARMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

void ARMBETargetMachine::anchor() {}

/// Big-endian ARM target machine.
ARMBETargetMachine::ARMBETargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Optional<Reloc::Model> RM,
                                       CodeModel::Model CM,
                                       CodeGenOpt::Level OL)
    : ARMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}

void ThumbTargetMachine::anchor() {}

/// Thumb target machine (endianness chosen by the LE/BE subclasses).
ThumbTargetMachine::ThumbTargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Optional<Reloc::Model> RM,
                                       CodeModel::Model CM,
                                       CodeGenOpt::Level OL, bool isLittle)
    : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, isLittle) {
  initAsmInfo();
}

void ThumbLETargetMachine::anchor() {}

/// Little-endian Thumb target machine.
ThumbLETargetMachine::ThumbLETargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Optional<Reloc::Model> RM,
                                           CodeModel::Model CM,
                                           CodeGenOpt::Level OL)
    : ThumbTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

void ThumbBETargetMachine::anchor() {}

/// Big-endian Thumb target machine.
ThumbBETargetMachine::ThumbBETargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Optional<Reloc::Model> RM,
                                           CodeModel::Model CM,
                                           CodeGenOpt::Level OL)
    : ThumbTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}

namespace {

/// ARM Code Generator Pass Configuration Options.
///
/// Hooks the ARM-specific passes into the generic codegen pipeline; each
/// override below corresponds to one insertion point defined by
/// TargetPassConfig.
class ARMPassConfig : public TargetPassConfig {
public:
  ARMPassConfig(ARMBaseTargetMachine *TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {}

  ARMBaseTargetMachine &getARMTargetMachine() const {
    return getTM<ARMBaseTargetMachine>();
  }

  void addIRPasses() override;
  bool addPreISel() override;
  bool addInstSelector() override;
#ifdef LLVM_BUILD_GLOBAL_ISEL
  bool addIRTranslator() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
#endif
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // end anonymous namespace

TargetPassConfig *ARMBaseTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new ARMPassConfig(this, PM);
}

/// IR-level passes: atomic lowering/expansion, an optional CFG tidy-up
/// afterwards, and (at -O1+) interleaved-access matching.
void ARMPassConfig::addIRPasses() {
  // Single-threaded targets can lower atomics directly; otherwise expand
  // them to ldrex/strex-style sequences.
  if (TM->Options.ThreadModel == ThreadModel::Single)
    addPass(createLowerAtomicPass());
  else
    addPass(createAtomicExpandPass(TM));

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether it succeeded. We can exploit existing control-flow in
  // ldrex/strex loops to simplify this, but it needs tidying up.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass(-1, [this](const Function &F) {
      // Only worthwhile when the subtarget has data barriers and is not
      // Thumb1-only.
      const auto &ST = this->TM->getSubtarget<ARMSubtarget>(F);
      return ST.hasAnyDataBarrier() && !ST.isThumb1Only();
    }));

  TargetPassConfig::addIRPasses();

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createInterleavedAccessPass(TM));
}

/// Optionally schedule the GlobalMerge pass, controlled by the
/// -arm-global-merge tri-state flag and the optimization level.
bool ARMPassConfig::addPreISel() {
  if ((TM->getOptLevel() != CodeGenOpt::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    // FIXME: This is using the thumb1 only constant value for
    // maximal global offset for merging globals. We may want
    // to look into using the old value for non-thumb1 code of
    // 4095 based on the TargetMachine, but this starts to become
    // tricky when doing code gen per function.
    bool OnlyOptimizeForSize = (TM->getOptLevel() < CodeGenOpt::Aggressive) &&
                               (EnableGlobalMerge == cl::BOU_UNSET);
    // Merging of extern globals is enabled by default on non-Mach-O as we
    // expect it to be generally either beneficial or harmless. On Mach-O it
    // is disabled as we emit the .subsections_via_symbols directive which
    // means that merging extern globals is not safe.
    bool MergeExternalByDefault = !TM->getTargetTriple().isOSBinFormatMachO();
    addPass(createGlobalMergePass(TM, 127, OnlyOptimizeForSize,
                                  MergeExternalByDefault));
  }

  return false;
}

/// Install the SelectionDAG instruction selector.
bool ARMPassConfig::addInstSelector() {
  addPass(createARMISelDag(getARMTargetMachine(), getOptLevel()));
  return false;
}

#ifdef LLVM_BUILD_GLOBAL_ISEL
// GlobalISel pipeline: IR translation -> legalization -> register-bank
// selection -> instruction selection. Each hook returns false to indicate
// the pass was added successfully.
bool ARMPassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool ARMPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

bool ARMPassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool ARMPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}
#endif

/// Pre-register-allocation machine passes; all are optimization-level-gated
/// and two are further controlled by command-line flags.
void ARMPassConfig::addPreRegAlloc() {
  if (getOptLevel() != CodeGenOpt::None) {
    addPass(createMLxExpansionPass());

    if (EnableARMLoadStoreOpt)
      addPass(createARMLoadStoreOptimizationPass(/* pre-register alloc */ true));

    if (!DisableA15SDOptimization)
      addPass(createA15SDOptimizerPass());
  }
}

/// Passes run before the second scheduling pass: load/store optimization,
/// execution-dependency fixing, pseudo expansion, Thumb2 size reduction,
/// if-conversion, and IT-block formation. Order matters here.
void ARMPassConfig::addPreSched2() {
  if (getOptLevel() != CodeGenOpt::None) {
    if (EnableARMLoadStoreOpt)
      addPass(createARMLoadStoreOptimizationPass());

    addPass(createExecutionDependencyFixPass(&ARM::DPRRegClass));
  }

  // Expand some pseudo instructions into multiple instructions to allow
  // proper scheduling.
  addPass(createARMExpandPseudoPass());

  if (getOptLevel() != CodeGenOpt::None) {
    // in v8, IfConversion depends on Thumb instruction widths
    addPass(createThumb2SizeReductionPass([this](const Function &F) {
      return this->TM->getSubtarget<ARMSubtarget>(F).restrictIT();
    }));

    // If-conversion is skipped for Thumb1-only subtargets.
    addPass(createIfConverter([](const MachineFunction &MF) {
      return !MF.getSubtarget<ARMSubtarget>().isThumb1Only();
    }));
  }
  addPass(createThumb2ITBlockPass());
}

/// Final pre-emission passes: size reduction, bundle unpacking (required by
/// the constant island pass), barrier optimization, and constant islands.
void ARMPassConfig::addPreEmitPass() {
  addPass(createThumb2SizeReductionPass());

  // Constant island pass work on unbundled instructions.
  addPass(createUnpackMachineBundles([](const MachineFunction &MF) {
    return MF.getSubtarget<ARMSubtarget>().isThumb2();
  }));

  // Don't optimize barriers at -O0.
  if (getOptLevel() != CodeGenOpt::None)
    addPass(createARMOptimizeBarriersPass());

  addPass(createARMConstantIslandPass());
}