//===-- ARMTargetMachine.cpp - Define TargetMachine for ARM ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMCallLowering.h"
#include "ARMInstructionSelector.h"
#include "ARMLegalizerInfo.h"
#include "ARMRegisterBankInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "ARMTargetTransformInfo.h"
#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/GISelAccessor.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"
#include <cassert>
#include <memory>
#include <string>

using namespace llvm;

// Debugging knob: suppress the Cortex-A15 S->D register-access optimization
// pass (see addPreRegAlloc below).
static cl::opt<bool>
DisableA15SDOptimization("disable-a15-sd-optimization", cl::Hidden,
                   cl::desc("Inhibit optimization of S->D register accesses on A15"),
                   cl::init(false));

// Controls the extra SimplifyCFG run after atomic expansion (see addIRPasses
// below). On by default.
static cl::opt<bool>
EnableAtomicTidy("arm-atomic-cfg-tidy", cl::Hidden,
                 cl::desc("Run SimplifyCFG after expanding atomic operations"
                          " to make use of cmpxchg flow-based information"),
                 cl::init(true));

// Controls both the pre- and post-register-allocation load/store optimizer
// runs (see addPreRegAlloc / addPreSched2 below). On by default.
static cl::opt<bool>
EnableARMLoadStoreOpt("arm-load-store-opt", cl::Hidden,
                      cl::desc("Enable ARM load/store optimization pass"),
                      cl::init(true));

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
EnableGlobalMerge("arm-global-merge", cl::Hidden,
                  cl::desc("Enable the global merge pass"));

/// Register the four ARM target-machine variants (ARM/Thumb x LE/BE) with the
/// target registry, and initialize the ARM passes that need registration.
extern "C" void LLVMInitializeARMTarget() {
  // Register the target.
  RegisterTargetMachine<ARMLETargetMachine> X(getTheARMLETarget());
  RegisterTargetMachine<ARMBETargetMachine> Y(getTheARMBETarget());
  RegisterTargetMachine<ThumbLETargetMachine> A(getTheThumbLETarget());
  RegisterTargetMachine<ThumbBETargetMachine> B(getTheThumbBETarget());

  PassRegistry &Registry = *PassRegistry::getPassRegistry();
  initializeGlobalISel(Registry);
  initializeARMLoadStoreOptPass(Registry);
  initializeARMPreAllocLoadStoreOptPass(Registry);
  initializeARMConstantIslandsPass(Registry);
}

/// Select the object-file lowering for the triple: Mach-O on Darwin-style
/// triples, COFF on Windows, ELF everywhere else.
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return llvm::make_unique<TargetLoweringObjectFileMachO>();
  if (TT.isOSWindows())
    return llvm::make_unique<TargetLoweringObjectFileCOFF>();
  return llvm::make_unique<ARMElfTargetObjectFile>();
}

/// Compute the ARM ABI to use. An explicit -target-abi option (via
/// Options.MCOptions) wins; otherwise the ABI is derived from the triple
/// (and, for Mach-O, the architecture profile parsed from the CPU name).
static ARMBaseTargetMachine::ARMABI
computeTargetABI(const Triple &TT, StringRef CPU,
                 const TargetOptions &Options) {
  if (Options.MCOptions.getABIName() == "aapcs16")
    return ARMBaseTargetMachine::ARM_ABI_AAPCS16;
  else if (Options.MCOptions.getABIName().startswith("aapcs"))
    return ARMBaseTargetMachine::ARM_ABI_AAPCS;
  else if (Options.MCOptions.getABIName().startswith("apcs"))
    return ARMBaseTargetMachine::ARM_ABI_APCS;

  // Any other non-empty ABI name is unsupported.
  assert(Options.MCOptions.getABIName().empty() &&
         "Unknown target-abi option!");

  ARMBaseTargetMachine::ARMABI TargetABI =
      ARMBaseTargetMachine::ARM_ABI_UNKNOWN;

  unsigned ArchKind = ARM::parseCPUArch(CPU);
  StringRef ArchName = ARM::getArchName(ArchKind);
  // FIXME: This is duplicated code from the front end and should be unified.
  if (TT.isOSBinFormatMachO()) {
    if (TT.getEnvironment() == Triple::EABI ||
        (TT.getOS() == Triple::UnknownOS && TT.isOSBinFormatMachO()) ||
        ARM::parseArchProfile(ArchName) == ARM::PK_M) {
      TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
    } else if (TT.isWatchABI()) {
      TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS16;
    } else {
      TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
    }
  } else if (TT.isOSWindows()) {
    // FIXME: this is invalid for WindowsCE
    TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
  } else {
    // Select the default based on the platform.
    switch (TT.getEnvironment()) {
    case Triple::Android:
    case Triple::GNUEABI:
    case Triple::GNUEABIHF:
    case Triple::MuslEABI:
    case Triple::MuslEABIHF:
    case Triple::EABIHF:
    case Triple::EABI:
      TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
      break;
    case Triple::GNU:
      TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
      break;
    default:
      if (TT.isOSNetBSD())
        TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
      else
        TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
      break;
    }
  }

  return TargetABI;
}

/// Build the LLVM data-layout string for the target, piece by piece, based on
/// endianness, mangling, and the alignment rules of the selected ABI.
static std::string computeDataLayout(const Triple &TT, StringRef CPU,
                                     const TargetOptions &Options,
                                     bool isLittle) {
  auto ABI = computeTargetABI(TT, CPU, Options);
  std::string Ret;

  if (isLittle)
    // Little endian.
    Ret += "e";
  else
    // Big endian.
    Ret += "E";

  Ret += DataLayout::getManglingComponent(TT);

  // Pointers are 32 bits and aligned to 32 bits.
  Ret += "-p:32:32";

  // ABIs other than APCS have 64 bit integers with natural alignment.
  if (ABI != ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-i64:64";

  // We have 64 bits floats. The APCS ABI requires them to be aligned to 32
  // bits, others to 64 bits. We always try to align to 64 bits.
  if (ABI == ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-f64:32:64";

  // We have 128 and 64 bit vectors. The APCS ABI aligns them to 32 bits, others
  // to 64. We always ty to give them natural alignment.
  if (ABI == ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-v64:32:64-v128:32:128";
  else if (ABI != ARMBaseTargetMachine::ARM_ABI_AAPCS16)
    Ret += "-v128:64:128";

  // Try to align aggregates to 32 bits (the default is 64 bits, which has no
  // particular hardware support on 32-bit ARM).
  Ret += "-a:0:32";

  // Integer registers are 32 bits.
  Ret += "-n32";

  // The stack is 128 bit aligned on NaCl, 64 bit aligned on AAPCS and 32 bit
  // aligned everywhere else.
  if (TT.isOSNaCl() || ABI == ARMBaseTargetMachine::ARM_ABI_AAPCS16)
    Ret += "-S128";
  else if (ABI == ARMBaseTargetMachine::ARM_ABI_AAPCS)
    Ret += "-S64";
  else
    Ret += "-S32";

  return Ret;
}

/// Map the (possibly absent) requested relocation model to the one actually
/// used: default PIC on Mach-O / Static elsewhere, and DynamicNoPIC demoted
/// to Static outside Darwin.
static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           Optional<Reloc::Model> RM) {
  if (!RM.hasValue())
    // Default relocation model on Darwin is PIC.
    return TT.isOSBinFormatMachO() ? Reloc::PIC_ : Reloc::Static;

  if (*RM == Reloc::ROPI || *RM == Reloc::RWPI || *RM == Reloc::ROPI_RWPI)
    assert(TT.isOSBinFormatELF() &&
           "ROPI/RWPI currently only supported for ELF");

  // DynamicNoPIC is only used on darwin.
  if (*RM == Reloc::DynamicNoPIC && !TT.isOSDarwin())
    return Reloc::Static;

  return *RM;
}

/// Create an ARM architecture model.
///
/// Computes the data layout and effective relocation model up front, then
/// fills in triple-derived defaults for the float ABI and EABI version when
/// the caller left them unset.
ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Optional<Reloc::Model> RM,
                                           CodeModel::Model CM,
                                           CodeGenOpt::Level OL, bool isLittle)
    : LLVMTargetMachine(T, computeDataLayout(TT, CPU, Options, isLittle), TT,
                        CPU, FS, Options, getEffectiveRelocModel(TT, RM), CM,
                        OL),
      TargetABI(computeTargetABI(TT, CPU, Options)),
      TLOF(createTLOF(getTargetTriple())),
      Subtarget(TT, CPU, FS, *this, isLittle), isLittle(isLittle) {

  // Default to triple-appropriate float ABI
  if (Options.FloatABIType == FloatABI::Default)
    this->Options.FloatABIType =
        Subtarget.isTargetHardFloat() ? FloatABI::Hard : FloatABI::Soft;

  // Default to triple-appropriate EABI
  if (Options.EABIVersion == EABI::Default ||
      Options.EABIVersion == EABI::Unknown) {
    // musl is compatible with glibc with regard to EABI version
    if (Subtarget.isTargetGNUAEABI() || Subtarget.isTargetMuslAEABI())
      this->Options.EABIVersion = EABI::GNU;
    else
      this->Options.EABIVersion = EABI::EABI5;
  }
}

ARMBaseTargetMachine::~ARMBaseTargetMachine() = default;

#ifdef LLVM_BUILD_GLOBAL_ISEL
namespace {

/// GlobalISel accessor holding the ARM implementations of the four GlobalISel
/// interfaces; attached to each subtarget in getSubtargetImpl below.
struct ARMGISelActualAccessor : public GISelAccessor {
  std::unique_ptr<CallLowering> CallLoweringInfo;
  std::unique_ptr<InstructionSelector> InstSelector;
  std::unique_ptr<LegalizerInfo> Legalizer;
  std::unique_ptr<RegisterBankInfo> RegBankInfo;

  const CallLowering *getCallLowering() const override {
    return CallLoweringInfo.get();
  }

  const InstructionSelector *getInstructionSelector() const override {
    return InstSelector.get();
  }

  const LegalizerInfo *getLegalizerInfo() const override {
    return Legalizer.get();
  }

  const RegisterBankInfo *getRegBankInfo() const override {
    return RegBankInfo.get();
  }
};

} // end anonymous namespace
#endif

/// Return (creating and caching on first use) the subtarget for a function.
/// The function's "target-cpu" / "target-features" / "use-soft-float"
/// attributes override the module-level defaults, and the CPU+FS string is
/// the cache key, so functions differing only in these attributes get
/// distinct subtargets.
const ARMSubtarget *
ARMBaseTargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
                        ? CPUAttr.getValueAsString().str()
                        : TargetCPU;
  std::string FS = !FSAttr.hasAttribute(Attribute::None)
                       ? FSAttr.getValueAsString().str()
                       : TargetFS;

  // FIXME: This is related to the code below to reset the target options,
  // we need to know whether or not the soft float flag is set on the
  // function before we can generate a subtarget. We also need to use
  // it as a key for the subtarget since that can be the only difference
  // between two functions.
  bool SoftFloat =
      F.getFnAttribute("use-soft-float").getValueAsString() == "true";
  // If the soft float attribute is set on the function turn on the soft float
  // subtarget feature.
  if (SoftFloat)
    FS += FS.empty() ? "+soft-float" : ",+soft-float";

  auto &I = SubtargetMap[CPU + FS];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<ARMSubtarget>(TargetTriple, CPU, FS, *this, isLittle);

#ifndef LLVM_BUILD_GLOBAL_ISEL
    GISelAccessor *GISel = new GISelAccessor();
#else
    ARMGISelActualAccessor *GISel = new ARMGISelActualAccessor();
    GISel->CallLoweringInfo.reset(new ARMCallLowering(*I->getTargetLowering()));
    GISel->Legalizer.reset(new ARMLegalizerInfo(*I));

    auto *RBI = new ARMRegisterBankInfo(*I->getRegisterInfo());

    // FIXME: At this point, we can't rely on Subtarget having RBI.
    // It's awkward to mix passing RBI and the Subtarget; should we pass
    // TII/TRI as well?
    GISel->InstSelector.reset(new ARMInstructionSelector(*I, *RBI));

    GISel->RegBankInfo.reset(RBI);
#endif
    I->setGISelAccessor(*GISel);
  }
  return I.get();
}

/// Expose the ARM TargetTransformInfo implementation to the optimizer,
/// constructed per function.
TargetIRAnalysis ARMBaseTargetMachine::getTargetIRAnalysis() {
  return TargetIRAnalysis([this](const Function &F) {
    return TargetTransformInfo(ARMTTIImpl(this, F));
  });
}

void ARMTargetMachine::anchor() {}

/// ARM-mode target machine; rejects CPUs that cannot execute ARM (non-Thumb)
/// instructions.
ARMTargetMachine::ARMTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   const TargetOptions &Options,
                                   Optional<Reloc::Model> RM,
                                   CodeModel::Model CM, CodeGenOpt::Level OL,
                                   bool isLittle)
    : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, isLittle) {
  initAsmInfo();
  if (!Subtarget.hasARMOps())
    report_fatal_error("CPU: '" + Subtarget.getCPUString() + "' does not "
                       "support ARM mode execution!");
}

void ARMLETargetMachine::anchor() {}

/// Little-endian ARM-mode variant.
ARMLETargetMachine::ARMLETargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Optional<Reloc::Model> RM,
                                       CodeModel::Model CM,
                                       CodeGenOpt::Level OL)
    : ARMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

void ARMBETargetMachine::anchor() {}

/// Big-endian ARM-mode variant.
ARMBETargetMachine::ARMBETargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Optional<Reloc::Model> RM,
                                       CodeModel::Model CM,
                                       CodeGenOpt::Level OL)
    : ARMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}

void ThumbTargetMachine::anchor() {}

/// Thumb-mode target machine.
ThumbTargetMachine::ThumbTargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Optional<Reloc::Model> RM,
                                       CodeModel::Model CM,
                                       CodeGenOpt::Level OL, bool isLittle)
    : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, isLittle) {
  initAsmInfo();
}

void ThumbLETargetMachine::anchor() {}

/// Little-endian Thumb-mode variant.
ThumbLETargetMachine::ThumbLETargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Optional<Reloc::Model> RM,
                                           CodeModel::Model CM,
                                           CodeGenOpt::Level OL)
    : ThumbTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

void ThumbBETargetMachine::anchor() {}

/// Big-endian Thumb-mode variant.
ThumbBETargetMachine::ThumbBETargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Optional<Reloc::Model> RM,
                                           CodeModel::Model CM,
                                           CodeGenOpt::Level OL)
    : ThumbTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}

namespace {

/// ARM Code Generator Pass Configuration Options.
class ARMPassConfig : public TargetPassConfig {
public:
  ARMPassConfig(ARMBaseTargetMachine *TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {}

  ARMBaseTargetMachine &getARMTargetMachine() const {
    return getTM<ARMBaseTargetMachine>();
  }

  void addIRPasses() override;
  bool addPreISel() override;
  bool addInstSelector() override;
#ifdef LLVM_BUILD_GLOBAL_ISEL
  bool addIRTranslator() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
#endif
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // end anonymous namespace

TargetPassConfig *ARMBaseTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new ARMPassConfig(this, PM);
}

/// IR-level passes: atomic lowering/expansion (by thread model), an optional
/// SimplifyCFG cleanup, the common IR passes, and interleaved-access matching.
void ARMPassConfig::addIRPasses() {
  // Single-threaded code can lower atomics directly; otherwise expand them
  // into ldrex/strex-style sequences.
  if (TM->Options.ThreadModel == ThreadModel::Single)
    addPass(createLowerAtomicPass());
  else
    addPass(createAtomicExpandPass(TM));

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether it succeeded. We can exploit existing control-flow in
  // ldrex/strex loops to simplify this, but it needs tidying up.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass(-1, [this](const Function &F) {
      const auto &ST = this->TM->getSubtarget<ARMSubtarget>(F);
      return ST.hasAnyDataBarrier() && !ST.isThumb1Only();
    }));

  TargetPassConfig::addIRPasses();

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createInterleavedAccessPass(TM));
}

/// Pre-ISel: optionally run GlobalMerge, controlled by -arm-global-merge and
/// the optimization level.
bool ARMPassConfig::addPreISel() {
  if ((TM->getOptLevel() != CodeGenOpt::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    // FIXME: This is using the thumb1 only constant value for
    // maximal global offset for merging globals. We may want
    // to look into using the old value for non-thumb1 code of
    // 4095 based on the TargetMachine, but this starts to become
    // tricky when doing code gen per function.
    bool OnlyOptimizeForSize = (TM->getOptLevel() < CodeGenOpt::Aggressive) &&
                               (EnableGlobalMerge == cl::BOU_UNSET);
    // Merging of extern globals is enabled by default on non-Mach-O as we
    // expect it to be generally either beneficial or harmless. On Mach-O it
    // is disabled as we emit the .subsections_via_symbols directive which
    // means that merging extern globals is not safe.
    bool MergeExternalByDefault = !TM->getTargetTriple().isOSBinFormatMachO();
    addPass(createGlobalMergePass(TM, 127, OnlyOptimizeForSize,
                                  MergeExternalByDefault));
  }

  return false;
}

/// Install the SelectionDAG instruction selector.
bool ARMPassConfig::addInstSelector() {
  addPass(createARMISelDag(getARMTargetMachine(), getOptLevel()));
  return false;
}

#ifdef LLVM_BUILD_GLOBAL_ISEL
// GlobalISel pipeline hooks: each installs the corresponding generic pass.
bool ARMPassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool ARMPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

bool ARMPassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool ARMPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}
#endif

/// Pre-register-allocation machine passes (optimizing builds only): MLx
/// expansion, the pre-RA load/store optimizer, and the Cortex-A15 S->D
/// optimizer, the latter two gated by their command-line flags.
void ARMPassConfig::addPreRegAlloc() {
  if (getOptLevel() != CodeGenOpt::None) {
    addPass(createMLxExpansionPass());

    if (EnableARMLoadStoreOpt)
      addPass(createARMLoadStoreOptimizationPass(/* pre-register alloc */ true));

    if (!DisableA15SDOptimization)
      addPass(createA15SDOptimizerPass());
  }
}

/// Passes run before the second scheduling pass: post-RA load/store
/// optimization, pseudo expansion, Thumb2 size reduction, if-conversion,
/// and IT-block formation.
void ARMPassConfig::addPreSched2() {
  if (getOptLevel() != CodeGenOpt::None) {
    if (EnableARMLoadStoreOpt)
      addPass(createARMLoadStoreOptimizationPass());

    addPass(createExecutionDependencyFixPass(&ARM::DPRRegClass));
  }

  // Expand some pseudo instructions into multiple instructions to allow
  // proper scheduling.
  addPass(createARMExpandPseudoPass());

  if (getOptLevel() != CodeGenOpt::None) {
    // in v8, IfConversion depends on Thumb instruction widths
    addPass(createThumb2SizeReductionPass([this](const Function &F) {
      return this->TM->getSubtarget<ARMSubtarget>(F).restrictIT();
    }));

    addPass(createIfConverter([](const MachineFunction &MF) {
      return !MF.getSubtarget<ARMSubtarget>().isThumb1Only();
    }));
  }
  addPass(createThumb2ITBlockPass());
}

/// Final pre-emission passes: Thumb2 size reduction, bundle unpacking (for
/// Thumb2), optional barrier optimization, and constant-island placement.
void ARMPassConfig::addPreEmitPass() {
  addPass(createThumb2SizeReductionPass());

  // Constant island pass work on unbundled instructions.
  addPass(createUnpackMachineBundles([](const MachineFunction &MF) {
    return MF.getSubtarget<ARMSubtarget>().isThumb2();
  }));

  // Don't optimize barriers at -O0.
  if (getOptLevel() != CodeGenOpt::None)
    addPass(createARMOptimizeBarriersPass());

  addPass(createARMConstantIslandPass());
}