//===-- ARMTargetMachine.cpp - Define TargetMachine for ARM --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMCallLowering.h"
#include "ARMLegalizerInfo.h"
#ifdef LLVM_BUILD_GLOBAL_ISEL
#include "ARMRegisterBankInfo.h"
#endif
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "ARMTargetTransformInfo.h"
#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/ExecutionDepsFix.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/GISelAccessor.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"
#include <cassert>
#include <memory>
#include <string>

using namespace llvm;

static cl::opt<bool>
DisableA15SDOptimization("disable-a15-sd-optimization", cl::Hidden,
                         cl::desc("Inhibit optimization of S->D register accesses on A15"),
                         cl::init(false));

static cl::opt<bool>
EnableAtomicTidy("arm-atomic-cfg-tidy", cl::Hidden,
                 cl::desc("Run SimplifyCFG after expanding atomic operations"
                          " to make use of cmpxchg flow-based information"),
                 cl::init(true));

static cl::opt<bool>
EnableARMLoadStoreOpt("arm-load-store-opt", cl::Hidden,
                      cl::desc("Enable ARM load/store optimization pass"),
                      cl::init(true));

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
EnableGlobalMerge("arm-global-merge", cl::Hidden,
                  cl::desc("Enable the global merge pass"));

namespace llvm {
void initializeARMExecutionDepsFixPass(PassRegistry&);
}

extern "C" void LLVMInitializeARMTarget() {
  // Register the target.
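  // Both the ARM and Thumb triples are handled by the same little-endian and
  // big-endian target machines; the ARM/Thumb distinction comes from the
  // triple and is resolved in the subtarget.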
  RegisterTargetMachine<ARMLETargetMachine> X(getTheARMLETarget());
  RegisterTargetMachine<ARMLETargetMachine> A(getTheThumbLETarget());
  RegisterTargetMachine<ARMBETargetMachine> Y(getTheARMBETarget());
  RegisterTargetMachine<ARMBETargetMachine> B(getTheThumbBETarget());

  PassRegistry &Registry = *PassRegistry::getPassRegistry();
  initializeGlobalISel(Registry);
  initializeARMLoadStoreOptPass(Registry);
  initializeARMPreAllocLoadStoreOptPass(Registry);
  initializeARMConstantIslandsPass(Registry);
  initializeARMExecutionDepsFixPass(Registry);
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return llvm::make_unique<TargetLoweringObjectFileMachO>();
  if (TT.isOSWindows())
    return llvm::make_unique<TargetLoweringObjectFileCOFF>();
  return llvm::make_unique<ARMElfTargetObjectFile>();
}

static ARMBaseTargetMachine::ARMABI
computeTargetABI(const Triple &TT, StringRef CPU,
                 const TargetOptions &Options) {
  if (Options.MCOptions.getABIName() == "aapcs16")
    return ARMBaseTargetMachine::ARM_ABI_AAPCS16;
  else if (Options.MCOptions.getABIName().startswith("aapcs"))
    return ARMBaseTargetMachine::ARM_ABI_AAPCS;
  else if (Options.MCOptions.getABIName().startswith("apcs"))
    return ARMBaseTargetMachine::ARM_ABI_APCS;

  assert(Options.MCOptions.getABIName().empty() &&
         "Unknown target-abi option!");

  ARMBaseTargetMachine::ARMABI TargetABI =
      ARMBaseTargetMachine::ARM_ABI_UNKNOWN;

  unsigned ArchKind = ARM::parseCPUArch(CPU);
  StringRef ArchName = ARM::getArchName(ArchKind);
  // FIXME: This is duplicated code from the front end and should be unified.
  if (TT.isOSBinFormatMachO()) {
    if (TT.getEnvironment() == Triple::EABI ||
        (TT.getOS() == Triple::UnknownOS && TT.isOSBinFormatMachO()) ||
        ARM::parseArchProfile(ArchName) == ARM::PK_M) {
      TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
    } else if (TT.isWatchABI()) {
      TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS16;
    } else {
      TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
    }
  } else if (TT.isOSWindows()) {
    // FIXME: this is invalid for WindowsCE
    TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
  } else {
    // Select the default based on the platform.
    switch (TT.getEnvironment()) {
    case Triple::Android:
    case Triple::GNUEABI:
    case Triple::GNUEABIHF:
    case Triple::MuslEABI:
    case Triple::MuslEABIHF:
    case Triple::EABIHF:
    case Triple::EABI:
      TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
      break;
    case Triple::GNU:
      TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
      break;
    default:
      if (TT.isOSNetBSD())
        TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
      else
        TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
      break;
    }
  }

  return TargetABI;
}

static std::string computeDataLayout(const Triple &TT, StringRef CPU,
                                     const TargetOptions &Options,
                                     bool isLittle) {
  auto ABI = computeTargetABI(TT, CPU, Options);
  std::string Ret;

  if (isLittle)
    // Little endian.
    Ret += "e";
  else
    // Big endian.
    Ret += "E";

  Ret += DataLayout::getManglingComponent(TT);

  // Pointers are 32 bits and aligned to 32 bits.
  Ret += "-p:32:32";

  // ABIs other than APCS have 64 bit integers with natural alignment.
  if (ABI != ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-i64:64";

  // We have 64-bit floats. The APCS ABI requires them to be aligned to 32
  // bits, others to 64 bits. We always try to align to 64 bits.
  if (ABI == ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-f64:32:64";

  // We have 128 and 64 bit vectors. The APCS ABI aligns them to 32 bits, others
  // to 64. We always try to give them natural alignment.
  if (ABI == ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-v64:32:64-v128:32:128";
  else if (ABI != ARMBaseTargetMachine::ARM_ABI_AAPCS16)
    Ret += "-v128:64:128";

  // Try to align aggregates to 32 bits (the default is 64 bits, which has no
  // particular hardware support on 32-bit ARM).
  Ret += "-a:0:32";

  // Integer registers are 32 bits.
  Ret += "-n32";

  // The stack is 128-bit aligned on NaCl and AAPCS16, 64-bit aligned on AAPCS
  // and 32-bit aligned everywhere else.
  if (TT.isOSNaCl() || ABI == ARMBaseTargetMachine::ARM_ABI_AAPCS16)
    Ret += "-S128";
  else if (ABI == ARMBaseTargetMachine::ARM_ABI_AAPCS)
    Ret += "-S64";
  else
    Ret += "-S32";

  return Ret;
}

static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           Optional<Reloc::Model> RM) {
  if (!RM.hasValue())
    // Default relocation model on Darwin is PIC.
    return TT.isOSBinFormatMachO() ? Reloc::PIC_ : Reloc::Static;

  if (*RM == Reloc::ROPI || *RM == Reloc::RWPI || *RM == Reloc::ROPI_RWPI)
    assert(TT.isOSBinFormatELF() &&
           "ROPI/RWPI currently only supported for ELF");

  // DynamicNoPIC is only used on darwin.
  if (*RM == Reloc::DynamicNoPIC && !TT.isOSDarwin())
    return Reloc::Static;

  return *RM;
}

/// Create an ARM architecture model.
///
ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Optional<Reloc::Model> RM,
                                           CodeModel::Model CM,
                                           CodeGenOpt::Level OL, bool isLittle)
    : LLVMTargetMachine(T, computeDataLayout(TT, CPU, Options, isLittle), TT,
                        CPU, FS, Options, getEffectiveRelocModel(TT, RM), CM,
                        OL),
      TargetABI(computeTargetABI(TT, CPU, Options)),
      TLOF(createTLOF(getTargetTriple())),
      Subtarget(TT, CPU, FS, *this, isLittle), isLittle(isLittle) {

  // Default to triple-appropriate float ABI.
  if (Options.FloatABIType == FloatABI::Default)
    this->Options.FloatABIType =
        Subtarget.isTargetHardFloat() ? FloatABI::Hard : FloatABI::Soft;

  // Default to triple-appropriate EABI.
  if (Options.EABIVersion == EABI::Default ||
      Options.EABIVersion == EABI::Unknown) {
    // musl is compatible with glibc with regard to EABI version
    if (Subtarget.isTargetGNUAEABI() || Subtarget.isTargetMuslAEABI())
      this->Options.EABIVersion = EABI::GNU;
    else
      this->Options.EABIVersion = EABI::EABI5;
  }

  initAsmInfo();
  if (!Subtarget.isThumb() && !Subtarget.hasARMOps())
    report_fatal_error("CPU: '" + Subtarget.getCPUString() + "' does not "
                       "support ARM mode execution!");
}

ARMBaseTargetMachine::~ARMBaseTargetMachine() = default;

#ifdef LLVM_BUILD_GLOBAL_ISEL
namespace {

/// Holds the GlobalISel components (call lowering, legalizer info, register
/// bank info and instruction selector) that are handed to the subtarget.
struct ARMGISelActualAccessor : public GISelAccessor {
  std::unique_ptr<CallLowering> CallLoweringInfo;
  std::unique_ptr<InstructionSelector> InstSelector;
  std::unique_ptr<LegalizerInfo> Legalizer;
  std::unique_ptr<RegisterBankInfo> RegBankInfo;

  const CallLowering *getCallLowering() const override {
    return CallLoweringInfo.get();
  }

  const InstructionSelector *getInstructionSelector() const override {
    return InstSelector.get();
  }

  const LegalizerInfo *getLegalizerInfo() const override {
    return Legalizer.get();
  }

  const RegisterBankInfo *getRegBankInfo() const override {
    return RegBankInfo.get();
  }
};

} // end anonymous namespace
#endif

const ARMSubtarget *
ARMBaseTargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
                        ? CPUAttr.getValueAsString().str()
                        : TargetCPU;
  std::string FS = !FSAttr.hasAttribute(Attribute::None)
                       ? FSAttr.getValueAsString().str()
                       : TargetFS;

  // FIXME: This is related to the code below to reset the target options,
  // we need to know whether or not the soft float flag is set on the
  // function before we can generate a subtarget. We also need to use
  // it as a key for the subtarget since that can be the only difference
  // between two functions.
  bool SoftFloat =
      F.getFnAttribute("use-soft-float").getValueAsString() == "true";
  // If the soft float attribute is set on the function turn on the soft float
  // subtarget feature.
  if (SoftFloat)
    FS += FS.empty() ? "+soft-float" : ",+soft-float";

  auto &I = SubtargetMap[CPU + FS];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<ARMSubtarget>(TargetTriple, CPU, FS, *this, isLittle);

#ifndef LLVM_BUILD_GLOBAL_ISEL
    GISelAccessor *GISel = new GISelAccessor();
#else
    ARMGISelActualAccessor *GISel = new ARMGISelActualAccessor();
    GISel->CallLoweringInfo.reset(new ARMCallLowering(*I->getTargetLowering()));
    GISel->Legalizer.reset(new ARMLegalizerInfo(*I));

    auto *RBI = new ARMRegisterBankInfo(*I->getRegisterInfo());

    // FIXME: At this point, we can't rely on Subtarget having RBI.
    // It's awkward to mix passing RBI and the Subtarget; should we pass
    // TII/TRI as well?
    GISel->InstSelector.reset(createARMInstructionSelector(*this, *I, *RBI));

    GISel->RegBankInfo.reset(RBI);
#endif
    I->setGISelAccessor(*GISel);
  }
  return I.get();
}

TargetIRAnalysis ARMBaseTargetMachine::getTargetIRAnalysis() {
  return TargetIRAnalysis([this](const Function &F) {
    return TargetTransformInfo(ARMTTIImpl(this, F));
  });
}

ARMLETargetMachine::ARMLETargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Optional<Reloc::Model> RM,
                                       CodeModel::Model CM,
                                       CodeGenOpt::Level OL)
    : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

ARMBETargetMachine::ARMBETargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Optional<Reloc::Model> RM,
                                       CodeModel::Model CM,
                                       CodeGenOpt::Level OL)
    : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}

namespace {

/// ARM Code Generator Pass Configuration Options.
class ARMPassConfig : public TargetPassConfig {
public:
  ARMPassConfig(ARMBaseTargetMachine &TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {}

  ARMBaseTargetMachine &getARMTargetMachine() const {
    return getTM<ARMBaseTargetMachine>();
  }

  void addIRPasses() override;
  bool addPreISel() override;
  bool addInstSelector() override;
#ifdef LLVM_BUILD_GLOBAL_ISEL
  bool addIRTranslator() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
#endif
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

class ARMExecutionDepsFix : public ExecutionDepsFix {
public:
  static char ID;
  ARMExecutionDepsFix() : ExecutionDepsFix(ID, ARM::DPRRegClass) {}
  StringRef getPassName() const override {
    return "ARM Execution Dependency Fix";
  }
};
char ARMExecutionDepsFix::ID;

} // end anonymous namespace

INITIALIZE_PASS(ARMExecutionDepsFix, "arm-execution-deps-fix",
                "ARM Execution Dependency Fix", false, false)

TargetPassConfig *ARMBaseTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new ARMPassConfig(*this, PM);
}

void ARMPassConfig::addIRPasses() {
  if (TM->Options.ThreadModel == ThreadModel::Single)
    addPass(createLowerAtomicPass());
  else
    addPass(createAtomicExpandPass());

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether the operation succeeded. We can exploit existing
  // control-flow in ldrex/strex loops to simplify this, but it needs tidying
  // up.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass(-1, [this](const Function &F) {
      const auto &ST = this->TM->getSubtarget<ARMSubtarget>(F);
      return ST.hasAnyDataBarrier() && !ST.isThumb1Only();
    }));

  TargetPassConfig::addIRPasses();

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createInterleavedAccessPass());
}

bool ARMPassConfig::addPreISel() {
  if ((TM->getOptLevel() != CodeGenOpt::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    // FIXME: This is using the thumb1 only constant value for
    // maximal global offset for merging globals. We may want
    // to look into using the old value for non-thumb1 code of
    // 4095 based on the TargetMachine, but this starts to become
    // tricky when doing code gen per function.
    bool OnlyOptimizeForSize = (TM->getOptLevel() < CodeGenOpt::Aggressive) &&
                               (EnableGlobalMerge == cl::BOU_UNSET);
    // Merging of extern globals is enabled by default on non-Mach-O as we
    // expect it to be generally either beneficial or harmless. On Mach-O it
    // is disabled as we emit the .subsections_via_symbols directive which
    // means that merging extern globals is not safe.
    bool MergeExternalByDefault = !TM->getTargetTriple().isOSBinFormatMachO();
    addPass(createGlobalMergePass(TM, 127, OnlyOptimizeForSize,
                                  MergeExternalByDefault));
  }

  return false;
}

bool ARMPassConfig::addInstSelector() {
  addPass(createARMISelDag(getARMTargetMachine(), getOptLevel()));
  return false;
}

#ifdef LLVM_BUILD_GLOBAL_ISEL
bool ARMPassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool ARMPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

bool ARMPassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool ARMPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}
#endif

void ARMPassConfig::addPreRegAlloc() {
  if (getOptLevel() != CodeGenOpt::None) {
    addPass(createMLxExpansionPass());

    if (EnableARMLoadStoreOpt)
      addPass(createARMLoadStoreOptimizationPass(/* pre-register alloc */ true));

    if (!DisableA15SDOptimization)
      addPass(createA15SDOptimizerPass());
  }
}

void ARMPassConfig::addPreSched2() {
  if (getOptLevel() != CodeGenOpt::None) {
    if (EnableARMLoadStoreOpt)
      addPass(createARMLoadStoreOptimizationPass());

    addPass(new ARMExecutionDepsFix());
  }

  // Expand some pseudo instructions into multiple instructions to allow
  // proper scheduling.
  addPass(createARMExpandPseudoPass());

  if (getOptLevel() != CodeGenOpt::None) {
    // In v8, IfConversion depends on Thumb instruction widths.
    addPass(createThumb2SizeReductionPass([this](const Function &F) {
      return this->TM->getSubtarget<ARMSubtarget>(F).restrictIT();
    }));

    addPass(createIfConverter([](const MachineFunction &MF) {
      return !MF.getSubtarget<ARMSubtarget>().isThumb1Only();
    }));
  }
  addPass(createThumb2ITBlockPass());
}

void ARMPassConfig::addPreEmitPass() {
  addPass(createThumb2SizeReductionPass());

  // The constant island pass works on unbundled instructions.
  addPass(createUnpackMachineBundles([](const MachineFunction &MF) {
    return MF.getSubtarget<ARMSubtarget>().isThumb2();
  }));

  // Don't optimize barriers at -O0.
  if (getOptLevel() != CodeGenOpt::None)
    addPass(createARMOptimizeBarriersPass());

  addPass(createARMConstantIslandPass());
}