//===-- ARMTargetMachine.cpp - Define TargetMachine for ARM ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMFrameLowering.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "ARMTargetTransformInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"

using namespace llvm;

static cl::opt<bool>
DisableA15SDOptimization("disable-a15-sd-optimization", cl::Hidden,
                         cl::desc("Inhibit optimization of S->D register accesses on A15"),
                         cl::init(false));

static cl::opt<bool>
EnableAtomicTidy("arm-atomic-cfg-tidy", cl::Hidden,
                 cl::desc("Run SimplifyCFG after expanding atomic operations"
                          " to make use of cmpxchg flow-based information"),
                 cl::init(true));

static cl::opt<bool>
EnableARMLoadStoreOpt("arm-load-store-opt", cl::Hidden,
                      cl::desc("Enable ARM load/store optimization pass"),
                      cl::init(true));

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
EnableGlobalMerge("arm-global-merge", cl::Hidden,
                  cl::desc("Enable the global merge pass"));

extern "C" void LLVMInitializeARMTarget() {
  // Register the target.
  RegisterTargetMachine<ARMLETargetMachine> X(getTheARMLETarget());
  RegisterTargetMachine<ARMBETargetMachine> Y(getTheARMBETarget());
  RegisterTargetMachine<ThumbLETargetMachine> A(getTheThumbLETarget());
  RegisterTargetMachine<ThumbBETargetMachine> B(getTheThumbBETarget());

  PassRegistry &Registry = *PassRegistry::getPassRegistry();
  initializeARMLoadStoreOptPass(Registry);
  initializeARMPreAllocLoadStoreOptPass(Registry);
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return make_unique<TargetLoweringObjectFileMachO>();
  if (TT.isOSWindows())
    return make_unique<TargetLoweringObjectFileCOFF>();
  return make_unique<ARMElfTargetObjectFile>();
}

static ARMBaseTargetMachine::ARMABI
computeTargetABI(const Triple &TT, StringRef CPU,
                 const TargetOptions &Options) {
  if (Options.MCOptions.getABIName() == "aapcs16")
    return ARMBaseTargetMachine::ARM_ABI_AAPCS16;
  else if (Options.MCOptions.getABIName().startswith("aapcs"))
    return ARMBaseTargetMachine::ARM_ABI_AAPCS;
  else if (Options.MCOptions.getABIName().startswith("apcs"))
    return ARMBaseTargetMachine::ARM_ABI_APCS;

  assert(Options.MCOptions.getABIName().empty() &&
         "Unknown target-abi option!");

  ARMBaseTargetMachine::ARMABI TargetABI =
      ARMBaseTargetMachine::ARM_ABI_UNKNOWN;

  unsigned ArchKind = llvm::ARM::parseCPUArch(CPU);
  StringRef ArchName = llvm::ARM::getArchName(ArchKind);
  // FIXME: This is duplicated code from the front end and should be unified.
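  // As a worked example of the selection below (derived from this code, not
  // normative): "thumbv7m-none-macho" takes the M-profile branch and gets
  // AAPCS, "armv7k-apple-watchos" is a watch-ABI triple and gets AAPCS16,
  // and a plain Darwin triple such as "armv7-apple-ios" falls through to
  // APCS.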
  if (TT.isOSBinFormatMachO()) {
    if (TT.getEnvironment() == llvm::Triple::EABI ||
        (TT.getOS() == llvm::Triple::UnknownOS && TT.isOSBinFormatMachO()) ||
        llvm::ARM::parseArchProfile(ArchName) == llvm::ARM::PK_M) {
      TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
    } else if (TT.isWatchABI()) {
      TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS16;
    } else {
      TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
    }
  } else if (TT.isOSWindows()) {
    // FIXME: this is invalid for WindowsCE
    TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
  } else {
    // Select the default based on the platform.
    switch (TT.getEnvironment()) {
    case llvm::Triple::Android:
    case llvm::Triple::GNUEABI:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::MuslEABI:
    case llvm::Triple::MuslEABIHF:
    case llvm::Triple::EABIHF:
    case llvm::Triple::EABI:
      TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
      break;
    case llvm::Triple::GNU:
      TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
      break;
    default:
      if (TT.isOSNetBSD())
        TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
      else
        TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
      break;
    }
  }

  return TargetABI;
}

static std::string computeDataLayout(const Triple &TT, StringRef CPU,
                                     const TargetOptions &Options,
                                     bool isLittle) {
  auto ABI = computeTargetABI(TT, CPU, Options);
  std::string Ret = "";

  if (isLittle)
    // Little endian.
    Ret += "e";
  else
    // Big endian.
    Ret += "E";

  Ret += DataLayout::getManglingComponent(TT);

  // Pointers are 32 bits and aligned to 32 bits.
  Ret += "-p:32:32";

  // ABIs other than APCS have 64-bit integers with natural alignment.
  if (ABI != ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-i64:64";

  // We have 64-bit floats. The APCS ABI requires them to be aligned to 32
  // bits, other ABIs to 64 bits. We always try to align to 64 bits.
  if (ABI == ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-f64:32:64";

  // We have 128- and 64-bit vectors. The APCS ABI aligns them to 32 bits,
  // other ABIs to 64. We always try to give them natural alignment.
  if (ABI == ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-v64:32:64-v128:32:128";
  else if (ABI != ARMBaseTargetMachine::ARM_ABI_AAPCS16)
    Ret += "-v128:64:128";

  // Try to align aggregates to 32 bits (the default is 64 bits, which has no
  // particular hardware support on 32-bit ARM).
  Ret += "-a:0:32";

  // Integer registers are 32 bits.
  Ret += "-n32";

  // The stack is 128 bit aligned on NaCl and AAPCS16, 64 bit aligned on AAPCS
  // and 32 bit aligned everywhere else.
  if (TT.isOSNaCl() || ABI == ARMBaseTargetMachine::ARM_ABI_AAPCS16)
    Ret += "-S128";
  else if (ABI == ARMBaseTargetMachine::ARM_ABI_AAPCS)
    Ret += "-S64";
  else
    Ret += "-S32";

  return Ret;
}

static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           Optional<Reloc::Model> RM) {
  if (!RM.hasValue())
    // Default relocation model on Darwin is PIC.
    return TT.isOSBinFormatMachO() ? Reloc::PIC_ : Reloc::Static;

  if (*RM == Reloc::ROPI || *RM == Reloc::RWPI || *RM == Reloc::ROPI_RWPI)
    assert(TT.isOSBinFormatELF() &&
           "ROPI/RWPI currently only supported for ELF");

  // DynamicNoPIC is only used on Darwin.
  if (*RM == Reloc::DynamicNoPIC && !TT.isOSDarwin())
    return Reloc::Static;

  return *RM;
}
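// For reference, walking the branches of computeDataLayout above for a
// little-endian AAPCS ELF triple such as "armv7-unknown-linux-gnueabihf"
// yields "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64". This is an
// illustrative trace of the code above, not an exhaustive specification;
// other triples differ in the mangling, f64/vector, and stack components.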
/// Create an ARM architecture model.
///
ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Optional<Reloc::Model> RM,
                                           CodeModel::Model CM,
                                           CodeGenOpt::Level OL, bool isLittle)
    : LLVMTargetMachine(T, computeDataLayout(TT, CPU, Options, isLittle), TT,
                        CPU, FS, Options, getEffectiveRelocModel(TT, RM), CM,
                        OL),
      TargetABI(computeTargetABI(TT, CPU, Options)),
      TLOF(createTLOF(getTargetTriple())),
      Subtarget(TT, CPU, FS, *this, isLittle), isLittle(isLittle) {

  // Default to the triple-appropriate float ABI.
  if (Options.FloatABIType == FloatABI::Default)
    this->Options.FloatABIType =
        Subtarget.isTargetHardFloat() ? FloatABI::Hard : FloatABI::Soft;

  // Default to the triple-appropriate EABI.
  if (Options.EABIVersion == EABI::Default ||
      Options.EABIVersion == EABI::Unknown) {
    // musl is compatible with glibc with regard to EABI version.
    if (Subtarget.isTargetGNUAEABI() || Subtarget.isTargetMuslAEABI())
      this->Options.EABIVersion = EABI::GNU;
    else
      this->Options.EABIVersion = EABI::EABI5;
  }
}

ARMBaseTargetMachine::~ARMBaseTargetMachine() {}

const ARMSubtarget *
ARMBaseTargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
                        ? CPUAttr.getValueAsString().str()
                        : TargetCPU;
  std::string FS = !FSAttr.hasAttribute(Attribute::None)
                       ? FSAttr.getValueAsString().str()
                       : TargetFS;

  // FIXME: This is related to the code below to reset the target options;
  // we need to know whether or not the soft float flag is set on the
  // function before we can generate a subtarget. We also need to use
  // it as a key for the subtarget since that can be the only difference
  // between two functions.
  bool SoftFloat =
      F.getFnAttribute("use-soft-float").getValueAsString() == "true";
  // If the soft float attribute is set on the function, turn on the soft float
  // subtarget feature.
  if (SoftFloat)
    FS += FS.empty() ? "+soft-float" : ",+soft-float";
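  // For reference (hypothetical values, standard LLVM IR attribute syntax),
  // the per-function attributes consulted above look like:
  //   define void @f() #0 { ... }
  //   attributes #0 = { "target-cpu"="cortex-a15"
  //                     "target-features"="+neon" "use-soft-float"="false" }
  // The concatenated CPU+FS string keys the subtarget cache below.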
  auto &I = SubtargetMap[CPU + FS];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<ARMSubtarget>(TargetTriple, CPU, FS, *this, isLittle);
  }
  return I.get();
}

TargetIRAnalysis ARMBaseTargetMachine::getTargetIRAnalysis() {
  return TargetIRAnalysis([this](const Function &F) {
    return TargetTransformInfo(ARMTTIImpl(this, F));
  });
}

void ARMTargetMachine::anchor() {}

ARMTargetMachine::ARMTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   const TargetOptions &Options,
                                   Optional<Reloc::Model> RM,
                                   CodeModel::Model CM, CodeGenOpt::Level OL,
                                   bool isLittle)
    : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, isLittle) {
  initAsmInfo();
  if (!Subtarget.hasARMOps())
    report_fatal_error("CPU: '" + Subtarget.getCPUString() + "' does not "
                       "support ARM mode execution!");
}

void ARMLETargetMachine::anchor() {}

ARMLETargetMachine::ARMLETargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Optional<Reloc::Model> RM,
                                       CodeModel::Model CM,
                                       CodeGenOpt::Level OL)
    : ARMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

void ARMBETargetMachine::anchor() {}

ARMBETargetMachine::ARMBETargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Optional<Reloc::Model> RM,
                                       CodeModel::Model CM,
                                       CodeGenOpt::Level OL)
    : ARMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}

void ThumbTargetMachine::anchor() {}

ThumbTargetMachine::ThumbTargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Optional<Reloc::Model> RM,
                                       CodeModel::Model CM,
                                       CodeGenOpt::Level OL, bool isLittle)
    : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, isLittle) {
  initAsmInfo();
}

void ThumbLETargetMachine::anchor() {}

ThumbLETargetMachine::ThumbLETargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Optional<Reloc::Model> RM,
                                           CodeModel::Model CM,
                                           CodeGenOpt::Level OL)
    : ThumbTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

void ThumbBETargetMachine::anchor() {}

ThumbBETargetMachine::ThumbBETargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Optional<Reloc::Model> RM,
                                           CodeModel::Model CM,
                                           CodeGenOpt::Level OL)
    : ThumbTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
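// For orientation, the concrete target machines defined above form a small
// hierarchy (each leaf only fixes the endianness flag passed up to its
// parent constructor):
//
//   ARMBaseTargetMachine
//     ARMTargetMachine        (ARM-mode entry ISA; requires ARM ops)
//       ARMLETargetMachine      (little-endian)
//       ARMBETargetMachine      (big-endian)
//     ThumbTargetMachine      (Thumb-mode entry ISA)
//       ThumbLETargetMachine    (little-endian)
//       ThumbBETargetMachine    (big-endian)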
namespace {
/// ARM Code Generator Pass Configuration Options.
class ARMPassConfig : public TargetPassConfig {
public:
  ARMPassConfig(ARMBaseTargetMachine *TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {}

  ARMBaseTargetMachine &getARMTargetMachine() const {
    return getTM<ARMBaseTargetMachine>();
  }

  void addIRPasses() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};
} // namespace

TargetPassConfig *ARMBaseTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new ARMPassConfig(this, PM);
}

void ARMPassConfig::addIRPasses() {
  if (TM->Options.ThreadModel == ThreadModel::Single)
    addPass(createLowerAtomicPass());
  else
    addPass(createAtomicExpandPass(TM));

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether it succeeded. We can exploit existing control flow in
  // ldrex/strex loops to simplify this, but it needs tidying up.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass(-1, [this](const Function &F) {
      const auto &ST = this->TM->getSubtarget<ARMSubtarget>(F);
      return ST.hasAnyDataBarrier() && !ST.isThumb1Only();
    }));

  TargetPassConfig::addIRPasses();

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createInterleavedAccessPass(TM));
}

bool ARMPassConfig::addPreISel() {
  if ((TM->getOptLevel() != CodeGenOpt::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    // FIXME: This is using the Thumb1-only constant value for the maximal
    // global offset for merging globals. We may want to look into using the
    // old value of 4095 for non-Thumb1 code based on the TargetMachine, but
    // this starts to become tricky when doing code gen per function.
    bool OnlyOptimizeForSize = (TM->getOptLevel() < CodeGenOpt::Aggressive) &&
                               (EnableGlobalMerge == cl::BOU_UNSET);
    // Merging of extern globals is enabled by default on non-Mach-O as we
    // expect it to be generally either beneficial or harmless. On Mach-O it
    // is disabled as we emit the .subsections_via_symbols directive which
    // means that merging extern globals is not safe.
    bool MergeExternalByDefault = !TM->getTargetTriple().isOSBinFormatMachO();
    addPass(createGlobalMergePass(TM, 127, OnlyOptimizeForSize,
                                  MergeExternalByDefault));
  }

  return false;
}

bool ARMPassConfig::addInstSelector() {
  addPass(createARMISelDag(getARMTargetMachine(), getOptLevel()));
  return false;
}

void ARMPassConfig::addPreRegAlloc() {
  if (getOptLevel() != CodeGenOpt::None) {
    addPass(createMLxExpansionPass());

    if (EnableARMLoadStoreOpt)
      addPass(createARMLoadStoreOptimizationPass(/* pre-register alloc */
                                                 true));

    if (!DisableA15SDOptimization)
      addPass(createA15SDOptimizerPass());
  }
}

void ARMPassConfig::addPreSched2() {
  if (getOptLevel() != CodeGenOpt::None) {
    if (EnableARMLoadStoreOpt)
      addPass(createARMLoadStoreOptimizationPass());

    addPass(createExecutionDependencyFixPass(&ARM::DPRRegClass));
  }

  // Expand some pseudo instructions into multiple instructions to allow
  // proper scheduling.
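  // (Note: this expansion runs even at -O0; pseudos such as MOVi32imm have
  // no hardware encoding of their own, so they must be rewritten into real
  // instructions, e.g. a MOVW/MOVT pair, before emission. Illustrative
  // example; the full set of pseudos lives in ARMExpandPseudoInsts.cpp.)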
  addPass(createARMExpandPseudoPass());

  if (getOptLevel() != CodeGenOpt::None) {
    // In v8, if-conversion depends on Thumb instruction widths.
    addPass(createThumb2SizeReductionPass([this](const Function &F) {
      return this->TM->getSubtarget<ARMSubtarget>(F).restrictIT();
    }));

    addPass(createIfConverter([this](const Function &F) {
      return !this->TM->getSubtarget<ARMSubtarget>(F).isThumb1Only();
    }));
  }
  addPass(createThumb2ITBlockPass());
}

void ARMPassConfig::addPreEmitPass() {
  addPass(createThumb2SizeReductionPass());

  // The constant island pass works on unbundled instructions.
  addPass(createUnpackMachineBundles([this](const Function &F) {
    return this->TM->getSubtarget<ARMSubtarget>(F).isThumb2();
  }));

  // Don't optimize barriers at -O0.
  if (getOptLevel() != CodeGenOpt::None)
    addPass(createARMOptimizeBarriersPass());

  addPass(createARMConstantIslandPass());
}