//===-- ARMTargetMachine.cpp - Define TargetMachine for ARM ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMFrameLowering.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "ARMTargetTransformInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"
using namespace llvm;

static cl::opt<bool>
DisableA15SDOptimization("disable-a15-sd-optimization", cl::Hidden,
                         cl::desc("Inhibit optimization of S->D register accesses on A15"),
                         cl::init(false));

static cl::opt<bool>
EnableAtomicTidy("arm-atomic-cfg-tidy", cl::Hidden,
                 cl::desc("Run SimplifyCFG after expanding atomic operations"
                          " to make use of cmpxchg flow-based information"),
                 cl::init(true));

static cl::opt<bool>
EnableARMLoadStoreOpt("arm-load-store-opt", cl::Hidden,
                      cl::desc("Enable ARM load/store optimization pass"),
                      cl::init(true));

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
EnableGlobalMerge("arm-global-merge", cl::Hidden,
                  cl::desc("Enable the global merge pass"));

extern "C" void LLVMInitializeARMTarget() {
  // Register the target.
  RegisterTargetMachine<ARMLETargetMachine> X(TheARMLETarget);
  RegisterTargetMachine<ARMBETargetMachine> Y(TheARMBETarget);
  RegisterTargetMachine<ThumbLETargetMachine> A(TheThumbLETarget);
  RegisterTargetMachine<ThumbBETargetMachine> B(TheThumbBETarget);
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return make_unique<TargetLoweringObjectFileMachO>();
  if (TT.isOSWindows())
    return make_unique<TargetLoweringObjectFileCOFF>();
  return make_unique<ARMElfTargetObjectFile>();
}
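
// Map the triple, CPU, and any explicit -target-abi option to one of the
// supported ARM ABIs. An explicit ABI name wins; otherwise the default is
// derived from the platform below (e.g. AAPCS16 on watchOS, APCS on other
// Darwin targets, AAPCS on Windows and most ELF environments).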
static ARMBaseTargetMachine::ARMABI
computeTargetABI(const Triple &TT, StringRef CPU,
                 const TargetOptions &Options) {
  if (Options.MCOptions.getABIName() == "aapcs16")
    return ARMBaseTargetMachine::ARM_ABI_AAPCS16;
  else if (Options.MCOptions.getABIName().startswith("aapcs"))
    return ARMBaseTargetMachine::ARM_ABI_AAPCS;
  else if (Options.MCOptions.getABIName().startswith("apcs"))
    return ARMBaseTargetMachine::ARM_ABI_APCS;

  assert(Options.MCOptions.getABIName().empty() &&
         "Unknown target-abi option!");

  ARMBaseTargetMachine::ARMABI TargetABI =
      ARMBaseTargetMachine::ARM_ABI_UNKNOWN;

  // FIXME: This is duplicated code from the front end and should be unified.
  if (TT.isOSBinFormatMachO()) {
    if (TT.getEnvironment() == llvm::Triple::EABI ||
        (TT.getOS() == llvm::Triple::UnknownOS && TT.isOSBinFormatMachO()) ||
        CPU.startswith("cortex-m")) {
      TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
    } else if (TT.isWatchABI()) {
      TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS16;
    } else {
      TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
    }
  } else if (TT.isOSWindows()) {
    // FIXME: this is invalid for WindowsCE
    TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
  } else {
    // Select the default based on the platform.
    switch (TT.getEnvironment()) {
    case llvm::Triple::Android:
    case llvm::Triple::GNUEABI:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::EABIHF:
    case llvm::Triple::EABI:
      TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
      break;
    case llvm::Triple::GNU:
      TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
      break;
    default:
      if (TT.isOSNetBSD())
        TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
      else
        TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
      break;
    }
  }

  return TargetABI;
}

static std::string computeDataLayout(const Triple &TT, StringRef CPU,
                                     const TargetOptions &Options,
                                     bool isLittle) {
  auto ABI = computeTargetABI(TT, CPU, Options);
  std::string Ret = "";

  if (isLittle)
    // Little endian.
    Ret += "e";
  else
    // Big endian.
    Ret += "E";

  Ret += DataLayout::getManglingComponent(TT);

  // Pointers are 32 bits and aligned to 32 bits.
  Ret += "-p:32:32";

  // ABIs other than APCS have 64 bit integers with natural alignment.
  if (ABI != ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-i64:64";

  // We have 64 bit floats. The APCS ABI requires them to be aligned to 32
  // bits, others to 64 bits. We always try to align to 64 bits.
  if (ABI == ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-f64:32:64";

  // We have 128 and 64 bit vectors. The APCS ABI aligns them to 32 bits,
  // others to 64. We always try to give them natural alignment.
  if (ABI == ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-v64:32:64-v128:32:128";
  else if (ABI != ARMBaseTargetMachine::ARM_ABI_AAPCS16)
    Ret += "-v128:64:128";

  // Try to align aggregates to 32 bits (the default is 64 bits, which has no
  // particular hardware support on 32-bit ARM).
  Ret += "-a:0:32";

  // Integer registers are 32 bits.
  Ret += "-n32";

  // The stack is 128 bit aligned on NaCl and AAPCS16, 64 bit aligned on AAPCS
  // and 32 bit aligned everywhere else.
  if (TT.isOSNaCl() || ABI == ARMBaseTargetMachine::ARM_ABI_AAPCS16)
    Ret += "-S128";
  else if (ABI == ARMBaseTargetMachine::ARM_ABI_AAPCS)
    Ret += "-S64";
  else
    Ret += "-S32";

  return Ret;
}
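
// As a worked example of the construction above: a little-endian AAPCS ELF
// target such as armv7-unknown-linux-gnueabihf takes the branches that yield
// "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".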

/// TargetMachine ctor - Create an ARM architecture model.
///
ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Reloc::Model RM, CodeModel::Model CM,
                                           CodeGenOpt::Level OL, bool isLittle)
    : LLVMTargetMachine(T, computeDataLayout(TT, CPU, Options, isLittle), TT,
                        CPU, FS, Options, RM, CM, OL),
      TargetABI(computeTargetABI(TT, CPU, Options)),
      TLOF(createTLOF(getTargetTriple())),
      Subtarget(TT, CPU, FS, *this, isLittle), isLittle(isLittle) {

  // Default to triple-appropriate float ABI
  if (Options.FloatABIType == FloatABI::Default)
    this->Options.FloatABIType =
        Subtarget.isTargetHardFloat() ? FloatABI::Hard : FloatABI::Soft;

  // Default to triple-appropriate EABI
  if (Options.EABIVersion == EABI::Default ||
      Options.EABIVersion == EABI::Unknown) {
    if (Subtarget.isTargetGNUAEABI())
      this->Options.EABIVersion = EABI::GNU;
    else
      this->Options.EABIVersion = EABI::EABI5;
  }
}

ARMBaseTargetMachine::~ARMBaseTargetMachine() {}
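
// Return (and cache) an ARMSubtarget for the given function. The cache key
// is the concatenation of the CPU and feature strings, so two functions that
// differ only in a per-function attribute such as "use-soft-float" get
// distinct subtargets.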
const ARMSubtarget *
ARMBaseTargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
                        ? CPUAttr.getValueAsString().str()
                        : TargetCPU;
  std::string FS = !FSAttr.hasAttribute(Attribute::None)
                       ? FSAttr.getValueAsString().str()
                       : TargetFS;

  // FIXME: This is related to the code below that resets the target options:
  // we need to know whether or not the soft float flag is set on the function
  // before we can generate a subtarget. We also need to use it as a key for
  // the subtarget since that can be the only difference between two functions.
  bool SoftFloat =
      F.getFnAttribute("use-soft-float").getValueAsString() == "true";
  // If the soft float attribute is set on the function, turn on the soft float
  // subtarget feature.
  if (SoftFloat)
    FS += FS.empty() ? "+soft-float" : ",+soft-float";

  auto &I = SubtargetMap[CPU + FS];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<ARMSubtarget>(TargetTriple, CPU, FS, *this, isLittle);
  }
  return I.get();
}

TargetIRAnalysis ARMBaseTargetMachine::getTargetIRAnalysis() {
  return TargetIRAnalysis([this](const Function &F) {
    return TargetTransformInfo(ARMTTIImpl(this, F));
  });
}

void ARMTargetMachine::anchor() {}

ARMTargetMachine::ARMTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   const TargetOptions &Options,
                                   Reloc::Model RM, CodeModel::Model CM,
                                   CodeGenOpt::Level OL, bool isLittle)
    : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, isLittle) {
  initAsmInfo();
  // Thumb-only CPUs (e.g. the Cortex-M profile) cannot execute ARM mode code.
  if (!Subtarget.hasARMOps())
    report_fatal_error("CPU: '" + Subtarget.getCPUString() + "' does not "
                       "support ARM mode execution!");
}

void ARMLETargetMachine::anchor() {}

ARMLETargetMachine::ARMLETargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Reloc::Model RM, CodeModel::Model CM,
                                       CodeGenOpt::Level OL)
    : ARMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

void ARMBETargetMachine::anchor() {}

ARMBETargetMachine::ARMBETargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Reloc::Model RM, CodeModel::Model CM,
                                       CodeGenOpt::Level OL)
    : ARMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}

void ThumbTargetMachine::anchor() {}

ThumbTargetMachine::ThumbTargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Reloc::Model RM, CodeModel::Model CM,
                                       CodeGenOpt::Level OL, bool isLittle)
    : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, isLittle) {
  initAsmInfo();
}

void ThumbLETargetMachine::anchor() {}

ThumbLETargetMachine::ThumbLETargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Reloc::Model RM, CodeModel::Model CM,
                                           CodeGenOpt::Level OL)
    : ThumbTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

void ThumbBETargetMachine::anchor() {}

ThumbBETargetMachine::ThumbBETargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Reloc::Model RM, CodeModel::Model CM,
                                           CodeGenOpt::Level OL)
    : ThumbTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
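
// The pass configuration below hooks the ARM-specific IR and machine passes
// into the generic CodeGen pipeline; each overridden add* method corresponds
// to one extension point of TargetPassConfig.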
namespace {
/// ARM Code Generator Pass Configuration Options.
class ARMPassConfig : public TargetPassConfig {
public:
  ARMPassConfig(ARMBaseTargetMachine *TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {}

  ARMBaseTargetMachine &getARMTargetMachine() const {
    return getTM<ARMBaseTargetMachine>();
  }

  void addIRPasses() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};
} // namespace

TargetPassConfig *ARMBaseTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new ARMPassConfig(this, PM);
}

void ARMPassConfig::addIRPasses() {
  if (TM->Options.ThreadModel == ThreadModel::Single)
    addPass(createLowerAtomicPass());
  else
    addPass(createAtomicExpandPass(TM));

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether they succeeded. We can exploit existing control flow in
  // ldrex/strex loops to simplify this, but it needs tidying up.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass(-1, [this](const Function &F) {
      const auto &ST = this->TM->getSubtarget<ARMSubtarget>(F);
      return ST.hasAnyDataBarrier() && !ST.isThumb1Only();
    }));

  TargetPassConfig::addIRPasses();

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createInterleavedAccessPass(TM));
}

bool ARMPassConfig::addPreISel() {
  if ((TM->getOptLevel() != CodeGenOpt::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    // FIXME: This uses the Thumb1-only constant (127) as the maximal global
    // offset for merging globals. We may want to look into using the old
    // value of 4095 for non-Thumb1 code, based on the TargetMachine, but
    // that starts to become tricky when doing code gen per function.
    bool OnlyOptimizeForSize = (TM->getOptLevel() < CodeGenOpt::Aggressive) &&
                               (EnableGlobalMerge == cl::BOU_UNSET);
    // Merging of extern globals is enabled by default on non-Mach-O as we
    // expect it to be generally either beneficial or harmless. On Mach-O it
    // is disabled as we emit the .subsections_via_symbols directive which
    // means that merging extern globals is not safe.
    bool MergeExternalByDefault = !TM->getTargetTriple().isOSBinFormatMachO();
    addPass(createGlobalMergePass(TM, 127, OnlyOptimizeForSize,
                                  MergeExternalByDefault));
  }

  return false;
}

bool ARMPassConfig::addInstSelector() {
  addPass(createARMISelDag(getARMTargetMachine(), getOptLevel()));
  return false;
}

void ARMPassConfig::addPreRegAlloc() {
  if (getOptLevel() != CodeGenOpt::None) {
    addPass(createMLxExpansionPass());

    if (EnableARMLoadStoreOpt)
      addPass(createARMLoadStoreOptimizationPass(/* pre-register alloc */ true));

    if (!DisableA15SDOptimization)
      addPass(createA15SDOptimizerPass());
  }
}
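
// Note that the load/store optimizer runs twice: the pre-register-allocation
// variant above, and the full post-allocation run below, which can merge
// loads and stores into LDM/STM instructions once physical registers are
// known.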
void ARMPassConfig::addPreSched2() {
  if (getOptLevel() != CodeGenOpt::None) {
    if (EnableARMLoadStoreOpt)
      addPass(createARMLoadStoreOptimizationPass());

    addPass(createExecutionDependencyFixPass(&ARM::DPRRegClass));
  }

  // Expand some pseudo instructions into multiple instructions to allow
  // proper scheduling.
  addPass(createARMExpandPseudoPass());

  if (getOptLevel() != CodeGenOpt::None) {
    // In v8, IfConversion depends on Thumb instruction widths.
    addPass(createThumb2SizeReductionPass([this](const Function &F) {
      return this->TM->getSubtarget<ARMSubtarget>(F).restrictIT();
    }));

    addPass(createIfConverter([this](const Function &F) {
      return !this->TM->getSubtarget<ARMSubtarget>(F).isThumb1Only();
    }));
  }
  addPass(createThumb2ITBlockPass());
}

void ARMPassConfig::addPreEmitPass() {
  addPass(createThumb2SizeReductionPass());

  // The constant island pass works on unbundled instructions.
  addPass(createUnpackMachineBundles([this](const Function &F) {
    return this->TM->getSubtarget<ARMSubtarget>(F).isThumb2();
  }));

  // Don't optimize barriers at -O0.
  if (getOptLevel() != CodeGenOpt::None)
    addPass(createARMOptimizeBarriersPass());

  // Lay out constant pools (constant islands) last, once instruction sizes
  // are final.
  addPass(createARMConstantIslandPass());
}