//===-- ARMTargetMachine.cpp - Define TargetMachine for ARM ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMFrameLowering.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "ARMTargetTransformInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"
using namespace llvm;

static cl::opt<bool>
DisableA15SDOptimization("disable-a15-sd-optimization", cl::Hidden,
                         cl::desc("Inhibit optimization of S->D register accesses on A15"),
                         cl::init(false));

static cl::opt<bool>
EnableAtomicTidy("arm-atomic-cfg-tidy", cl::Hidden,
                 cl::desc("Run SimplifyCFG after expanding atomic operations"
                          " to make use of cmpxchg flow-based information"),
                 cl::init(true));

static cl::opt<bool>
EnableARMLoadStoreOpt("arm-load-store-opt", cl::Hidden,
                      cl::desc("Enable ARM load/store optimization pass"),
                      cl::init(true));

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
EnableGlobalMerge("arm-global-merge", cl::Hidden,
                  cl::desc("Enable the global merge pass"));

extern "C" void LLVMInitializeARMTarget() {
  // Register the target.
  RegisterTargetMachine<ARMLETargetMachine> X(TheARMLETarget);
  RegisterTargetMachine<ARMBETargetMachine> Y(TheARMBETarget);
  RegisterTargetMachine<ThumbLETargetMachine> A(TheThumbLETarget);
  RegisterTargetMachine<ThumbBETargetMachine> B(TheThumbBETarget);
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return make_unique<TargetLoweringObjectFileMachO>();
  if (TT.isOSWindows())
    return make_unique<TargetLoweringObjectFileCOFF>();
  return make_unique<ARMElfTargetObjectFile>();
}

static ARMBaseTargetMachine::ARMABI
computeTargetABI(const Triple &TT, StringRef CPU,
                 const TargetOptions &Options) {
  if (Options.MCOptions.getABIName().startswith("aapcs"))
    return ARMBaseTargetMachine::ARM_ABI_AAPCS;
  else if (Options.MCOptions.getABIName().startswith("apcs"))
    return ARMBaseTargetMachine::ARM_ABI_APCS;

  assert(Options.MCOptions.getABIName().empty() &&
         "Unknown target-abi option!");

  ARMBaseTargetMachine::ARMABI TargetABI =
      ARMBaseTargetMachine::ARM_ABI_UNKNOWN;

  // FIXME: This is duplicated code from the front end and should be unified.
  if (TT.isOSBinFormatMachO()) {
    if (TT.getEnvironment() == llvm::Triple::EABI ||
        (TT.getOS() == llvm::Triple::UnknownOS &&
         TT.getObjectFormat() == llvm::Triple::MachO) ||
        CPU.startswith("cortex-m")) {
      TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
    } else {
      TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
    }
  } else if (TT.isOSWindows()) {
    // FIXME: this is invalid for WindowsCE.
    TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
  } else {
    // Select the default based on the platform.
    switch (TT.getEnvironment()) {
    case llvm::Triple::Android:
    case llvm::Triple::GNUEABI:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::EABIHF:
    case llvm::Triple::EABI:
      TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
      break;
    case llvm::Triple::GNU:
      TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
      break;
    default:
      if (TT.getOS() == llvm::Triple::NetBSD)
        TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
      else
        TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
      break;
    }
  }

  return TargetABI;
}
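
// Illustrative defaults from the selection above (assuming no explicit
// -target-abi and no "cortex-m" CPU string): Mach-O triples such as
// thumbv7-apple-ios resolve to APCS, while ELF triples such as
// armv7-unknown-linux-gnueabihf and Windows triples resolve to AAPCS.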

static std::string computeDataLayout(const Triple &TT, StringRef CPU,
                                     const TargetOptions &Options,
                                     bool isLittle) {
  auto ABI = computeTargetABI(TT, CPU, Options);
  std::string Ret = "";

  if (isLittle)
    // Little endian.
    Ret += "e";
  else
    // Big endian.
    Ret += "E";

  Ret += DataLayout::getManglingComponent(TT);

  // Pointers are 32 bits and aligned to 32 bits.
  Ret += "-p:32:32";

  // ABIs other than APCS have 64-bit integers with natural alignment.
  if (ABI != ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-i64:64";

  // We have 64-bit floats. The APCS ABI requires them to be aligned to 32
  // bits, other ABIs to 64 bits. We always try to align to 64 bits.
  if (ABI == ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-f64:32:64";

  // We have 128- and 64-bit vectors. The APCS ABI aligns them to 32 bits,
  // other ABIs to 64 bits. We always try to give them natural alignment.
  if (ABI == ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-v64:32:64-v128:32:128";
  else
    Ret += "-v128:64:128";

  // Try to align aggregates to 32 bits (the default is 64 bits, which has no
  // particular hardware support on 32-bit ARM).
  Ret += "-a:0:32";

  // Integer registers are 32 bits.
  Ret += "-n32";

  // The stack is 128-bit aligned on NaCl, 64-bit aligned on AAPCS and 32-bit
  // aligned everywhere else.
  if (TT.isOSNaCl())
    Ret += "-S128";
  else if (ABI == ARMBaseTargetMachine::ARM_ABI_AAPCS)
    Ret += "-S64";
  else
    Ret += "-S32";

  return Ret;
}
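
// For example (following the rules above), a little-endian AAPCS ELF target
// produces
//   "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
// while a little-endian APCS Mach-O (iOS) target produces
//   "e-m:o-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32".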

/// TargetMachine ctor - Create an ARM architecture model.
///
ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Reloc::Model RM, CodeModel::Model CM,
                                           CodeGenOpt::Level OL, bool isLittle)
    : LLVMTargetMachine(T, computeDataLayout(TT, CPU, Options, isLittle), TT,
                        CPU, FS, Options, RM, CM, OL),
      TargetABI(computeTargetABI(TT, CPU, Options)),
      TLOF(createTLOF(getTargetTriple())),
      Subtarget(TT, CPU, FS, *this, isLittle), isLittle(isLittle) {

  // Default to a triple-appropriate float ABI.
  if (Options.FloatABIType == FloatABI::Default)
    this->Options.FloatABIType =
        Subtarget.isTargetHardFloat() ? FloatABI::Hard : FloatABI::Soft;
}

ARMBaseTargetMachine::~ARMBaseTargetMachine() {}

const ARMSubtarget *
ARMBaseTargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
                        ? CPUAttr.getValueAsString().str()
                        : TargetCPU;
  std::string FS = !FSAttr.hasAttribute(Attribute::None)
                       ? FSAttr.getValueAsString().str()
                       : TargetFS;

  // FIXME: This is related to the code below that resets the target options:
  // we need to know whether or not the soft float flag is set on the function
  // before we can generate a subtarget. We also need to use it as a key for
  // the subtarget since that can be the only difference between two functions.
  bool SoftFloat =
      F.hasFnAttribute("use-soft-float") &&
      F.getFnAttribute("use-soft-float").getValueAsString() == "true";
  // If the soft float attribute is set on the function, turn on the soft float
  // subtarget feature.
  if (SoftFloat)
    FS += FS.empty() ? "+soft-float" : ",+soft-float";

  auto &I = SubtargetMap[CPU + FS];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<ARMSubtarget>(TargetTriple, CPU, FS, *this, isLittle);
  }
  return I.get();
}

TargetIRAnalysis ARMBaseTargetMachine::getTargetIRAnalysis() {
  return TargetIRAnalysis(
      [this](Function &F) { return TargetTransformInfo(ARMTTIImpl(this, F)); });
}
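
// The analysis above exposes ARMTTIImpl through TargetTransformInfo so that
// IR-level passes (e.g. the vectorizers) can query ARM-specific cost
// information.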

void ARMTargetMachine::anchor() { }

ARMTargetMachine::ARMTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   const TargetOptions &Options,
                                   Reloc::Model RM, CodeModel::Model CM,
                                   CodeGenOpt::Level OL, bool isLittle)
    : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, isLittle) {
  initAsmInfo();
  if (!Subtarget.hasARMOps())
    report_fatal_error("CPU: '" + Subtarget.getCPUString() + "' does not "
                       "support ARM mode execution!");
}
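
// Thumb-only cores (e.g. Cortex-M) have no ARM mode and therefore hit the
// error above; such targets are driven through ThumbTargetMachine below.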

void ARMLETargetMachine::anchor() { }

ARMLETargetMachine::ARMLETargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Reloc::Model RM, CodeModel::Model CM,
                                       CodeGenOpt::Level OL)
    : ARMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

void ARMBETargetMachine::anchor() { }

ARMBETargetMachine::ARMBETargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Reloc::Model RM, CodeModel::Model CM,
                                       CodeGenOpt::Level OL)
    : ARMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}

void ThumbTargetMachine::anchor() { }

ThumbTargetMachine::ThumbTargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Reloc::Model RM, CodeModel::Model CM,
                                       CodeGenOpt::Level OL, bool isLittle)
    : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, isLittle) {
  initAsmInfo();
}

void ThumbLETargetMachine::anchor() { }

ThumbLETargetMachine::ThumbLETargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Reloc::Model RM, CodeModel::Model CM,
                                           CodeGenOpt::Level OL)
    : ThumbTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

void ThumbBETargetMachine::anchor() { }

ThumbBETargetMachine::ThumbBETargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Reloc::Model RM, CodeModel::Model CM,
                                           CodeGenOpt::Level OL)
    : ThumbTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}

namespace {
/// ARM Code Generator Pass Configuration Options.
class ARMPassConfig : public TargetPassConfig {
public:
  ARMPassConfig(ARMBaseTargetMachine *TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {}

  ARMBaseTargetMachine &getARMTargetMachine() const {
    return getTM<ARMBaseTargetMachine>();
  }

  void addIRPasses() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};
} // namespace

TargetPassConfig *ARMBaseTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new ARMPassConfig(this, PM);
}

void ARMPassConfig::addIRPasses() {
  if (TM->Options.ThreadModel == ThreadModel::Single)
    addPass(createLowerAtomicPass());
  else
    addPass(createAtomicExpandPass(TM));

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether it succeeded. We can exploit existing control-flow in
  // ldrex/strex loops to simplify this, but it needs tidying up.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass(-1, [this](const Function &F) {
      const auto &ST = this->TM->getSubtarget<ARMSubtarget>(F);
      return ST.hasAnyDataBarrier() && !ST.isThumb1Only();
    }));

  TargetPassConfig::addIRPasses();

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createInterleavedAccessPass(TM));
}

bool ARMPassConfig::addPreISel() {
  if ((TM->getOptLevel() != CodeGenOpt::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    // FIXME: This is using the Thumb1-only constant value for the maximal
    // global offset when merging globals. We may want to look into using the
    // old value of 4095 for non-Thumb1 code, based on the TargetMachine, but
    // this starts to become tricky when doing code gen per function.
    bool OnlyOptimizeForSize = (TM->getOptLevel() < CodeGenOpt::Aggressive) &&
                               (EnableGlobalMerge == cl::BOU_UNSET);
    addPass(createGlobalMergePass(TM, 127, OnlyOptimizeForSize));
  }

  return false;
}

bool ARMPassConfig::addInstSelector() {
  addPass(createARMISelDag(getARMTargetMachine(), getOptLevel()));

  if (TM->getTargetTriple().isOSBinFormatELF() && TM->Options.EnableFastISel)
    addPass(createARMGlobalBaseRegPass());
  return false;
}

void ARMPassConfig::addPreRegAlloc() {
  if (getOptLevel() != CodeGenOpt::None) {
    addPass(createMLxExpansionPass());

    if (EnableARMLoadStoreOpt)
      addPass(createARMLoadStoreOptimizationPass(/* pre-register alloc */ true));

    if (!DisableA15SDOptimization)
      addPass(createA15SDOptimizerPass());
  }
}

void ARMPassConfig::addPreSched2() {
  if (getOptLevel() != CodeGenOpt::None) {
    if (EnableARMLoadStoreOpt)
      addPass(createARMLoadStoreOptimizationPass());

    addPass(createExecutionDependencyFixPass(&ARM::DPRRegClass));
  }

  // Expand some pseudo instructions into multiple instructions to allow
  // proper scheduling.
  addPass(createARMExpandPseudoPass());

  if (getOptLevel() != CodeGenOpt::None) {
    // In v8, if-conversion depends on Thumb instruction widths.
    addPass(createThumb2SizeReductionPass([this](const Function &F) {
      return this->TM->getSubtarget<ARMSubtarget>(F).restrictIT();
    }));

    addPass(createIfConverter([this](const Function &F) {
      return !this->TM->getSubtarget<ARMSubtarget>(F).isThumb1Only();
    }));
  }
  addPass(createThumb2ITBlockPass());
}

void ARMPassConfig::addPreEmitPass() {
  addPass(createThumb2SizeReductionPass());

  // The constant island pass works on unbundled instructions.
  addPass(createUnpackMachineBundles([this](const Function &F) {
    return this->TM->getSubtarget<ARMSubtarget>(F).isThumb2();
  }));

  // Don't optimize barriers at -O0.
  if (getOptLevel() != CodeGenOpt::None)
    addPass(createARMOptimizeBarriersPass());

  addPass(createARMConstantIslandPass());
}