//===-- AArch64TargetMachine.cpp - Define TargetMachine for AArch64 -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "AArch64CallLowering.h"
#include "AArch64MachineLegalizer.h"
#include "AArch64RegisterBankInfo.h"
#include "AArch64TargetMachine.h"
#include "AArch64TargetObjectFile.h"
#include "AArch64TargetTransformInfo.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/MachineLegalizePass.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"

using namespace llvm;
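// Note that every switch below is a plain cl::opt, so each pass can be
// toggled individually from the llc command line for experiments; an
// illustrative invocation (triple and input chosen arbitrarily) would be:
//   llc -mtriple=aarch64-linux-gnu -aarch64-ccmp=false input.ll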
static cl::opt<bool> EnableCCMP("aarch64-ccmp",
                                cl::desc("Enable the CCMP formation pass"),
                                cl::init(true), cl::Hidden);

static cl::opt<bool> EnableMCR("aarch64-mcr",
                               cl::desc("Enable the machine combiner pass"),
                               cl::init(true), cl::Hidden);

static cl::opt<bool> EnableStPairSuppress("aarch64-stp-suppress",
                                          cl::desc("Suppress STP for AArch64"),
                                          cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAdvSIMDScalar(
    "aarch64-simd-scalar",
    cl::desc("Enable use of AdvSIMD scalar integer instructions"),
    cl::init(false), cl::Hidden);

static cl::opt<bool>
    EnablePromoteConstant("aarch64-promote-const",
                          cl::desc("Enable the promote constant pass"),
                          cl::init(true), cl::Hidden);

static cl::opt<bool> EnableCollectLOH(
    "aarch64-collect-loh",
    cl::desc("Enable the pass that emits the linker optimization hints (LOH)"),
    cl::init(true), cl::Hidden);

static cl::opt<bool> EnableDeadRegisterElimination(
    "aarch64-dead-def-elimination", cl::Hidden,
    cl::desc("Enable the pass that removes dead definitions and replaces "
             "stores to them with stores to the zero register"),
    cl::init(true));

static cl::opt<bool> EnableRedundantCopyElimination(
    "aarch64-redundant-copy-elim",
    cl::desc("Enable the redundant copy elimination pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableLoadStoreOpt(
    "aarch64-load-store-opt",
    cl::desc("Enable the load/store pair optimization pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableAtomicTidy(
    "aarch64-atomic-cfg-tidy", cl::Hidden,
    cl::desc("Run SimplifyCFG after expanding atomic operations to make "
             "use of cmpxchg flow-based information"),
    cl::init(true));

static cl::opt<bool>
    EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden,
                            cl::desc("Run early if-conversion"),
                            cl::init(true));

static cl::opt<bool>
    EnableCondOpt("aarch64-condopt",
                  cl::desc("Enable the condition optimizer pass"),
                  cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableA53Fix835769("aarch64-fix-cortex-a53-835769", cl::Hidden,
                       cl::desc("Work around Cortex-A53 erratum 835769"),
                       cl::init(false));

static cl::opt<bool>
    EnableGEPOpt("aarch64-gep-opt", cl::Hidden,
                 cl::desc("Enable optimizations on complex GEPs"),
                 cl::init(false));

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
    EnableGlobalMerge("aarch64-global-merge", cl::Hidden,
                      cl::desc("Enable the global merge pass"));

static cl::opt<bool>
    EnableLoopDataPrefetch("aarch64-loop-data-prefetch", cl::Hidden,
                           cl::desc("Enable the loop data prefetch pass"),
                           cl::init(true));

extern "C" void LLVMInitializeAArch64Target() {
  // Register the target.
  RegisterTargetMachine<AArch64leTargetMachine> X(TheAArch64leTarget);
  RegisterTargetMachine<AArch64beTargetMachine> Y(TheAArch64beTarget);
  RegisterTargetMachine<AArch64leTargetMachine> Z(TheARM64Target);
  auto PR = PassRegistry::getPassRegistry();
  initializeGlobalISel(*PR);
  initializeAArch64ExpandPseudoPass(*PR);
  initializeAArch64LoadStoreOptPass(*PR);
}

//===----------------------------------------------------------------------===//
// AArch64 Lowering public interface.
//===----------------------------------------------------------------------===//
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return make_unique<AArch64_MachoTargetObjectFile>();

  return make_unique<AArch64_ELFTargetObjectFile>();
}

// Helper function to build a DataLayout string.
static std::string computeDataLayout(const Triple &TT, bool LittleEndian) {
  if (TT.isOSBinFormatMachO())
    return "e-m:o-i64:64-i128:128-n32:64-S128";
  if (LittleEndian)
    return "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128";
  return "E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128";
}

// Helper function to set up the defaults for reciprocals.
static void initReciprocals(AArch64TargetMachine &TM, AArch64Subtarget &ST) {
  // For the estimates, convergence is quadratic, so essentially the number of
  // digits is doubled after each iteration. On ARMv8, the minimum architected
  // accuracy of the initial estimate is 2^-8. Therefore, the number of extra
  // steps to refine the result for float (23 mantissa bits) and for double
  // (52 mantissa bits) are 2 and 3, respectively.
  unsigned ExtraStepsF = 2, ExtraStepsD = ExtraStepsF + 1;
  bool UseRsqrt = ST.useRSqrt();

  TM.Options.Reciprocals.setDefaults("sqrtf", UseRsqrt, ExtraStepsF);
  TM.Options.Reciprocals.setDefaults("sqrtd", UseRsqrt, ExtraStepsD);
  TM.Options.Reciprocals.setDefaults("vec-sqrtf", UseRsqrt, ExtraStepsF);
  TM.Options.Reciprocals.setDefaults("vec-sqrtd", UseRsqrt, ExtraStepsD);

  TM.Options.Reciprocals.setDefaults("divf", false, ExtraStepsF);
  TM.Options.Reciprocals.setDefaults("divd", false, ExtraStepsD);
  TM.Options.Reciprocals.setDefaults("vec-divf", false, ExtraStepsF);
  TM.Options.Reciprocals.setDefaults("vec-divd", false, ExtraStepsD);
}
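// To make the step counts above concrete: an estimate accurate to 2^-8,
// refined by k Newton-Raphson iterations, is good to roughly 8 * 2^k bits.
// Two steps give 8 * 4 = 32 bits, covering float's 24-bit significand; three
// steps give 8 * 8 = 64 bits, covering double's 53-bit significand.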
static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           Optional<Reloc::Model> RM) {
  // AArch64 Darwin is always PIC.
  if (TT.isOSDarwin())
    return Reloc::PIC_;
  // On ELF platforms the default static relocation model has a smart enough
  // linker to cope with referencing external symbols defined in a shared
  // library. Hence DynamicNoPIC doesn't need to be promoted to PIC.
  if (!RM.hasValue() || *RM == Reloc::DynamicNoPIC)
    return Reloc::Static;
  return *RM;
}

/// Create an AArch64 architecture model.
///
AArch64TargetMachine::AArch64TargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    CodeModel::Model CM, CodeGenOpt::Level OL, bool LittleEndian)
    // The DataLayout string has to be computed before TLInfo is constructed,
    // hence the computeDataLayout() call in the initializer list.
    : LLVMTargetMachine(T, computeDataLayout(TT, LittleEndian), TT, CPU, FS,
                        Options, getEffectiveRelocModel(TT, RM), CM, OL),
      TLOF(createTLOF(getTargetTriple())),
      Subtarget(TT, CPU, FS, *this, LittleEndian) {
  initReciprocals(*this, Subtarget);
  initAsmInfo();
}

AArch64TargetMachine::~AArch64TargetMachine() {}

#ifdef LLVM_BUILD_GLOBAL_ISEL
namespace {
struct AArch64GISelActualAccessor : public GISelAccessor {
  std::unique_ptr<CallLowering> CallLoweringInfo;
  std::unique_ptr<MachineLegalizer> Legalizer;
  std::unique_ptr<RegisterBankInfo> RegBankInfo;
  const CallLowering *getCallLowering() const override {
    return CallLoweringInfo.get();
  }
  const class MachineLegalizer *getMachineLegalizer() const override {
    return Legalizer.get();
  }
  const RegisterBankInfo *getRegBankInfo() const override {
    return RegBankInfo.get();
  }
};
} // End anonymous namespace.
#endif

const AArch64Subtarget *
AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
                        ? CPUAttr.getValueAsString().str()
                        : TargetCPU;
  std::string FS = !FSAttr.hasAttribute(Attribute::None)
                       ? FSAttr.getValueAsString().str()
                       : TargetFS;

  auto &I = SubtargetMap[CPU + FS];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<AArch64Subtarget>(TargetTriple, CPU, FS, *this,
                                            Subtarget.isLittleEndian());
#ifndef LLVM_BUILD_GLOBAL_ISEL
    GISelAccessor *GISel = new GISelAccessor();
#else
    AArch64GISelActualAccessor *GISel = new AArch64GISelActualAccessor();
    GISel->CallLoweringInfo.reset(
        new AArch64CallLowering(*I->getTargetLowering()));
    GISel->Legalizer.reset(new AArch64MachineLegalizer());
    GISel->RegBankInfo.reset(
        new AArch64RegisterBankInfo(*I->getRegisterInfo()));
#endif
    I->setGISelAccessor(*GISel);
  }
  return I.get();
}
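// Subtargets are cached above by the concatenated CPU and feature strings, so
// functions carrying different attributes get different subtargets. As an
// illustrative example (hypothetical attribute values), a function annotated
// with
//   attributes #0 = { "target-cpu"="cortex-a57" "target-features"="+neon" }
// selects a different cached entry than one compiled with the module-level
// defaults.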
void AArch64leTargetMachine::anchor() {}

AArch64leTargetMachine::AArch64leTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    CodeModel::Model CM, CodeGenOpt::Level OL)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

void AArch64beTargetMachine::anchor() {}

AArch64beTargetMachine::AArch64beTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    CodeModel::Model CM, CodeGenOpt::Level OL)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}

namespace {
/// AArch64 Code Generator Pass Configuration Options.
class AArch64PassConfig : public TargetPassConfig {
public:
  AArch64PassConfig(AArch64TargetMachine *TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {
    if (TM->getOptLevel() != CodeGenOpt::None)
      substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
  }

  AArch64TargetMachine &getAArch64TargetMachine() const {
    return getTM<AArch64TargetMachine>();
  }

  void addIRPasses() override;
  bool addPreISel() override;
  bool addInstSelector() override;
#ifdef LLVM_BUILD_GLOBAL_ISEL
  bool addIRTranslator() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
#endif
  bool addILPOpts() override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};
} // namespace

TargetIRAnalysis AArch64TargetMachine::getTargetIRAnalysis() {
  return TargetIRAnalysis([this](const Function &F) {
    return TargetTransformInfo(AArch64TTIImpl(this, F));
  });
}

TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new AArch64PassConfig(this, PM);
}

void AArch64PassConfig::addIRPasses() {
  // Always expand atomic operations; we don't deal with atomicrmw or cmpxchg
  // ourselves.
  addPass(createAtomicExpandPass(TM));

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether the operation succeeded. We can exploit existing
  // control flow in ldrex/strex loops to simplify this, but it needs tidying
  // up.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass());

  // Run LoopDataPrefetch.
  //
  // Run this before LSR to remove the multiplies involved in computing the
  // pointer values N iterations ahead.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableLoopDataPrefetch)
    addPass(createLoopDataPrefetchPass());

  TargetPassConfig::addIRPasses();

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createInterleavedAccessPass(TM));

  if (TM->getOptLevel() == CodeGenOpt::Aggressive && EnableGEPOpt) {
    // Run the SeparateConstOffsetFromGEP pass to extract constants within
    // indices and lower a GEP with multiple indices to either arithmetic
    // operations or multiple GEPs with a single index.
    addPass(createSeparateConstOffsetFromGEPPass(TM, true));
    // Run the EarlyCSE pass to find and remove common subexpressions in the
    // lowered result.
    addPass(createEarlyCSEPass());
    // Do loop invariant code motion in case part of the lowered result is
    // invariant.
    addPass(createLICMPass());
  }
}
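// Illustrative (hypothetical IR) example of the GEP lowering above: for
//   %a = getelementptr inbounds [32 x i32], [32 x i32]* %p, i64 %i, i64 5
// the constant index is separated out so the backend can fold the fixed byte
// offset into the addressing mode, while EarlyCSE and LICM clean up or hoist
// the remaining variable address arithmetic.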
// Pass Pipeline Configuration
bool AArch64PassConfig::addPreISel() {
  // Run promote constant before global merge, so that the promoted constants
  // get a chance to be merged.
  if (TM->getOptLevel() != CodeGenOpt::None && EnablePromoteConstant)
    addPass(createAArch64PromoteConstantPass());
  // FIXME: On AArch64, this depends on the type.
  // Basically, the addressable offsets are up to 4095 * Ty.getSizeInBytes(),
  // and the offset has to be a multiple of the related size in bytes.
  if ((TM->getOptLevel() != CodeGenOpt::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    bool OnlyOptimizeForSize = (TM->getOptLevel() < CodeGenOpt::Aggressive) &&
                               (EnableGlobalMerge == cl::BOU_UNSET);
    addPass(createGlobalMergePass(TM, 4095, OnlyOptimizeForSize));
  }

  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64AddressTypePromotionPass());

  return false;
}

bool AArch64PassConfig::addInstSelector() {
  addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));

  // For ELF, clean up any local-dynamic TLS accesses (i.e. combine as many
  // references to _TLS_MODULE_BASE_ as possible).
  if (TM->getTargetTriple().isOSBinFormatELF() &&
      getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64CleanupLocalDynamicTLSPass());

  return false;
}

#ifdef LLVM_BUILD_GLOBAL_ISEL
bool AArch64PassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}
bool AArch64PassConfig::addLegalizeMachineIR() {
  addPass(new MachineLegalizePass());
  return false;
}
bool AArch64PassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}
#endif

bool AArch64PassConfig::addILPOpts() {
  if (EnableCondOpt)
    addPass(createAArch64ConditionOptimizerPass());
  if (EnableCCMP)
    addPass(createAArch64ConditionalCompares());
  if (EnableMCR)
    addPass(&MachineCombinerID);
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);
  if (EnableStPairSuppress)
    addPass(createAArch64StorePairSuppressPass());
  return true;
}

void AArch64PassConfig::addPreRegAlloc() {
  // Use AdvSIMD scalar instructions whenever profitable.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAdvSIMDScalar) {
    addPass(createAArch64AdvSIMDScalar());
    // The AdvSIMD pass may produce copies that can be rewritten to
    // be register coalescer friendly.
    addPass(&PeepholeOptimizerID);
  }
}

void AArch64PassConfig::addPostRegAlloc() {
  // Remove redundant copy instructions.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableRedundantCopyElimination)
    addPass(createAArch64RedundantCopyEliminationPass());

  // Change dead register definitions to refer to the zero register (WZR/XZR).
  if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination)
    addPass(createAArch64DeadRegisterDefinitions());
  if (TM->getOptLevel() != CodeGenOpt::None && usingDefaultRegAlloc())
    // Improve performance for some FP/SIMD code on Cortex-A57.
    addPass(createAArch64A57FPLoadBalancing());
}
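// As an illustration of the pairing done by the load/store optimizer added
// below, two adjacent loads such as
//   ldr w0, [x2]
//   ldr w1, [x2, #4]
// can be combined into a single
//   ldp w0, w1, [x2]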
void AArch64PassConfig::addPreSched2() {
  // Expand some pseudo instructions to allow proper scheduling.
  addPass(createAArch64ExpandPseudoPass());
  // Use load/store pair instructions when possible.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableLoadStoreOpt)
    addPass(createAArch64LoadStoreOptimizationPass());
}

void AArch64PassConfig::addPreEmitPass() {
  if (EnableA53Fix835769)
    addPass(createAArch64A53Fix835769());
  // Relax conditional branch instructions if they're otherwise out of
  // range of their destination.
  addPass(createAArch64BranchRelaxation());
  if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH &&
      TM->getTargetTriple().isOSBinFormatMachO())
    addPass(createAArch64CollectLOHPass());
}