//===-- AArch64TargetMachine.cpp - Define TargetMachine for AArch64 -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "AArch64CallLowering.h"
#include "AArch64RegisterBankInfo.h"
#include "AArch64TargetMachine.h"
#include "AArch64TargetObjectFile.h"
#include "AArch64TargetTransformInfo.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"

using namespace llvm;

static cl::opt<bool>
EnableCCMP("aarch64-ccmp", cl::desc("Enable the CCMP formation pass"),
           cl::init(true), cl::Hidden);

static cl::opt<bool> EnableMCR("aarch64-mcr",
                               cl::desc("Enable the machine combiner pass"),
                               cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableStPairSuppress("aarch64-stp-suppress",
                     cl::desc("Suppress STP for AArch64"), cl::init(true),
                     cl::Hidden);

static cl::opt<bool>
EnableAdvSIMDScalar("aarch64-simd-scalar",
                    cl::desc("Enable use of AdvSIMD scalar"
                             " integer instructions"),
                    cl::init(false), cl::Hidden);

static cl::opt<bool>
EnablePromoteConstant("aarch64-promote-const",
                      cl::desc("Enable the promote constant pass"),
                      cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableCollectLOH("aarch64-collect-loh",
                 cl::desc("Enable the pass that emits the"
                          " linker optimization hints (LOH)"),
                 cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableDeadRegisterElimination("aarch64-dead-def-elimination", cl::Hidden,
                              cl::desc("Enable the pass that removes dead"
                                       " definitions and replaces stores to"
                                       " them with stores to the zero"
                                       " register"),
                              cl::init(true));

static cl::opt<bool>
EnableRedundantCopyElimination("aarch64-redundant-copy-elim",
                               cl::desc("Enable the redundant copy elimination"
                                        " pass"),
                               cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableLoadStoreOpt("aarch64-load-store-opt",
                   cl::desc("Enable the load/store pair optimization pass"),
                   cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableAtomicTidy("aarch64-atomic-cfg-tidy", cl::Hidden,
                 cl::desc("Run SimplifyCFG after expanding atomic operations"
                          " to make use of cmpxchg flow-based information"),
                 cl::init(true));

static cl::opt<bool>
EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(true));

static cl::opt<bool>
EnableCondOpt("aarch64-condopt",
              cl::desc("Enable the condition optimizer pass"),
              cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableA53Fix835769("aarch64-fix-cortex-a53-835769", cl::Hidden,
                   cl::desc("Work around Cortex-A53 erratum 835769"),
                   cl::init(false));

static cl::opt<bool>
EnableGEPOpt("aarch64-gep-opt", cl::Hidden,
             cl::desc("Enable optimizations on complex GEPs"),
             cl::init(false));

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
EnableGlobalMerge("aarch64-global-merge", cl::Hidden,
                  cl::desc("Enable the global merge pass"));

static cl::opt<bool>
EnableLoopDataPrefetch("aarch64-loop-data-prefetch", cl::Hidden,
                       cl::desc("Enable the loop data prefetch pass"),
                       cl::init(true));
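// All three registered target names share this backend: "aarch64"
// (little-endian), "aarch64_be" (big-endian), and the legacy "arm64" name,
// which is mapped onto the little-endian target machine below.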
extern "C" void LLVMInitializeAArch64Target() {
  // Register the target.
  RegisterTargetMachine<AArch64leTargetMachine> X(TheAArch64leTarget);
  RegisterTargetMachine<AArch64beTargetMachine> Y(TheAArch64beTarget);
  RegisterTargetMachine<AArch64leTargetMachine> Z(TheARM64Target);
  auto PR = PassRegistry::getPassRegistry();
  initializeGlobalISel(*PR);
  initializeAArch64ExpandPseudoPass(*PR);
}

//===----------------------------------------------------------------------===//
// AArch64 Lowering public interface.
//===----------------------------------------------------------------------===//
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return make_unique<AArch64_MachoTargetObjectFile>();

  return make_unique<AArch64_ELFTargetObjectFile>();
}
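// For reference, the components of the strings below: "e"/"E" selects little
// or big endian, "m:o"/"m:e" selects Mach-O vs. ELF name mangling, "iN:M"
// aligns iN integers to M bits, "n32:64" declares the native integer widths,
// and "S128" gives a 128-bit (16-byte) aligned stack.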
// Helper function to build a DataLayout string.
static std::string computeDataLayout(const Triple &TT, bool LittleEndian) {
  if (TT.isOSBinFormatMachO())
    return "e-m:o-i64:64-i128:128-n32:64-S128";
  if (LittleEndian)
    return "e-m:e-i64:64-i128:128-n32:64-S128";
  return "E-m:e-i64:64-i128:128-n32:64-S128";
}

/// TargetMachine ctor - Create an AArch64 architecture model.
///
AArch64TargetMachine::AArch64TargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Reloc::Model RM, CodeModel::Model CM,
                                           CodeGenOpt::Level OL,
                                           bool LittleEndian)
    // computeDataLayout is called inline here because DL needs to be properly
    // initialized before TLInfo is constructed.
    : LLVMTargetMachine(T, computeDataLayout(TT, LittleEndian), TT, CPU, FS,
                        Options, RM, CM, OL),
      TLOF(createTLOF(getTargetTriple())), isLittle(LittleEndian) {
  initAsmInfo();
}

AArch64TargetMachine::~AArch64TargetMachine() {}

#ifdef LLVM_BUILD_GLOBAL_ISEL
namespace {
// Bundles the GlobalISel support objects (call lowering and register bank
// info) behind the AArch64GISelAccessor interface.
struct AArch64GISelActualAccessor : public AArch64GISelAccessor {
  std::unique_ptr<CallLowering> CallLoweringInfo;
  std::unique_ptr<RegisterBankInfo> RegBankInfo;
  const CallLowering *getCallLowering() const override {
    return CallLoweringInfo.get();
  }
  const RegisterBankInfo *getRegBankInfo() const override {
    return RegBankInfo.get();
  }
};
} // End anonymous namespace.
#endif

const AArch64Subtarget *
AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
                        ? CPUAttr.getValueAsString().str()
                        : TargetCPU;
  std::string FS = !FSAttr.hasAttribute(Attribute::None)
                       ? FSAttr.getValueAsString().str()
                       : TargetFS;

  // Subtargets are cached by CPU and feature string, so functions with the
  // same attributes share a single AArch64Subtarget instance.
  auto &I = SubtargetMap[CPU + FS];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<AArch64Subtarget>(TargetTriple, CPU, FS, *this,
                                            isLittle);
#ifndef LLVM_BUILD_GLOBAL_ISEL
    AArch64GISelAccessor *GISelAccessor = new AArch64GISelAccessor();
#else
    AArch64GISelActualAccessor *GISelAccessor =
        new AArch64GISelActualAccessor();
    GISelAccessor->CallLoweringInfo.reset(
        new AArch64CallLowering(*I->getTargetLowering()));
    GISelAccessor->RegBankInfo.reset(
        new AArch64RegisterBankInfo(*I->getRegisterInfo()));
#endif
    I->setGISelAccessor(*GISelAccessor);
  }
  return I.get();
}

void AArch64leTargetMachine::anchor() { }

AArch64leTargetMachine::AArch64leTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM,
    CodeGenOpt::Level OL)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

void AArch64beTargetMachine::anchor() { }

AArch64beTargetMachine::AArch64beTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM,
    CodeGenOpt::Level OL)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
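// The pass configuration below controls which AArch64-specific passes run and
// where they sit in the generic codegen pipeline. Note that at any opt level
// above -O0 the constructor swaps the post-RA scheduler for the post-RA
// MachineScheduler (PostMachineScheduler).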
namespace {
/// AArch64 Code Generator Pass Configuration Options.
class AArch64PassConfig : public TargetPassConfig {
public:
  AArch64PassConfig(AArch64TargetMachine *TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {
    if (TM->getOptLevel() != CodeGenOpt::None)
      substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
  }

  AArch64TargetMachine &getAArch64TargetMachine() const {
    return getTM<AArch64TargetMachine>();
  }

  void addIRPasses() override;
  bool addPreISel() override;
  bool addInstSelector() override;
#ifdef LLVM_BUILD_GLOBAL_ISEL
  bool addIRTranslator() override;
  bool addRegBankSelect() override;
#endif
  bool addILPOpts() override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};
} // namespace

TargetIRAnalysis AArch64TargetMachine::getTargetIRAnalysis() {
  return TargetIRAnalysis([this](const Function &F) {
    return TargetTransformInfo(AArch64TTIImpl(this, F));
  });
}

TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new AArch64PassConfig(this, PM);
}

void AArch64PassConfig::addIRPasses() {
  // Always expand atomic operations; we don't deal with atomicrmw or cmpxchg
  // ourselves.
  addPass(createAtomicExpandPass(TM));

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether they succeeded. We can exploit existing control-flow in
  // ldrex/strex loops to simplify this, but it needs tidying up.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass());

  // Run LoopDataPrefetch for Cyclone (the only subtarget that defines a
  // non-zero getPrefetchDistance).
  //
  // Run this before LSR to remove the multiplies involved in computing the
  // pointer values N iterations ahead.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableLoopDataPrefetch)
    addPass(createLoopDataPrefetchPass());

  TargetPassConfig::addIRPasses();

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createInterleavedAccessPass(TM));

  if (TM->getOptLevel() == CodeGenOpt::Aggressive && EnableGEPOpt) {
    // Call the SeparateConstOffsetFromGEP pass to extract constants within
    // indices and lower a GEP with multiple indices to either arithmetic
    // operations or multiple GEPs with a single index.
    addPass(createSeparateConstOffsetFromGEPPass(TM, true));
    // Call the EarlyCSE pass to find and remove subexpressions in the lowered
    // result.
    addPass(createEarlyCSEPass());
    // Do loop invariant code motion in case part of the lowered result is
    // invariant.
    addPass(createLICMPass());
  }
}

// Pass Pipeline Configuration
bool AArch64PassConfig::addPreISel() {
  // Run promote constant before global merge, so that the promoted constants
  // get a chance to be merged.
  if (TM->getOptLevel() != CodeGenOpt::None && EnablePromoteConstant)
    addPass(createAArch64PromoteConstantPass());
  // FIXME: On AArch64, this depends on the type.
  // Basically, the addressable offsets are up to 4095 * Ty.getSizeInBytes(),
  // and the offset has to be a multiple of the related size in bytes.
  if ((TM->getOptLevel() != CodeGenOpt::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    bool OnlyOptimizeForSize = (TM->getOptLevel() < CodeGenOpt::Aggressive) &&
                               (EnableGlobalMerge == cl::BOU_UNSET);
    addPass(createGlobalMergePass(TM, 4095, OnlyOptimizeForSize));
  }

  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64AddressTypePromotionPass());

  return false;
}

bool AArch64PassConfig::addInstSelector() {
  addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));

  // For ELF, clean up any local-dynamic TLS accesses (i.e. combine as many
  // references to _TLS_MODULE_BASE_ as possible).
  if (TM->getTargetTriple().isOSBinFormatELF() &&
      getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64CleanupLocalDynamicTLSPass());

  return false;
}

#ifdef LLVM_BUILD_GLOBAL_ISEL
bool AArch64PassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}
bool AArch64PassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}
#endif

bool AArch64PassConfig::addILPOpts() {
  if (EnableCondOpt)
    addPass(createAArch64ConditionOptimizerPass());
  if (EnableCCMP)
    addPass(createAArch64ConditionalCompares());
  if (EnableMCR)
    addPass(&MachineCombinerID);
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);
  if (EnableStPairSuppress)
    addPass(createAArch64StorePairSuppressPass());
  return true;
}

void AArch64PassConfig::addPreRegAlloc() {
  // Use AdvSIMD scalar instructions whenever profitable.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAdvSIMDScalar) {
    addPass(createAArch64AdvSIMDScalar());
    // The AdvSIMD pass may produce copies that can be rewritten to
    // be register coalescer friendly.
    addPass(&PeepholeOptimizerID);
  }
}

void AArch64PassConfig::addPostRegAlloc() {
  // Remove redundant copy instructions.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableRedundantCopyElimination)
    addPass(createAArch64RedundantCopyEliminationPass());

  // Change dead register definitions to refer to the zero register.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination)
    addPass(createAArch64DeadRegisterDefinitions());
  if (TM->getOptLevel() != CodeGenOpt::None && usingDefaultRegAlloc())
    // Improve performance for some FP/SIMD code for A57.
    addPass(createAArch64A57FPLoadBalancing());
}

void AArch64PassConfig::addPreSched2() {
  // Expand some pseudo instructions to allow proper scheduling.
  addPass(createAArch64ExpandPseudoPass());
  // Use load/store pair instructions when possible.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableLoadStoreOpt)
    addPass(createAArch64LoadStoreOptimizationPass());
}
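// Final fixups before emission: the Cortex-A53 erratum 835769 workaround,
// branch relaxation for branches whose destination would otherwise be out of
// range, and LOH collection on Mach-O targets.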
void AArch64PassConfig::addPreEmitPass() {
  if (EnableA53Fix835769)
    addPass(createAArch64A53Fix835769());
  // Relax conditional branch instructions if they're otherwise out of
  // range of their destination.
  addPass(createAArch64BranchRelaxation());
  if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH &&
      TM->getTargetTriple().isOSBinFormatMachO())
    addPass(createAArch64CollectLOHPass());
}