//===-- AArch64TargetMachine.cpp - Define TargetMachine for AArch64 -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetMachine.h"
#include "AArch64.h"
#include "AArch64MacroFusion.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetObjectFile.h"
#include "AArch64TargetTransformInfo.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/Localizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"
#include <memory>
#include <string>

using namespace llvm;

static cl::opt<bool> EnableCCMP("aarch64-enable-ccmp",
                                cl::desc("Enable the CCMP formation pass"),
                                cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableCondBrTuning("aarch64-enable-cond-br-tune",
                       cl::desc("Enable the conditional branch tuning pass"),
                       cl::init(true), cl::Hidden);

static cl::opt<bool> EnableMCR("aarch64-enable-mcr",
                               cl::desc("Enable the machine combiner pass"),
                               cl::init(true), cl::Hidden);

static cl::opt<bool> EnableStPairSuppress("aarch64-enable-stp-suppress",
                                          cl::desc("Suppress STP for AArch64"),
                                          cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAdvSIMDScalar(
    "aarch64-enable-simd-scalar",
    cl::desc("Enable use of AdvSIMD scalar integer instructions"),
    cl::init(false), cl::Hidden);

static cl::opt<bool>
    EnablePromoteConstant("aarch64-enable-promote-const",
                          cl::desc("Enable the promote constant pass"),
                          cl::init(true), cl::Hidden);

static cl::opt<bool> EnableCollectLOH(
    "aarch64-enable-collect-loh",
    cl::desc("Enable the pass that emits the linker optimization hints (LOH)"),
    cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableDeadRegisterElimination("aarch64-enable-dead-defs", cl::Hidden,
                                  cl::desc("Enable the pass that removes dead"
                                           " definitions and replaces stores to"
                                           " them with stores to the zero"
                                           " register"),
                                  cl::init(true));

static cl::opt<bool> EnableRedundantCopyElimination(
    "aarch64-enable-copyelim",
    cl::desc("Enable the redundant copy elimination pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableLoadStoreOpt("aarch64-enable-ldst-opt",
                                        cl::desc("Enable the load/store pair"
                                                 " optimization pass"),
                                        cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAtomicTidy(
    "aarch64-enable-atomic-cfg-tidy", cl::Hidden,
    cl::desc("Run SimplifyCFG after expanding atomic operations"
             " to make use of cmpxchg flow-based information"),
    cl::init(true));

static cl::opt<bool>
    EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden,
                            cl::desc("Run early if-conversion"),
                            cl::init(true));

static cl::opt<bool>
    EnableCondOpt("aarch64-enable-condopt",
                  cl::desc("Enable the condition optimizer pass"),
                  cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableA53Fix835769("aarch64-fix-cortex-a53-835769", cl::Hidden,
                       cl::desc("Work around Cortex-A53 erratum 835769"),
                       cl::init(false));

static cl::opt<bool>
    EnableGEPOpt("aarch64-enable-gep-opt", cl::Hidden,
                 cl::desc("Enable optimizations on complex GEPs"),
                 cl::init(false));

static cl::opt<bool>
    BranchRelaxation("aarch64-enable-branch-relax", cl::Hidden, cl::init(true),
                     cl::desc("Relax out of range conditional branches"));

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
    EnableGlobalMerge("aarch64-enable-global-merge", cl::Hidden,
                      cl::desc("Enable the global merge pass"));

static cl::opt<bool>
    EnableLoopDataPrefetch("aarch64-enable-loop-data-prefetch", cl::Hidden,
                           cl::desc("Enable the loop data prefetch pass"),
                           cl::init(true));

static cl::opt<int> EnableGlobalISelAtO(
    "aarch64-enable-global-isel-at-O", cl::Hidden,
    cl::desc("Enable GlobalISel at or below an opt level (-1 to disable)"),
    cl::init(-1));

extern "C" void LLVMInitializeAArch64Target() {
  // Register the target.
  RegisterTargetMachine<AArch64leTargetMachine> X(getTheAArch64leTarget());
  RegisterTargetMachine<AArch64beTargetMachine> Y(getTheAArch64beTarget());
  RegisterTargetMachine<AArch64leTargetMachine> Z(getTheARM64Target());
  auto PR = PassRegistry::getPassRegistry();
  initializeGlobalISel(*PR);
  initializeAArch64A53Fix835769Pass(*PR);
  initializeAArch64A57FPLoadBalancingPass(*PR);
  initializeAArch64AdvSIMDScalarPass(*PR);
  initializeAArch64CollectLOHPass(*PR);
  initializeAArch64ConditionalComparesPass(*PR);
  initializeAArch64ConditionOptimizerPass(*PR);
  initializeAArch64DeadRegisterDefinitionsPass(*PR);
  initializeAArch64ExpandPseudoPass(*PR);
  initializeAArch64LoadStoreOptPass(*PR);
  initializeAArch64VectorByElementOptPass(*PR);
  initializeAArch64PromoteConstantPass(*PR);
  initializeAArch64RedundantCopyEliminationPass(*PR);
  initializeAArch64StorePairSuppressPass(*PR);
  initializeLDTLSCleanupPass(*PR);
}

//===----------------------------------------------------------------------===//
// AArch64 Lowering public interface.
//===----------------------------------------------------------------------===//
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return llvm::make_unique<AArch64_MachoTargetObjectFile>();
  if (TT.isOSBinFormatCOFF())
    return llvm::make_unique<AArch64_COFFTargetObjectFile>();

  return llvm::make_unique<AArch64_ELFTargetObjectFile>();
}

// Helper function to build a DataLayout string
static std::string computeDataLayout(const Triple &TT,
                                     const MCTargetOptions &Options,
                                     bool LittleEndian) {
  if (Options.getABIName() == "ilp32")
    return "e-m:e-p:32:32-i8:8-i16:16-i64:64-S128";
  if (TT.isOSBinFormatMachO())
    return "e-m:o-i64:64-i128:128-n32:64-S128";
  if (TT.isOSBinFormatCOFF())
    return "e-m:w-i64:64-i128:128-n32:64-S128";
  if (LittleEndian)
    return "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128";
  return "E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128";
}

static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           Optional<Reloc::Model> RM) {
  // AArch64 Darwin is always PIC.
  if (TT.isOSDarwin())
    return Reloc::PIC_;
  // On ELF platforms the default static relocation model has a smart enough
  // linker to cope with referencing external symbols defined in a shared
  // library. Hence DynamicNoPIC doesn't need to be promoted to PIC.
  if (!RM.hasValue() || *RM == Reloc::DynamicNoPIC)
    return Reloc::Static;
  return *RM;
}

/// Create an AArch64 architecture model.
///
AArch64TargetMachine::AArch64TargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    CodeModel::Model CM, CodeGenOpt::Level OL, bool LittleEndian)
    // This nested ternary is horrible, but DL needs to be properly
    // initialized before TLInfo is constructed.
    : LLVMTargetMachine(T, computeDataLayout(TT, Options.MCOptions,
                                             LittleEndian),
                        TT, CPU, FS, Options,
                        getEffectiveRelocModel(TT, RM), CM, OL),
      TLOF(createTLOF(getTargetTriple())),
      isLittle(LittleEndian) {
  initAsmInfo();
}

AArch64TargetMachine::~AArch64TargetMachine() = default;

const AArch64Subtarget *
AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
                        ? CPUAttr.getValueAsString().str()
                        : TargetCPU;
  std::string FS = !FSAttr.hasAttribute(Attribute::None)
                       ? FSAttr.getValueAsString().str()
                       : TargetFS;

  auto &I = SubtargetMap[CPU + FS];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<AArch64Subtarget>(TargetTriple, CPU, FS, *this,
                                            isLittle);
  }
  return I.get();
}

void AArch64leTargetMachine::anchor() { }

AArch64leTargetMachine::AArch64leTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    CodeModel::Model CM, CodeGenOpt::Level OL)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

void AArch64beTargetMachine::anchor() { }

AArch64beTargetMachine::AArch64beTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    CodeModel::Model CM, CodeGenOpt::Level OL)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}

namespace {

/// AArch64 Code Generator Pass Configuration Options.
class AArch64PassConfig : public TargetPassConfig {
public:
  AArch64PassConfig(AArch64TargetMachine &TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {
    if (TM.getOptLevel() != CodeGenOpt::None)
      substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
  }

  AArch64TargetMachine &getAArch64TargetMachine() const {
    return getTM<AArch64TargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createAArch64MacroFusionDAGMutation());
    return DAG;
  }

  ScheduleDAGInstrs *
  createPostMachineScheduler(MachineSchedContext *C) const override {
    const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
    if (ST.hasFuseAES() || ST.hasFuseLiterals()) {
      // Run the Macro Fusion after RA again since literals are expanded from
      // pseudos then (v. addPreSched2()).
      ScheduleDAGMI *DAG = createGenericSchedPostRA(C);
      DAG->addMutation(createAArch64MacroFusionDAGMutation());
      return DAG;
    }

    return nullptr;
  }

  void addIRPasses() override;
  bool addPreISel() override;
  bool addInstSelector() override;
#ifdef LLVM_BUILD_GLOBAL_ISEL
  bool addIRTranslator() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  void addPreGlobalInstructionSelect() override;
  bool addGlobalInstructionSelect() override;
#endif
  bool addILPOpts() override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;

  bool isGlobalISelEnabled() const override;
};

} // end anonymous namespace

TargetIRAnalysis AArch64TargetMachine::getTargetIRAnalysis() {
  return TargetIRAnalysis([this](const Function &F) {
    return TargetTransformInfo(AArch64TTIImpl(this, F));
  });
}

TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new AArch64PassConfig(*this, PM);
}

void AArch64PassConfig::addIRPasses() {
  // Always expand atomic operations, we don't deal with atomicrmw or cmpxchg
  // ourselves.
  addPass(createAtomicExpandPass());

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether it succeeded. We can exploit existing control-flow in
  // ldrex/strex loops to simplify this, but it needs tidying up.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass());

  // Run LoopDataPrefetch
  //
  // Run this before LSR to remove the multiplies involved in computing the
  // pointer values N iterations ahead.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableLoopDataPrefetch)
    addPass(createLoopDataPrefetchPass());

  TargetPassConfig::addIRPasses();

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createInterleavedAccessPass());

  if (TM->getOptLevel() == CodeGenOpt::Aggressive && EnableGEPOpt) {
    // Call SeparateConstOffsetFromGEP pass to extract constants within indices
    // and lower a GEP with multiple indices to either arithmetic operations or
    // multiple GEPs with single index.
    addPass(createSeparateConstOffsetFromGEPPass(TM, true));
    // Call EarlyCSE pass to find and remove subexpressions in the lowered
    // result.
    addPass(createEarlyCSEPass());
    // Do loop invariant code motion in case part of the lowered result is
    // invariant.
    addPass(createLICMPass());
  }
}

// Pass Pipeline Configuration
bool AArch64PassConfig::addPreISel() {
  // Run promote constant before global merge, so that the promoted constants
  // get a chance to be merged.
  if (TM->getOptLevel() != CodeGenOpt::None && EnablePromoteConstant)
    addPass(createAArch64PromoteConstantPass());
  // FIXME: On AArch64, this depends on the type.
  // Basically, the addressable offsets are up to 4095 * Ty.getSizeInBytes(),
  // and the offset has to be a multiple of the related size in bytes.
  if ((TM->getOptLevel() != CodeGenOpt::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    bool OnlyOptimizeForSize = (TM->getOptLevel() < CodeGenOpt::Aggressive) &&
                               (EnableGlobalMerge == cl::BOU_UNSET);
    addPass(createGlobalMergePass(TM, 4095, OnlyOptimizeForSize));
  }

  return false;
}

bool AArch64PassConfig::addInstSelector() {
  addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));

  // For ELF, cleanup any local-dynamic TLS accesses (i.e. combine as many
  // references to _TLS_MODULE_BASE_ as possible).
  if (TM->getTargetTriple().isOSBinFormatELF() &&
      getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64CleanupLocalDynamicTLSPass());

  return false;
}

#ifdef LLVM_BUILD_GLOBAL_ISEL
bool AArch64PassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool AArch64PassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

bool AArch64PassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

void AArch64PassConfig::addPreGlobalInstructionSelect() {
  // Work around the deficiency of the fast register allocator.
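  // (The Localizer pass moves/duplicates constant-like values into the blocks
  // that use them, shortening live ranges that the fast allocator would
  // otherwise spill across blocks.)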
  if (TM->getOptLevel() == CodeGenOpt::None)
    addPass(new Localizer());
}

bool AArch64PassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}
#endif

bool AArch64PassConfig::isGlobalISelEnabled() const {
  return TM->getOptLevel() <= EnableGlobalISelAtO;
}

bool AArch64PassConfig::addILPOpts() {
  if (EnableCondOpt)
    addPass(createAArch64ConditionOptimizerPass());
  if (EnableCCMP)
    addPass(createAArch64ConditionalCompares());
  if (EnableMCR)
    addPass(&MachineCombinerID);
  if (EnableCondBrTuning)
    addPass(createAArch64CondBrTuning());
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);
  if (EnableStPairSuppress)
    addPass(createAArch64StorePairSuppressPass());
  addPass(createAArch64VectorByElementOptPass());
  return true;
}

void AArch64PassConfig::addPreRegAlloc() {
  // Change dead register definitions to refer to the zero register.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination)
    addPass(createAArch64DeadRegisterDefinitions());

  // Use AdvSIMD scalar instructions whenever profitable.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAdvSIMDScalar) {
    addPass(createAArch64AdvSIMDScalar());
    // The AdvSIMD pass may produce copies that can be rewritten to
    // be register coalescer friendly.
    addPass(&PeepholeOptimizerID);
  }
}

void AArch64PassConfig::addPostRegAlloc() {
  // Remove redundant copy instructions.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableRedundantCopyElimination)
    addPass(createAArch64RedundantCopyEliminationPass());

  if (TM->getOptLevel() != CodeGenOpt::None && usingDefaultRegAlloc())
    // Improve performance for some FP/SIMD code for A57.
    addPass(createAArch64A57FPLoadBalancing());
}

void AArch64PassConfig::addPreSched2() {
  // Expand some pseudo instructions to allow proper scheduling.
  addPass(createAArch64ExpandPseudoPass());
  // Use load/store pair instructions when possible.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableLoadStoreOpt)
    addPass(createAArch64LoadStoreOptimizationPass());
}

void AArch64PassConfig::addPreEmitPass() {
  if (EnableA53Fix835769)
    addPass(createAArch64A53Fix835769());
  // Relax conditional branch instructions if they're otherwise out of
  // range of their destination.
  if (BranchRelaxation)
    addPass(&BranchRelaxationPassID);

  if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH &&
      TM->getTargetTriple().isOSBinFormatMachO())
    addPass(createAArch64CollectLOHPass());
}