1 //===-- AArch64TargetMachine.cpp - Define TargetMachine for AArch64 -------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "AArch64.h" 14 #include "AArch64CallLowering.h" 15 #include "AArch64LegalizerInfo.h" 16 #include "AArch64MacroFusion.h" 17 #ifdef LLVM_BUILD_GLOBAL_ISEL 18 #include "AArch64RegisterBankInfo.h" 19 #endif 20 #include "AArch64Subtarget.h" 21 #include "AArch64TargetMachine.h" 22 #include "AArch64TargetObjectFile.h" 23 #include "AArch64TargetTransformInfo.h" 24 #include "MCTargetDesc/AArch64MCTargetDesc.h" 25 #include "llvm/ADT/STLExtras.h" 26 #include "llvm/ADT/Triple.h" 27 #include "llvm/Analysis/TargetTransformInfo.h" 28 #include "llvm/CodeGen/GlobalISel/GISelAccessor.h" 29 #include "llvm/CodeGen/GlobalISel/IRTranslator.h" 30 #include "llvm/CodeGen/GlobalISel/InstructionSelect.h" 31 #include "llvm/CodeGen/GlobalISel/Legalizer.h" 32 #include "llvm/CodeGen/GlobalISel/RegBankSelect.h" 33 #include "llvm/CodeGen/MachineScheduler.h" 34 #include "llvm/CodeGen/Passes.h" 35 #include "llvm/CodeGen/TargetPassConfig.h" 36 #include "llvm/IR/Attributes.h" 37 #include "llvm/IR/Function.h" 38 #include "llvm/MC/MCTargetOptions.h" 39 #include "llvm/Pass.h" 40 #include "llvm/Support/CodeGen.h" 41 #include "llvm/Support/CommandLine.h" 42 #include "llvm/Support/TargetRegistry.h" 43 #include "llvm/Target/TargetLoweringObjectFile.h" 44 #include "llvm/Target/TargetOptions.h" 45 #include "llvm/Transforms/Scalar.h" 46 #include <memory> 47 #include <string> 48 49 using namespace llvm; 50 51 static cl::opt<bool> EnableCCMP("aarch64-enable-ccmp", 52 cl::desc("Enable the CCMP formation pass"), 53 cl::init(true), cl::Hidden); 54 55 static cl::opt<bool> EnableMCR("aarch64-enable-mcr", 56 cl::desc("Enable the machine combiner pass"), 57 cl::init(true), cl::Hidden); 58 59 static cl::opt<bool> EnableStPairSuppress("aarch64-enable-stp-suppress", 60 cl::desc("Suppress STP for AArch64"), 61 cl::init(true), cl::Hidden); 62 63 static cl::opt<bool> EnableAdvSIMDScalar( 64 "aarch64-enable-simd-scalar", 65 cl::desc("Enable use of AdvSIMD scalar integer instructions"), 66 cl::init(false), cl::Hidden); 67 68 static cl::opt<bool> 69 EnablePromoteConstant("aarch64-enable-promote-const", 70 cl::desc("Enable the promote constant pass"), 71 cl::init(true), cl::Hidden); 72 73 static cl::opt<bool> EnableCollectLOH( 74 "aarch64-enable-collect-loh", 75 cl::desc("Enable the pass that emits the linker optimization hints (LOH)"), 76 cl::init(true), cl::Hidden); 77 78 static cl::opt<bool> 79 EnableDeadRegisterElimination("aarch64-enable-dead-defs", cl::Hidden, 80 cl::desc("Enable the pass that removes dead" 81 " definitons and replaces stores to" 82 " them with stores to the zero" 83 " register"), 84 cl::init(true)); 85 86 static cl::opt<bool> EnableRedundantCopyElimination( 87 "aarch64-enable-copyelim", 88 cl::desc("Enable the redundant copy elimination pass"), cl::init(true), 89 cl::Hidden); 90 91 static cl::opt<bool> EnableLoadStoreOpt("aarch64-enable-ldst-opt", 92 cl::desc("Enable the load/store pair" 93 " optimization pass"), 94 cl::init(true), cl::Hidden); 95 96 static cl::opt<bool> EnableAtomicTidy( 97 "aarch64-enable-atomic-cfg-tidy", cl::Hidden, 98 cl::desc("Run SimplifyCFG after expanding atomic operations" 99 " to make use of cmpxchg flow-based information"), 100 cl::init(true)); 101 102 static cl::opt<bool> 103 EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden, 104 cl::desc("Run early if-conversion"), 105 cl::init(true)); 106 107 static cl::opt<bool> 108 EnableCondOpt("aarch64-enable-condopt", 109 cl::desc("Enable the condition optimizer pass"), 110 cl::init(true), cl::Hidden); 111 112 static cl::opt<bool> 113 EnableA53Fix835769("aarch64-fix-cortex-a53-835769", cl::Hidden, 114 cl::desc("Work around Cortex-A53 erratum 835769"), 115 cl::init(false)); 116 117 static cl::opt<bool> 118 EnableAddressTypePromotion("aarch64-enable-type-promotion", cl::Hidden, 119 cl::desc("Enable the type promotion pass"), 120 cl::init(false)); 121 122 static cl::opt<bool> 123 EnableGEPOpt("aarch64-enable-gep-opt", cl::Hidden, 124 cl::desc("Enable optimizations on complex GEPs"), 125 cl::init(false)); 126 127 static cl::opt<bool> 128 BranchRelaxation("aarch64-enable-branch-relax", cl::Hidden, cl::init(true), 129 cl::desc("Relax out of range conditional branches")); 130 131 // FIXME: Unify control over GlobalMerge. 132 static cl::opt<cl::boolOrDefault> 133 EnableGlobalMerge("aarch64-enable-global-merge", cl::Hidden, 134 cl::desc("Enable the global merge pass")); 135 136 static cl::opt<bool> 137 EnableLoopDataPrefetch("aarch64-enable-loop-data-prefetch", cl::Hidden, 138 cl::desc("Enable the loop data prefetch pass"), 139 cl::init(true)); 140 141 static cl::opt<int> EnableGlobalISelAtO( 142 "aarch64-enable-global-isel-at-O", cl::Hidden, 143 cl::desc("Enable GlobalISel at or below an opt level (-1 to disable)"), 144 cl::init(-1)); 145 146 extern "C" void LLVMInitializeAArch64Target() { 147 // Register the target. 148 RegisterTargetMachine<AArch64leTargetMachine> X(getTheAArch64leTarget()); 149 RegisterTargetMachine<AArch64beTargetMachine> Y(getTheAArch64beTarget()); 150 RegisterTargetMachine<AArch64leTargetMachine> Z(getTheARM64Target()); 151 auto PR = PassRegistry::getPassRegistry(); 152 initializeGlobalISel(*PR); 153 initializeAArch64A53Fix835769Pass(*PR); 154 initializeAArch64A57FPLoadBalancingPass(*PR); 155 initializeAArch64AddressTypePromotionPass(*PR); 156 initializeAArch64AdvSIMDScalarPass(*PR); 157 initializeAArch64CollectLOHPass(*PR); 158 initializeAArch64ConditionalComparesPass(*PR); 159 initializeAArch64ConditionOptimizerPass(*PR); 160 initializeAArch64DeadRegisterDefinitionsPass(*PR); 161 initializeAArch64ExpandPseudoPass(*PR); 162 initializeAArch64LoadStoreOptPass(*PR); 163 initializeAArch64VectorByElementOptPass(*PR); 164 initializeAArch64PromoteConstantPass(*PR); 165 initializeAArch64RedundantCopyEliminationPass(*PR); 166 initializeAArch64StorePairSuppressPass(*PR); 167 initializeLDTLSCleanupPass(*PR); 168 } 169 170 //===----------------------------------------------------------------------===// 171 // AArch64 Lowering public interface. 172 //===----------------------------------------------------------------------===// 173 static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) { 174 if (TT.isOSBinFormatMachO()) 175 return llvm::make_unique<AArch64_MachoTargetObjectFile>(); 176 177 return llvm::make_unique<AArch64_ELFTargetObjectFile>(); 178 } 179 180 // Helper function to build a DataLayout string 181 static std::string computeDataLayout(const Triple &TT, 182 const MCTargetOptions &Options, 183 bool LittleEndian) { 184 if (Options.getABIName() == "ilp32") 185 return "e-m:e-p:32:32-i8:8-i16:16-i64:64-S128"; 186 if (TT.isOSBinFormatMachO()) 187 return "e-m:o-i64:64-i128:128-n32:64-S128"; 188 if (LittleEndian) 189 return "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"; 190 return "E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"; 191 } 192 193 static Reloc::Model getEffectiveRelocModel(const Triple &TT, 194 Optional<Reloc::Model> RM) { 195 // AArch64 Darwin is always PIC. 196 if (TT.isOSDarwin()) 197 return Reloc::PIC_; 198 // On ELF platforms the default static relocation model has a smart enough 199 // linker to cope with referencing external symbols defined in a shared 200 // library. Hence DynamicNoPIC doesn't need to be promoted to PIC. 201 if (!RM.hasValue() || *RM == Reloc::DynamicNoPIC) 202 return Reloc::Static; 203 return *RM; 204 } 205 206 /// Create an AArch64 architecture model. 207 /// 208 AArch64TargetMachine::AArch64TargetMachine( 209 const Target &T, const Triple &TT, StringRef CPU, StringRef FS, 210 const TargetOptions &Options, Optional<Reloc::Model> RM, 211 CodeModel::Model CM, CodeGenOpt::Level OL, bool LittleEndian) 212 // This nested ternary is horrible, but DL needs to be properly 213 // initialized before TLInfo is constructed. 214 : LLVMTargetMachine(T, computeDataLayout(TT, Options.MCOptions, 215 LittleEndian), 216 TT, CPU, FS, Options, 217 getEffectiveRelocModel(TT, RM), CM, OL), 218 TLOF(createTLOF(getTargetTriple())), 219 isLittle(LittleEndian) { 220 initAsmInfo(); 221 } 222 223 AArch64TargetMachine::~AArch64TargetMachine() = default; 224 225 #ifdef LLVM_BUILD_GLOBAL_ISEL 226 namespace { 227 228 struct AArch64GISelActualAccessor : public GISelAccessor { 229 std::unique_ptr<CallLowering> CallLoweringInfo; 230 std::unique_ptr<InstructionSelector> InstSelector; 231 std::unique_ptr<LegalizerInfo> Legalizer; 232 std::unique_ptr<RegisterBankInfo> RegBankInfo; 233 234 const CallLowering *getCallLowering() const override { 235 return CallLoweringInfo.get(); 236 } 237 238 const InstructionSelector *getInstructionSelector() const override { 239 return InstSelector.get(); 240 } 241 242 const LegalizerInfo *getLegalizerInfo() const override { 243 return Legalizer.get(); 244 } 245 246 const RegisterBankInfo *getRegBankInfo() const override { 247 return RegBankInfo.get(); 248 } 249 }; 250 251 } // end anonymous namespace 252 #endif 253 254 const AArch64Subtarget * 255 AArch64TargetMachine::getSubtargetImpl(const Function &F) const { 256 Attribute CPUAttr = F.getFnAttribute("target-cpu"); 257 Attribute FSAttr = F.getFnAttribute("target-features"); 258 259 std::string CPU = !CPUAttr.hasAttribute(Attribute::None) 260 ? CPUAttr.getValueAsString().str() 261 : TargetCPU; 262 std::string FS = !FSAttr.hasAttribute(Attribute::None) 263 ? FSAttr.getValueAsString().str() 264 : TargetFS; 265 266 auto &I = SubtargetMap[CPU + FS]; 267 if (!I) { 268 // This needs to be done before we create a new subtarget since any 269 // creation will depend on the TM and the code generation flags on the 270 // function that reside in TargetOptions. 271 resetTargetOptions(F); 272 I = llvm::make_unique<AArch64Subtarget>(TargetTriple, CPU, FS, *this, 273 isLittle); 274 #ifndef LLVM_BUILD_GLOBAL_ISEL 275 GISelAccessor *GISel = new GISelAccessor(); 276 #else 277 AArch64GISelActualAccessor *GISel = 278 new AArch64GISelActualAccessor(); 279 GISel->CallLoweringInfo.reset( 280 new AArch64CallLowering(*I->getTargetLowering())); 281 GISel->Legalizer.reset(new AArch64LegalizerInfo()); 282 283 auto *RBI = new AArch64RegisterBankInfo(*I->getRegisterInfo()); 284 285 // FIXME: At this point, we can't rely on Subtarget having RBI. 286 // It's awkward to mix passing RBI and the Subtarget; should we pass 287 // TII/TRI as well? 288 GISel->InstSelector.reset( 289 createAArch64InstructionSelector(*this, *I, *RBI)); 290 291 GISel->RegBankInfo.reset(RBI); 292 #endif 293 I->setGISelAccessor(*GISel); 294 } 295 return I.get(); 296 } 297 298 void AArch64leTargetMachine::anchor() { } 299 300 AArch64leTargetMachine::AArch64leTargetMachine( 301 const Target &T, const Triple &TT, StringRef CPU, StringRef FS, 302 const TargetOptions &Options, Optional<Reloc::Model> RM, 303 CodeModel::Model CM, CodeGenOpt::Level OL) 304 : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {} 305 306 void AArch64beTargetMachine::anchor() { } 307 308 AArch64beTargetMachine::AArch64beTargetMachine( 309 const Target &T, const Triple &TT, StringRef CPU, StringRef FS, 310 const TargetOptions &Options, Optional<Reloc::Model> RM, 311 CodeModel::Model CM, CodeGenOpt::Level OL) 312 : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {} 313 314 namespace { 315 316 /// AArch64 Code Generator Pass Configuration Options. 317 class AArch64PassConfig : public TargetPassConfig { 318 public: 319 AArch64PassConfig(AArch64TargetMachine *TM, PassManagerBase &PM) 320 : TargetPassConfig(TM, PM) { 321 if (TM->getOptLevel() != CodeGenOpt::None) 322 substitutePass(&PostRASchedulerID, &PostMachineSchedulerID); 323 } 324 325 AArch64TargetMachine &getAArch64TargetMachine() const { 326 return getTM<AArch64TargetMachine>(); 327 } 328 329 ScheduleDAGInstrs * 330 createMachineScheduler(MachineSchedContext *C) const override { 331 ScheduleDAGMILive *DAG = createGenericSchedLive(C); 332 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI)); 333 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI)); 334 DAG->addMutation(createAArch64MacroFusionDAGMutation()); 335 return DAG; 336 } 337 338 ScheduleDAGInstrs * 339 createPostMachineScheduler(MachineSchedContext *C) const override { 340 const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>(); 341 if (ST.hasFuseLiterals()) { 342 // Run the Macro Fusion after RA again since literals are expanded from 343 // pseudos then (v. addPreSched2()). 344 ScheduleDAGMI *DAG = createGenericSchedPostRA(C); 345 DAG->addMutation(createAArch64MacroFusionDAGMutation()); 346 return DAG; 347 } 348 349 return nullptr; 350 } 351 352 void addIRPasses() override; 353 bool addPreISel() override; 354 bool addInstSelector() override; 355 #ifdef LLVM_BUILD_GLOBAL_ISEL 356 bool addIRTranslator() override; 357 bool addLegalizeMachineIR() override; 358 bool addRegBankSelect() override; 359 bool addGlobalInstructionSelect() override; 360 #endif 361 bool addILPOpts() override; 362 void addPreRegAlloc() override; 363 void addPostRegAlloc() override; 364 void addPreSched2() override; 365 void addPreEmitPass() override; 366 367 bool isGlobalISelEnabled() const override; 368 }; 369 370 } // end anonymous namespace 371 372 TargetIRAnalysis AArch64TargetMachine::getTargetIRAnalysis() { 373 return TargetIRAnalysis([this](const Function &F) { 374 return TargetTransformInfo(AArch64TTIImpl(this, F)); 375 }); 376 } 377 378 TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) { 379 return new AArch64PassConfig(this, PM); 380 } 381 382 void AArch64PassConfig::addIRPasses() { 383 // Always expand atomic operations, we don't deal with atomicrmw or cmpxchg 384 // ourselves. 385 addPass(createAtomicExpandPass(TM)); 386 387 // Cmpxchg instructions are often used with a subsequent comparison to 388 // determine whether it succeeded. We can exploit existing control-flow in 389 // ldrex/strex loops to simplify this, but it needs tidying up. 390 if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy) 391 addPass(createCFGSimplificationPass()); 392 393 // Run LoopDataPrefetch 394 // 395 // Run this before LSR to remove the multiplies involved in computing the 396 // pointer values N iterations ahead. 397 if (TM->getOptLevel() != CodeGenOpt::None && EnableLoopDataPrefetch) 398 addPass(createLoopDataPrefetchPass()); 399 400 TargetPassConfig::addIRPasses(); 401 402 // Match interleaved memory accesses to ldN/stN intrinsics. 403 if (TM->getOptLevel() != CodeGenOpt::None) 404 addPass(createInterleavedAccessPass(TM)); 405 406 if (TM->getOptLevel() == CodeGenOpt::Aggressive && EnableGEPOpt) { 407 // Call SeparateConstOffsetFromGEP pass to extract constants within indices 408 // and lower a GEP with multiple indices to either arithmetic operations or 409 // multiple GEPs with single index. 410 addPass(createSeparateConstOffsetFromGEPPass(TM, true)); 411 // Call EarlyCSE pass to find and remove subexpressions in the lowered 412 // result. 413 addPass(createEarlyCSEPass()); 414 // Do loop invariant code motion in case part of the lowered result is 415 // invariant. 416 addPass(createLICMPass()); 417 } 418 } 419 420 // Pass Pipeline Configuration 421 bool AArch64PassConfig::addPreISel() { 422 // Run promote constant before global merge, so that the promoted constants 423 // get a chance to be merged 424 if (TM->getOptLevel() != CodeGenOpt::None && EnablePromoteConstant) 425 addPass(createAArch64PromoteConstantPass()); 426 // FIXME: On AArch64, this depends on the type. 427 // Basically, the addressable offsets are up to 4095 * Ty.getSizeInBytes(). 428 // and the offset has to be a multiple of the related size in bytes. 429 if ((TM->getOptLevel() != CodeGenOpt::None && 430 EnableGlobalMerge == cl::BOU_UNSET) || 431 EnableGlobalMerge == cl::BOU_TRUE) { 432 bool OnlyOptimizeForSize = (TM->getOptLevel() < CodeGenOpt::Aggressive) && 433 (EnableGlobalMerge == cl::BOU_UNSET); 434 addPass(createGlobalMergePass(TM, 4095, OnlyOptimizeForSize)); 435 } 436 437 if (TM->getOptLevel() != CodeGenOpt::None && EnableAddressTypePromotion) 438 addPass(createAArch64AddressTypePromotionPass()); 439 440 return false; 441 } 442 443 bool AArch64PassConfig::addInstSelector() { 444 addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel())); 445 446 // For ELF, cleanup any local-dynamic TLS accesses (i.e. combine as many 447 // references to _TLS_MODULE_BASE_ as possible. 448 if (TM->getTargetTriple().isOSBinFormatELF() && 449 getOptLevel() != CodeGenOpt::None) 450 addPass(createAArch64CleanupLocalDynamicTLSPass()); 451 452 return false; 453 } 454 455 #ifdef LLVM_BUILD_GLOBAL_ISEL 456 bool AArch64PassConfig::addIRTranslator() { 457 addPass(new IRTranslator()); 458 return false; 459 } 460 461 bool AArch64PassConfig::addLegalizeMachineIR() { 462 addPass(new Legalizer()); 463 return false; 464 } 465 466 bool AArch64PassConfig::addRegBankSelect() { 467 addPass(new RegBankSelect()); 468 return false; 469 } 470 471 bool AArch64PassConfig::addGlobalInstructionSelect() { 472 addPass(new InstructionSelect()); 473 return false; 474 } 475 #endif 476 477 bool AArch64PassConfig::isGlobalISelEnabled() const { 478 return TM->getOptLevel() <= EnableGlobalISelAtO; 479 } 480 481 bool AArch64PassConfig::addILPOpts() { 482 if (EnableCondOpt) 483 addPass(createAArch64ConditionOptimizerPass()); 484 if (EnableCCMP) 485 addPass(createAArch64ConditionalCompares()); 486 if (EnableMCR) 487 addPass(&MachineCombinerID); 488 if (EnableEarlyIfConversion) 489 addPass(&EarlyIfConverterID); 490 if (EnableStPairSuppress) 491 addPass(createAArch64StorePairSuppressPass()); 492 addPass(createAArch64VectorByElementOptPass()); 493 return true; 494 } 495 496 void AArch64PassConfig::addPreRegAlloc() { 497 // Change dead register definitions to refer to the zero register. 498 if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination) 499 addPass(createAArch64DeadRegisterDefinitions()); 500 501 // Use AdvSIMD scalar instructions whenever profitable. 502 if (TM->getOptLevel() != CodeGenOpt::None && EnableAdvSIMDScalar) { 503 addPass(createAArch64AdvSIMDScalar()); 504 // The AdvSIMD pass may produce copies that can be rewritten to 505 // be register coaleascer friendly. 506 addPass(&PeepholeOptimizerID); 507 } 508 } 509 510 void AArch64PassConfig::addPostRegAlloc() { 511 // Remove redundant copy instructions. 512 if (TM->getOptLevel() != CodeGenOpt::None && EnableRedundantCopyElimination) 513 addPass(createAArch64RedundantCopyEliminationPass()); 514 515 if (TM->getOptLevel() != CodeGenOpt::None && usingDefaultRegAlloc()) 516 // Improve performance for some FP/SIMD code for A57. 517 addPass(createAArch64A57FPLoadBalancing()); 518 } 519 520 void AArch64PassConfig::addPreSched2() { 521 // Expand some pseudo instructions to allow proper scheduling. 522 addPass(createAArch64ExpandPseudoPass()); 523 // Use load/store pair instructions when possible. 524 if (TM->getOptLevel() != CodeGenOpt::None && EnableLoadStoreOpt) 525 addPass(createAArch64LoadStoreOptimizationPass()); 526 } 527 528 void AArch64PassConfig::addPreEmitPass() { 529 if (EnableA53Fix835769) 530 addPass(createAArch64A53Fix835769()); 531 // Relax conditional branch instructions if they're otherwise out of 532 // range of their destination. 533 if (BranchRelaxation) 534 addPass(&BranchRelaxationPassID); 535 536 if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH && 537 TM->getTargetTriple().isOSBinFormatMachO()) 538 addPass(createAArch64CollectLOHPass()); 539 } 540