//===-- AArch64TargetMachine.cpp - Define TargetMachine for AArch64 -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetMachine.h"
#include "AArch64.h"
#include "AArch64MacroFusion.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetObjectFile.h"
#include "AArch64TargetTransformInfo.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "TargetInfo/AArch64TargetInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/CSEConfigBase.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/Localizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"
#include <memory>
#include <string>

using namespace llvm;

static cl::opt<bool> EnableCCMP("aarch64-enable-ccmp",
                                cl::desc("Enable the CCMP formation pass"),
                                cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableCondBrTuning("aarch64-enable-cond-br-tune",
                       cl::desc("Enable the conditional branch tuning pass"),
                       cl::init(true), cl::Hidden);

static cl::opt<bool> EnableMCR("aarch64-enable-mcr",
                               cl::desc("Enable the machine combiner pass"),
                               cl::init(true), cl::Hidden);

static cl::opt<bool> EnableStPairSuppress("aarch64-enable-stp-suppress",
                                          cl::desc("Suppress STP for AArch64"),
                                          cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAdvSIMDScalar(
    "aarch64-enable-simd-scalar",
    cl::desc("Enable use of AdvSIMD scalar integer instructions"),
    cl::init(false), cl::Hidden);

static cl::opt<bool>
    EnablePromoteConstant("aarch64-enable-promote-const",
                          cl::desc("Enable the promote constant pass"),
                          cl::init(true), cl::Hidden);

static cl::opt<bool> EnableCollectLOH(
    "aarch64-enable-collect-loh",
    cl::desc("Enable the pass that emits the linker optimization hints (LOH)"),
    cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableDeadRegisterElimination("aarch64-enable-dead-defs", cl::Hidden,
                                  cl::desc("Enable the pass that removes dead"
                                           " definitions and replaces stores to"
                                           " them with stores to the zero"
                                           " register"),
                                  cl::init(true));

static cl::opt<bool> EnableRedundantCopyElimination(
    "aarch64-enable-copyelim",
    cl::desc("Enable the redundant copy elimination pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableLoadStoreOpt("aarch64-enable-ldst-opt",
                                        cl::desc("Enable the load/store pair"
                                                 " optimization pass"),
                                        cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAtomicTidy(
    "aarch64-enable-atomic-cfg-tidy", cl::Hidden,
    cl::desc("Run SimplifyCFG after expanding atomic operations"
             " to make use of cmpxchg flow-based information"),
    cl::init(true));

static cl::opt<bool>
    EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden,
                            cl::desc("Run early if-conversion"),
                            cl::init(true));

static cl::opt<bool>
    EnableCondOpt("aarch64-enable-condopt",
                  cl::desc("Enable the condition optimizer pass"),
                  cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableA53Fix835769("aarch64-fix-cortex-a53-835769", cl::Hidden,
                       cl::desc("Work around Cortex-A53 erratum 835769"),
                       cl::init(false));

static cl::opt<bool>
    EnableGEPOpt("aarch64-enable-gep-opt", cl::Hidden,
                 cl::desc("Enable optimizations on complex GEPs"),
                 cl::init(false));

static cl::opt<bool>
    BranchRelaxation("aarch64-enable-branch-relax", cl::Hidden, cl::init(true),
                     cl::desc("Relax out of range conditional branches"));

static cl::opt<bool> EnableCompressJumpTables(
    "aarch64-enable-compress-jump-tables", cl::Hidden, cl::init(true),
    cl::desc("Use smallest entry possible for jump tables"));

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
    EnableGlobalMerge("aarch64-enable-global-merge", cl::Hidden,
                      cl::desc("Enable the global merge pass"));

static cl::opt<bool>
    EnableLoopDataPrefetch("aarch64-enable-loop-data-prefetch", cl::Hidden,
                           cl::desc("Enable the loop data prefetch pass"),
                           cl::init(true));

static cl::opt<int> EnableGlobalISelAtO(
    "aarch64-enable-global-isel-at-O", cl::Hidden,
    cl::desc("Enable GlobalISel at or below an opt level (-1 to disable)"),
    cl::init(0));

static cl::opt<bool> EnableFalkorHWPFFix("aarch64-enable-falkor-hwpf-fix",
                                         cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableBranchTargets("aarch64-enable-branch-targets", cl::Hidden,
                        cl::desc("Enable the AArch64 branch target pass"),
                        cl::init(true));
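
// Note: the cl::opt flags above are hidden developer options rather than part
// of the public driver interface; cl::Hidden only omits them from -help. As an
// illustrative, unverified sketch, they can be toggled directly on an llc
// invocation, e.g.:
//
//   llc -mtriple=aarch64-linux-gnu -aarch64-enable-ccmp=false \
//       -aarch64-enable-global-merge=false input.ll -o out.s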

extern "C" void LLVMInitializeAArch64Target() {
  // Register the target.
  RegisterTargetMachine<AArch64leTargetMachine> X(getTheAArch64leTarget());
  RegisterTargetMachine<AArch64beTargetMachine> Y(getTheAArch64beTarget());
  RegisterTargetMachine<AArch64leTargetMachine> Z(getTheARM64Target());
  auto PR = PassRegistry::getPassRegistry();
  initializeGlobalISel(*PR);
  initializeAArch64A53Fix835769Pass(*PR);
  initializeAArch64A57FPLoadBalancingPass(*PR);
  initializeAArch64AdvSIMDScalarPass(*PR);
  initializeAArch64BranchTargetsPass(*PR);
  initializeAArch64CollectLOHPass(*PR);
  initializeAArch64CompressJumpTablesPass(*PR);
  initializeAArch64ConditionalComparesPass(*PR);
  initializeAArch64ConditionOptimizerPass(*PR);
  initializeAArch64DeadRegisterDefinitionsPass(*PR);
  initializeAArch64ExpandPseudoPass(*PR);
  initializeAArch64LoadStoreOptPass(*PR);
  initializeAArch64SIMDInstrOptPass(*PR);
  initializeAArch64PreLegalizerCombinerPass(*PR);
  initializeAArch64PromoteConstantPass(*PR);
  initializeAArch64RedundantCopyEliminationPass(*PR);
  initializeAArch64StorePairSuppressPass(*PR);
  initializeFalkorHWPFFixPass(*PR);
  initializeFalkorMarkStridedAccessesLegacyPass(*PR);
  initializeLDTLSCleanupPass(*PR);
  initializeAArch64SpeculationHardeningPass(*PR);
  initializeAArch64StackTaggingPass(*PR);
  initializeAArch64StackTaggingPreRAPass(*PR);
}

//===----------------------------------------------------------------------===//
// AArch64 Lowering public interface.
//===----------------------------------------------------------------------===//
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return std::make_unique<AArch64_MachoTargetObjectFile>();
  if (TT.isOSBinFormatCOFF())
    return std::make_unique<AArch64_COFFTargetObjectFile>();

  return std::make_unique<AArch64_ELFTargetObjectFile>();
}

// Helper function to build a DataLayout string
static std::string computeDataLayout(const Triple &TT,
                                     const MCTargetOptions &Options,
                                     bool LittleEndian) {
  if (Options.getABIName() == "ilp32")
    return "e-m:e-p:32:32-i8:8-i16:16-i64:64-S128";
  if (TT.isOSBinFormatMachO())
    return "e-m:o-i64:64-i128:128-n32:64-S128";
  if (TT.isOSBinFormatCOFF())
    return "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128";
  if (LittleEndian)
    return "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128";
  return "E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128";
}
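
// For example, a little-endian ELF triple such as aarch64-unknown-linux-gnu
// (without the ilp32 ABI) falls through to the fourth case above and gets
// "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128": little-endian ("e"),
// ELF-style name mangling ("m:e"), native integer widths of 32 and 64 bits
// ("n32:64"), and 128-bit stack alignment ("S128").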

static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           Optional<Reloc::Model> RM) {
  // AArch64 Darwin and Windows are always PIC.
  if (TT.isOSDarwin() || TT.isOSWindows())
    return Reloc::PIC_;
  // On ELF platforms the default static relocation model has a smart enough
  // linker to cope with referencing external symbols defined in a shared
  // library. Hence DynamicNoPIC doesn't need to be promoted to PIC.
  if (!RM.hasValue() || *RM == Reloc::DynamicNoPIC)
    return Reloc::Static;
  return *RM;
}

static CodeModel::Model
getEffectiveAArch64CodeModel(const Triple &TT, Optional<CodeModel::Model> CM,
                             bool JIT) {
  if (CM) {
    if (*CM != CodeModel::Small && *CM != CodeModel::Tiny &&
        *CM != CodeModel::Large) {
      if (!TT.isOSFuchsia())
        report_fatal_error(
            "Only small, tiny and large code models are allowed on AArch64");
      else if (*CM != CodeModel::Kernel)
        report_fatal_error("Only small, tiny, kernel, and large code models "
                           "are allowed on AArch64");
    } else if (*CM == CodeModel::Tiny && !TT.isOSBinFormatELF())
      report_fatal_error("tiny code model is only supported on ELF");
    return *CM;
  }
  // The default MCJIT memory managers make no guarantees about where they can
  // find an executable page; JITed code needs to be able to refer to globals
  // no matter how far away they are.
  if (JIT)
    return CodeModel::Large;
  return CodeModel::Small;
}
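
// As an illustrative example of the constraints above: an ELF invocation such
// as "llc -mtriple=aarch64-linux-gnu -code-model=tiny foo.ll" is accepted,
// while requesting the tiny code model for a Mach-O or COFF triple triggers
// the "tiny code model is only supported on ELF" error. With no explicit code
// model, JIT clients default to CodeModel::Large and everything else to
// CodeModel::Small.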

/// Create an AArch64 architecture model.
///
AArch64TargetMachine::AArch64TargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Optional<Reloc::Model> RM,
                                           Optional<CodeModel::Model> CM,
                                           CodeGenOpt::Level OL, bool JIT,
                                           bool LittleEndian)
    : LLVMTargetMachine(T,
                        computeDataLayout(TT, Options.MCOptions, LittleEndian),
                        TT, CPU, FS, Options, getEffectiveRelocModel(TT, RM),
                        getEffectiveAArch64CodeModel(TT, CM, JIT), OL),
      TLOF(createTLOF(getTargetTriple())), isLittle(LittleEndian) {
  initAsmInfo();

  if (TT.isOSBinFormatMachO()) {
    this->Options.TrapUnreachable = true;
    this->Options.NoTrapAfterNoreturn = true;
  }

  if (getMCAsmInfo()->usesWindowsCFI()) {
    // Unwinding can get confused if the last instruction in an
    // exception-handling region (function, funclet, try block, etc.)
    // is a call.
    //
    // FIXME: We could elide the trap if the next instruction would be in
    // the same region anyway.
    this->Options.TrapUnreachable = true;
  }

  // Enable GlobalISel at or below EnableGlobalISelAtO.
  if (getOptLevel() <= EnableGlobalISelAtO) {
    setGlobalISel(true);
    setGlobalISelAbort(GlobalISelAbortMode::Disable);
  }

  // AArch64 supports the MachineOutliner.
  setMachineOutliner(true);

  // AArch64 supports default outlining behaviour.
  setSupportsDefaultOutlining(true);
}

AArch64TargetMachine::~AArch64TargetMachine() = default;

const AArch64Subtarget *
AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
                        ? CPUAttr.getValueAsString().str()
                        : TargetCPU;
  std::string FS = !FSAttr.hasAttribute(Attribute::None)
                       ? FSAttr.getValueAsString().str()
                       : TargetFS;

  auto &I = SubtargetMap[CPU + FS];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<AArch64Subtarget>(TargetTriple, CPU, FS, *this,
                                           isLittle);
  }
  return I.get();
}
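
// Illustrative only: per-function subtargets are keyed off the "target-cpu"
// and "target-features" string attributes read above. A function such as the
// following (hypothetical IR) would get its own AArch64Subtarget entry in
// SubtargetMap:
//
//   define void @f() #0 { ret void }
//   attributes #0 = { "target-cpu"="cortex-a57" "target-features"="+crc" }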

void AArch64leTargetMachine::anchor() { }

AArch64leTargetMachine::AArch64leTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    Optional<CodeModel::Model> CM, CodeGenOpt::Level OL, bool JIT)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT, true) {}

void AArch64beTargetMachine::anchor() { }

AArch64beTargetMachine::AArch64beTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    Optional<CodeModel::Model> CM, CodeGenOpt::Level OL, bool JIT)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT, false) {}

namespace {

/// AArch64 Code Generator Pass Configuration Options.
class AArch64PassConfig : public TargetPassConfig {
public:
  AArch64PassConfig(AArch64TargetMachine &TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {
    if (TM.getOptLevel() != CodeGenOpt::None)
      substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
  }

  AArch64TargetMachine &getAArch64TargetMachine() const {
    return getTM<AArch64TargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    if (ST.hasFusion())
      DAG->addMutation(createAArch64MacroFusionDAGMutation());
    return DAG;
  }

  ScheduleDAGInstrs *
  createPostMachineScheduler(MachineSchedContext *C) const override {
    const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
    if (ST.hasFusion()) {
      // Run the Macro Fusion after RA again since literals are expanded from
      // pseudos then (v. addPreSched2()).
      ScheduleDAGMI *DAG = createGenericSchedPostRA(C);
      DAG->addMutation(createAArch64MacroFusionDAGMutation());
      return DAG;
    }

    return nullptr;
  }

  void addIRPasses() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  void addPreLegalizeMachineIR() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  void addPreGlobalInstructionSelect() override;
  bool addGlobalInstructionSelect() override;
  bool addILPOpts() override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;

  std::unique_ptr<CSEConfigBase> getCSEConfig() const override;
};

} // end anonymous namespace

TargetTransformInfo
AArch64TargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(AArch64TTIImpl(this, F));
}

TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new AArch64PassConfig(*this, PM);
}

std::unique_ptr<CSEConfigBase> AArch64PassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}

void AArch64PassConfig::addIRPasses() {
  // Always expand atomic operations, we don't deal with atomicrmw or cmpxchg
  // ourselves.
  addPass(createAtomicExpandPass());

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether it succeeded. We can exploit existing control-flow in
  // ldrex/strex loops to simplify this, but it needs tidying up.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass(1, true, true, false, true));

  // Run LoopDataPrefetch
  //
  // Run this before LSR to remove the multiplies involved in computing the
  // pointer values N iterations ahead.
  if (TM->getOptLevel() != CodeGenOpt::None) {
    if (EnableLoopDataPrefetch)
      addPass(createLoopDataPrefetchPass());
    if (EnableFalkorHWPFFix)
      addPass(createFalkorMarkStridedAccessesPass());
  }

  TargetPassConfig::addIRPasses();

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOpt::None) {
    addPass(createInterleavedLoadCombinePass());
    addPass(createInterleavedAccessPass());
  }

  if (TM->getOptLevel() == CodeGenOpt::Aggressive && EnableGEPOpt) {
    // Call SeparateConstOffsetFromGEP pass to extract constants within indices
    // and lower a GEP with multiple indices to either arithmetic operations or
    // multiple GEPs with single index.
    addPass(createSeparateConstOffsetFromGEPPass(true));
    // Call EarlyCSE pass to find and remove subexpressions in the lowered
    // result.
    addPass(createEarlyCSEPass());
    // Do loop invariant code motion in case part of the lowered result is
    // invariant.
    addPass(createLICMPass());
  }

  addPass(createAArch64StackTaggingPass(/* MergeInit = */ TM->getOptLevel() !=
                                        CodeGenOpt::None));
}

// Pass Pipeline Configuration
bool AArch64PassConfig::addPreISel() {
  // Run promote constant before global merge, so that the promoted constants
  // get a chance to be merged
  if (TM->getOptLevel() != CodeGenOpt::None && EnablePromoteConstant)
    addPass(createAArch64PromoteConstantPass());
  // FIXME: On AArch64, this depends on the type.
  // Basically, the addressable offsets are up to 4095 * Ty.getSizeInBytes(),
  // and the offset has to be a multiple of the related size in bytes.
  if ((TM->getOptLevel() != CodeGenOpt::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    bool OnlyOptimizeForSize = (TM->getOptLevel() < CodeGenOpt::Aggressive) &&
                               (EnableGlobalMerge == cl::BOU_UNSET);

    // Merging of extern globals is enabled by default on non-Mach-O as we
    // expect it to be generally either beneficial or harmless. On Mach-O it
    // is disabled as we emit the .subsections_via_symbols directive which
    // means that merging extern globals is not safe.
    bool MergeExternalByDefault = !TM->getTargetTriple().isOSBinFormatMachO();

    // FIXME: extern global merging is only enabled when we optimise for size
    // because there are some regressions with it also enabled for performance.
    if (!OnlyOptimizeForSize)
      MergeExternalByDefault = false;

    addPass(createGlobalMergePass(TM, 4095, OnlyOptimizeForSize,
                                  MergeExternalByDefault));
  }

  return false;
}
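
// Rough worked example of the 4095 limit passed to createGlobalMergePass
// above (illustrative only): AArch64 scaled unsigned-immediate loads and
// stores encode offsets of 0..4095 in units of the access size, so a pool of
// merged 4-byte (i32) globals can be reached at offsets up to
// 4095 * 4 = 16380 bytes from the merged base without extra address
// arithmetic.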

bool AArch64PassConfig::addInstSelector() {
  addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));

  // For ELF, cleanup any local-dynamic TLS accesses (i.e. combine as many
  // references to _TLS_MODULE_BASE_ as possible).
  if (TM->getTargetTriple().isOSBinFormatELF() &&
      getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64CleanupLocalDynamicTLSPass());

  return false;
}

bool AArch64PassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

void AArch64PassConfig::addPreLegalizeMachineIR() {
  bool IsOptNone = getOptLevel() == CodeGenOpt::None;
  addPass(createAArch64PreLegalizeCombiner(IsOptNone));
}

bool AArch64PassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

bool AArch64PassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

void AArch64PassConfig::addPreGlobalInstructionSelect() {
  addPass(new Localizer());
}

bool AArch64PassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}

bool AArch64PassConfig::addILPOpts() {
  if (EnableCondOpt)
    addPass(createAArch64ConditionOptimizerPass());
  if (EnableCCMP)
    addPass(createAArch64ConditionalCompares());
  if (EnableMCR)
    addPass(&MachineCombinerID);
  if (EnableCondBrTuning)
    addPass(createAArch64CondBrTuning());
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);
  if (EnableStPairSuppress)
    addPass(createAArch64StorePairSuppressPass());
  addPass(createAArch64SIMDInstrOptPass());
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64StackTaggingPreRAPass());
  return true;
}

void AArch64PassConfig::addPreRegAlloc() {
  // Change dead register definitions to refer to the zero register.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination)
    addPass(createAArch64DeadRegisterDefinitions());

  // Use AdvSIMD scalar instructions whenever profitable.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAdvSIMDScalar) {
    addPass(createAArch64AdvSIMDScalar());
    // The AdvSIMD pass may produce copies that can be rewritten to
    // be register coalescer friendly.
    addPass(&PeepholeOptimizerID);
  }
}
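
// Illustrative example for the dead-definition rewrite above: a flag-setting
// instruction whose integer result is unused, e.g. "subs x0, x1, #1" with x0
// dead, can instead write the zero register ("subs xzr, x1, #1", i.e.
// "cmp x1, #1") so no architectural register is clobbered needlessly.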

void AArch64PassConfig::addPostRegAlloc() {
  // Remove redundant copy instructions.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableRedundantCopyElimination)
    addPass(createAArch64RedundantCopyEliminationPass());

  if (TM->getOptLevel() != CodeGenOpt::None && usingDefaultRegAlloc())
    // Improve performance for some FP/SIMD code for A57.
    addPass(createAArch64A57FPLoadBalancing());
}

void AArch64PassConfig::addPreSched2() {
  // Expand some pseudo instructions to allow proper scheduling.
  addPass(createAArch64ExpandPseudoPass());
  // Use load/store pair instructions when possible.
  if (TM->getOptLevel() != CodeGenOpt::None) {
    if (EnableLoadStoreOpt)
      addPass(createAArch64LoadStoreOptimizationPass());
  }

  // The AArch64SpeculationHardeningPass destroys dominator tree and natural
  // loop info, which is needed for the FalkorHWPFFixPass and also later on.
  // Therefore, run the AArch64SpeculationHardeningPass before the
  // FalkorHWPFFixPass to avoid recomputing dominator tree and natural loop
  // info.
  addPass(createAArch64SpeculationHardeningPass());

  if (TM->getOptLevel() != CodeGenOpt::None) {
    if (EnableFalkorHWPFFix)
      addPass(createFalkorHWPFFixPass());
  }
}

void AArch64PassConfig::addPreEmitPass() {
  // Machine Block Placement might have created new opportunities when run
  // at O3, where the Tail Duplication Threshold is set to 4 instructions.
  // Run the load/store optimizer once more.
  if (TM->getOptLevel() >= CodeGenOpt::Aggressive && EnableLoadStoreOpt)
    addPass(createAArch64LoadStoreOptimizationPass());

  if (EnableA53Fix835769)
    addPass(createAArch64A53Fix835769());
  // Relax conditional branch instructions if they're otherwise out of
  // range of their destination.
  if (BranchRelaxation)
    addPass(&BranchRelaxationPassID);

  if (EnableBranchTargets)
    addPass(createAArch64BranchTargetsPass());

  if (TM->getOptLevel() != CodeGenOpt::None && EnableCompressJumpTables)
    addPass(createAArch64CompressJumpTablesPass());

  if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH &&
      TM->getTargetTriple().isOSBinFormatMachO())
    addPass(createAArch64CollectLOHPass());
}