//===-- AArch64TargetMachine.cpp - Define TargetMachine for AArch64 -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64 specific subclass of TargetMachine.
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "AArch64CallLowering.h"
#include "AArch64InstructionSelector.h"
#include "AArch64MachineLegalizer.h"
#include "AArch64RegisterBankInfo.h"
#include "AArch64TargetMachine.h"
#include "AArch64TargetObjectFile.h"
#include "AArch64TargetTransformInfo.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/MachineLegalizePass.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"
using namespace llvm;

static cl::opt<bool> EnableCCMP("aarch64-enable-ccmp",
                                cl::desc("Enable the CCMP formation pass"),
                                cl::init(true), cl::Hidden);

static cl::opt<bool> EnableMCR("aarch64-enable-mcr",
                               cl::desc("Enable the machine combiner pass"),
                               cl::init(true), cl::Hidden);

static cl::opt<bool> EnableStPairSuppress("aarch64-enable-stp-suppress",
                                          cl::desc("Suppress STP for AArch64"),
                                          cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAdvSIMDScalar(
    "aarch64-enable-simd-scalar",
    cl::desc("Enable use of AdvSIMD scalar integer instructions"),
    cl::init(false), cl::Hidden);

static cl::opt<bool>
    EnablePromoteConstant("aarch64-enable-promote-const",
                          cl::desc("Enable the promote constant pass"),
                          cl::init(true), cl::Hidden);

static cl::opt<bool> EnableCollectLOH(
    "aarch64-enable-collect-loh",
    cl::desc("Enable the pass that emits the linker optimization hints (LOH)"),
    cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableDeadRegisterElimination("aarch64-enable-dead-defs", cl::Hidden,
                                  cl::desc("Enable the pass that removes dead"
                                           " definitions and replaces stores to"
                                           " them with stores to the zero"
                                           " register"),
                                  cl::init(true));

static cl::opt<bool> EnableRedundantCopyElimination(
    "aarch64-enable-copyelim",
    cl::desc("Enable the redundant copy elimination pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableLoadStoreOpt("aarch64-enable-ldst-opt",
                                        cl::desc("Enable the load/store pair"
                                                 " optimization pass"),
                                        cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAtomicTidy(
    "aarch64-enable-atomic-cfg-tidy", cl::Hidden,
    cl::desc("Run SimplifyCFG after expanding atomic operations"
             " to make use of cmpxchg flow-based information"),
    cl::init(true));

static cl::opt<bool>
    EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden,
                            cl::desc("Run early if-conversion"),
                            cl::init(true));

static cl::opt<bool>
    EnableCondOpt("aarch64-enable-condopt",
                  cl::desc("Enable the condition optimizer pass"),
                  cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableA53Fix835769("aarch64-fix-cortex-a53-835769", cl::Hidden,
                       cl::desc("Work around Cortex-A53 erratum 835769"),
                       cl::init(false));

static cl::opt<bool>
    EnableAddressTypePromotion("aarch64-enable-type-promotion", cl::Hidden,
                               cl::desc("Enable the type promotion pass"),
                               cl::init(true));

static cl::opt<bool>
    EnableGEPOpt("aarch64-enable-gep-opt", cl::Hidden,
                 cl::desc("Enable optimizations on complex GEPs"),
                 cl::init(false));

static cl::opt<bool>
    BranchRelaxation("aarch64-enable-branch-relax", cl::Hidden, cl::init(true),
                     cl::desc("Relax out of range conditional branches"));

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
    EnableGlobalMerge("aarch64-enable-global-merge", cl::Hidden,
                      cl::desc("Enable the global merge pass"));

static cl::opt<bool>
    EnableLoopDataPrefetch("aarch64-enable-loop-data-prefetch", cl::Hidden,
                           cl::desc("Enable the loop data prefetch pass"),
                           cl::init(true));

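// As a usage note (not part of the pass machinery itself): the cl::opt flags
// above are registered with LLVM's command-line parser, so they can be toggled
// from tools such as llc even though cl::Hidden keeps them out of -help. For
// example, something like
//   llc -mtriple=aarch64-linux-gnu -aarch64-enable-ldst-opt=false input.ll
// would disable the load/store pair optimization pass for that invocation
// (input.ll is a placeholder file name).
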
extern "C" void LLVMInitializeAArch64Target() {
  // Register the target.
  RegisterTargetMachine<AArch64leTargetMachine> X(TheAArch64leTarget);
  RegisterTargetMachine<AArch64beTargetMachine> Y(TheAArch64beTarget);
  RegisterTargetMachine<AArch64leTargetMachine> Z(TheARM64Target);
  auto PR = PassRegistry::getPassRegistry();
  initializeGlobalISel(*PR);
  initializeAArch64A53Fix835769Pass(*PR);
  initializeAArch64A57FPLoadBalancingPass(*PR);
  initializeAArch64AddressTypePromotionPass(*PR);
  initializeAArch64AdvSIMDScalarPass(*PR);
  initializeAArch64BranchRelaxationPass(*PR);
  initializeAArch64CollectLOHPass(*PR);
  initializeAArch64ConditionalComparesPass(*PR);
  initializeAArch64ConditionOptimizerPass(*PR);
  initializeAArch64DeadRegisterDefinitionsPass(*PR);
  initializeAArch64ExpandPseudoPass(*PR);
  initializeAArch64LoadStoreOptPass(*PR);
  initializeAArch64PromoteConstantPass(*PR);
  initializeAArch64RedundantCopyEliminationPass(*PR);
  initializeAArch64StorePairSuppressPass(*PR);
  initializeLDTLSCleanupPass(*PR);
}

//===----------------------------------------------------------------------===//
// AArch64 Lowering public interface.
//===----------------------------------------------------------------------===//
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return make_unique<AArch64_MachoTargetObjectFile>();

  return make_unique<AArch64_ELFTargetObjectFile>();
}

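// A rough decoder for the DataLayout strings returned below (this is standard
// LLVM DataLayout syntax, nothing AArch64-specific): "e"/"E" select little/big
// endian, "m:o"/"m:e" select Mach-O/ELF name mangling, "iN:A" gives the ABI
// alignment in bits for an N-bit integer, "n32:64" lists the native integer
// widths, and "S128" requests 128-bit stack alignment.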
// Helper function to build a DataLayout string
static std::string computeDataLayout(const Triple &TT, bool LittleEndian) {
  if (TT.isOSBinFormatMachO())
    return "e-m:o-i64:64-i128:128-n32:64-S128";
  if (LittleEndian)
    return "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128";
  return "E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128";
}

// Helper function to set up the defaults for reciprocals.
static void initReciprocals(AArch64TargetMachine &TM, AArch64Subtarget &ST) {
  // For the estimates, convergence is quadratic, so essentially the number of
  // correct digits doubles after each iteration. On ARMv8, the minimum
  // architected accuracy of the initial estimate is 2^-8. Therefore, the
  // number of extra steps needed to refine the result for float (23 mantissa
  // bits) and for double (52 mantissa bits) is 2 and 3, respectively.
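  // As a worked check of those counts (an editorial note, not new policy):
  // starting from roughly 8 correct bits, each refinement step doubles the
  // precision, so 8 -> 16 -> 32 bits covers float's 23-bit mantissa after two
  // steps, and 8 -> 16 -> 32 -> 64 bits covers double's 52-bit mantissa after
  // three steps.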
  unsigned ExtraStepsF = 2,
           ExtraStepsD = ExtraStepsF + 1;
  bool UseRsqrt = ST.useRSqrt();

  TM.Options.Reciprocals.setDefaults("sqrtf", UseRsqrt, ExtraStepsF);
  TM.Options.Reciprocals.setDefaults("sqrtd", UseRsqrt, ExtraStepsD);
  TM.Options.Reciprocals.setDefaults("vec-sqrtf", UseRsqrt, ExtraStepsF);
  TM.Options.Reciprocals.setDefaults("vec-sqrtd", UseRsqrt, ExtraStepsD);

  TM.Options.Reciprocals.setDefaults("divf", false, ExtraStepsF);
  TM.Options.Reciprocals.setDefaults("divd", false, ExtraStepsD);
  TM.Options.Reciprocals.setDefaults("vec-divf", false, ExtraStepsF);
  TM.Options.Reciprocals.setDefaults("vec-divd", false, ExtraStepsD);
}

static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           Optional<Reloc::Model> RM) {
  // AArch64 Darwin is always PIC.
  if (TT.isOSDarwin())
    return Reloc::PIC_;
  // On ELF platforms the default static relocation model has a smart enough
  // linker to cope with referencing external symbols defined in a shared
  // library. Hence DynamicNoPIC doesn't need to be promoted to PIC.
  if (!RM.hasValue() || *RM == Reloc::DynamicNoPIC)
    return Reloc::Static;
  return *RM;
}

/// Create an AArch64 architecture model.
///
AArch64TargetMachine::AArch64TargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    CodeModel::Model CM, CodeGenOpt::Level OL, bool LittleEndian)
    // The DataLayout string has to be handed to the LLVMTargetMachine base
    // constructor here, so that it is properly initialized before TLInfo is
    // constructed.
    : LLVMTargetMachine(T, computeDataLayout(TT, LittleEndian), TT, CPU, FS,
                        Options, getEffectiveRelocModel(TT, RM), CM, OL),
      TLOF(createTLOF(getTargetTriple())),
      Subtarget(TT, CPU, FS, *this, LittleEndian) {
  initReciprocals(*this, Subtarget);
  initAsmInfo();
}

AArch64TargetMachine::~AArch64TargetMachine() {}

#ifdef LLVM_BUILD_GLOBAL_ISEL
namespace {
struct AArch64GISelActualAccessor : public GISelAccessor {
  std::unique_ptr<CallLowering> CallLoweringInfo;
  std::unique_ptr<InstructionSelector> InstSelector;
  std::unique_ptr<MachineLegalizer> Legalizer;
  std::unique_ptr<RegisterBankInfo> RegBankInfo;
  const CallLowering *getCallLowering() const override {
    return CallLoweringInfo.get();
  }
  const InstructionSelector *getInstructionSelector() const override {
    return InstSelector.get();
  }
  const class MachineLegalizer *getMachineLegalizer() const override {
    return Legalizer.get();
  }
  const RegisterBankInfo *getRegBankInfo() const override {
    return RegBankInfo.get();
  }
};
} // End anonymous namespace.
#endif

const AArch64Subtarget *
AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
                        ? CPUAttr.getValueAsString().str()
                        : TargetCPU;
  std::string FS = !FSAttr.hasAttribute(Attribute::None)
                       ? FSAttr.getValueAsString().str()
                       : TargetFS;

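  // As an illustrative example (the attribute names are real, the values are
  // made up): a function carrying
  //   attributes #0 = { "target-cpu"="cortex-a57" "target-features"="+crc,+crypto" }
  // overrides the module-level CPU and feature strings and gets its own
  // subtarget, cached below under the concatenated key "cortex-a57+crc,+crypto".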
  auto &I = SubtargetMap[CPU + FS];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<AArch64Subtarget>(TargetTriple, CPU, FS, *this,
                                            Subtarget.isLittleEndian());
#ifndef LLVM_BUILD_GLOBAL_ISEL
    GISelAccessor *GISel = new GISelAccessor();
#else
    AArch64GISelActualAccessor *GISel = new AArch64GISelActualAccessor();
    GISel->CallLoweringInfo.reset(
        new AArch64CallLowering(*I->getTargetLowering()));
    GISel->Legalizer.reset(new AArch64MachineLegalizer());

    auto *RBI = new AArch64RegisterBankInfo(*I->getRegisterInfo());

    // FIXME: At this point, we can't rely on Subtarget having RBI.
    // It's awkward to mix passing RBI and the Subtarget; should we pass
    // TII/TRI as well?
    GISel->InstSelector.reset(new AArch64InstructionSelector(*I, *RBI));

    GISel->RegBankInfo.reset(RBI);
#endif
    I->setGISelAccessor(*GISel);
  }
  return I.get();
}

void AArch64leTargetMachine::anchor() { }

AArch64leTargetMachine::AArch64leTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    CodeModel::Model CM, CodeGenOpt::Level OL)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

void AArch64beTargetMachine::anchor() { }

AArch64beTargetMachine::AArch64beTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    CodeModel::Model CM, CodeGenOpt::Level OL)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}

namespace {
/// AArch64 Code Generator Pass Configuration Options.
class AArch64PassConfig : public TargetPassConfig {
public:
  AArch64PassConfig(AArch64TargetMachine *TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {
    if (TM->getOptLevel() != CodeGenOpt::None)
      substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
  }

  AArch64TargetMachine &getAArch64TargetMachine() const {
    return getTM<AArch64TargetMachine>();
  }

  void addIRPasses() override;
  bool addPreISel() override;
  bool addInstSelector() override;
#ifdef LLVM_BUILD_GLOBAL_ISEL
  bool addIRTranslator() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
#endif
  bool addILPOpts() override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};
} // namespace

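// A brief orientation note: TTI queries from the middle-end (vectorizers, the
// inliner's cost model, etc.) reach this backend through the TargetIRAnalysis
// returned below, which constructs an AArch64TTIImpl per function so that
// per-function subtarget features are respected.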
TargetIRAnalysis AArch64TargetMachine::getTargetIRAnalysis() {
  return TargetIRAnalysis([this](const Function &F) {
    return TargetTransformInfo(AArch64TTIImpl(this, F));
  });
}

TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new AArch64PassConfig(this, PM);
}

void AArch64PassConfig::addIRPasses() {
  // Always expand atomic operations; we don't deal with atomicrmw or cmpxchg
  // ourselves.
  addPass(createAtomicExpandPass(TM));

  // Cmpxchg instructions are often followed by a comparison to determine
  // whether the operation succeeded. We can exploit existing control flow in
  // ldrex/strex loops to simplify this, but it needs tidying up.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass());

  // Run LoopDataPrefetch
  //
  // Run this before LSR to remove the multiplies involved in computing the
  // pointer values N iterations ahead.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableLoopDataPrefetch)
    addPass(createLoopDataPrefetchPass());

  TargetPassConfig::addIRPasses();

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createInterleavedAccessPass(TM));

  if (TM->getOptLevel() == CodeGenOpt::Aggressive && EnableGEPOpt) {
    // Run the SeparateConstOffsetFromGEP pass to extract constants from GEP
    // indices and to lower a GEP with multiple indices into either arithmetic
    // operations or multiple GEPs with a single index.
    addPass(createSeparateConstOffsetFromGEPPass(TM, true));
    // Run EarlyCSE to find and remove redundant subexpressions in the lowered
    // result.
    addPass(createEarlyCSEPass());
    // Do loop invariant code motion in case part of the lowered result is
    // invariant.
    addPass(createLICMPass());
  }
}

// Pass Pipeline Configuration
bool AArch64PassConfig::addPreISel() {
  // Run promote constant before global merge, so that the promoted constants
  // get a chance to be merged.
  if (TM->getOptLevel() != CodeGenOpt::None && EnablePromoteConstant)
    addPass(createAArch64PromoteConstantPass());
  // FIXME: On AArch64, this depends on the type.
  // Basically, the addressable offsets are up to 4095 * Ty.getSizeInBytes(),
  // and the offset has to be a multiple of the related size in bytes.
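  // To make that concrete with an editorial example: for 8-byte accesses the
  // reachable unsigned offsets are the multiples of 8 up to 4095 * 8 = 32760,
  // while for 1-byte accesses they are simply 0..4095.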
  if ((TM->getOptLevel() != CodeGenOpt::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    bool OnlyOptimizeForSize = (TM->getOptLevel() < CodeGenOpt::Aggressive) &&
                               (EnableGlobalMerge == cl::BOU_UNSET);
    addPass(createGlobalMergePass(TM, 4095, OnlyOptimizeForSize));
  }

  if (TM->getOptLevel() != CodeGenOpt::None && EnableAddressTypePromotion)
    addPass(createAArch64AddressTypePromotionPass());

  return false;
}

bool AArch64PassConfig::addInstSelector() {
  addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));

  // For ELF, clean up any local-dynamic TLS accesses (i.e. combine as many
  // references to _TLS_MODULE_BASE_ as possible).
  if (TM->getTargetTriple().isOSBinFormatELF() &&
      getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64CleanupLocalDynamicTLSPass());

  return false;
}

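// A short orientation comment on the GlobalISel hooks below (describing the
// existing pipeline order, not adding to it): IRTranslator builds generic
// machine instructions from LLVM IR, MachineLegalizePass rewrites unsupported
// generic operations into supported ones, RegBankSelect assigns each value to
// a register bank (e.g. GPR vs. FPR), and InstructionSelect then picks
// concrete AArch64 instructions.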
#ifdef LLVM_BUILD_GLOBAL_ISEL
bool AArch64PassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool AArch64PassConfig::addLegalizeMachineIR() {
  addPass(new MachineLegalizePass());
  return false;
}

bool AArch64PassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool AArch64PassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}
#endif

bool AArch64PassConfig::addILPOpts() {
  if (EnableCondOpt)
    addPass(createAArch64ConditionOptimizerPass());
  if (EnableCCMP)
    addPass(createAArch64ConditionalCompares());
  if (EnableMCR)
    addPass(&MachineCombinerID);
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);
  if (EnableStPairSuppress)
    addPass(createAArch64StorePairSuppressPass());
  return true;
}

void AArch64PassConfig::addPreRegAlloc() {
  // Use AdvSIMD scalar instructions whenever profitable.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAdvSIMDScalar) {
    addPass(createAArch64AdvSIMDScalar());
    // The AdvSIMD pass may produce copies that can be rewritten to
    // be register coalescer friendly.
    addPass(&PeepholeOptimizerID);
  }
}

void AArch64PassConfig::addPostRegAlloc() {
  // Remove redundant copy instructions.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableRedundantCopyElimination)
    addPass(createAArch64RedundantCopyEliminationPass());

  // Change dead register definitions to refer to the zero register.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination)
    addPass(createAArch64DeadRegisterDefinitions());
  if (TM->getOptLevel() != CodeGenOpt::None && usingDefaultRegAlloc())
    // Improve performance for some FP/SIMD code for A57.
    addPass(createAArch64A57FPLoadBalancing());
}

void AArch64PassConfig::addPreSched2() {
  // Expand some pseudo instructions to allow proper scheduling.
  addPass(createAArch64ExpandPseudoPass());
  // Use load/store pair instructions when possible.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableLoadStoreOpt)
    addPass(createAArch64LoadStoreOptimizationPass());
}

void AArch64PassConfig::addPreEmitPass() {
  if (EnableA53Fix835769)
    addPass(createAArch64A53Fix835769());
  // Relax conditional branch instructions if they're otherwise out of
  // range of their destination.
  if (BranchRelaxation)
    addPass(createAArch64BranchRelaxation());
  if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH &&
      TM->getTargetTriple().isOSBinFormatMachO())
    addPass(createAArch64CollectLOHPass());
}