//===-- AArch64TargetMachine.cpp - Define TargetMachine for AArch64 -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64 specific subclass of TargetMachine.
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "AArch64CallLowering.h"
#include "AArch64RegisterBankInfo.h"
#include "AArch64TargetMachine.h"
#include "AArch64TargetObjectFile.h"
#include "AArch64TargetTransformInfo.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"
using namespace llvm;

static cl::opt<bool>
EnableCCMP("aarch64-ccmp", cl::desc("Enable the CCMP formation pass"),
           cl::init(true), cl::Hidden);

static cl::opt<bool> EnableMCR("aarch64-mcr",
                               cl::desc("Enable the machine combiner pass"),
                               cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableStPairSuppress("aarch64-stp-suppress", cl::desc("Suppress STP for AArch64"),
                     cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableAdvSIMDScalar("aarch64-simd-scalar", cl::desc("Enable use of AdvSIMD scalar"
                    " integer instructions"), cl::init(false), cl::Hidden);

static cl::opt<bool>
EnablePromoteConstant("aarch64-promote-const", cl::desc("Enable the promote "
                      "constant pass"), cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableCollectLOH("aarch64-collect-loh", cl::desc("Enable the pass that emits the"
                 " linker optimization hints (LOH)"), cl::init(true),
                 cl::Hidden);

static cl::opt<bool>
EnableDeadRegisterElimination("aarch64-dead-def-elimination", cl::Hidden,
                              cl::desc("Enable the pass that removes dead"
                                       " definitions and replaces stores to"
                                       " them with stores to the zero"
                                       " register"),
                              cl::init(true));

static cl::opt<bool>
EnableRedundantCopyElimination("aarch64-redundant-copy-elim",
              cl::desc("Enable the redundant copy elimination pass"),
              cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableLoadStoreOpt("aarch64-load-store-opt", cl::desc("Enable the load/store pair"
                   " optimization pass"), cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableAtomicTidy("aarch64-atomic-cfg-tidy", cl::Hidden,
                 cl::desc("Run SimplifyCFG after expanding atomic operations"
                          " to make use of cmpxchg flow-based information"),
                 cl::init(true));

static cl::opt<bool>
EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(true));

static cl::opt<bool>
EnableCondOpt("aarch64-condopt",
              cl::desc("Enable the condition optimizer pass"),
              cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableA53Fix835769("aarch64-fix-cortex-a53-835769", cl::Hidden,
                cl::desc("Work around Cortex-A53 erratum 835769"),
                cl::init(false));

static cl::opt<bool>
EnableGEPOpt("aarch64-gep-opt", cl::Hidden,
             cl::desc("Enable optimizations on complex GEPs"),
             cl::init(false));

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
EnableGlobalMerge("aarch64-global-merge", cl::Hidden,
                  cl::desc("Enable the global merge pass"));

static cl::opt<bool>
    EnableLoopDataPrefetch("aarch64-loop-data-prefetch", cl::Hidden,
                           cl::desc("Enable the loop data prefetch pass"),
                           cl::init(true));

extern "C" void LLVMInitializeAArch64Target() {
  // Register the target.
  RegisterTargetMachine<AArch64leTargetMachine> X(TheAArch64leTarget);
  RegisterTargetMachine<AArch64beTargetMachine> Y(TheAArch64beTarget);
  RegisterTargetMachine<AArch64leTargetMachine> Z(TheARM64Target);
  auto PR = PassRegistry::getPassRegistry();
  initializeGlobalISel(*PR);
  initializeAArch64ExpandPseudoPass(*PR);
}

//===----------------------------------------------------------------------===//
// AArch64 Lowering public interface.
//===----------------------------------------------------------------------===//
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return make_unique<AArch64_MachoTargetObjectFile>();

  return make_unique<AArch64_ELFTargetObjectFile>();
}

// Helper function to build a DataLayout string.
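// For example, "e-m:e-i64:64-i128:128-n32:64-S128" reads as: little-endian
// ("e"; "E" is big-endian), ELF-style name mangling ("m:e"; "m:o" is Mach-O),
// 64-bit alignment for i64, 128-bit alignment for i128, native integer widths
// of 32 and 64 bits ("n32:64"), and a natural stack alignment of 128 bits
// ("S128").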
static std::string computeDataLayout(const Triple &TT, bool LittleEndian) {
  if (TT.isOSBinFormatMachO())
    return "e-m:o-i64:64-i128:128-n32:64-S128";
  if (LittleEndian)
    return "e-m:e-i64:64-i128:128-n32:64-S128";
  return "E-m:e-i64:64-i128:128-n32:64-S128";
}

// Helper function to set up the defaults for reciprocals.
static void initReciprocals(AArch64TargetMachine &TM, AArch64Subtarget &ST) {
  // For the estimates, convergence is quadratic, so essentially the number of
  // digits is doubled after each iteration. In ARMv8, the minimum architected
  // accuracy of the initial estimate is 2^-8. Therefore, the number of extra
  // steps to refine the result for float (23 mantissa bits) and for double
  // (52 mantissa bits) are 2 and 3, respectively.
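  // Concretely, each extra Newton-Raphson step doubles the bits of accuracy:
  // 2^-8 -> 2^-16 -> 2^-32 after two steps covers float, and a third step
  // (2^-64) covers double.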
  unsigned ExtraStepsF = 2,
           ExtraStepsD = ExtraStepsF + 1;
  // FIXME: Enable x^-1/2 only for Exynos M1 at the moment.
  bool UseRsqrt = ST.isExynosM1();

  TM.Options.Reciprocals.setDefaults("sqrtf", UseRsqrt, ExtraStepsF);
  TM.Options.Reciprocals.setDefaults("sqrtd", UseRsqrt, ExtraStepsD);
  TM.Options.Reciprocals.setDefaults("vec-sqrtf", UseRsqrt, ExtraStepsF);
  TM.Options.Reciprocals.setDefaults("vec-sqrtd", UseRsqrt, ExtraStepsD);

  TM.Options.Reciprocals.setDefaults("divf", false, ExtraStepsF);
  TM.Options.Reciprocals.setDefaults("divd", false, ExtraStepsD);
  TM.Options.Reciprocals.setDefaults("vec-divf", false, ExtraStepsF);
  TM.Options.Reciprocals.setDefaults("vec-divd", false, ExtraStepsD);
}

static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           Optional<Reloc::Model> RM) {
  // AArch64 Darwin is always PIC.
  if (TT.isOSDarwin())
    return Reloc::PIC_;
  // On ELF platforms the default static relocation model has a smart enough
  // linker to cope with referencing external symbols defined in a shared
  // library. Hence DynamicNoPIC doesn't need to be promoted to PIC.
  if (!RM.hasValue() || *RM == Reloc::DynamicNoPIC)
    return Reloc::Static;
  return *RM;
}

/// Create an AArch64 architecture model.
///
AArch64TargetMachine::AArch64TargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    CodeModel::Model CM, CodeGenOpt::Level OL, bool LittleEndian)
    // The DataLayout has to be computed before TLInfo is constructed, hence
    // the computeDataLayout call in the initializer list.
    : LLVMTargetMachine(T, computeDataLayout(TT, LittleEndian), TT, CPU, FS,
                        Options, getEffectiveRelocModel(TT, RM), CM, OL),
      TLOF(createTLOF(getTargetTriple())),
      Subtarget(TT, CPU, FS, *this, LittleEndian) {
  initReciprocals(*this, Subtarget);
  initAsmInfo();
}

AArch64TargetMachine::~AArch64TargetMachine() {}

#ifdef LLVM_BUILD_GLOBAL_ISEL
namespace {
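/// Owns the GlobalISel-specific objects (call lowering and register bank
/// info) and hands out non-owning pointers to them through the GISelAccessor
/// interface.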
struct AArch64GISelActualAccessor : public GISelAccessor {
  std::unique_ptr<CallLowering> CallLoweringInfo;
  std::unique_ptr<RegisterBankInfo> RegBankInfo;
  const CallLowering *getCallLowering() const override {
    return CallLoweringInfo.get();
  }
  const RegisterBankInfo *getRegBankInfo() const override {
    return RegBankInfo.get();
  }
};
} // End anonymous namespace.
#endif

const AArch64Subtarget *
AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
                        ? CPUAttr.getValueAsString().str()
                        : TargetCPU;
  std::string FS = !FSAttr.hasAttribute(Attribute::None)
                       ? FSAttr.getValueAsString().str()
                       : TargetFS;

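  // Subtargets are memoized by the concatenated CPU and feature strings, so
  // functions with identical attributes share a single AArch64Subtarget.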
  auto &I = SubtargetMap[CPU + FS];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<AArch64Subtarget>(TargetTriple, CPU, FS, *this,
                                            Subtarget.isLittleEndian());
#ifndef LLVM_BUILD_GLOBAL_ISEL
    GISelAccessor *GISel = new GISelAccessor();
#else
    AArch64GISelActualAccessor *GISel =
        new AArch64GISelActualAccessor();
    GISel->CallLoweringInfo.reset(
        new AArch64CallLowering(*I->getTargetLowering()));
    GISel->RegBankInfo.reset(
        new AArch64RegisterBankInfo(*I->getRegisterInfo()));
#endif
    I->setGISelAccessor(*GISel);
  }
  return I.get();
}

void AArch64leTargetMachine::anchor() { }

AArch64leTargetMachine::AArch64leTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    CodeModel::Model CM, CodeGenOpt::Level OL)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

void AArch64beTargetMachine::anchor() { }

AArch64beTargetMachine::AArch64beTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    CodeModel::Model CM, CodeGenOpt::Level OL)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}

namespace {
/// AArch64 Code Generator Pass Configuration Options.
class AArch64PassConfig : public TargetPassConfig {
public:
  AArch64PassConfig(AArch64TargetMachine *TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {
    if (TM->getOptLevel() != CodeGenOpt::None)
      substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
  }

  AArch64TargetMachine &getAArch64TargetMachine() const {
    return getTM<AArch64TargetMachine>();
  }

  void addIRPasses() override;
  bool addPreISel() override;
  bool addInstSelector() override;
#ifdef LLVM_BUILD_GLOBAL_ISEL
  bool addIRTranslator() override;
  bool addRegBankSelect() override;
#endif
  bool addILPOpts() override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};
} // namespace

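// Expose AArch64-specific cost information (via AArch64TTIImpl) to IR-level
// passes such as the vectorizers and the inliner.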
TargetIRAnalysis AArch64TargetMachine::getTargetIRAnalysis() {
  return TargetIRAnalysis([this](const Function &F) {
    return TargetTransformInfo(AArch64TTIImpl(this, F));
  });
}

TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new AArch64PassConfig(this, PM);
}

void AArch64PassConfig::addIRPasses() {
  // Always expand atomic operations; we don't deal with atomicrmw or cmpxchg
  // ourselves.
  addPass(createAtomicExpandPass(TM));

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether they succeeded. We can exploit existing control-flow in
  // ldrex/strex loops to simplify this, but it needs tidying up.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass());

  // Run LoopDataPrefetch for Cyclone (the only subtarget that defines a
  // non-zero getPrefetchDistance).
  //
  // Run this before LSR to remove the multiplies involved in computing the
  // pointer values N iterations ahead.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableLoopDataPrefetch)
    addPass(createLoopDataPrefetchPass());

  TargetPassConfig::addIRPasses();

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createInterleavedAccessPass(TM));

  if (TM->getOptLevel() == CodeGenOpt::Aggressive && EnableGEPOpt) {
    // Call SeparateConstOffsetFromGEP pass to extract constants within indices
    // and lower a GEP with multiple indices to either arithmetic operations or
    // multiple GEPs with a single index.
    addPass(createSeparateConstOffsetFromGEPPass(TM, true));
    // Call EarlyCSE pass to find and remove subexpressions in the lowered
    // result.
    addPass(createEarlyCSEPass());
    // Do loop invariant code motion in case part of the lowered result is
    // invariant.
    addPass(createLICMPass());
  }
}

// Pass Pipeline Configuration
bool AArch64PassConfig::addPreISel() {
  // Run promote constant before global merge, so that the promoted constants
  // get a chance to be merged.
  if (TM->getOptLevel() != CodeGenOpt::None && EnablePromoteConstant)
    addPass(createAArch64PromoteConstantPass());
  // FIXME: On AArch64, this depends on the type.
  // Basically, the addressable offsets are up to 4095 * Ty.getSizeInBytes(),
  // and the offset has to be a multiple of the related size in bytes.
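  // For example, a byte-sized access reaches offsets 0..4095, while an LDR of
  // a 64-bit value scales its 12-bit unsigned immediate by 8 (0, 8, ..., 32760).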
  if ((TM->getOptLevel() != CodeGenOpt::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    bool OnlyOptimizeForSize = (TM->getOptLevel() < CodeGenOpt::Aggressive) &&
                               (EnableGlobalMerge == cl::BOU_UNSET);
    addPass(createGlobalMergePass(TM, 4095, OnlyOptimizeForSize));
  }

  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64AddressTypePromotionPass());

  return false;
}

bool AArch64PassConfig::addInstSelector() {
  addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));

  // For ELF, clean up any local-dynamic TLS accesses (i.e., combine as many
  // references to _TLS_MODULE_BASE_ as possible).
  if (TM->getTargetTriple().isOSBinFormatELF() &&
      getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64CleanupLocalDynamicTLSPass());

  return false;
}

#ifdef LLVM_BUILD_GLOBAL_ISEL
bool AArch64PassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool AArch64PassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}
#endif

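// The ILP optimizations below run on machine code that is still in SSA form,
// before register allocation.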
bool AArch64PassConfig::addILPOpts() {
  if (EnableCondOpt)
    addPass(createAArch64ConditionOptimizerPass());
  if (EnableCCMP)
    addPass(createAArch64ConditionalCompares());
  if (EnableMCR)
    addPass(&MachineCombinerID);
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);
  if (EnableStPairSuppress)
    addPass(createAArch64StorePairSuppressPass());
  return true;
}

void AArch64PassConfig::addPreRegAlloc() {
  // Use AdvSIMD scalar instructions whenever profitable.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAdvSIMDScalar) {
    addPass(createAArch64AdvSIMDScalar());
    // The AdvSIMD pass may produce copies that can be rewritten to
    // be register coalescer friendly.
    addPass(&PeepholeOptimizerID);
  }
}

void AArch64PassConfig::addPostRegAlloc() {
  // Remove redundant copy instructions.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableRedundantCopyElimination)
    addPass(createAArch64RedundantCopyEliminationPass());

  // Change dead register definitions to refer to the zero register.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination)
    addPass(createAArch64DeadRegisterDefinitions());

  // Improve performance for some FP/SIMD code for A57.
  if (TM->getOptLevel() != CodeGenOpt::None && usingDefaultRegAlloc())
    addPass(createAArch64A57FPLoadBalancing());
}

void AArch64PassConfig::addPreSched2() {
  // Expand some pseudo instructions to allow proper scheduling.
  addPass(createAArch64ExpandPseudoPass());
  // Use load/store pair instructions when possible.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableLoadStoreOpt)
    addPass(createAArch64LoadStoreOptimizationPass());
}

void AArch64PassConfig::addPreEmitPass() {
  if (EnableA53Fix835769)
    addPass(createAArch64A53Fix835769());
  // Relax conditional branch instructions if they're otherwise out of
  // range of their destination.
  addPass(createAArch64BranchRelaxation());
  if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH &&
      TM->getTargetTriple().isOSBinFormatMachO())
    addPass(createAArch64CollectLOHPass());
}