//===-- AArch64TargetMachine.cpp - Define TargetMachine for AArch64 ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64-specific subclass of TargetMachine.
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "AArch64CallLowering.h"
#include "AArch64InstructionSelector.h"
#include "AArch64MachineLegalizer.h"
#include "AArch64RegisterBankInfo.h"
#include "AArch64TargetMachine.h"
#include "AArch64TargetObjectFile.h"
#include "AArch64TargetTransformInfo.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/MachineLegalizePass.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"
using namespace llvm;

static cl::opt<bool> EnableCCMP("aarch64-enable-ccmp",
                                cl::desc("Enable the CCMP formation pass"),
                                cl::init(true), cl::Hidden);
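
// Note: the cl::opt flags in this file are hidden debugging and triage knobs,
// not user-facing options. As an illustrative example, CCMP formation could
// be disabled for a single run with something like:
//   llc -mtriple=aarch64-linux-gnu -aarch64-enable-ccmp=false input.ll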

static cl::opt<bool> EnableMCR("aarch64-enable-mcr",
                               cl::desc("Enable the machine combiner pass"),
                               cl::init(true), cl::Hidden);

static cl::opt<bool> EnableStPairSuppress("aarch64-enable-stp-suppress",
                                          cl::desc("Suppress STP for AArch64"),
                                          cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAdvSIMDScalar(
    "aarch64-enable-simd-scalar",
    cl::desc("Enable use of AdvSIMD scalar integer instructions"),
    cl::init(false), cl::Hidden);

static cl::opt<bool>
    EnablePromoteConstant("aarch64-enable-promote-const",
                          cl::desc("Enable the promote constant pass"),
                          cl::init(true), cl::Hidden);

static cl::opt<bool> EnableCollectLOH(
    "aarch64-enable-collect-loh",
    cl::desc("Enable the pass that emits the linker optimization hints (LOH)"),
    cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableDeadRegisterElimination("aarch64-enable-dead-defs", cl::Hidden,
                                  cl::desc("Enable the pass that removes dead"
                                           " definitions and replaces stores to"
                                           " them with stores to the zero"
                                           " register"),
                                  cl::init(true));

static cl::opt<bool> EnableRedundantCopyElimination(
    "aarch64-enable-copyelim",
    cl::desc("Enable the redundant copy elimination pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableLoadStoreOpt("aarch64-enable-ldst-opt",
                                        cl::desc("Enable the load/store pair"
                                                 " optimization pass"),
                                        cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAtomicTidy(
    "aarch64-enable-atomic-cfg-tidy", cl::Hidden,
    cl::desc("Run SimplifyCFG after expanding atomic operations"
             " to make use of cmpxchg flow-based information"),
    cl::init(true));

static cl::opt<bool>
    EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden,
                            cl::desc("Run early if-conversion"),
                            cl::init(true));

static cl::opt<bool>
    EnableCondOpt("aarch64-enable-condopt",
                  cl::desc("Enable the condition optimizer pass"),
                  cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableA53Fix835769("aarch64-fix-cortex-a53-835769", cl::Hidden,
                       cl::desc("Work around Cortex-A53 erratum 835769"),
                       cl::init(false));

static cl::opt<bool>
    EnableAddressTypePromotion("aarch64-enable-type-promotion", cl::Hidden,
                               cl::desc("Enable the type promotion pass"),
                               cl::init(true));

static cl::opt<bool>
    EnableGEPOpt("aarch64-enable-gep-opt", cl::Hidden,
                 cl::desc("Enable optimizations on complex GEPs"),
                 cl::init(false));

static cl::opt<bool>
    BranchRelaxation("aarch64-enable-branch-relax", cl::Hidden, cl::init(true),
                     cl::desc("Relax out of range conditional branches"));

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
    EnableGlobalMerge("aarch64-enable-global-merge", cl::Hidden,
                      cl::desc("Enable the global merge pass"));

static cl::opt<bool>
    EnableLoopDataPrefetch("aarch64-enable-loop-data-prefetch", cl::Hidden,
                           cl::desc("Enable the loop data prefetch pass"),
                           cl::init(true));

extern "C" void LLVMInitializeAArch64Target() {
  // Register the target.
  RegisterTargetMachine<AArch64leTargetMachine> X(getTheAArch64leTarget());
  RegisterTargetMachine<AArch64beTargetMachine> Y(getTheAArch64beTarget());
  RegisterTargetMachine<AArch64leTargetMachine> Z(getTheARM64Target());
  auto PR = PassRegistry::getPassRegistry();
  initializeGlobalISel(*PR);
  initializeAArch64A53Fix835769Pass(*PR);
  initializeAArch64A57FPLoadBalancingPass(*PR);
  initializeAArch64AddressTypePromotionPass(*PR);
  initializeAArch64AdvSIMDScalarPass(*PR);
  initializeAArch64CollectLOHPass(*PR);
  initializeAArch64ConditionalComparesPass(*PR);
  initializeAArch64ConditionOptimizerPass(*PR);
  initializeAArch64DeadRegisterDefinitionsPass(*PR);
  initializeAArch64ExpandPseudoPass(*PR);
  initializeAArch64LoadStoreOptPass(*PR);
  initializeAArch64VectorByElementOptPass(*PR);
  initializeAArch64PromoteConstantPass(*PR);
  initializeAArch64RedundantCopyEliminationPass(*PR);
  initializeAArch64StorePairSuppressPass(*PR);
  initializeLDTLSCleanupPass(*PR);
}

//===----------------------------------------------------------------------===//
// AArch64 Lowering public interface.
//===----------------------------------------------------------------------===//
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return make_unique<AArch64_MachoTargetObjectFile>();

  return make_unique<AArch64_ELFTargetObjectFile>();
}

// Helper function to build a DataLayout string.
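// The fields below are standard LLVM DataLayout specifiers: "e"/"E" selects
// little-/big-endian, "m:o"/"m:e" the Mach-O or ELF name-mangling scheme,
// "iN:A" (or "iN:A:P") the ABI (and preferred) alignment of iN in bits,
// "nX:Y" the native integer widths, and "S128" a 128-bit natural stack
// alignment.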
static std::string computeDataLayout(const Triple &TT, bool LittleEndian) {
  if (TT.isOSBinFormatMachO())
    return "e-m:o-i64:64-i128:128-n32:64-S128";
  if (LittleEndian)
    return "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128";
  return "E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128";
}

static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           Optional<Reloc::Model> RM) {
  // AArch64 Darwin is always PIC.
  if (TT.isOSDarwin())
    return Reloc::PIC_;
  // On ELF platforms the default static relocation model has a smart enough
  // linker to cope with referencing external symbols defined in a shared
  // library. Hence DynamicNoPIC doesn't need to be promoted to PIC.
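  // For example, an explicit "llc -relocation-model=pic" arrives here as
  // RM == Reloc::PIC_ and is returned unchanged below, while an unset model
  // (or DynamicNoPIC) falls back to Reloc::Static.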
  if (!RM.hasValue() || *RM == Reloc::DynamicNoPIC)
    return Reloc::Static;
  return *RM;
}

/// Create an AArch64 architecture model.
///
AArch64TargetMachine::AArch64TargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    CodeModel::Model CM, CodeGenOpt::Level OL, bool LittleEndian)
    // Note: DL needs to be properly initialized before TLInfo is constructed.
    : LLVMTargetMachine(T, computeDataLayout(TT, LittleEndian), TT, CPU, FS,
                        Options, getEffectiveRelocModel(TT, RM), CM, OL),
      TLOF(createTLOF(getTargetTriple())), isLittle(LittleEndian) {
  initAsmInfo();
}

AArch64TargetMachine::~AArch64TargetMachine() {}

#ifdef LLVM_BUILD_GLOBAL_ISEL
namespace {
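/// Bundles the GlobalISel support objects (call lowering, legalizer, register
/// bank info, instruction selector) behind the GISelAccessor interface so the
/// subtarget can expose them only when GlobalISel is actually built in.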
struct AArch64GISelActualAccessor : public GISelAccessor {
  std::unique_ptr<CallLowering> CallLoweringInfo;
  std::unique_ptr<InstructionSelector> InstSelector;
  std::unique_ptr<MachineLegalizer> Legalizer;
  std::unique_ptr<RegisterBankInfo> RegBankInfo;

  const CallLowering *getCallLowering() const override {
    return CallLoweringInfo.get();
  }
  const InstructionSelector *getInstructionSelector() const override {
    return InstSelector.get();
  }
  const class MachineLegalizer *getMachineLegalizer() const override {
    return Legalizer.get();
  }
  const RegisterBankInfo *getRegBankInfo() const override {
    return RegBankInfo.get();
  }
};
} // End anonymous namespace.
#endif

const AArch64Subtarget *
AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");
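
  // Honor per-function "target-cpu"/"target-features" attributes (emitted by
  // frontends, e.g. for clang's __attribute__((target(...)))), falling back
  // to the module-level CPU and feature strings when they are absent.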
  std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
                        ? CPUAttr.getValueAsString().str()
                        : TargetCPU;
  std::string FS = !FSAttr.hasAttribute(Attribute::None)
                       ? FSAttr.getValueAsString().str()
                       : TargetFS;

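  // Subtargets are cached on the concatenated CPU+FS string, so functions
  // with identical attribute sets share a single AArch64Subtarget instance.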
  auto &I = SubtargetMap[CPU + FS];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<AArch64Subtarget>(TargetTriple, CPU, FS, *this,
                                            isLittle);
#ifndef LLVM_BUILD_GLOBAL_ISEL
    GISelAccessor *GISel = new GISelAccessor();
#else
    AArch64GISelActualAccessor *GISel = new AArch64GISelActualAccessor();
    GISel->CallLoweringInfo.reset(
        new AArch64CallLowering(*I->getTargetLowering()));
    GISel->Legalizer.reset(new AArch64MachineLegalizer());

    auto *RBI = new AArch64RegisterBankInfo(*I->getRegisterInfo());

    // FIXME: At this point, we can't rely on Subtarget having RBI.
    // It's awkward to mix passing RBI and the Subtarget; should we pass
    // TII/TRI as well?
    GISel->InstSelector.reset(new AArch64InstructionSelector(*this, *I, *RBI));

    GISel->RegBankInfo.reset(RBI);
#endif
    I->setGISelAccessor(*GISel);
  }
  return I.get();
}

void AArch64leTargetMachine::anchor() {}

AArch64leTargetMachine::AArch64leTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    CodeModel::Model CM, CodeGenOpt::Level OL)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

void AArch64beTargetMachine::anchor() {}

AArch64beTargetMachine::AArch64beTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    CodeModel::Model CM, CodeGenOpt::Level OL)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}

namespace {
/// AArch64 Code Generator Pass Configuration Options.
class AArch64PassConfig : public TargetPassConfig {
public:
  AArch64PassConfig(AArch64TargetMachine *TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {
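    // At -O1 and above, replace the generic post-RA list scheduler with the
    // MachineScheduler-based PostMachineScheduler.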
    if (TM->getOptLevel() != CodeGenOpt::None)
      substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
  }

  AArch64TargetMachine &getAArch64TargetMachine() const {
    return getTM<AArch64TargetMachine>();
  }

  void addIRPasses() override;
  bool addPreISel() override;
  bool addInstSelector() override;
#ifdef LLVM_BUILD_GLOBAL_ISEL
  bool addIRTranslator() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
#endif
  bool addILPOpts() override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};
} // namespace

TargetIRAnalysis AArch64TargetMachine::getTargetIRAnalysis() {
  return TargetIRAnalysis([this](const Function &F) {
    return TargetTransformInfo(AArch64TTIImpl(this, F));
  });
}

TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new AArch64PassConfig(this, PM);
}

void AArch64PassConfig::addIRPasses() {
  // Always expand atomic operations; we don't deal with atomicrmw or cmpxchg
  // ourselves.
  addPass(createAtomicExpandPass(TM));

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether they succeeded. We can exploit existing control flow in
  // ldxr/stxr loops to simplify this, but it needs tidying up.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass());

  // Run LoopDataPrefetch.
  //
  // Run this before LSR to remove the multiplies involved in computing the
  // pointer values N iterations ahead.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableLoopDataPrefetch)
    addPass(createLoopDataPrefetchPass());

  TargetPassConfig::addIRPasses();

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createInterleavedAccessPass(TM));

  if (TM->getOptLevel() == CodeGenOpt::Aggressive && EnableGEPOpt) {
    // Call the SeparateConstOffsetFromGEP pass to extract constants within
    // indices and lower a GEP with multiple indices to either arithmetic
    // operations or multiple GEPs with a single index.
    addPass(createSeparateConstOffsetFromGEPPass(TM, true));
    // Call the EarlyCSE pass to find and remove subexpressions in the lowered
    // result.
    addPass(createEarlyCSEPass());
    // Do loop invariant code motion in case part of the lowered result is
    // invariant.
    addPass(createLICMPass());
  }
}

// Pass Pipeline Configuration
bool AArch64PassConfig::addPreISel() {
  // Run promote constant before global merge, so that the promoted constants
  // get a chance to be merged.
  if (TM->getOptLevel() != CodeGenOpt::None && EnablePromoteConstant)
    addPass(createAArch64PromoteConstantPass());
  // FIXME: On AArch64, this depends on the type: the addressable offsets go
  // up to 4095 * Ty.getSizeInBytes(), and the offset has to be a multiple of
  // the related size in bytes.
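  // For example, an i64 access supports unsigned scaled immediate offsets of
  // 0, 8, ..., 32760 bytes (4095 * 8), so globals merged under a common base
  // within that range stay reachable with a single reg+imm load or store.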
  if ((TM->getOptLevel() != CodeGenOpt::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    bool OnlyOptimizeForSize = (TM->getOptLevel() < CodeGenOpt::Aggressive) &&
                               (EnableGlobalMerge == cl::BOU_UNSET);
    addPass(createGlobalMergePass(TM, 4095, OnlyOptimizeForSize));
  }

  if (TM->getOptLevel() != CodeGenOpt::None && EnableAddressTypePromotion)
    addPass(createAArch64AddressTypePromotionPass());

  return false;
}

bool AArch64PassConfig::addInstSelector() {
  addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));

  // For ELF, clean up any local-dynamic TLS accesses (i.e. combine as many
  // references to _TLS_MODULE_BASE_ as possible).
  if (TM->getTargetTriple().isOSBinFormatELF() &&
      getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64CleanupLocalDynamicTLSPass());

  return false;
}

#ifdef LLVM_BUILD_GLOBAL_ISEL
bool AArch64PassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool AArch64PassConfig::addLegalizeMachineIR() {
  addPass(new MachineLegalizePass());
  return false;
}

bool AArch64PassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool AArch64PassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}
#endif

bool AArch64PassConfig::addILPOpts() {
  if (EnableCondOpt)
    addPass(createAArch64ConditionOptimizerPass());
  if (EnableCCMP)
    addPass(createAArch64ConditionalCompares());
  if (EnableMCR)
    addPass(&MachineCombinerID);
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);
  if (EnableStPairSuppress)
    addPass(createAArch64StorePairSuppressPass());
  addPass(createAArch64VectorByElementOptPass());
  return true;
}

void AArch64PassConfig::addPreRegAlloc() {
  // Use AdvSIMD scalar instructions whenever profitable.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAdvSIMDScalar) {
    addPass(createAArch64AdvSIMDScalar());
    // The AdvSIMD pass may produce copies that can be rewritten to
    // be register coalescer friendly.
    addPass(&PeepholeOptimizerID);
  }
}

void AArch64PassConfig::addPostRegAlloc() {
  // Remove redundant copy instructions.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableRedundantCopyElimination)
    addPass(createAArch64RedundantCopyEliminationPass());

  // Change dead register definitions to refer to the zero register.
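  // For example, "subs w0, w1, w2" with a dead w0 becomes
  // "subs wzr, w1, w2", i.e. the "cmp w1, w2" alias.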
  if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination)
    addPass(createAArch64DeadRegisterDefinitions());

  if (TM->getOptLevel() != CodeGenOpt::None && usingDefaultRegAlloc())
    // Improve performance for some FP/SIMD code for A57.
    addPass(createAArch64A57FPLoadBalancing());
}

void AArch64PassConfig::addPreSched2() {
  // Expand some pseudo instructions to allow proper scheduling.
  addPass(createAArch64ExpandPseudoPass());
  // Use load/store pair instructions when possible.
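  // For example, adjacent "ldr x0, [sp]" and "ldr x1, [sp, #8]" can be
  // rewritten as a single "ldp x0, x1, [sp]".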
  if (TM->getOptLevel() != CodeGenOpt::None && EnableLoadStoreOpt)
    addPass(createAArch64LoadStoreOptimizationPass());
}

void AArch64PassConfig::addPreEmitPass() {
  if (EnableA53Fix835769)
    addPass(createAArch64A53Fix835769());

  // Relax conditional branch instructions if they're otherwise out of
  // range of their destination.
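  // On AArch64, B.cond has a +/-1MiB range while TBZ/TBNZ only reach
  // +/-32KiB, so out-of-range branches are rewritten as an inverted branch
  // around an unconditional branch.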
  if (BranchRelaxation)
    addPass(&BranchRelaxationPassID);

  if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH &&
      TM->getTargetTriple().isOSBinFormatMachO())
    addPass(createAArch64CollectLOHPass());
}