//===-- AArch64TargetMachine.cpp - Define TargetMachine for AArch64 -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64-specific subclass of TargetMachine.
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "AArch64MacroFusion.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "AArch64TargetObjectFile.h"
#include "AArch64TargetTransformInfo.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"
#include <memory>
#include <string>

using namespace llvm;

static cl::opt<bool> EnableCCMP("aarch64-enable-ccmp",
                                cl::desc("Enable the CCMP formation pass"),
                                cl::init(true), cl::Hidden);

static cl::opt<bool> EnableMCR("aarch64-enable-mcr",
                               cl::desc("Enable the machine combiner pass"),
                               cl::init(true), cl::Hidden);

static cl::opt<bool> EnableStPairSuppress("aarch64-enable-stp-suppress",
                                          cl::desc("Suppress STP for AArch64"),
                                          cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAdvSIMDScalar(
    "aarch64-enable-simd-scalar",
    cl::desc("Enable use of AdvSIMD scalar integer instructions"),
    cl::init(false), cl::Hidden);

static cl::opt<bool>
    EnablePromoteConstant("aarch64-enable-promote-const",
                          cl::desc("Enable the promote constant pass"),
                          cl::init(true), cl::Hidden);

static cl::opt<bool> EnableCollectLOH(
    "aarch64-enable-collect-loh",
    cl::desc("Enable the pass that emits the linker optimization hints (LOH)"),
    cl::init(true), cl::Hidden);
static cl::opt<bool>
    EnableDeadRegisterElimination("aarch64-enable-dead-defs", cl::Hidden,
                                  cl::desc("Enable the pass that removes dead"
                                           " definitions and replaces stores to"
                                           " them with stores to the zero"
                                           " register"),
                                  cl::init(true));
static cl::opt<bool> EnableRedundantCopyElimination(
    "aarch64-enable-copyelim",
    cl::desc("Enable the redundant copy elimination pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableLoadStoreOpt("aarch64-enable-ldst-opt",
                                        cl::desc("Enable the load/store pair"
                                                 " optimization pass"),
                                        cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAtomicTidy(
    "aarch64-enable-atomic-cfg-tidy", cl::Hidden,
    cl::desc("Run SimplifyCFG after expanding atomic operations"
             " to make use of cmpxchg flow-based information"),
    cl::init(true));

static cl::opt<bool>
    EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden,
                            cl::desc("Run early if-conversion"),
                            cl::init(true));

static cl::opt<bool>
    EnableCondOpt("aarch64-enable-condopt",
                  cl::desc("Enable the condition optimizer pass"),
                  cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableA53Fix835769("aarch64-fix-cortex-a53-835769", cl::Hidden,
                       cl::desc("Work around Cortex-A53 erratum 835769"),
                       cl::init(false));

static cl::opt<bool>
    EnableGEPOpt("aarch64-enable-gep-opt", cl::Hidden,
                 cl::desc("Enable optimizations on complex GEPs"),
                 cl::init(false));

static cl::opt<bool>
    BranchRelaxation("aarch64-enable-branch-relax", cl::Hidden, cl::init(true),
                     cl::desc("Relax out of range conditional branches"));

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
    EnableGlobalMerge("aarch64-enable-global-merge", cl::Hidden,
                      cl::desc("Enable the global merge pass"));

static cl::opt<bool>
    EnableLoopDataPrefetch("aarch64-enable-loop-data-prefetch", cl::Hidden,
                           cl::desc("Enable the loop data prefetch pass"),
                           cl::init(true));

static cl::opt<int> EnableGlobalISelAtO(
    "aarch64-enable-global-isel-at-O", cl::Hidden,
    cl::desc("Enable GlobalISel at or below an opt level (-1 to disable)"),
    cl::init(-1));

extern "C" void LLVMInitializeAArch64Target() {
  // Register the target.
  RegisterTargetMachine<AArch64leTargetMachine> X(getTheAArch64leTarget());
  RegisterTargetMachine<AArch64beTargetMachine> Y(getTheAArch64beTarget());
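  // The "arm64" triple spelling is an alias for little-endian AArch64.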
  RegisterTargetMachine<AArch64leTargetMachine> Z(getTheARM64Target());
  auto PR = PassRegistry::getPassRegistry();
  initializeGlobalISel(*PR);
  initializeAArch64A53Fix835769Pass(*PR);
  initializeAArch64A57FPLoadBalancingPass(*PR);
  initializeAArch64AdvSIMDScalarPass(*PR);
  initializeAArch64CollectLOHPass(*PR);
  initializeAArch64ConditionalComparesPass(*PR);
  initializeAArch64ConditionOptimizerPass(*PR);
  initializeAArch64DeadRegisterDefinitionsPass(*PR);
  initializeAArch64ExpandPseudoPass(*PR);
  initializeAArch64LoadStoreOptPass(*PR);
  initializeAArch64VectorByElementOptPass(*PR);
  initializeAArch64PromoteConstantPass(*PR);
  initializeAArch64RedundantCopyEliminationPass(*PR);
  initializeAArch64StorePairSuppressPass(*PR);
  initializeLDTLSCleanupPass(*PR);
}

//===----------------------------------------------------------------------===//
// AArch64 Lowering public interface.
//===----------------------------------------------------------------------===//
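// Select the object-file lowering for the triple: Mach-O on Darwin targets,
// ELF otherwise.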
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return llvm::make_unique<AArch64_MachoTargetObjectFile>();

  return llvm::make_unique<AArch64_ELFTargetObjectFile>();
}

// Helper function to build a DataLayout string.
static std::string computeDataLayout(const Triple &TT,
                                     const MCTargetOptions &Options,
                                     bool LittleEndian) {
  if (Options.getABIName() == "ilp32")
    return "e-m:e-p:32:32-i8:8-i16:16-i64:64-S128";
  if (TT.isOSBinFormatMachO())
    return "e-m:o-i64:64-i128:128-n32:64-S128";
  if (LittleEndian)
    return "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128";
  return "E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128";
}

static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           Optional<Reloc::Model> RM) {
  // AArch64 Darwin is always PIC.
  if (TT.isOSDarwin())
    return Reloc::PIC_;
  // On ELF platforms the default static relocation model has a smart enough
  // linker to cope with referencing external symbols defined in a shared
  // library. Hence DynamicNoPIC doesn't need to be promoted to PIC.
  if (!RM.hasValue() || *RM == Reloc::DynamicNoPIC)
    return Reloc::Static;
  return *RM;
}

/// Create an AArch64 architecture model.
///
AArch64TargetMachine::AArch64TargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    CodeModel::Model CM, CodeGenOpt::Level OL, bool LittleEndian)
    // The DataLayout string must be computed before the LLVMTargetMachine
    // base class is constructed.
    : LLVMTargetMachine(T, computeDataLayout(TT, Options.MCOptions,
                                             LittleEndian),
                        TT, CPU, FS, Options,
                        getEffectiveRelocModel(TT, RM), CM, OL),
      TLOF(createTLOF(getTargetTriple())), isLittle(LittleEndian) {
  initAsmInfo();
}

AArch64TargetMachine::~AArch64TargetMachine() = default;

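// Look up (or create and cache) the subtarget matching the function's
// "target-cpu" and "target-features" attributes.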
const AArch64Subtarget *
AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
                        ? CPUAttr.getValueAsString().str()
                        : TargetCPU;
  std::string FS = !FSAttr.hasAttribute(Attribute::None)
                       ? FSAttr.getValueAsString().str()
                       : TargetFS;

  auto &I = SubtargetMap[CPU + FS];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<AArch64Subtarget>(TargetTriple, CPU, FS, *this,
                                            isLittle);
  }
  return I.get();
}

void AArch64leTargetMachine::anchor() { }

AArch64leTargetMachine::AArch64leTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    CodeModel::Model CM, CodeGenOpt::Level OL)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

void AArch64beTargetMachine::anchor() { }

AArch64beTargetMachine::AArch64beTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    CodeModel::Model CM, CodeGenOpt::Level OL)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}

namespace {

/// AArch64 Code Generator Pass Configuration Options.
class AArch64PassConfig : public TargetPassConfig {
public:
  AArch64PassConfig(AArch64TargetMachine *TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {
    if (TM->getOptLevel() != CodeGenOpt::None)
      substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
  }

  AArch64TargetMachine &getAArch64TargetMachine() const {
    return getTM<AArch64TargetMachine>();
  }

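  // Attach the AArch64-specific DAG mutations (load/store clustering and
  // macro fusion) to the generic pre-RA machine scheduler.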
  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createAArch64MacroFusionDAGMutation());
    return DAG;
  }

  ScheduleDAGInstrs *
  createPostMachineScheduler(MachineSchedContext *C) const override {
    const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
    if (ST.hasFuseAES() || ST.hasFuseLiterals()) {
      // Run the macro fusion mutation again after RA, since literals are
      // expanded from pseudos by then (see addPreSched2()).
      ScheduleDAGMI *DAG = createGenericSchedPostRA(C);
      DAG->addMutation(createAArch64MacroFusionDAGMutation());
      return DAG;
    }

    return nullptr;
  }

  void addIRPasses() override;
  bool addPreISel() override;
  bool addInstSelector() override;
#ifdef LLVM_BUILD_GLOBAL_ISEL
  bool addIRTranslator() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
#endif
  bool addILPOpts() override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;

  bool isGlobalISelEnabled() const override;
};

} // end anonymous namespace

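// Provide the AArch64 TTI implementation so IR-level passes can query
// target-specific cost information.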
TargetIRAnalysis AArch64TargetMachine::getTargetIRAnalysis() {
  return TargetIRAnalysis([this](const Function &F) {
    return TargetTransformInfo(AArch64TTIImpl(this, F));
  });
}

TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new AArch64PassConfig(this, PM);
}

void AArch64PassConfig::addIRPasses() {
  // Always expand atomic operations; we don't deal with atomicrmw or cmpxchg
  // ourselves.
  addPass(createAtomicExpandPass());

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether it succeeded. We can exploit existing control-flow in
  // ldrex/strex loops to simplify this, but it needs tidying up.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass());

  // Run LoopDataPrefetch.
  //
  // Run this before LSR to remove the multiplies involved in computing the
  // pointer values N iterations ahead.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableLoopDataPrefetch)
    addPass(createLoopDataPrefetchPass());

  TargetPassConfig::addIRPasses();

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createInterleavedAccessPass());

  if (TM->getOptLevel() == CodeGenOpt::Aggressive && EnableGEPOpt) {
    // Run the SeparateConstOffsetFromGEP pass to extract constants from GEP
    // indices and lower a GEP with multiple indices to either arithmetic
    // operations or multiple GEPs with a single index.
    addPass(createSeparateConstOffsetFromGEPPass(TM, true));
    // Run EarlyCSE to find and remove common subexpressions in the lowered
    // result.
    addPass(createEarlyCSEPass());
    // Do loop invariant code motion in case part of the lowered result is
    // invariant.
    addPass(createLICMPass());
  }
}

// Pass Pipeline Configuration
bool AArch64PassConfig::addPreISel() {
  // Run promote constant before global merge, so that the promoted constants
  // get a chance to be merged.
  if (TM->getOptLevel() != CodeGenOpt::None && EnablePromoteConstant)
    addPass(createAArch64PromoteConstantPass());
  // FIXME: On AArch64, this depends on the type.
  // Basically, the addressable offsets are up to 4095 * Ty.getSizeInBytes(),
  // and the offset has to be a multiple of the related size in bytes.
  if ((TM->getOptLevel() != CodeGenOpt::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    bool OnlyOptimizeForSize = (TM->getOptLevel() < CodeGenOpt::Aggressive) &&
                               (EnableGlobalMerge == cl::BOU_UNSET);
    addPass(createGlobalMergePass(TM, 4095, OnlyOptimizeForSize));
  }

  return false;
}

bool AArch64PassConfig::addInstSelector() {
  addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));

  // For ELF, clean up any local-dynamic TLS accesses (i.e. combine as many
  // references to _TLS_MODULE_BASE_ as possible).
  if (TM->getTargetTriple().isOSBinFormatELF() &&
      getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64CleanupLocalDynamicTLSPass());

  return false;
}

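// When built with GlobalISel, these hooks assemble its pipeline:
// IRTranslator, Legalizer, RegBankSelect, and InstructionSelect.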
#ifdef LLVM_BUILD_GLOBAL_ISEL
bool AArch64PassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool AArch64PassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

bool AArch64PassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool AArch64PassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}
#endif

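// GlobalISel is used only when the optimization level is at or below the
// -aarch64-enable-global-isel-at-O threshold; the default of -1 disables it.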
bool AArch64PassConfig::isGlobalISelEnabled() const {
  return TM->getOptLevel() <= EnableGlobalISelAtO;
}

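// Add passes that exploit instruction-level parallelism; these run before
// register allocation.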
bool AArch64PassConfig::addILPOpts() {
  if (EnableCondOpt)
    addPass(createAArch64ConditionOptimizerPass());
  if (EnableCCMP)
    addPass(createAArch64ConditionalCompares());
  if (EnableMCR)
    addPass(&MachineCombinerID);
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);
  if (EnableStPairSuppress)
    addPass(createAArch64StorePairSuppressPass());
  addPass(createAArch64VectorByElementOptPass());
  return true;
}

void AArch64PassConfig::addPreRegAlloc() {
  // Change dead register definitions to refer to the zero register.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination)
    addPass(createAArch64DeadRegisterDefinitions());

  // Use AdvSIMD scalar instructions whenever profitable.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAdvSIMDScalar) {
    addPass(createAArch64AdvSIMDScalar());
    // The AdvSIMD pass may produce copies that can be rewritten to
    // be register coalescer friendly.
    addPass(&PeepholeOptimizerID);
  }
}

void AArch64PassConfig::addPostRegAlloc() {
  // Remove redundant copy instructions.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableRedundantCopyElimination)
    addPass(createAArch64RedundantCopyEliminationPass());

  if (TM->getOptLevel() != CodeGenOpt::None && usingDefaultRegAlloc())
    // Improve performance of some FP/SIMD code on Cortex-A57.
    addPass(createAArch64A57FPLoadBalancing());
}

void AArch64PassConfig::addPreSched2() {
  // Expand some pseudo instructions to allow proper scheduling.
  addPass(createAArch64ExpandPseudoPass());
  // Use load/store pair instructions when possible.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableLoadStoreOpt)
    addPass(createAArch64LoadStoreOptimizationPass());
}

void AArch64PassConfig::addPreEmitPass() {
  if (EnableA53Fix835769)
    addPass(createAArch64A53Fix835769());
  // Relax conditional branch instructions if they're otherwise out of
  // range of their destination.
  if (BranchRelaxation)
    addPass(&BranchRelaxationPassID);

  if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH &&
      TM->getTargetTriple().isOSBinFormatMachO())
    addPass(createAArch64CollectLOHPass());
}