//===-- ARMTargetMachine.cpp - Define TargetMachine for ARM ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMFrameLowering.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "ARMTargetTransformInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"
using namespace llvm;

static cl::opt<bool>
DisableA15SDOptimization("disable-a15-sd-optimization", cl::Hidden,
                   cl::desc("Inhibit optimization of S->D register accesses on A15"),
                   cl::init(false));

static cl::opt<bool>
EnableAtomicTidy("arm-atomic-cfg-tidy", cl::Hidden,
                 cl::desc("Run SimplifyCFG after expanding atomic operations"
                          " to make use of cmpxchg flow-based information"),
                 cl::init(true));

static cl::opt<bool>
EnableARMLoadStoreOpt("arm-load-store-opt", cl::Hidden,
                      cl::desc("Enable ARM load/store optimization pass"),
                      cl::init(true));

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
EnableGlobalMerge("arm-global-merge", cl::Hidden,
                  cl::desc("Enable the global merge pass"));

extern "C" void LLVMInitializeARMTarget() {
  // Register the target.
  RegisterTargetMachine<ARMLETargetMachine> X(TheARMLETarget);
  RegisterTargetMachine<ARMBETargetMachine> Y(TheARMBETarget);
  RegisterTargetMachine<ThumbLETargetMachine> A(TheThumbLETarget);
  RegisterTargetMachine<ThumbBETargetMachine> B(TheThumbBETarget);
}

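/// Select the lowering object file implementation that matches the object
/// format of the target triple: Mach-O for Darwin, COFF for Windows, and the
/// ARM-specific ELF implementation otherwise.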
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return make_unique<TargetLoweringObjectFileMachO>();
  if (TT.isOSWindows())
    return make_unique<TargetLoweringObjectFileCOFF>();
  return make_unique<ARMElfTargetObjectFile>();
}

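/// Determine the target ABI. An ABI name supplied through MCOptions takes
/// precedence; otherwise the ABI is derived from the triple's object format,
/// OS, environment, and CPU.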
static ARMBaseTargetMachine::ARMABI
computeTargetABI(const Triple &TT, StringRef CPU,
                 const TargetOptions &Options) {
  if (Options.MCOptions.getABIName() == "aapcs16")
    return ARMBaseTargetMachine::ARM_ABI_AAPCS16;
  else if (Options.MCOptions.getABIName().startswith("aapcs"))
    return ARMBaseTargetMachine::ARM_ABI_AAPCS;
  else if (Options.MCOptions.getABIName().startswith("apcs"))
    return ARMBaseTargetMachine::ARM_ABI_APCS;

  assert(Options.MCOptions.getABIName().empty() &&
         "Unknown target-abi option!");

  ARMBaseTargetMachine::ARMABI TargetABI =
      ARMBaseTargetMachine::ARM_ABI_UNKNOWN;

  // FIXME: This is duplicated code from the front end and should be unified.
  if (TT.isOSBinFormatMachO()) {
    if (TT.getEnvironment() == llvm::Triple::EABI ||
        (TT.getOS() == llvm::Triple::UnknownOS && TT.isOSBinFormatMachO()) ||
        CPU.startswith("cortex-m")) {
      TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
    } else if (TT.isWatchABI()) {
      TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS16;
    } else {
      TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
    }
  } else if (TT.isOSWindows()) {
    // FIXME: this is invalid for WindowsCE
    TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
  } else {
    // Select the default based on the platform.
    switch (TT.getEnvironment()) {
    case llvm::Triple::Android:
    case llvm::Triple::GNUEABI:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::EABIHF:
    case llvm::Triple::EABI:
      TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
      break;
    case llvm::Triple::GNU:
      TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
      break;
    default:
      if (TT.isOSNetBSD())
        TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
      else
        TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
      break;
    }
  }

  return TargetABI;
}

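/// Build the LLVM data layout string for the given triple, CPU, and options.
/// Endianness, pointer size, and the type and stack alignments are appended
/// piece by piece; the ABI-dependent cases are commented inline.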
static std::string computeDataLayout(const Triple &TT, StringRef CPU,
                                     const TargetOptions &Options,
                                     bool isLittle) {
  auto ABI = computeTargetABI(TT, CPU, Options);
  std::string Ret = "";

  if (isLittle)
    // Little endian.
    Ret += "e";
  else
    // Big endian.
    Ret += "E";

  Ret += DataLayout::getManglingComponent(TT);

  // Pointers are 32 bits and aligned to 32 bits.
  Ret += "-p:32:32";

  // ABIs other than APCS have 64 bit integers with natural alignment.
  if (ABI != ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-i64:64";

  // We have 64 bit floats. The APCS ABI requires them to be aligned to 32
  // bits, others to 64 bits. We always try to align to 64 bits.
  if (ABI == ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-f64:32:64";

  // We have 128 and 64 bit vectors. The APCS ABI aligns them to 32 bits, others
  // to 64. We always try to give them natural alignment.
  if (ABI == ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-v64:32:64-v128:32:128";
  else if (ABI != ARMBaseTargetMachine::ARM_ABI_AAPCS16)
    Ret += "-v128:64:128";

  // Try to align aggregates to 32 bits (the default is 64 bits, which has no
  // particular hardware support on 32-bit ARM).
  Ret += "-a:0:32";

  // Integer registers are 32 bits.
  Ret += "-n32";

  // The stack is 128 bit aligned on NaCl and AAPCS16, 64 bit aligned on AAPCS
  // and 32 bit aligned everywhere else.
  if (TT.isOSNaCl() || ABI == ARMBaseTargetMachine::ARM_ABI_AAPCS16)
    Ret += "-S128";
  else if (ABI == ARMBaseTargetMachine::ARM_ABI_AAPCS)
    Ret += "-S64";
  else
    Ret += "-S32";

  return Ret;
}

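/// Pick the relocation model to use when the user did not specify one.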
static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           Optional<Reloc::Model> RM) {
  if (!RM.hasValue())
    // Default relocation model on Darwin is PIC, not DynamicNoPIC.
    return TT.isOSDarwin() ? Reloc::PIC_ : Reloc::DynamicNoPIC;
  return *RM;
}

/// Create an ARM architecture model.
///
ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Optional<Reloc::Model> RM,
                                           CodeModel::Model CM,
                                           CodeGenOpt::Level OL, bool isLittle)
    : LLVMTargetMachine(T, computeDataLayout(TT, CPU, Options, isLittle), TT,
                        CPU, FS, Options, getEffectiveRelocModel(TT, RM), CM,
                        OL),
      TargetABI(computeTargetABI(TT, CPU, Options)),
      TLOF(createTLOF(getTargetTriple())),
      Subtarget(TT, CPU, FS, *this, isLittle), isLittle(isLittle) {

  // Default to triple-appropriate float ABI.
  if (Options.FloatABIType == FloatABI::Default)
    this->Options.FloatABIType =
        Subtarget.isTargetHardFloat() ? FloatABI::Hard : FloatABI::Soft;

  // Default to triple-appropriate EABI.
  if (Options.EABIVersion == EABI::Default ||
      Options.EABIVersion == EABI::Unknown) {
    if (Subtarget.isTargetGNUAEABI())
      this->Options.EABIVersion = EABI::GNU;
    else
      this->Options.EABIVersion = EABI::EABI5;
  }
}

ARMBaseTargetMachine::~ARMBaseTargetMachine() {}

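/// Return (and lazily create) the subtarget to use for the given function.
/// Subtargets are cached in SubtargetMap, keyed on the concatenated CPU and
/// feature strings, so functions with differing "target-cpu",
/// "target-features", or "use-soft-float" attributes each get a matching
/// subtarget.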
const ARMSubtarget *
ARMBaseTargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
                        ? CPUAttr.getValueAsString().str()
                        : TargetCPU;
  std::string FS = !FSAttr.hasAttribute(Attribute::None)
                       ? FSAttr.getValueAsString().str()
                       : TargetFS;

  // FIXME: This is related to the code below that resets the target options;
  // we need to know whether or not the soft float flag is set on the
  // function before we can generate a subtarget. We also need to use
  // it as a key for the subtarget since that can be the only difference
  // between two functions.
  bool SoftFloat =
      F.getFnAttribute("use-soft-float").getValueAsString() == "true";
  // If the soft float attribute is set on the function, turn on the soft float
  // subtarget feature.
  if (SoftFloat)
    FS += FS.empty() ? "+soft-float" : ",+soft-float";

  auto &I = SubtargetMap[CPU + FS];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<ARMSubtarget>(TargetTriple, CPU, FS, *this, isLittle);
  }
  return I.get();
}

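/// Expose the ARM-specific TargetTransformInfo implementation (ARMTTIImpl) to
/// IR-level analyses so middle-end passes can query ARM cost information.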
TargetIRAnalysis ARMBaseTargetMachine::getTargetIRAnalysis() {
  return TargetIRAnalysis([this](const Function &F) {
    return TargetTransformInfo(ARMTTIImpl(this, F));
  });
}

void ARMTargetMachine::anchor() {}

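/// ARM (as opposed to Thumb) target machine. Constructing one for a CPU that
/// cannot execute ARM-mode instructions is a fatal error rather than a silent
/// misconfiguration.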
ARMTargetMachine::ARMTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   const TargetOptions &Options,
                                   Optional<Reloc::Model> RM,
                                   CodeModel::Model CM, CodeGenOpt::Level OL,
                                   bool isLittle)
    : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, isLittle) {
  initAsmInfo();
  if (!Subtarget.hasARMOps())
    report_fatal_error("CPU: '" + Subtarget.getCPUString() + "' does not "
                       "support ARM mode execution!");
}

void ARMLETargetMachine::anchor() {}

ARMLETargetMachine::ARMLETargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Optional<Reloc::Model> RM,
                                       CodeModel::Model CM,
                                       CodeGenOpt::Level OL)
    : ARMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

void ARMBETargetMachine::anchor() {}

ARMBETargetMachine::ARMBETargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Optional<Reloc::Model> RM,
                                       CodeModel::Model CM,
                                       CodeGenOpt::Level OL)
    : ARMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}

void ThumbTargetMachine::anchor() {}

ThumbTargetMachine::ThumbTargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Optional<Reloc::Model> RM,
                                       CodeModel::Model CM,
                                       CodeGenOpt::Level OL, bool isLittle)
    : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, isLittle) {
  initAsmInfo();
}

void ThumbLETargetMachine::anchor() {}

ThumbLETargetMachine::ThumbLETargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Optional<Reloc::Model> RM,
                                           CodeModel::Model CM,
                                           CodeGenOpt::Level OL)
    : ThumbTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

void ThumbBETargetMachine::anchor() {}

ThumbBETargetMachine::ThumbBETargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Optional<Reloc::Model> RM,
                                           CodeModel::Model CM,
                                           CodeGenOpt::Level OL)
    : ThumbTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}

namespace {
/// ARM Code Generator Pass Configuration Options.
class ARMPassConfig : public TargetPassConfig {
public:
  ARMPassConfig(ARMBaseTargetMachine *TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {}

  ARMBaseTargetMachine &getARMTargetMachine() const {
    return getTM<ARMBaseTargetMachine>();
  }

  void addIRPasses() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};
} // namespace

TargetPassConfig *ARMBaseTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new ARMPassConfig(this, PM);
}

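// IR-level passes. Atomics are either lowered away (single-threaded thread
// model) or expanded for later matching to ldrex/strex loops; the optional
// CFG tidy-up and interleaved-access matching only run above -O0.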
void ARMPassConfig::addIRPasses() {
  if (TM->Options.ThreadModel == ThreadModel::Single)
    addPass(createLowerAtomicPass());
  else
    addPass(createAtomicExpandPass(TM));

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether the operation succeeded. We can exploit existing
  // control-flow in ldrex/strex loops to simplify this, but it needs tidying
  // up.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass(-1, [this](const Function &F) {
      const auto &ST = this->TM->getSubtarget<ARMSubtarget>(F);
      return ST.hasAnyDataBarrier() && !ST.isThumb1Only();
    }));

  TargetPassConfig::addIRPasses();

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createInterleavedAccessPass(TM));
}

bool ARMPassConfig::addPreISel() {
  if ((TM->getOptLevel() != CodeGenOpt::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    // FIXME: This is using the Thumb1-only constant value for the maximal
    // global offset when merging globals. We may want to look into using the
    // old value of 4095 for non-Thumb1 code, based on the TargetMachine, but
    // this starts to become tricky when doing code gen per function.
    bool OnlyOptimizeForSize = (TM->getOptLevel() < CodeGenOpt::Aggressive) &&
                               (EnableGlobalMerge == cl::BOU_UNSET);
    // Merging of extern globals is enabled by default on non-Mach-O as we
    // expect it to be generally either beneficial or harmless. On Mach-O it
    // is disabled as we emit the .subsections_via_symbols directive which
    // means that merging extern globals is not safe.
    bool MergeExternalByDefault = !TM->getTargetTriple().isOSBinFormatMachO();
    addPass(createGlobalMergePass(TM, 127, OnlyOptimizeForSize,
                                  MergeExternalByDefault));
  }

  return false;
}

bool ARMPassConfig::addInstSelector() {
  addPass(createARMISelDag(getARMTargetMachine(), getOptLevel()));
  return false;
}

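// Pre-register-allocation machine passes: MLx (multiply-accumulate) expansion,
// the pre-RA flavour of the load/store optimizer, and the Cortex-A15 S->D
// register optimization, all only above -O0.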
void ARMPassConfig::addPreRegAlloc() {
  if (getOptLevel() != CodeGenOpt::None) {
    addPass(createMLxExpansionPass());

    if (EnableARMLoadStoreOpt)
      addPass(createARMLoadStoreOptimizationPass(/* pre-register alloc */ true));

    if (!DisableA15SDOptimization)
      addPass(createA15SDOptimizerPass());
  }
}

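// Post-register-allocation passes that must run before the second scheduling
// pass: pseudo-instruction expansion and IT block formation always run, the
// rest only above -O0.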
void ARMPassConfig::addPreSched2() {
  if (getOptLevel() != CodeGenOpt::None) {
    if (EnableARMLoadStoreOpt)
      addPass(createARMLoadStoreOptimizationPass());

    addPass(createExecutionDependencyFixPass(&ARM::DPRRegClass));
  }

  // Expand some pseudo instructions into multiple instructions to allow
  // proper scheduling.
  addPass(createARMExpandPseudoPass());

  if (getOptLevel() != CodeGenOpt::None) {
    // In v8, if-conversion depends on Thumb instruction widths.
    addPass(createThumb2SizeReductionPass([this](const Function &F) {
      return this->TM->getSubtarget<ARMSubtarget>(F).restrictIT();
    }));

    addPass(createIfConverter([this](const Function &F) {
      return !this->TM->getSubtarget<ARMSubtarget>(F).isThumb1Only();
    }));
  }
  addPass(createThumb2ITBlockPass());
}

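// Final passes before emission: bundles are unpacked for Thumb2 so that the
// constant island pass, which runs last here, sees individual instructions.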
void ARMPassConfig::addPreEmitPass() {
  addPass(createThumb2SizeReductionPass());

  // The constant island pass works on unbundled instructions.
  addPass(createUnpackMachineBundles([this](const Function &F) {
    return this->TM->getSubtarget<ARMSubtarget>(F).isThumb2();
  }));

  // Don't optimize barriers at -O0.
  if (getOptLevel() != CodeGenOpt::None)
    addPass(createARMOptimizeBarriersPass());

  addPass(createARMConstantIslandPass());
}
448