1 //===-- ARMSubtarget.cpp - ARM Subtarget Information ----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the ARM specific subclass of TargetSubtargetInfo.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "ARM.h"
14 
15 #include "ARMCallLowering.h"
16 #include "ARMLegalizerInfo.h"
17 #include "ARMRegisterBankInfo.h"
18 #include "ARMSubtarget.h"
19 #include "ARMFrameLowering.h"
20 #include "ARMInstrInfo.h"
21 #include "ARMSubtarget.h"
22 #include "ARMTargetMachine.h"
23 #include "MCTargetDesc/ARMMCTargetDesc.h"
24 #include "Thumb1FrameLowering.h"
25 #include "Thumb1InstrInfo.h"
26 #include "Thumb2InstrInfo.h"
27 #include "llvm/ADT/StringRef.h"
28 #include "llvm/ADT/Triple.h"
29 #include "llvm/ADT/Twine.h"
30 #include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
31 #include "llvm/CodeGen/MachineFunction.h"
32 #include "llvm/IR/Function.h"
33 #include "llvm/IR/GlobalValue.h"
34 #include "llvm/MC/MCAsmInfo.h"
35 #include "llvm/MC/MCTargetOptions.h"
36 #include "llvm/Support/CodeGen.h"
37 #include "llvm/Support/CommandLine.h"
38 #include "llvm/Support/ARMTargetParser.h"
39 #include "llvm/Support/TargetParser.h"
40 #include "llvm/Target/TargetOptions.h"
41 
42 using namespace llvm;
43 
44 #define DEBUG_TYPE "arm-subtarget"
45 
46 #define GET_SUBTARGETINFO_TARGET_DESC
47 #define GET_SUBTARGETINFO_CTOR
48 #include "ARMGenSubtargetInfo.inc"
49 
// Feeds the UseMulOps subtarget flag (see the ARMSubtarget constructor);
// allows combined multiply ops and is on by default.
static cl::opt<bool>
UseFusedMulOps("arm-use-mulops",
               cl::init(true), cl::Hidden);

// Command-line-selectable policies for Thumb-2 IT block generation,
// consumed by initSubtargetFeatures when computing RestrictIT.
enum ITMode {
  DefaultIT,      // Decide from the architecture (v8 + !minsize => restrict).
  RestrictedIT,   // Only non-deprecated (ARMv8-legal) IT blocks.
  NoRestrictedIT  // Full ARMv7-style IT blocks allowed.
};

static cl::opt<ITMode>
IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT),
   cl::ZeroOrMore,
   cl::values(clEnumValN(DefaultIT, "arm-default-it",
                         "Generate IT block based on arch"),
              clEnumValN(RestrictedIT, "arm-restrict-it",
                         "Disallow deprecated IT based on ARMv8"),
              clEnumValN(NoRestrictedIT, "arm-no-restrict-it",
                         "Allow IT blocks based on ARMv7")));

/// ForceFastISel - Use the fast-isel, even for subtargets where it is not
/// currently supported (for testing only).
static cl::opt<bool>
ForceFastISel("arm-force-fast-isel",
               cl::init(false), cl::Hidden);

// Overrides ARMSubtarget::enableSubRegLiveness when given on the command
// line; when unset, that hook falls back to its MVE-based default.
static cl::opt<bool> EnableSubRegLiveness("arm-enable-subreg-liveness",
                                          cl::init(false), cl::Hidden);
78 
79 /// initializeSubtargetDependencies - Initializes using a CPU and feature string
80 /// so that we can use initializer lists for subtarget initialization.
ARMSubtarget &ARMSubtarget::initializeSubtargetDependencies(StringRef CPU,
                                                            StringRef FS) {
  // Reset environment-derived state first, then parse CPU/feature strings.
  initializeEnvironment();
  initSubtargetFeatures(CPU, FS);
  // Return *this so this can be chained inside a member initializer list
  // (see initializeFrameLowering and the ARMSubtarget constructor).
  return *this;
}
87 
88 ARMFrameLowering *ARMSubtarget::initializeFrameLowering(StringRef CPU,
89                                                         StringRef FS) {
90   ARMSubtarget &STI = initializeSubtargetDependencies(CPU, FS);
91   if (STI.isThumb1Only())
92     return (ARMFrameLowering *)new Thumb1FrameLowering(STI);
93 
94   return new ARMFrameLowering(STI);
95 }
96 
ARMSubtarget::ARMSubtarget(const Triple &TT, const std::string &CPU,
                           const std::string &FS,
                           const ARMBaseTargetMachine &TM, bool IsLittle,
                           bool MinSize)
    // NOTE: initializer order is load-bearing. FrameLowering's initializer
    // calls initializeFrameLowering, which in turn runs
    // initializeSubtargetDependencies, so feature bits are valid for the
    // members initialized after it (InstrInfo, TLInfo).
    : ARMGenSubtargetInfo(TT, CPU, /*TuneCPU*/ CPU, FS),
      UseMulOps(UseFusedMulOps), CPUString(CPU), OptMinSize(MinSize),
      IsLittle(IsLittle), TargetTriple(TT), Options(TM.Options), TM(TM),
      FrameLowering(initializeFrameLowering(CPU, FS)),
      // At this point initializeSubtargetDependencies has been called so
      // we can query directly.
      InstrInfo(isThumb1Only()
                    ? (ARMBaseInstrInfo *)new Thumb1InstrInfo(*this)
                    : !isThumb()
                          ? (ARMBaseInstrInfo *)new ARMInstrInfo(*this)
                          : (ARMBaseInstrInfo *)new Thumb2InstrInfo(*this)),
      TLInfo(TM, *this) {

  // Set up the GlobalISel pipeline objects owned by this subtarget.
  CallLoweringInfo.reset(new ARMCallLowering(*getTargetLowering()));
  Legalizer.reset(new ARMLegalizerInfo(*this));

  auto *RBI = new ARMRegisterBankInfo(*getRegisterInfo());

  // FIXME: At this point, we can't rely on Subtarget having RBI.
  // It's awkward to mix passing RBI and the Subtarget; should we pass
  // TII/TRI as well?
  InstSelector.reset(createARMInstructionSelector(
      *static_cast<const ARMBaseTargetMachine *>(&TM), *this, *RBI));

  // Hand ownership of RBI to the subtarget after the selector captured it.
  RegBankInfo.reset(RBI);
}
127 
128 const CallLowering *ARMSubtarget::getCallLowering() const {
129   return CallLoweringInfo.get();
130 }
131 
132 InstructionSelector *ARMSubtarget::getInstructionSelector() const {
133   return InstSelector.get();
134 }
135 
136 const LegalizerInfo *ARMSubtarget::getLegalizerInfo() const {
137   return Legalizer.get();
138 }
139 
140 const RegisterBankInfo *ARMSubtarget::getRegBankInfo() const {
141   return RegBankInfo.get();
142 }
143 
144 bool ARMSubtarget::isXRaySupported() const {
145   // We don't currently suppport Thumb, but Windows requires Thumb.
146   return hasV6Ops() && hasARMOps() && !isTargetWindows();
147 }
148 
149 void ARMSubtarget::initializeEnvironment() {
150   // MCAsmInfo isn't always present (e.g. in opt) so we can't initialize this
151   // directly from it, but we can try to make sure they're consistent when both
152   // available.
153   UseSjLjEH = (isTargetDarwin() && !isTargetWatchABI() &&
154                Options.ExceptionModel == ExceptionHandling::None) ||
155               Options.ExceptionModel == ExceptionHandling::SjLj;
156   assert((!TM.getMCAsmInfo() ||
157           (TM.getMCAsmInfo()->getExceptionHandlingType() ==
158            ExceptionHandling::SjLj) == UseSjLjEH) &&
159          "inconsistent sjlj choice between CodeGen and MC");
160 }
161 
/// Finish initializing the subtarget: choose a default CPU if none was given,
/// parse the architecture/feature strings, and derive the tuning knobs that
/// depend on the CPU, triple and feature bits.
void ARMSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
  if (CPUString.empty()) {
    CPUString = "generic";

    if (isTargetDarwin()) {
      StringRef ArchName = TargetTriple.getArchName();
      ARM::ArchKind AK = ARM::parseArch(ArchName);
      if (AK == ARM::ArchKind::ARMV7S)
        // Default to the Swift CPU when targeting armv7s/thumbv7s.
        CPUString = "swift";
      else if (AK == ARM::ArchKind::ARMV7K)
        // Default to the Cortex-a7 CPU when targeting armv7k/thumbv7k.
        // ARMv7k does not use SjLj exception handling.
        CPUString = "cortex-a7";
    }
  }

  // Insert the architecture feature derived from the target triple into the
  // feature string. This is important for setting features that are implied
  // based on the architecture version.
  std::string ArchFS = ARM_MC::ParseARMTriple(TargetTriple, CPUString);
  if (!FS.empty()) {
    if (!ArchFS.empty())
      ArchFS = (Twine(ArchFS) + "," + FS).str();
    else
      ArchFS = std::string(FS);
  }
  ParseSubtargetFeatures(CPUString, /*TuneCPU*/ CPUString, ArchFS);

  // FIXME: This used to enable V6T2 support implicitly for Thumb2 mode.
  // Assert this for now to make the change obvious.
  assert(hasV6T2Ops() || !hasThumb2());

  // Execute-only support requires movt support, so force-enable movt here
  // (clear NoMovt): execute-only code cannot materialize constants from
  // constant pools.
  if (genExecuteOnly()) {
    NoMovt = false;
    assert(hasV8MBaselineOps() && "Cannot generate execute-only code for this target");
  }

  // Keep a pointer to static instruction cost data for the specified CPU.
  SchedModel = getSchedModelForCPU(CPUString);

  // Initialize scheduling itinerary for the specified CPU.
  InstrItins = getInstrItineraryForCPU(CPUString);

  // FIXME: this is invalid for WindowsCE
  if (isTargetWindows())
    NoARM = true;

  // Stack alignment: 8 bytes under AAPCS, raised to 16 for NaCl and AAPCS16.
  if (isAAPCS_ABI())
    stackAlignment = Align(8);
  if (isTargetNaCl() || isAAPCS16_ABI())
    stackAlignment = Align(16);

  // FIXME: Completely disable sibcall for Thumb1 since ThumbRegisterInfo::
  // emitEpilogue is not ready for them. Thumb tail calls also use t2B, as
  // the Thumb1 16-bit unconditional branch doesn't have sufficient relocation
  // support in the assembler and linker to be used. This would need to be
  // fixed to fully support tail calls in Thumb1.
  //
  // For ARMv8-M, we /do/ implement tail calls.  Doing this is tricky for v8-M
  // baseline, since the LDM/POP instruction on Thumb doesn't take LR.  This
  // means if we need to reload LR, it takes extra instructions, which outweighs
  // the value of the tail call; but here we don't know yet whether LR is going
  // to be used. We take the optimistic approach of generating the tail call and
  // perhaps taking a hit if we need to restore the LR.

  // Thumb1 PIC calls to external symbols use BX, so they can be tail calls,
  // but we need to make sure there are enough registers; the only valid
  // registers are the 4 used for parameters.  We don't currently do this
  // case.

  SupportsTailCall = !isThumb1Only() || hasV8MBaselineOps();

  // Tail calls are additionally disabled on Mach-O iOS targets before 5.0.
  if (isTargetMachO() && isTargetIOS() && getTargetTriple().isOSVersionLT(5, 0))
    SupportsTailCall = false;

  // Apply the IT-block policy selected on the command line; by default,
  // restrict IT blocks on v8 unless optimizing for minimum size.
  switch (IT) {
  case DefaultIT:
    RestrictIT = hasV8Ops() && !hasMinSize();
    break;
  case RestrictedIT:
    RestrictIT = true;
    break;
  case NoRestrictedIT:
    RestrictIT = false;
    break;
  }

  // NEON f32 ops are non-IEEE 754 compliant. Darwin is ok with it by default.
  const FeatureBitset &Bits = getFeatureBits();
  if ((Bits[ARM::ProcA5] || Bits[ARM::ProcA8]) && // Where this matters
      (Options.UnsafeFPMath || isTargetDarwin()))
    UseNEONForSinglePrecisionFP = true;

  // r9 is reserved under RWPI relocation models.
  if (isRWPI())
    ReserveR9 = true;

  // If MVEVectorCostFactor is still 0 (has not been set to anything else),
  // default it to 2.
  if (MVEVectorCostFactor == 0)
    MVEVectorCostFactor = 2;

  // Per-CPU tuning overrides; CPUs not listed keep the default-initialized
  // values.
  // FIXME: Teach TableGen to deal with these instead of doing it manually here.
  switch (ARMProcFamily) {
  case Others:
  case CortexA5:
    break;
  case CortexA7:
    LdStMultipleTiming = DoubleIssue;
    break;
  case CortexA8:
    LdStMultipleTiming = DoubleIssue;
    break;
  case CortexA9:
    LdStMultipleTiming = DoubleIssueCheckUnalignedAccess;
    PreISelOperandLatencyAdjustment = 1;
    break;
  case CortexA12:
    break;
  case CortexA15:
    MaxInterleaveFactor = 2;
    PreISelOperandLatencyAdjustment = 1;
    PartialUpdateClearance = 12;
    break;
  case CortexA17:
  case CortexA32:
  case CortexA35:
  case CortexA53:
  case CortexA55:
  case CortexA57:
  case CortexA72:
  case CortexA73:
  case CortexA75:
  case CortexA76:
  case CortexA77:
  case CortexA78:
  case CortexA78C:
  case CortexA710:
  case CortexR4:
  case CortexR4F:
  case CortexR5:
  case CortexR7:
  case CortexM3:
  case CortexM7:
  case CortexR52:
  case CortexX1:
    break;
  case Exynos:
    LdStMultipleTiming = SingleIssuePlusExtras;
    MaxInterleaveFactor = 4;
    if (!isThumb())
      PrefLoopLogAlignment = 3;
    break;
  case Kryo:
    break;
  case Krait:
    PreISelOperandLatencyAdjustment = 1;
    break;
  case NeoverseN1:
  case NeoverseN2:
  case NeoverseV1:
    break;
  case Swift:
    MaxInterleaveFactor = 2;
    LdStMultipleTiming = SingleIssuePlusExtras;
    PreISelOperandLatencyAdjustment = 1;
    PartialUpdateClearance = 12;
    break;
  }
}
332 
// Hard-float ABI selection is owned by the target machine; just forward.
bool ARMSubtarget::isTargetHardFloat() const { return TM.isTargetHardFloat(); }
334 
335 bool ARMSubtarget::isAPCS_ABI() const {
336   assert(TM.TargetABI != ARMBaseTargetMachine::ARM_ABI_UNKNOWN);
337   return TM.TargetABI == ARMBaseTargetMachine::ARM_ABI_APCS;
338 }
339 bool ARMSubtarget::isAAPCS_ABI() const {
340   assert(TM.TargetABI != ARMBaseTargetMachine::ARM_ABI_UNKNOWN);
341   return TM.TargetABI == ARMBaseTargetMachine::ARM_ABI_AAPCS ||
342          TM.TargetABI == ARMBaseTargetMachine::ARM_ABI_AAPCS16;
343 }
344 bool ARMSubtarget::isAAPCS16_ABI() const {
345   assert(TM.TargetABI != ARMBaseTargetMachine::ARM_ABI_UNKNOWN);
346   return TM.TargetABI == ARMBaseTargetMachine::ARM_ABI_AAPCS16;
347 }
348 
349 bool ARMSubtarget::isROPI() const {
350   return TM.getRelocationModel() == Reloc::ROPI ||
351          TM.getRelocationModel() == Reloc::ROPI_RWPI;
352 }
353 bool ARMSubtarget::isRWPI() const {
354   return TM.getRelocationModel() == Reloc::RWPI ||
355          TM.getRelocationModel() == Reloc::ROPI_RWPI;
356 }
357 
358 bool ARMSubtarget::isGVIndirectSymbol(const GlobalValue *GV) const {
359   if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
360     return true;
361 
362   // 32 bit macho has no relocation for a-b if a is undefined, even if b is in
363   // the section that is being relocated. This means we have to use o load even
364   // for GVs that are known to be local to the dso.
365   if (isTargetMachO() && TM.isPositionIndependent() &&
366       (GV->isDeclarationForLinker() || GV->hasCommonLinkage()))
367     return true;
368 
369   return false;
370 }
371 
372 bool ARMSubtarget::isGVInGOT(const GlobalValue *GV) const {
373   return isTargetELF() && TM.isPositionIndependent() &&
374          !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);
375 }
376 
// Branch misprediction penalty (in cycles) from the CPU's scheduling model,
// which was selected in initSubtargetFeatures.
unsigned ARMSubtarget::getMispredictionPenalty() const {
  return SchedModel.MispredictPenalty;
}
380 
381 bool ARMSubtarget::enableMachineScheduler() const {
382   // The MachineScheduler can increase register usage, so we use more high
383   // registers and end up with more T2 instructions that cannot be converted to
384   // T1 instructions. At least until we do better at converting to thumb1
385   // instructions, on cortex-m at Oz where we are size-paranoid, don't use the
386   // Machine scheduler, relying on the DAG register pressure scheduler instead.
387   if (isMClass() && hasMinSize())
388     return false;
389   // Enable the MachineScheduler before register allocation for subtargets
390   // with the use-misched feature.
391   return useMachineScheduler();
392 }
393 
394 bool ARMSubtarget::enableSubRegLiveness() const {
395   if (EnableSubRegLiveness.getNumOccurrences())
396     return EnableSubRegLiveness;
397   // Enable SubRegLiveness for MVE to better optimize s subregs for mqpr regs
398   // and q subregs for qqqqpr regs.
399   return hasMVEIntegerOps();
400 }
401 
402 // This overrides the PostRAScheduler bit in the SchedModel for any CPU.
403 bool ARMSubtarget::enablePostRAScheduler() const {
404   if (enableMachineScheduler())
405     return false;
406   if (disablePostRAScheduler())
407     return false;
408   // Thumb1 cores will generally not benefit from post-ra scheduling
409   return !isThumb1Only();
410 }
411 
412 bool ARMSubtarget::enablePostRAMachineScheduler() const {
413   if (!enableMachineScheduler())
414     return false;
415   if (disablePostRAScheduler())
416     return false;
417   return !isThumb1Only();
418 }
419 
// Run AtomicExpand only when some data barrier instruction is available.
bool ARMSubtarget::enableAtomicExpand() const { return hasAnyDataBarrier(); }
421 
422 bool ARMSubtarget::useStride4VFPs() const {
423   // For general targets, the prologue can grow when VFPs are allocated with
424   // stride 4 (more vpush instructions). But WatchOS uses a compact unwind
425   // format which it's more important to get right.
426   return isTargetWatchABI() ||
427          (useWideStrideVFP() && !OptMinSize);
428 }
429 
430 bool ARMSubtarget::useMovt() const {
431   // NOTE Windows on ARM needs to use mov.w/mov.t pairs to materialise 32-bit
432   // immediates as it is inherently position independent, and may be out of
433   // range otherwise.
434   return !NoMovt && hasV8MBaselineOps() &&
435          (isTargetWindows() || !OptMinSize || genExecuteOnly());
436 }
437 
438 bool ARMSubtarget::useFastISel() const {
439   // Enable fast-isel for any target, for testing only.
440   if (ForceFastISel)
441     return true;
442 
443   // Limit fast-isel to the targets that are or have been tested.
444   if (!hasV6Ops())
445     return false;
446 
447   // Thumb2 support on iOS; ARM support on iOS, Linux and NaCl.
448   return TM.Options.EnableFastISel &&
449          ((isTargetMachO() && !isThumb1Only()) ||
450           (isTargetLinux() && !isThumb()) || (isTargetNaCl() && !isThumb()));
451 }
452 
453 unsigned ARMSubtarget::getGPRAllocationOrder(const MachineFunction &MF) const {
454   // The GPR register class has multiple possible allocation orders, with
455   // tradeoffs preferred by different sub-architectures and optimisation goals.
456   // The allocation orders are:
457   // 0: (the default tablegen order, not used)
458   // 1: r14, r0-r13
459   // 2: r0-r7
460   // 3: r0-r7, r12, lr, r8-r11
461   // Note that the register allocator will change this order so that
462   // callee-saved registers are used later, as they require extra work in the
463   // prologue/epilogue (though we sometimes override that).
464 
465   // For thumb1-only targets, only the low registers are allocatable.
466   if (isThumb1Only())
467     return 2;
468 
469   // Allocate low registers first, so we can select more 16-bit instructions.
470   // We also (in ignoreCSRForAllocationOrder) override  the default behaviour
471   // with regards to callee-saved registers, because pushing extra registers is
472   // much cheaper (in terms of code size) than using high registers. After
473   // that, we allocate r12 (doesn't need to be saved), lr (saving it means we
474   // can return with the pop, don't need an extra "bx lr") and then the rest of
475   // the high registers.
476   if (isThumb2() && MF.getFunction().hasMinSize())
477     return 3;
478 
479   // Otherwise, allocate in the default order, using LR first because saving it
480   // allows a shorter epilogue sequence.
481   return 1;
482 }
483 
484 bool ARMSubtarget::ignoreCSRForAllocationOrder(const MachineFunction &MF,
485                                                unsigned PhysReg) const {
486   // To minimize code size in Thumb2, we prefer the usage of low regs (lower
487   // cost per use) so we can  use narrow encoding. By default, caller-saved
488   // registers (e.g. lr, r12) are always  allocated first, regardless of
489   // their cost per use. When optForMinSize, we prefer the low regs even if
490   // they are CSR because usually push/pop can be folded into existing ones.
491   return isThumb2() && MF.getFunction().hasMinSize() &&
492          ARM::GPRRegClass.contains(PhysReg);
493 }
494