1 //===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements AArch64 TargetInfo objects.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "AArch64.h"
14 #include "clang/Basic/LangOptions.h"
15 #include "clang/Basic/TargetBuiltins.h"
16 #include "clang/Basic/TargetInfo.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/StringExtras.h"
19 #include "llvm/ADT/StringSwitch.h"
20 #include "llvm/Support/AArch64TargetParser.h"
21 
22 using namespace clang;
23 using namespace clang::targets;
24 
// Table of every AArch64 target builtin, built by expanding the
// BUILTIN/LANGBUILTIN/TARGET_HEADER_BUILTIN X-macros from the .def files:
// first the NEON builtins, then SVE, then the AArch64-specific ones.
// The entry order must match the builtin ID order assumed by
// getTargetBuiltins() below.
const Builtin::Info AArch64TargetInfo::BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsNEON.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsSVE.def"

#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG)                                     \
  {#ID, TYPE, ATTRS, nullptr, LANG, nullptr},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE)         \
  {#ID, TYPE, ATTRS, HEADER, LANGS, FEATURE},
#include "clang/Basic/BuiltinsAArch64.def"
};
42 
// Configure the type widths, alignments, and ABI properties common to all
// AArch64 targets; OS-specific subclasses refine these further.
AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
                                     const TargetOptions &Opts)
    : TargetInfo(Triple), ABI("aapcs") {
  // OpenBSD spells int64_t as "long long"; the other AArch64 OSes here use
  // plain "long".
  if (getTriple().isOSOpenBSD()) {
    Int64Type = SignedLongLong;
    IntMaxType = SignedLongLong;
  } else {
    // AAPCS64 makes wchar_t unsigned; Darwin and NetBSD keep the signed
    // default instead.
    if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
      WCharType = UnsignedInt;

    Int64Type = SignedLong;
    IntMaxType = SignedLong;
  }

  // All AArch64 implementations support ARMv8 FP, which makes half a legal type.
  HasLegalHalfType = true;
  HasFloat16 = true;

  // ILP32 flavours (e.g. arm64_32) shrink long and pointers to 32 bits.
  if (Triple.isArch64Bit())
    LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
  else
    LongWidth = LongAlign = PointerWidth = PointerAlign = 32;

  MaxVectorAlign = 128;
  MaxAtomicInlineWidth = 128;
  MaxAtomicPromoteWidth = 128;

  // long double is IEEE binary128 (quad) on generic AArch64.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
  LongDoubleFormat = &llvm::APFloat::IEEEquad();

  BFloat16Width = BFloat16Align = 16;
  BFloat16Format = &llvm::APFloat::BFloat();

  // Make __builtin_ms_va_list available.
  HasBuiltinMSVaList = true;

  // Make the SVE types available.  Note that this deliberately doesn't
  // depend on SveMode, since in principle it should be possible to turn
  // SVE on and off within a translation unit.  It should also be possible
  // to compile the global declaration:
  //
  // __SVInt8_t *ptr;
  //
  // even without SVE.
  HasAArch64SVETypes = true;

  // {} in inline assembly are neon specifiers, not assembly variant
  // specifiers.
  NoAsmVariants = true;

  // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
  // contributes to the alignment of the containing aggregate in the same way
  // a plain (non bit-field) member of that type would, without exception for
  // zero-sized or anonymous bit-fields."
  assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
  UseZeroLengthBitfieldAlignment = true;

  // AArch64 targets default to using the ARM C++ ABI.
  TheCXXABI.set(TargetCXXABI::GenericAArch64);

  // The profiling hook symbol differs by OS / EABI flavour.
  if (Triple.getOS() == llvm::Triple::Linux)
    this->MCountName = "\01_mcount";
  else if (Triple.getOS() == llvm::Triple::UnknownOS)
    this->MCountName =
        Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
}
109 
110 StringRef AArch64TargetInfo::getABI() const { return ABI; }
111 
112 bool AArch64TargetInfo::setABI(const std::string &Name) {
113   if (Name != "aapcs" && Name != "darwinpcs")
114     return false;
115 
116   ABI = Name;
117   return true;
118 }
119 
120 bool AArch64TargetInfo::validateBranchProtection(StringRef Spec,
121                                                  BranchProtectionInfo &BPI,
122                                                  StringRef &Err) const {
123   llvm::AArch64::ParsedBranchProtection PBP;
124   if (!llvm::AArch64::parseBranchProtection(Spec, PBP, Err))
125     return false;
126 
127   BPI.SignReturnAddr =
128       llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
129           .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
130           .Case("all", LangOptions::SignReturnAddressScopeKind::All)
131           .Default(LangOptions::SignReturnAddressScopeKind::None);
132 
133   if (PBP.Key == "a_key")
134     BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
135   else
136     BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;
137 
138   BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
139   return true;
140 }
141 
142 bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
143   return Name == "generic" ||
144          llvm::AArch64::parseCPUArch(Name) != llvm::AArch64::ArchKind::INVALID;
145 }
146 
// Accept any CPU that isValidCPUName() recognizes.  No per-CPU state is
// stored here; the architecture kind is derived later from the feature
// list in handleTargetFeatures().
bool AArch64TargetInfo::setCPU(const std::string &Name) {
  return isValidCPUName(Name);
}
150 
// Populate Values with every CPU name accepted by setCPU(), delegating to
// the shared AArch64 target parser's table.
void AArch64TargetInfo::fillValidCPUList(
    SmallVectorImpl<StringRef> &Values) const {
  llvm::AArch64::fillValidCPUArchList(Values);
}
155 
156 void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
157                                                 MacroBuilder &Builder) const {
158   Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
159   Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
160   Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
161 }
162 
163 void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
164                                                 MacroBuilder &Builder) const {
165   // Also include the ARMv8.1 defines
166   getTargetDefinesARMV81A(Opts, Builder);
167 }
168 
169 void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
170                                                 MacroBuilder &Builder) const {
171   Builder.defineMacro("__ARM_FEATURE_COMPLEX", "1");
172   Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
173   // Also include the Armv8.2 defines
174   getTargetDefinesARMV82A(Opts, Builder);
175 }
176 
177 void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
178                                                 MacroBuilder &Builder) const {
179   // Also include the Armv8.3 defines
180   getTargetDefinesARMV83A(Opts, Builder);
181 }
182 
183 void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
184                                                 MacroBuilder &Builder) const {
185   Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
186   // Also include the Armv8.4 defines
187   getTargetDefinesARMV84A(Opts, Builder);
188 }
189 
190 void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
191                                                 MacroBuilder &Builder) const {
192   // Also include the Armv8.5 defines
193   // FIXME: Armv8.6 makes the following extensions mandatory:
194   // - __ARM_FEATURE_BF16
195   // - __ARM_FEATURE_MATMUL_INT8
196   // Handle them here.
197   getTargetDefinesARMV85A(Opts, Builder);
198 }
199 
200 void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
201                                                 MacroBuilder &Builder) const {
202   // Also include the Armv8.6 defines
203   getTargetDefinesARMV86A(Opts, Builder);
204 }
205 
206 void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
207                                          MacroBuilder &Builder) const {
208   // Target identification.
209   Builder.defineMacro("__aarch64__");
210   // For bare-metal.
211   if (getTriple().getOS() == llvm::Triple::UnknownOS &&
212       getTriple().isOSBinFormatELF())
213     Builder.defineMacro("__ELF__");
214 
215   // Target properties.
216   if (!getTriple().isOSWindows() && getTriple().isArch64Bit()) {
217     Builder.defineMacro("_LP64");
218     Builder.defineMacro("__LP64__");
219   }
220 
221   std::string CodeModel = getTargetOpts().CodeModel;
222   if (CodeModel == "default")
223     CodeModel = "small";
224   for (char &c : CodeModel)
225     c = toupper(c);
226   Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");
227 
228   // ACLE predefines. Many can only have one possible value on v8 AArch64.
229   Builder.defineMacro("__ARM_ACLE", "200");
230   Builder.defineMacro("__ARM_ARCH", "8");
231   Builder.defineMacro("__ARM_ARCH_PROFILE", "'A'");
232 
233   Builder.defineMacro("__ARM_64BIT_STATE", "1");
234   Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
235   Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");
236 
237   Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
238   Builder.defineMacro("__ARM_FEATURE_FMA", "1");
239   Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
240   Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
241   Builder.defineMacro("__ARM_FEATURE_DIV");       // For backwards compatibility
242   Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
243   Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");
244 
245   Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");
246 
247   // 0xe implies support for half, single and double precision operations.
248   Builder.defineMacro("__ARM_FP", "0xE");
249 
250   // PCS specifies this for SysV variants, which is all we support. Other ABIs
251   // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
252   Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
253   Builder.defineMacro("__ARM_FP16_ARGS", "1");
254 
255   if (Opts.UnsafeFPMath)
256     Builder.defineMacro("__ARM_FP_FAST", "1");
257 
258   Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
259                       Twine(Opts.WCharSize ? Opts.WCharSize : 4));
260 
261   Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");
262 
263   if (FPU & NeonMode) {
264     Builder.defineMacro("__ARM_NEON", "1");
265     // 64-bit NEON supports half, single and double precision operations.
266     Builder.defineMacro("__ARM_NEON_FP", "0xE");
267   }
268 
269   if (FPU & SveMode)
270     Builder.defineMacro("__ARM_FEATURE_SVE", "1");
271 
272   if (HasSVE2)
273     Builder.defineMacro("__ARM_FEATURE_SVE2", "1");
274 
275   if (HasSVE2 && HasSVE2AES)
276     Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");
277 
278   if (HasSVE2 && HasSVE2BitPerm)
279     Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");
280 
281   if (HasSVE2 && HasSVE2SHA3)
282     Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");
283 
284   if (HasSVE2 && HasSVE2SM4)
285     Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");
286 
287   if (HasCRC)
288     Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
289 
290   if (HasCrypto)
291     Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");
292 
293   if (HasUnaligned)
294     Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
295 
296   if ((FPU & NeonMode) && HasFullFP16)
297     Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
298   if (HasFullFP16)
299    Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");
300 
301   if (HasDotProd)
302     Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");
303 
304   if (HasMTE)
305     Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");
306 
307   if (HasTME)
308     Builder.defineMacro("__ARM_FEATURE_TME", "1");
309 
310   if (HasMatMul)
311     Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");
312 
313   if (HasLSE)
314     Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
315 
316   if (HasBFloat16) {
317     Builder.defineMacro("__ARM_FEATURE_BF16", "1");
318     Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
319     Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
320     Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
321   }
322 
323   if ((FPU & SveMode) && HasBFloat16) {
324     Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
325   }
326 
327   if ((FPU & SveMode) && HasMatmulFP64)
328     Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");
329 
330   if ((FPU & SveMode) && HasMatmulFP32)
331     Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");
332 
333   if ((FPU & SveMode) && HasMatMul)
334     Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");
335 
336   if ((FPU & NeonMode) && HasFP16FML)
337     Builder.defineMacro("__ARM_FEATURE_FP16FML", "1");
338 
339   if (Opts.hasSignReturnAddress()) {
340     // Bitmask:
341     // 0: Protection using the A key
342     // 1: Protection using the B key
343     // 2: Protection including leaf functions
344     unsigned Value = 0;
345 
346     if (Opts.isSignReturnAddressWithAKey())
347       Value |= (1 << 0);
348     else
349       Value |= (1 << 1);
350 
351     if (Opts.isSignReturnAddressScopeAll())
352       Value |= (1 << 2);
353 
354     Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
355   }
356 
357   if (Opts.BranchTargetEnforcement)
358     Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");
359 
360   if (HasLS64)
361     Builder.defineMacro("__ARM_FEATURE_LS64", "1");
362 
363   switch (ArchKind) {
364   default:
365     break;
366   case llvm::AArch64::ArchKind::ARMV8_1A:
367     getTargetDefinesARMV81A(Opts, Builder);
368     break;
369   case llvm::AArch64::ArchKind::ARMV8_2A:
370     getTargetDefinesARMV82A(Opts, Builder);
371     break;
372   case llvm::AArch64::ArchKind::ARMV8_3A:
373     getTargetDefinesARMV83A(Opts, Builder);
374     break;
375   case llvm::AArch64::ArchKind::ARMV8_4A:
376     getTargetDefinesARMV84A(Opts, Builder);
377     break;
378   case llvm::AArch64::ArchKind::ARMV8_5A:
379     getTargetDefinesARMV85A(Opts, Builder);
380     break;
381   case llvm::AArch64::ArchKind::ARMV8_6A:
382     getTargetDefinesARMV86A(Opts, Builder);
383     break;
384   case llvm::AArch64::ArchKind::ARMV8_7A:
385     getTargetDefinesARMV87A(Opts, Builder);
386     break;
387   }
388 
389   // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
390   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
391   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
392   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
393   Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
394 
395   if (Opts.ArmSveVectorBits) {
396     Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.ArmSveVectorBits));
397     Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS");
398   }
399 }
400 
401 ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
402   return llvm::makeArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
403                                              Builtin::FirstTSBuiltin);
404 }
405 
406 bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
407   return Feature == "aarch64" || Feature == "arm64" || Feature == "arm" ||
408          (Feature == "neon" && (FPU & NeonMode)) ||
409          ((Feature == "sve" || Feature == "sve2" || Feature == "sve2-bitperm" ||
410            Feature == "sve2-aes" || Feature == "sve2-sha3" ||
411            Feature == "sve2-sm4" || Feature == "f64mm" || Feature == "f32mm" ||
412            Feature == "i8mm" || Feature == "bf16") &&
413           (FPU & SveMode));
414 }
415 
416 bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
417                                              DiagnosticsEngine &Diags) {
418   FPU = FPUMode;
419   HasCRC = false;
420   HasCrypto = false;
421   HasUnaligned = true;
422   HasFullFP16 = false;
423   HasDotProd = false;
424   HasFP16FML = false;
425   HasMTE = false;
426   HasTME = false;
427   HasLS64 = false;
428   HasMatMul = false;
429   HasBFloat16 = false;
430   HasSVE2 = false;
431   HasSVE2AES = false;
432   HasSVE2SHA3 = false;
433   HasSVE2SM4 = false;
434   HasSVE2BitPerm = false;
435   HasMatmulFP64 = false;
436   HasMatmulFP32 = false;
437   HasLSE = false;
438 
439   ArchKind = llvm::AArch64::ArchKind::ARMV8A;
440 
441   for (const auto &Feature : Features) {
442     if (Feature == "+neon")
443       FPU |= NeonMode;
444     if (Feature == "+sve") {
445       FPU |= SveMode;
446       HasFullFP16 = 1;
447     }
448     if (Feature == "+sve2") {
449       FPU |= SveMode;
450       HasFullFP16 = 1;
451       HasSVE2 = 1;
452     }
453     if (Feature == "+sve2-aes") {
454       FPU |= SveMode;
455       HasFullFP16 = 1;
456       HasSVE2 = 1;
457       HasSVE2AES = 1;
458     }
459     if (Feature == "+sve2-sha3") {
460       FPU |= SveMode;
461       HasFullFP16 = 1;
462       HasSVE2 = 1;
463       HasSVE2SHA3 = 1;
464     }
465     if (Feature == "+sve2-sm4") {
466       FPU |= SveMode;
467       HasFullFP16 = 1;
468       HasSVE2 = 1;
469       HasSVE2SM4 = 1;
470     }
471     if (Feature == "+sve2-bitperm") {
472       FPU |= SveMode;
473       HasFullFP16 = 1;
474       HasSVE2 = 1;
475       HasSVE2BitPerm = 1;
476     }
477     if (Feature == "+f32mm") {
478       FPU |= SveMode;
479       HasMatmulFP32 = true;
480     }
481     if (Feature == "+f64mm") {
482       FPU |= SveMode;
483       HasMatmulFP64 = true;
484     }
485     if (Feature == "+crc")
486       HasCRC = true;
487     if (Feature == "+crypto")
488       HasCrypto = true;
489     if (Feature == "+strict-align")
490       HasUnaligned = false;
491     if (Feature == "+v8.1a")
492       ArchKind = llvm::AArch64::ArchKind::ARMV8_1A;
493     if (Feature == "+v8.2a")
494       ArchKind = llvm::AArch64::ArchKind::ARMV8_2A;
495     if (Feature == "+v8.3a")
496       ArchKind = llvm::AArch64::ArchKind::ARMV8_3A;
497     if (Feature == "+v8.4a")
498       ArchKind = llvm::AArch64::ArchKind::ARMV8_4A;
499     if (Feature == "+v8.5a")
500       ArchKind = llvm::AArch64::ArchKind::ARMV8_5A;
501     if (Feature == "+v8.6a")
502       ArchKind = llvm::AArch64::ArchKind::ARMV8_6A;
503     if (Feature == "+v8.7a")
504       ArchKind = llvm::AArch64::ArchKind::ARMV8_7A;
505     if (Feature == "+v8r")
506       ArchKind = llvm::AArch64::ArchKind::ARMV8R;
507     if (Feature == "+fullfp16")
508       HasFullFP16 = true;
509     if (Feature == "+dotprod")
510       HasDotProd = true;
511     if (Feature == "+fp16fml")
512       HasFP16FML = true;
513     if (Feature == "+mte")
514       HasMTE = true;
515     if (Feature == "+tme")
516       HasTME = true;
517     if (Feature == "+pauth")
518       HasPAuth = true;
519     if (Feature == "+i8mm")
520       HasMatMul = true;
521     if (Feature == "+bf16")
522       HasBFloat16 = true;
523     if (Feature == "+lse")
524       HasLSE = true;
525     if (Feature == "+ls64")
526       HasLS64 = true;
527     if (Feature == "+flagm")
528       HasFlagM = true;
529   }
530 
531   setDataLayout();
532 
533   return true;
534 }
535 
536 TargetInfo::CallingConvCheckResult
537 AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
538   switch (CC) {
539   case CC_C:
540   case CC_Swift:
541   case CC_PreserveMost:
542   case CC_PreserveAll:
543   case CC_OpenCLKernel:
544   case CC_AArch64VectorCall:
545   case CC_Win64:
546     return CCCR_OK;
547   default:
548     return CCCR_Warning;
549   }
550 }
551 
552 bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }
553 
554 TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
555   return TargetInfo::AArch64ABIBuiltinVaList;
556 }
557 
// Register names accepted in GCC-style inline assembly and register
// variables, grouped by register class.
const char *const AArch64TargetInfo::GCCRegNames[] = {
    // 32-bit Integer registers
    "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
    "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
    "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",

    // 64-bit Integer registers
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
    "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
    "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",

    // 32-bit floating point registers
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
    "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
    "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",

    // 64-bit floating point registers
    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
    "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",

    // Neon vector registers
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
    "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
    "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",

    // SVE vector registers
    "z0",  "z1",  "z2",  "z3",  "z4",  "z5",  "z6",  "z7",  "z8",  "z9",  "z10",
    "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
    "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",

    // SVE predicate registers
    "p0",  "p1",  "p2",  "p3",  "p4",  "p5",  "p6",  "p7",  "p8",  "p9",  "p10",
    "p11", "p12", "p13", "p14", "p15"
};
593 
594 ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
595   return llvm::makeArrayRef(GCCRegNames);
596 }
597 
// Alternate spellings accepted for registers in inline assembly: the
// architectural w31/x31 names and GCC's legacy rN aliases for xN.
const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
    {{"w31"}, "wsp"},
    {{"x31"}, "sp"},
    // GCC rN registers are aliases of xN registers.
    {{"r0"}, "x0"},
    {{"r1"}, "x1"},
    {{"r2"}, "x2"},
    {{"r3"}, "x3"},
    {{"r4"}, "x4"},
    {{"r5"}, "x5"},
    {{"r6"}, "x6"},
    {{"r7"}, "x7"},
    {{"r8"}, "x8"},
    {{"r9"}, "x9"},
    {{"r10"}, "x10"},
    {{"r11"}, "x11"},
    {{"r12"}, "x12"},
    {{"r13"}, "x13"},
    {{"r14"}, "x14"},
    {{"r15"}, "x15"},
    {{"r16"}, "x16"},
    {{"r17"}, "x17"},
    {{"r18"}, "x18"},
    {{"r19"}, "x19"},
    {{"r20"}, "x20"},
    {{"r21"}, "x21"},
    {{"r22"}, "x22"},
    {{"r23"}, "x23"},
    {{"r24"}, "x24"},
    {{"r25"}, "x25"},
    {{"r26"}, "x26"},
    {{"r27"}, "x27"},
    {{"r28"}, "x28"},
    {{"r29", "x29"}, "fp"},
    {{"r30", "x30"}, "lr"},
    // The S/D/Q and W/X registers overlap, but aren't really aliases; we
    // don't want to substitute one of these for a different-sized one.
};
636 
637 ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
638   return llvm::makeArrayRef(GCCRegAliases);
639 }
640 
// Validate a GCC inline-assembly constraint letter and record its operand
// properties in Info.  For the multi-character "Up[la]" constraints, Name
// is advanced past the extra letters so the caller continues after them.
// NOTE(review): the trailing "return false;" after the switch is
// unreachable (every case returns) and kept only for safety.
bool AArch64TargetInfo::validateAsmConstraint(
    const char *&Name, TargetInfo::ConstraintInfo &Info) const {
  switch (*Name) {
  default:
    return false;
  case 'w': // Floating point and SIMD registers (V0-V31)
    Info.setAllowsRegister();
    return true;
  case 'I': // Constant that can be used with an ADD instruction
  case 'J': // Constant that can be used with a SUB instruction
  case 'K': // Constant that can be used with a 32-bit logical instruction
  case 'L': // Constant that can be used with a 64-bit logical instruction
  case 'M': // Constant that can be used as a 32-bit MOV immediate
  case 'N': // Constant that can be used as a 64-bit MOV immediate
  case 'Y': // Floating point constant zero
  case 'Z': // Integer constant zero
    return true;
  case 'Q': // A memory reference with base register and no offset
    Info.setAllowsMemory();
    return true;
  case 'S': // A symbolic address
    Info.setAllowsRegister();
    return true;
  case 'U':
    if (Name[1] == 'p' && (Name[2] == 'l' || Name[2] == 'a')) {
      // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7)
      Info.setAllowsRegister();
      // Consume the two extra constraint letters.
      Name += 2;
      return true;
    }
    // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
    // Utf: A memory address suitable for ldp/stp in TF mode.
    // Usa: An absolute symbolic address.
    // Ush: The high part (bits 32:12) of a pc-relative symbolic address.

    // Better to return an error saying that it's an unrecognised constraint
    // even if this is a valid constraint in gcc.
    return false;
  case 'z': // Zero register, wzr or xzr
    Info.setAllowsRegister();
    return true;
  case 'x': // Floating point and SIMD registers (V0-V15)
    Info.setAllowsRegister();
    return true;
  case 'y': // SVE registers (V0-V7)
    Info.setAllowsRegister();
    return true;
  }
  return false;
}
691 
692 bool AArch64TargetInfo::validateConstraintModifier(
693     StringRef Constraint, char Modifier, unsigned Size,
694     std::string &SuggestedModifier) const {
695   // Strip off constraint modifiers.
696   while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&')
697     Constraint = Constraint.substr(1);
698 
699   switch (Constraint[0]) {
700   default:
701     return true;
702   case 'z':
703   case 'r': {
704     switch (Modifier) {
705     case 'x':
706     case 'w':
707       // For now assume that the person knows what they're
708       // doing with the modifier.
709       return true;
710     default:
711       // By default an 'r' constraint will be in the 'x'
712       // registers.
713       if (Size == 64)
714         return true;
715 
716       SuggestedModifier = "w";
717       return false;
718     }
719   }
720   }
721 }
722 
723 const char *AArch64TargetInfo::getClobbers() const { return ""; }
724 
725 int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
726   if (RegNo == 0)
727     return 0;
728   if (RegNo == 1)
729     return 1;
730   return -1;
731 }
732 
733 bool AArch64TargetInfo::hasInt128Type() const { return true; }
734 
// Little-endian flavour: no adjustments beyond the common AArch64 setup.
AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
738 
739 void AArch64leTargetInfo::setDataLayout() {
740   if (getTriple().isOSBinFormatMachO()) {
741     if(getTriple().isArch32Bit())
742       resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128");
743     else
744       resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128");
745   } else
746     resetDataLayout("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
747 }
748 
749 void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
750                                            MacroBuilder &Builder) const {
751   Builder.defineMacro("__AARCH64EL__");
752   AArch64TargetInfo::getTargetDefines(Opts, Builder);
753 }
754 
// Big-endian flavour: no adjustments beyond the common AArch64 setup.
AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
758 
759 void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
760                                            MacroBuilder &Builder) const {
761   Builder.defineMacro("__AARCH64EB__");
762   Builder.defineMacro("__AARCH_BIG_ENDIAN");
763   Builder.defineMacro("__ARM_BIG_ENDIAN");
764   AArch64TargetInfo::getTargetDefines(Opts, Builder);
765 }
766 
void AArch64beTargetInfo::setDataLayout() {
  // There is no big-endian Mach-O AArch64 flavour.
  assert(!getTriple().isOSBinFormatMachO());
  resetDataLayout("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}
771 
// Common configuration for ARM64 Windows targets (MSVC and MinGW
// environments): the LLP64 data model and long double == double.
WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  LongDoubleWidth = LongDoubleAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  // 64-bit types back intmax_t/int64_t, size_t and pointer-sized integers.
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}
789 
790 void WindowsARM64TargetInfo::setDataLayout() {
791   resetDataLayout(Triple.isOSBinFormatMachO()
792                       ? "e-m:o-i64:64-i128:128-n32:64-S128"
793                       : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128");
794 }
795 
796 TargetInfo::BuiltinVaListKind
797 WindowsARM64TargetInfo::getBuiltinVaListKind() const {
798   return TargetInfo::CharPtrBuiltinVaList;
799 }
800 
801 TargetInfo::CallingConvCheckResult
802 WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
803   switch (CC) {
804   case CC_X86StdCall:
805   case CC_X86ThisCall:
806   case CC_X86FastCall:
807   case CC_X86VectorCall:
808     return CCCR_Ignore;
809   case CC_C:
810   case CC_OpenCLKernel:
811   case CC_PreserveMost:
812   case CC_PreserveAll:
813   case CC_Swift:
814   case CC_Win64:
815     return CCCR_OK;
816   default:
817     return CCCR_Warning;
818   }
819 }
820 
// MSVC-environment ARM64: Windows base configuration plus the Microsoft
// C++ ABI.
MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::Microsoft);
}
826 
827 void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
828                                                 MacroBuilder &Builder) const {
829   WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
830   Builder.defineMacro("_M_ARM64", "1");
831 }
832 
// Always classify as the Microsoft Win64 convention, regardless of the
// -fclang-abi-compat setting (ClangABICompat4 is deliberately ignored).
TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  return CCK_MicrosoftWin64;
}
837 
// Return the minimum alignment (in bits) for a global of TypeSize bits,
// replicating MSVC's size-based bumping on top of the inherited minimum.
unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize) const {
  unsigned Align = WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize);

  // MSVC does size based alignment for arm64 based on alignment section in
  // below document, replicate that to keep alignment consistent with object
  // files compiled by MSVC.
  // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
  if (TypeSize >= 512) {              // TypeSize >= 64 bytes
    Align = std::max(Align, 128u);    // align type at least 16 bytes
  } else if (TypeSize >= 64) {        // TypeSize >= 8 bytes
    Align = std::max(Align, 64u);     // align type at least 8 bytes
  } else if (TypeSize >= 16) {        // TypeSize >= 2 bytes
    Align = std::max(Align, 32u);     // align type at least 4 bytes
  }
  return Align;
}
854 
// MinGW-environment ARM64: Windows base configuration but the generic
// (Itanium-style) AArch64 C++ ABI instead of the Microsoft one.
MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
                                           const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
}
860 
// Darwin (macOS/iOS/watchOS) overrides on top of the little-endian base:
// LP64 with "long long" int64_t, signed wchar_t, and long double == double.
DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  Int64Type = SignedLongLong;
  // On 32-bit flavours (arm64_32) intmax_t must still be 64 bits wide.
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  WCharType = SignedInt;
  UseSignedCharForObjCBool = false;

  // long double is plain IEEE double on Darwin AArch64.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  // 32-bit flavours (arm64_32, used by watchOS) have their own bitfield
  // rules and use the WatchOS C++ ABI.
  if (getTriple().isArch32Bit()) {
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::AppleARM64);
}
884 
885 void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
886                                            const llvm::Triple &Triple,
887                                            MacroBuilder &Builder) const {
888   Builder.defineMacro("__AARCH64_SIMD__");
889   if (Triple.isArch32Bit())
890     Builder.defineMacro("__ARM64_ARCH_8_32__");
891   else
892     Builder.defineMacro("__ARM64_ARCH_8__");
893   Builder.defineMacro("__ARM_NEON__");
894   Builder.defineMacro("__LITTLE_ENDIAN__");
895   Builder.defineMacro("__REGISTER_PREFIX__", "");
896   Builder.defineMacro("__arm64", "1");
897   Builder.defineMacro("__arm64__", "1");
898 
899   if (Triple.isArm64e())
900     Builder.defineMacro("__arm64e__", "1");
901 
902   getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
903 }
904 
905 TargetInfo::BuiltinVaListKind
906 DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
907   return TargetInfo::CharPtrBuiltinVaList;
908 }
909 
// 64-bit RenderScript is aarch64
RenderScript64TargetInfo::RenderScript64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    // Rebuild the triple with the architecture forced to "aarch64" while
    // preserving the vendor, OS and environment components.
    : AArch64leTargetInfo(llvm::Triple("aarch64", Triple.getVendorName(),
                                       Triple.getOSName(),
                                       Triple.getEnvironmentName()),
                          Opts) {
  IsRenderScriptTarget = true;
}
919 
920 void RenderScript64TargetInfo::getTargetDefines(const LangOptions &Opts,
921                                                 MacroBuilder &Builder) const {
922   Builder.defineMacro("__RENDERSCRIPT__");
923   AArch64leTargetInfo::getTargetDefines(Opts, Builder);
924 }
925