1 //===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements AArch64 TargetInfo objects.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "AArch64.h"
14 #include "clang/Basic/LangOptions.h"
15 #include "clang/Basic/TargetBuiltins.h"
16 #include "clang/Basic/TargetInfo.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/StringExtras.h"
19 #include "llvm/ADT/StringSwitch.h"
20 #include "llvm/Support/AArch64TargetParser.h"
21 
22 using namespace clang;
23 using namespace clang::targets;
24 
// Table of all AArch64 target-specific builtins, built by expanding the
// BUILTIN/LANGBUILTIN/TARGET_HEADER_BUILTIN macros over the .def files.
// NOTE(review): the include order (NEON, SVE, then AArch64) is assumed to
// mirror the builtin ID enumeration order — confirm against TargetBuiltins.h.
const Builtin::Info AArch64TargetInfo::BuiltinInfo[] = {
// NEON builtins: available in all language modes, no required header.
#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsNEON.def"

// SVE builtins: likewise unconditional entries.
#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#include "clang/Basic/BuiltinsSVE.def"

// Remaining AArch64 builtins, including language-restricted ones and those
// that require a specific header / target feature.
#define BUILTIN(ID, TYPE, ATTRS)                                               \
   {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG)                                     \
  {#ID, TYPE, ATTRS, nullptr, LANG, nullptr},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE)         \
  {#ID, TYPE, ATTRS, HEADER, LANGS, FEATURE},
#include "clang/Basic/BuiltinsAArch64.def"
};
42 
43 static StringRef getArchVersionString(llvm::AArch64::ArchKind Kind) {
44   switch (Kind) {
45   case llvm::AArch64::ArchKind::ARMV9A:
46   case llvm::AArch64::ArchKind::ARMV9_1A:
47   case llvm::AArch64::ArchKind::ARMV9_2A:
48   case llvm::AArch64::ArchKind::ARMV9_3A:
49     return "9";
50   default:
51     return "8";
52   }
53 }
54 
55 StringRef AArch64TargetInfo::getArchProfile() const {
56   switch (ArchKind) {
57   case llvm::AArch64::ArchKind::ARMV8R:
58     return "R";
59   default:
60     return "A";
61   }
62 }
63 
// Construct the generic AArch64 target: fixes the type widths, alignments,
// float formats and ABI defaults shared by all AArch64 flavours; OS-specific
// subclasses refine these further.
AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
                                     const TargetOptions &Opts)
    : TargetInfo(Triple), ABI("aapcs") {
  // OpenBSD uses "long long" for its 64-bit/intmax types.
  if (getTriple().isOSOpenBSD()) {
    Int64Type = SignedLongLong;
    IntMaxType = SignedLongLong;
  } else {
    // Darwin and NetBSD keep the default signed wchar_t; everyone else
    // follows AAPCS64's unsigned wchar_t.
    if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
      WCharType = UnsignedInt;

    Int64Type = SignedLong;
    IntMaxType = SignedLong;
  }

  // All AArch64 implementations support ARMv8 FP, which makes half a legal type.
  HasLegalHalfType = true;
  HasFloat16 = true;

  // ILP32-style variants (32-bit arch in the triple) use 32-bit
  // long/pointer; the normal LP64 case uses 64 bits for both.
  if (Triple.isArch64Bit())
    LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
  else
    LongWidth = LongAlign = PointerWidth = PointerAlign = 32;

  // 128-bit vectors and atomics (LDXP/STXP-sized) are supported.
  MaxVectorAlign = 128;
  MaxAtomicInlineWidth = 128;
  MaxAtomicPromoteWidth = 128;

  // long double is IEEE binary128 on AArch64.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
  LongDoubleFormat = &llvm::APFloat::IEEEquad();

  BFloat16Width = BFloat16Align = 16;
  BFloat16Format = &llvm::APFloat::BFloat();

  // Make __builtin_ms_va_list available.
  HasBuiltinMSVaList = true;

  // Make the SVE types available.  Note that this deliberately doesn't
  // depend on SveMode, since in principle it should be possible to turn
  // SVE on and off within a translation unit.  It should also be possible
  // to compile the global declaration:
  //
  // __SVInt8_t *ptr;
  //
  // even without SVE.
  HasAArch64SVETypes = true;

  // {} in inline assembly are neon specifiers, not assembly variant
  // specifiers.
  NoAsmVariants = true;

  // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
  // contributes to the alignment of the containing aggregate in the same way
  // a plain (non bit-field) member of that type would, without exception for
  // zero-sized or anonymous bit-fields."
  assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
  UseZeroLengthBitfieldAlignment = true;

  // AArch64 targets default to using the ARM C++ ABI.
  TheCXXABI.set(TargetCXXABI::GenericAArch64);

  // Profiling counter symbol name differs by OS/EABI convention.
  if (Triple.getOS() == llvm::Triple::Linux)
    this->MCountName = "\01_mcount";
  else if (Triple.getOS() == llvm::Triple::UnknownOS)
    this->MCountName =
        Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
}
130 
131 StringRef AArch64TargetInfo::getABI() const { return ABI; }
132 
133 bool AArch64TargetInfo::setABI(const std::string &Name) {
134   if (Name != "aapcs" && Name != "darwinpcs")
135     return false;
136 
137   ABI = Name;
138   return true;
139 }
140 
141 bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
142                                                  BranchProtectionInfo &BPI,
143                                                  StringRef &Err) const {
144   llvm::ARM::ParsedBranchProtection PBP;
145   if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err))
146     return false;
147 
148   BPI.SignReturnAddr =
149       llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
150           .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
151           .Case("all", LangOptions::SignReturnAddressScopeKind::All)
152           .Default(LangOptions::SignReturnAddressScopeKind::None);
153 
154   if (PBP.Key == "a_key")
155     BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
156   else
157     BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;
158 
159   BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
160   return true;
161 }
162 
163 bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
164   return Name == "generic" ||
165          llvm::AArch64::parseCPUArch(Name) != llvm::AArch64::ArchKind::INVALID;
166 }
167 
// Selecting a CPU only validates the name here; the CPU's features arrive
// separately through handleTargetFeatures().
bool AArch64TargetInfo::setCPU(const std::string &Name) {
  return isValidCPUName(Name);
}
171 
// Populate Values with every CPU name the target parser knows about
// (used for diagnostics / -mcpu=? listings).
void AArch64TargetInfo::fillValidCPUList(
    SmallVectorImpl<StringRef> &Values) const {
  llvm::AArch64::fillValidCPUArchList(Values);
}
176 
// ACLE macros for extensions unconditionally available from Armv8.1-A.
void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
  Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
  Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
}
183 
// Armv8.2-A adds no unconditional macros of its own here; it only layers
// on top of the v8.1 set.
void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the ARMv8.1 defines
  getTargetDefinesARMV81A(Opts, Builder);
}
189 
// Armv8.3-A: complex-number arithmetic and the JCVT (JavaScript convert)
// feature macros, plus everything from v8.2.
void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_COMPLEX", "1");
  Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
  // Also include the Armv8.2 defines
  getTargetDefinesARMV82A(Opts, Builder);
}
197 
// Armv8.4-A adds no unconditional macros here; it only layers on v8.3.
void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.3 defines
  getTargetDefinesARMV83A(Opts, Builder);
}
203 
// Armv8.5-A: floating-point rounding (FRINT) macro, plus the v8.4 set.
void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
  // Also include the Armv8.4 defines
  getTargetDefinesARMV84A(Opts, Builder);
}
210 
// Armv8.6-A currently only layers on the v8.5 set (see FIXME below).
void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.5 defines
  // FIXME: Armv8.6 makes the following extensions mandatory:
  // - __ARM_FEATURE_BF16
  // - __ARM_FEATURE_MATMUL_INT8
  // Handle them here.
  getTargetDefinesARMV85A(Opts, Builder);
}
220 
// Armv8.7-A adds no unconditional macros here; it only layers on v8.6.
void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.6 defines
  getTargetDefinesARMV86A(Opts, Builder);
}
226 
// Armv8.8-A adds no unconditional macros here; it only layers on v8.7.
void AArch64TargetInfo::getTargetDefinesARMV88A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Also include the Armv8.7 defines
  getTargetDefinesARMV87A(Opts, Builder);
}
232 
// Armv9-A inherits its unconditional macro set from Armv8.5-A.
void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
                                               MacroBuilder &Builder) const {
  // Armv9-A maps to Armv8.5-A
  getTargetDefinesARMV85A(Opts, Builder);
}
238 
// Armv9.1-A inherits its unconditional macro set from Armv8.6-A.
void AArch64TargetInfo::getTargetDefinesARMV91A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.1-A maps to Armv8.6-A
  getTargetDefinesARMV86A(Opts, Builder);
}
244 
// Armv9.2-A inherits its unconditional macro set from Armv8.7-A.
void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.2-A maps to Armv8.7-A
  getTargetDefinesARMV87A(Opts, Builder);
}
250 
// Armv9.3-A inherits its unconditional macro set from Armv8.8-A.
void AArch64TargetInfo::getTargetDefinesARMV93A(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // Armv9.3-A maps to Armv8.8-A
  getTargetDefinesARMV88A(Opts, Builder);
}
256 
// Emit every predefined macro for the AArch64 target: identification,
// pointer-model, ACLE feature macros keyed off the flags computed in
// handleTargetFeatures(), and the per-architecture-revision macro sets.
void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                         MacroBuilder &Builder) const {
  // Target identification.
  Builder.defineMacro("__aarch64__");
  // For bare-metal.
  if (getTriple().getOS() == llvm::Triple::UnknownOS &&
      getTriple().isOSBinFormatELF())
    Builder.defineMacro("__ELF__");

  // Target properties.
  if (!getTriple().isOSWindows() && getTriple().isArch64Bit()) {
    Builder.defineMacro("_LP64");
    Builder.defineMacro("__LP64__");
  }

  // Expose the code model as e.g. __AARCH64_CMODEL_SMALL__ ("default"
  // means small).
  std::string CodeModel = getTargetOpts().CodeModel;
  if (CodeModel == "default")
    CodeModel = "small";
  for (char &c : CodeModel)
    c = toupper(c);
  Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");

  // ACLE predefines. Many can only have one possible value on v8 AArch64.
  Builder.defineMacro("__ARM_ACLE", "200");
  Builder.defineMacro("__ARM_ARCH", getArchVersionString(ArchKind));
  // __ARM_ARCH_PROFILE expands to a character constant, hence the quotes.
  Builder.defineMacro("__ARM_ARCH_PROFILE", "'" + getArchProfile() + "'");

  Builder.defineMacro("__ARM_64BIT_STATE", "1");
  Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
  Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");

  Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
  Builder.defineMacro("__ARM_FEATURE_FMA", "1");
  Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
  Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
  Builder.defineMacro("__ARM_FEATURE_DIV");       // For backwards compatibility
  Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
  Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");

  Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");

  // 0xe implies support for half, single and double precision operations.
  Builder.defineMacro("__ARM_FP", "0xE");

  // PCS specifies this for SysV variants, which is all we support. Other ABIs
  // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
  Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
  Builder.defineMacro("__ARM_FP16_ARGS", "1");

  if (Opts.UnsafeFPMath)
    Builder.defineMacro("__ARM_FP_FAST", "1");

  // If no explicit wchar size was requested, report the AAPCS default of 4.
  Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
                      Twine(Opts.WCharSize ? Opts.WCharSize : 4));

  Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");

  if (FPU & NeonMode) {
    Builder.defineMacro("__ARM_NEON", "1");
    // 64-bit NEON supports half, single and double precision operations.
    Builder.defineMacro("__ARM_NEON_FP", "0xE");
  }

  if (FPU & SveMode)
    Builder.defineMacro("__ARM_FEATURE_SVE", "1");

  if ((FPU & NeonMode) && (FPU & SveMode))
    Builder.defineMacro("__ARM_NEON_SVE_BRIDGE", "1");

  if (HasSVE2)
    Builder.defineMacro("__ARM_FEATURE_SVE2", "1");

  if (HasSVE2 && HasSVE2AES)
    Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");

  if (HasSVE2 && HasSVE2BitPerm)
    Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");

  if (HasSVE2 && HasSVE2SHA3)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");

  if (HasSVE2 && HasSVE2SM4)
    Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");

  if (HasCRC)
    Builder.defineMacro("__ARM_FEATURE_CRC32", "1");

  // The __ARM_FEATURE_CRYPTO is deprecated in favor of finer grained feature
  // macros for AES, SHA2, SHA3 and SM4
  if (HasAES && HasSHA2)
    Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");

  if (HasAES)
    Builder.defineMacro("__ARM_FEATURE_AES", "1");

  if (HasSHA2)
    Builder.defineMacro("__ARM_FEATURE_SHA2", "1");

  if (HasSHA3) {
    Builder.defineMacro("__ARM_FEATURE_SHA3", "1");
    Builder.defineMacro("__ARM_FEATURE_SHA512", "1");
  }

  if (HasSM4) {
    Builder.defineMacro("__ARM_FEATURE_SM3", "1");
    Builder.defineMacro("__ARM_FEATURE_SM4", "1");
  }

  if (HasUnaligned)
    Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");

  // FP16 vector arithmetic additionally requires NEON.
  if ((FPU & NeonMode) && HasFullFP16)
    Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
  if (HasFullFP16)
   Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");

  if (HasDotProd)
    Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");

  if (HasMTE)
    Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");

  if (HasTME)
    Builder.defineMacro("__ARM_FEATURE_TME", "1");

  if (HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");

  if (HasLSE)
    Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");

  if (HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_BF16", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
    Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
    Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
  }

  // SVE-specific variants of the matrix-multiply / BF16 macros.
  if ((FPU & SveMode) && HasBFloat16) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
  }

  if ((FPU & SveMode) && HasMatmulFP64)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");

  if ((FPU & SveMode) && HasMatmulFP32)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");

  if ((FPU & SveMode) && HasMatMul)
    Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");

  if ((FPU & NeonMode) && HasFP16FML)
    Builder.defineMacro("__ARM_FEATURE_FP16_FML", "1");

  if (Opts.hasSignReturnAddress()) {
    // Bitmask:
    // 0: Protection using the A key
    // 1: Protection using the B key
    // 2: Protection including leaf functions
    unsigned Value = 0;

    if (Opts.isSignReturnAddressWithAKey())
      Value |= (1 << 0);
    else
      Value |= (1 << 1);

    if (Opts.isSignReturnAddressScopeAll())
      Value |= (1 << 2);

    Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
  }

  if (Opts.BranchTargetEnforcement)
    Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");

  if (HasLS64)
    Builder.defineMacro("__ARM_FEATURE_LS64", "1");

  if (HasRandGen)
    Builder.defineMacro("__ARM_FEATURE_RNG", "1");

  if (HasMOPS)
    Builder.defineMacro("__ARM_FEATURE_MOPS", "1");

  // Layer on the macro set for the selected architecture revision.
  switch (ArchKind) {
  default:
    break;
  case llvm::AArch64::ArchKind::ARMV8_1A:
    getTargetDefinesARMV81A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_2A:
    getTargetDefinesARMV82A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_3A:
    getTargetDefinesARMV83A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_4A:
    getTargetDefinesARMV84A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_5A:
    getTargetDefinesARMV85A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_6A:
    getTargetDefinesARMV86A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_7A:
    getTargetDefinesARMV87A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV8_8A:
    getTargetDefinesARMV88A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV9A:
    getTargetDefinesARMV9A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV9_1A:
    getTargetDefinesARMV91A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV9_2A:
    getTargetDefinesARMV92A(Opts, Builder);
    break;
  case llvm::AArch64::ArchKind::ARMV9_3A:
    getTargetDefinesARMV93A(Opts, Builder);
    break;
  }

  // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
  Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");

  // A fixed vscale (min == max) pins the SVE vector length in bits.
  if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) {
    Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.VScaleMin * 128));
    Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS");
  }
}
493 
// Return the slice of BuiltinInfo that covers all AArch64 target-specific
// builtin IDs.
ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
  return llvm::makeArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
                                             Builtin::FirstTSBuiltin);
}
498 
499 Optional<std::pair<unsigned, unsigned>>
500 AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
501   if (LangOpts.VScaleMin || LangOpts.VScaleMax)
502     return std::pair<unsigned, unsigned>(
503         LangOpts.VScaleMin ? LangOpts.VScaleMin : 1, LangOpts.VScaleMax);
504 
505   if (hasFeature("sve"))
506     return std::pair<unsigned, unsigned>(1, 16);
507 
508   return None;
509 }
510 
511 bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
512   return llvm::StringSwitch<bool>(Feature)
513     .Cases("aarch64", "arm64", "arm", true)
514     .Case("neon", FPU & NeonMode)
515     .Cases("sve", "sve2", "sve2-bitperm", "sve2-aes", "sve2-sha3", "sve2-sm4", "f64mm", "f32mm", "i8mm", "bf16", FPU & SveMode)
516     .Case("ls64", HasLS64)
517     .Default(false);
518 }
519 
520 bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
521                                              DiagnosticsEngine &Diags) {
522   FPU = FPUMode;
523   HasCRC = false;
524   HasAES = false;
525   HasSHA2 = false;
526   HasSHA3 = false;
527   HasSM4 = false;
528   HasUnaligned = true;
529   HasFullFP16 = false;
530   HasDotProd = false;
531   HasFP16FML = false;
532   HasMTE = false;
533   HasTME = false;
534   HasLS64 = false;
535   HasRandGen = false;
536   HasMatMul = false;
537   HasBFloat16 = false;
538   HasSVE2 = false;
539   HasSVE2AES = false;
540   HasSVE2SHA3 = false;
541   HasSVE2SM4 = false;
542   HasSVE2BitPerm = false;
543   HasMatmulFP64 = false;
544   HasMatmulFP32 = false;
545   HasLSE = false;
546   HasMOPS = false;
547 
548   ArchKind = llvm::AArch64::ArchKind::INVALID;
549 
550   for (const auto &Feature : Features) {
551     if (Feature == "+neon")
552       FPU |= NeonMode;
553     if (Feature == "+sve") {
554       FPU |= SveMode;
555       HasFullFP16 = true;
556     }
557     if (Feature == "+sve2") {
558       FPU |= SveMode;
559       HasFullFP16 = true;
560       HasSVE2 = true;
561     }
562     if (Feature == "+sve2-aes") {
563       FPU |= SveMode;
564       HasFullFP16 = true;
565       HasSVE2 = true;
566       HasSVE2AES = true;
567     }
568     if (Feature == "+sve2-sha3") {
569       FPU |= SveMode;
570       HasFullFP16 = true;
571       HasSVE2 = true;
572       HasSVE2SHA3 = true;
573     }
574     if (Feature == "+sve2-sm4") {
575       FPU |= SveMode;
576       HasFullFP16 = true;
577       HasSVE2 = true;
578       HasSVE2SM4 = true;
579     }
580     if (Feature == "+sve2-bitperm") {
581       FPU |= SveMode;
582       HasFullFP16 = true;
583       HasSVE2 = true;
584       HasSVE2BitPerm = true;
585     }
586     if (Feature == "+f32mm") {
587       FPU |= SveMode;
588       HasMatmulFP32 = true;
589     }
590     if (Feature == "+f64mm") {
591       FPU |= SveMode;
592       HasMatmulFP64 = true;
593     }
594     if (Feature == "+crc")
595       HasCRC = true;
596     if (Feature == "+aes")
597       HasAES = true;
598     if (Feature == "+sha2")
599       HasSHA2 = true;
600     if (Feature == "+sha3") {
601       HasSHA2 = true;
602       HasSHA3 = true;
603     }
604     if (Feature == "+sm4")
605       HasSM4 = true;
606     if (Feature == "+strict-align")
607       HasUnaligned = false;
608     if (Feature == "+v8a")
609       ArchKind = llvm::AArch64::ArchKind::ARMV8A;
610     if (Feature == "+v8.1a")
611       ArchKind = llvm::AArch64::ArchKind::ARMV8_1A;
612     if (Feature == "+v8.2a")
613       ArchKind = llvm::AArch64::ArchKind::ARMV8_2A;
614     if (Feature == "+v8.3a")
615       ArchKind = llvm::AArch64::ArchKind::ARMV8_3A;
616     if (Feature == "+v8.4a")
617       ArchKind = llvm::AArch64::ArchKind::ARMV8_4A;
618     if (Feature == "+v8.5a")
619       ArchKind = llvm::AArch64::ArchKind::ARMV8_5A;
620     if (Feature == "+v8.6a")
621       ArchKind = llvm::AArch64::ArchKind::ARMV8_6A;
622     if (Feature == "+v8.7a")
623       ArchKind = llvm::AArch64::ArchKind::ARMV8_7A;
624     if (Feature == "+v8.8a")
625       ArchKind = llvm::AArch64::ArchKind::ARMV8_8A;
626     if (Feature == "+v9a")
627       ArchKind = llvm::AArch64::ArchKind::ARMV9A;
628     if (Feature == "+v9.1a")
629       ArchKind = llvm::AArch64::ArchKind::ARMV9_1A;
630     if (Feature == "+v9.2a")
631       ArchKind = llvm::AArch64::ArchKind::ARMV9_2A;
632     if (Feature == "+v9.3a")
633       ArchKind = llvm::AArch64::ArchKind::ARMV9_3A;
634     if (Feature == "+v8r")
635       ArchKind = llvm::AArch64::ArchKind::ARMV8R;
636     if (Feature == "+fullfp16")
637       HasFullFP16 = true;
638     if (Feature == "+dotprod")
639       HasDotProd = true;
640     if (Feature == "+fp16fml")
641       HasFP16FML = true;
642     if (Feature == "+mte")
643       HasMTE = true;
644     if (Feature == "+tme")
645       HasTME = true;
646     if (Feature == "+pauth")
647       HasPAuth = true;
648     if (Feature == "+i8mm")
649       HasMatMul = true;
650     if (Feature == "+bf16")
651       HasBFloat16 = true;
652     if (Feature == "+lse")
653       HasLSE = true;
654     if (Feature == "+ls64")
655       HasLS64 = true;
656     if (Feature == "+rand")
657       HasRandGen = true;
658     if (Feature == "+flagm")
659       HasFlagM = true;
660     if (Feature == "+mops")
661       HasMOPS = true;
662   }
663 
664   setDataLayout();
665 
666   return true;
667 }
668 
// Accept the calling conventions AArch64 supports; any other convention is
// merely warned about (CCCR_Warning), not rejected.
TargetInfo::CallingConvCheckResult
AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
  switch (CC) {
  case CC_C:
  case CC_Swift:
  case CC_SwiftAsync:
  case CC_PreserveMost:
  case CC_PreserveAll:
  case CC_OpenCLKernel:
  case CC_AArch64VectorCall:
  case CC_AArch64SVEPCS:
  case CC_Win64:
    return CCCR_OK;
  default:
    return CCCR_Warning;
  }
}
686 
687 bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }
688 
// va_list uses the AArch64 AAPCS ABI record type.
TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
  return TargetInfo::AArch64ABIBuiltinVaList;
}
692 
// Register names accepted in GCC-style inline assembly / register variables.
const char *const AArch64TargetInfo::GCCRegNames[] = {
    // 32-bit Integer registers
    "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
    "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
    "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",

    // 64-bit Integer registers
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
    "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
    "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",

    // 32-bit floating point registers
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
    "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
    "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",

    // 64-bit floating point registers
    "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
    "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
    "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",

    // Neon vector registers
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
    "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
    "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",

    // SVE vector registers
    "z0",  "z1",  "z2",  "z3",  "z4",  "z5",  "z6",  "z7",  "z8",  "z9",  "z10",
    "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
    "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",

    // SVE predicate registers
    "p0",  "p1",  "p2",  "p3",  "p4",  "p5",  "p6",  "p7",  "p8",  "p9",  "p10",
    "p11", "p12", "p13", "p14", "p15"
};
728 
// Expose the full GCC register-name table above.
ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
  return llvm::makeArrayRef(GCCRegNames);
}
732 
// Alternate spellings accepted for registers in inline assembly.
const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
    // There is no w31/x31 GPR; those encodings name the stack pointer.
    {{"w31"}, "wsp"},
    {{"x31"}, "sp"},
    // GCC rN registers are aliases of xN registers.
    {{"r0"}, "x0"},
    {{"r1"}, "x1"},
    {{"r2"}, "x2"},
    {{"r3"}, "x3"},
    {{"r4"}, "x4"},
    {{"r5"}, "x5"},
    {{"r6"}, "x6"},
    {{"r7"}, "x7"},
    {{"r8"}, "x8"},
    {{"r9"}, "x9"},
    {{"r10"}, "x10"},
    {{"r11"}, "x11"},
    {{"r12"}, "x12"},
    {{"r13"}, "x13"},
    {{"r14"}, "x14"},
    {{"r15"}, "x15"},
    {{"r16"}, "x16"},
    {{"r17"}, "x17"},
    {{"r18"}, "x18"},
    {{"r19"}, "x19"},
    {{"r20"}, "x20"},
    {{"r21"}, "x21"},
    {{"r22"}, "x22"},
    {{"r23"}, "x23"},
    {{"r24"}, "x24"},
    {{"r25"}, "x25"},
    {{"r26"}, "x26"},
    {{"r27"}, "x27"},
    {{"r28"}, "x28"},
    {{"r29", "x29"}, "fp"},
    {{"r30", "x30"}, "lr"},
    // The S/D/Q and W/X registers overlap, but aren't really aliases; we
    // don't want to substitute one of these for a different-sized one.
};
771 
// Expose the register-alias table above.
ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
  return llvm::makeArrayRef(GCCRegAliases);
}
775 
// Validate a GCC-style inline-asm constraint letter, recording in Info
// whether it may bind to a register and/or memory.  Name is advanced past
// the extra characters of multi-character constraints ("Upl"/"Upa").
bool AArch64TargetInfo::validateAsmConstraint(
    const char *&Name, TargetInfo::ConstraintInfo &Info) const {
  switch (*Name) {
  default:
    return false;
  case 'w': // Floating point and SIMD registers (V0-V31)
    Info.setAllowsRegister();
    return true;
  case 'I': // Constant that can be used with an ADD instruction
  case 'J': // Constant that can be used with a SUB instruction
  case 'K': // Constant that can be used with a 32-bit logical instruction
  case 'L': // Constant that can be used with a 64-bit logical instruction
  case 'M': // Constant that can be used as a 32-bit MOV immediate
  case 'N': // Constant that can be used as a 64-bit MOV immediate
  case 'Y': // Floating point constant zero
  case 'Z': // Integer constant zero
    return true;
  case 'Q': // A memory reference with base register and no offset
    Info.setAllowsMemory();
    return true;
  case 'S': // A symbolic address
    Info.setAllowsRegister();
    return true;
  case 'U':
    if (Name[1] == 'p' && (Name[2] == 'l' || Name[2] == 'a')) {
      // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7)
      Info.setAllowsRegister();
      Name += 2;
      return true;
    }
    // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
    // Utf: A memory address suitable for ldp/stp in TF mode.
    // Usa: An absolute symbolic address.
    // Ush: The high part (bits 32:12) of a pc-relative symbolic address.

    // Better to return an error saying that it's an unrecognised constraint
    // even if this is a valid constraint in gcc.
    return false;
  case 'z': // Zero register, wzr or xzr
    Info.setAllowsRegister();
    return true;
  case 'x': // Floating point and SIMD registers (V0-V15)
    Info.setAllowsRegister();
    return true;
  case 'y': // SVE registers (V0-V7)
    Info.setAllowsRegister();
    return true;
  }
  // Unreachable: every case above (including default) returns.
  return false;
}
826 
// Check whether an operand-size Modifier is compatible with the given
// constraint for an operand of Size bits; on failure suggest a fitting
// modifier via SuggestedModifier.
bool AArch64TargetInfo::validateConstraintModifier(
    StringRef Constraint, char Modifier, unsigned Size,
    std::string &SuggestedModifier) const {
  // Strip off constraint modifiers.
  while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&')
    Constraint = Constraint.substr(1);

  switch (Constraint[0]) {
  default:
    return true;
  case 'z':
  case 'r': {
    switch (Modifier) {
    case 'x':
    case 'w':
      // For now assume that the person knows what they're
      // doing with the modifier.
      return true;
    default:
      // By default an 'r' constraint will be in the 'x'
      // registers.
      if (Size == 64)
        return true;

      // 512-bit operands require the LS64 extension (64-byte loads/stores).
      if (Size == 512)
        return HasLS64;

      SuggestedModifier = "w";
      return false;
    }
  }
  }
}
860 
861 const char *AArch64TargetInfo::getClobbers() const { return ""; }
862 
863 int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
864   if (RegNo == 0)
865     return 0;
866   if (RegNo == 1)
867     return 1;
868   return -1;
869 }
870 
871 bool AArch64TargetInfo::hasInt128Type() const { return true; }
872 
// Little-endian AArch64: no deviations from the common base beyond the
// data layout and __AARCH64EL__ define handled elsewhere in this class.
AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
876 
877 void AArch64leTargetInfo::setDataLayout() {
878   if (getTriple().isOSBinFormatMachO()) {
879     if(getTriple().isArch32Bit())
880       resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128", "_");
881     else
882       resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128", "_");
883   } else
884     resetDataLayout("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
885 }
886 
void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  // Advertise the little-endian variant, then add the common AArch64 macros.
  Builder.defineMacro("__AARCH64EL__");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
892 
// Big-endian AArch64: configuration is inherited from the generic base;
// the data layout and predefines are overridden below.
AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
                                         const TargetOptions &Opts)
    : AArch64TargetInfo(Triple, Opts) {}
896 
void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
                                           MacroBuilder &Builder) const {
  // Big-endian spelling plus the legacy ARM big-endian macros, then the
  // common AArch64 predefines.
  Builder.defineMacro("__AARCH64EB__");
  Builder.defineMacro("__AARCH_BIG_ENDIAN");
  Builder.defineMacro("__ARM_BIG_ENDIAN");
  AArch64TargetInfo::getTargetDefines(Opts, Builder);
}
904 
void AArch64beTargetInfo::setDataLayout() {
  // Big-endian Mach-O is not supported; the leading 'E' marks the layout
  // as big-endian.
  assert(!getTriple().isOSBinFormatMachO());
  resetDataLayout("E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}
909 
WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
                                               const TargetOptions &Opts)
    : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {

  // This is an LLP64 platform: 'int' and 'long' stay 32-bit while 'long
  // long' and pointers are 64-bit.
  // int:4, long:4, long long:8, long double:8.
  IntWidth = IntAlign = 32;
  LongWidth = LongAlign = 32;
  DoubleAlign = LongLongAlign = 64;
  // 'long double' is plain IEEE double on Windows.
  LongDoubleWidth = LongDoubleAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();
  // Pointer-sized and maximum-width integer types are all 64-bit.
  IntMaxType = SignedLongLong;
  Int64Type = SignedLongLong;
  SizeType = UnsignedLongLong;
  PtrDiffType = SignedLongLong;
  IntPtrType = SignedLongLong;
}
927 
928 void WindowsARM64TargetInfo::setDataLayout() {
929   resetDataLayout(Triple.isOSBinFormatMachO()
930                       ? "e-m:o-i64:64-i128:128-n32:64-S128"
931                       : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128",
932                   Triple.isOSBinFormatMachO() ? "_" : "");
933 }
934 
TargetInfo::BuiltinVaListKind
WindowsARM64TargetInfo::getBuiltinVaListKind() const {
  // Windows on ARM64 uses a simple char* va_list.
  return TargetInfo::CharPtrBuiltinVaList;
}
939 
940 TargetInfo::CallingConvCheckResult
941 WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
942   switch (CC) {
943   case CC_X86StdCall:
944   case CC_X86ThisCall:
945   case CC_X86FastCall:
946   case CC_X86VectorCall:
947     return CCCR_Ignore;
948   case CC_C:
949   case CC_OpenCLKernel:
950   case CC_PreserveMost:
951   case CC_PreserveAll:
952   case CC_Swift:
953   case CC_SwiftAsync:
954   case CC_Win64:
955     return CCCR_OK;
956   default:
957     return CCCR_Warning;
958   }
959 }
960 
MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  // MSVC-compatible environments use the Microsoft C++ ABI.
  TheCXXABI.set(TargetCXXABI::Microsoft);
}
966 
void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
  // MSVC's architecture macro for ARM64.
  Builder.defineMacro("_M_ARM64", "1");
}
972 
TargetInfo::CallingConvKind
MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
  // Always use the Microsoft Win64 convention kind, regardless of the
  // ABI-compat setting (the parameter is intentionally unused).
  return CCK_MicrosoftWin64;
}
977 
// Returns the minimum alignment (in bits) for a global of TypeSize bits,
// replicating MSVC's size-based alignment bumps for arm64.
unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize) const {
  unsigned Align = WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize);

  // MSVC does size based alignment for arm64 based on alignment section in
  // below document, replicate that to keep alignment consistent with object
  // files compiled by MSVC.
  // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
  if (TypeSize >= 512) {              // TypeSize >= 64 bytes
    Align = std::max(Align, 128u);    // align type at least 16 bytes
  } else if (TypeSize >= 64) {        // TypeSize >= 8 bytes
    Align = std::max(Align, 64u);     // align type at least 8 bytes
  } else if (TypeSize >= 16) {        // TypeSize >= 2 bytes
    Align = std::max(Align, 32u);     // align type at least 4 bytes
  }
  return Align;
}
994 
MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
                                           const TargetOptions &Opts)
    : WindowsARM64TargetInfo(Triple, Opts) {
  // MinGW uses the generic AArch64 C++ ABI rather than Microsoft's.
  TheCXXABI.set(TargetCXXABI::GenericAArch64);
}
1000 
DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
                                                 const TargetOptions &Opts)
    : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
  Int64Type = SignedLongLong;
  // Even with 32-bit pointers (arm64_32), intmax_t stays 64-bit.
  if (getTriple().isArch32Bit())
    IntMaxType = SignedLongLong;

  WCharType = SignedInt;
  UseSignedCharForObjCBool = false;

  // 'long double' is just IEEE double on Darwin AArch64.
  LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
  LongDoubleFormat = &llvm::APFloat::IEEEdouble();

  UseZeroLengthBitfieldAlignment = false;

  if (getTriple().isArch32Bit()) {
    // 32-bit (arm64_32) targets use ARM-style bitfield layout and the
    // WatchOS C++ ABI.
    UseBitFieldTypeAlignment = false;
    ZeroLengthBitfieldBoundary = 32;
    UseZeroLengthBitfieldAlignment = true;
    TheCXXABI.set(TargetCXXABI::WatchOS);
  } else
    TheCXXABI.set(TargetCXXABI::AppleARM64);
}
1024 
1025 void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
1026                                            const llvm::Triple &Triple,
1027                                            MacroBuilder &Builder) const {
1028   Builder.defineMacro("__AARCH64_SIMD__");
1029   if (Triple.isArch32Bit())
1030     Builder.defineMacro("__ARM64_ARCH_8_32__");
1031   else
1032     Builder.defineMacro("__ARM64_ARCH_8__");
1033   Builder.defineMacro("__ARM_NEON__");
1034   Builder.defineMacro("__LITTLE_ENDIAN__");
1035   Builder.defineMacro("__REGISTER_PREFIX__", "");
1036   Builder.defineMacro("__arm64", "1");
1037   Builder.defineMacro("__arm64__", "1");
1038 
1039   if (Triple.isArm64e())
1040     Builder.defineMacro("__arm64e__", "1");
1041 
1042   getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
1043 }
1044 
TargetInfo::BuiltinVaListKind
DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
  // Darwin uses a simple char* va_list.
  return TargetInfo::CharPtrBuiltinVaList;
}
1049 
// 64-bit RenderScript is aarch64
RenderScript64TargetInfo::RenderScript64TargetInfo(const llvm::Triple &Triple,
                                                   const TargetOptions &Opts)
    : AArch64leTargetInfo(llvm::Triple("aarch64", Triple.getVendorName(),
                                       Triple.getOSName(),
                                       Triple.getEnvironmentName()),
                          Opts) {
  // The base class is constructed with the architecture forced to
  // "aarch64" while keeping the original vendor/OS/environment.
  IsRenderScriptTarget = true;
}
1059 
void RenderScript64TargetInfo::getTargetDefines(const LangOptions &Opts,
                                                MacroBuilder &Builder) const {
  // RenderScript-specific macro, then the standard little-endian AArch64
  // predefines.
  Builder.defineMacro("__RENDERSCRIPT__");
  AArch64leTargetInfo::getTargetDefines(Opts, Builder);
}
1065