1 //===-- cpu_model.c - Support for __cpu_model builtin  ------------*- C -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 //  This file is based on LLVM's lib/Support/Host.cpp.
10 //  It implements the operating system Host concept and builtin
11 //  __cpu_model for the compiler_rt library for x86 and
12 //  __aarch64_have_lse_atomics for AArch64.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #ifndef __has_attribute
17 #define __has_attribute(attr) 0
18 #endif
19 
20 #if __has_attribute(constructor)
21 #if __GNUC__ >= 9
22 // Ordinarily init priorities below 101 are disallowed as they are reserved for the
23 // implementation. However, we are the implementation, so silence the diagnostic,
24 // since it doesn't apply to us.
25 #pragma GCC diagnostic ignored "-Wprio-ctor-dtor"
26 #endif
27 // We're choosing init priority 90 to force our constructors to run before any
28 // constructors in the end user application (starting at priority 101). This value
29 // matches the libgcc choice for the same functions.
30 #define CONSTRUCTOR_ATTRIBUTE __attribute__((constructor(90)))
31 #else
32 // FIXME: For MSVC, we should make a function pointer global in .CRT$X?? so that
33 // this runs during initialization.
34 #define CONSTRUCTOR_ATTRIBUTE
35 #endif
36 
37 #if (defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) ||           \
38      defined(_M_X64)) &&                                                       \
39     (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER))
40 
41 #include <assert.h>
42 
43 #define bool int
44 #define true 1
45 #define false 0
46 
47 #ifdef _MSC_VER
48 #include <intrin.h>
49 #endif
50 
// CPUID leaf 0 vendor-identification fragments: the value returned in EBX is
// the first four bytes of the vendor string ("GenuineIntel"/"AuthenticAMD")
// packed as a little-endian 32-bit integer.
enum VendorSignatures {
  SIG_INTEL = 0x756e6547, // Genu
  SIG_AMD = 0x68747541,   // Auth
};
55 
// Values stored in __cpu_model.__cpu_vendor.  0 is deliberately unused: it
// marks "not yet initialized" (see __cpu_indicator_init).
// NOTE(review): these numeric values are presumably part of the
// __builtin_cpu_is ABI shared with libgcc — do not renumber; verify against
// gcc/config/i386/cpuinfo.h before changing.
enum ProcessorVendors {
  VENDOR_INTEL = 1,
  VENDOR_AMD,
  VENDOR_OTHER,
  VENDOR_MAX
};
62 
// Values stored in __cpu_model.__cpu_type.  The ordering looks historical
// rather than logical because entries are appended as CPUs are added.
// NOTE(review): presumably ABI-shared with libgcc's __builtin_cpu_is —
// append only, never reorder; verify against gcc/config/i386/cpuinfo.h.
enum ProcessorTypes {
  INTEL_BONNELL = 1,
  INTEL_CORE2,
  INTEL_COREI7,
  AMDFAM10H,
  AMDFAM15H,
  INTEL_SILVERMONT,
  INTEL_KNL,
  AMD_BTVER1,
  AMD_BTVER2,
  AMDFAM17H,
  INTEL_KNM,
  INTEL_GOLDMONT,
  INTEL_GOLDMONT_PLUS,
  INTEL_TREMONT,
  AMDFAM19H,
  CPU_TYPE_MAX
};
81 
// Values stored in __cpu_model.__cpu_subtype, refining a ProcessorTypes
// entry (e.g. which Core i7 generation, which Bulldozer revision).
// NOTE(review): presumably ABI-shared with libgcc's __builtin_cpu_is —
// append only, never reorder; verify against gcc/config/i386/cpuinfo.h.
enum ProcessorSubtypes {
  INTEL_COREI7_NEHALEM = 1,
  INTEL_COREI7_WESTMERE,
  INTEL_COREI7_SANDYBRIDGE,
  AMDFAM10H_BARCELONA,
  AMDFAM10H_SHANGHAI,
  AMDFAM10H_ISTANBUL,
  AMDFAM15H_BDVER1,
  AMDFAM15H_BDVER2,
  AMDFAM15H_BDVER3,
  AMDFAM15H_BDVER4,
  AMDFAM17H_ZNVER1,
  INTEL_COREI7_IVYBRIDGE,
  INTEL_COREI7_HASWELL,
  INTEL_COREI7_BROADWELL,
  INTEL_COREI7_SKYLAKE,
  INTEL_COREI7_SKYLAKE_AVX512,
  INTEL_COREI7_CANNONLAKE,
  INTEL_COREI7_ICELAKE_CLIENT,
  INTEL_COREI7_ICELAKE_SERVER,
  AMDFAM17H_ZNVER2,
  INTEL_COREI7_CASCADELAKE,
  INTEL_COREI7_TIGERLAKE,
  INTEL_COREI7_COOPERLAKE,
  INTEL_COREI7_SAPPHIRERAPIDS,
  INTEL_COREI7_ALDERLAKE,
  AMDFAM19H_ZNVER3,
  INTEL_COREI7_ROCKETLAKE,
  CPU_SUBTYPE_MAX
};
112 
// Bit indices into the feature bit-vector: values 0-31 land in
// __cpu_model.__cpu_features[0] and values 32+ in __cpu_features2
// (see setFeature in getAvailableFeatures and __cpu_indicator_init).
// NOTE(review): presumably ABI-shared with __builtin_cpu_supports — append
// only, never reorder.
enum ProcessorFeatures {
  FEATURE_CMOV = 0,
  FEATURE_MMX,
  FEATURE_POPCNT,
  FEATURE_SSE,
  FEATURE_SSE2,
  FEATURE_SSE3,
  FEATURE_SSSE3,
  FEATURE_SSE4_1,
  FEATURE_SSE4_2,
  FEATURE_AVX,
  FEATURE_AVX2,
  FEATURE_SSE4_A,
  FEATURE_FMA4,
  FEATURE_XOP,
  FEATURE_FMA,
  FEATURE_AVX512F,
  FEATURE_BMI,
  FEATURE_BMI2,
  FEATURE_AES,
  FEATURE_PCLMUL,
  FEATURE_AVX512VL,
  FEATURE_AVX512BW,
  FEATURE_AVX512DQ,
  FEATURE_AVX512CD,
  FEATURE_AVX512ER,
  FEATURE_AVX512PF,
  FEATURE_AVX512VBMI,
  FEATURE_AVX512IFMA,
  FEATURE_AVX5124VNNIW,
  FEATURE_AVX5124FMAPS,
  FEATURE_AVX512VPOPCNTDQ,
  FEATURE_AVX512VBMI2,
  FEATURE_GFNI,
  FEATURE_VPCLMULQDQ,
  FEATURE_AVX512VNNI,
  FEATURE_AVX512BITALG,
  FEATURE_AVX512BF16,
  FEATURE_AVX512VP2INTERSECT,
  CPU_FEATURE_MAX
};
154 
// Returns true if the CPUID instruction can be executed on this CPU.
//
// The i386 check below was copied from clang's cpuid.h (__get_cpuid_max),
// motivated by bug reports of OpenSSL crashing on CPUs without CPUID
// support: bit 21 (ID) of EFLAGS is toggled, and CPUID is present exactly
// when the change sticks.  On x86-64 CPUID always exists, and for non-GNU
// compilers we simply assume it is available.
static bool isCpuIdSupported(void) {
#if defined(__GNUC__) || defined(__clang__)
#if defined(__i386__)
  int cpuid_supported;
  __asm__("  pushfl\n"
          "  popl   %%eax\n"
          "  movl   %%eax,%%ecx\n"
          "  xorl   $0x00200000,%%eax\n"
          "  pushl  %%eax\n"
          "  popfl\n"
          "  pushfl\n"
          "  popl   %%eax\n"
          "  movl   $0,%0\n"
          "  cmpl   %%eax,%%ecx\n"
          "  je     1f\n"
          "  movl   $1,%0\n"
          "1:"
          : "=r"(cpuid_supported)
          :
          : "eax", "ecx");
  if (!cpuid_supported)
    return false;
#endif
#endif
  return true;
}
186 
// This code is copied from lib/Support/Host.cpp.
// Changes to either file should be mirrored in the other.

/// getX86CpuIDAndInfo - Execute the specified cpuid leaf \p value and return
/// the four result registers through \p rEAX, \p rEBX, \p rECX, \p rEDX.
/// Returns true if cpuid cannot be run on this host (outputs untouched),
/// false on success.
static bool getX86CpuIDAndInfo(unsigned value, unsigned *rEAX, unsigned *rEBX,
                               unsigned *rECX, unsigned *rEDX) {
#if defined(__GNUC__) || defined(__clang__)
#if defined(__x86_64__)
  // gcc doesn't know cpuid would clobber ebx/rbx. Preserve it manually.
  // FIXME: should we save this for Clang?
  __asm__("movq\t%%rbx, %%rsi\n\t"
          "cpuid\n\t"
          "xchgq\t%%rbx, %%rsi\n\t"
          : "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX)
          : "a"(value));
  return false;
#elif defined(__i386__)
  __asm__("movl\t%%ebx, %%esi\n\t"
          "cpuid\n\t"
          "xchgl\t%%ebx, %%esi\n\t"
          : "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX)
          : "a"(value));
  return false;
#else
  return true;
#endif
#elif defined(_MSC_VER)
  // The MSVC intrinsic is portable across x86 and x64.
  int registers[4];
  __cpuid(registers, value);
  *rEAX = registers[0];
  *rEBX = registers[1];
  *rECX = registers[2];
  *rEDX = registers[3];
  return false;
#else
  return true;
#endif
}
227 
/// getX86CpuIDAndInfoEx - Execute the specified cpuid leaf \p value with
/// sub-leaf \p subleaf and return the four result registers through
/// \p rEAX, \p rEBX, \p rECX, \p rEDX.  Returns true if cpuid cannot be run
/// on this host (outputs untouched), false on success.
static bool getX86CpuIDAndInfoEx(unsigned value, unsigned subleaf,
                                 unsigned *rEAX, unsigned *rEBX, unsigned *rECX,
                                 unsigned *rEDX) {
#if defined(__GNUC__) || defined(__clang__)
#if defined(__x86_64__)
  // gcc doesn't know cpuid would clobber ebx/rbx. Preserve it manually.
  // FIXME: should we save this for Clang?
  __asm__("movq\t%%rbx, %%rsi\n\t"
          "cpuid\n\t"
          "xchgq\t%%rbx, %%rsi\n\t"
          : "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX)
          : "a"(value), "c"(subleaf));
  return false;
#elif defined(__i386__)
  __asm__("movl\t%%ebx, %%esi\n\t"
          "cpuid\n\t"
          "xchgl\t%%ebx, %%esi\n\t"
          : "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX)
          : "a"(value), "c"(subleaf));
  return false;
#else
  return true;
#endif
#elif defined(_MSC_VER)
  int registers[4];
  __cpuidex(registers, value, subleaf);
  *rEAX = registers[0];
  *rEBX = registers[1];
  *rECX = registers[2];
  *rEDX = registers[3];
  return false;
#else
  return true;
#endif
}
266 
// Read extended control register 0 (XCR0) via XGETBV; used to verify that
// the OS saves/restores AVX (and AVX-512) state on context switch.  The low
// 32 bits are returned in *rEAX and the high 32 bits in *rEDX.  Returns true
// if XCR0 cannot be read with this compiler (outputs untouched), false on
// success.  Callers must have already checked the CPUID OSXSAVE bit before
// invoking this, or XGETBV will fault.
static bool getX86XCR0(unsigned *rEAX, unsigned *rEDX) {
#if defined(__GNUC__) || defined(__clang__)
  // Check xgetbv; this uses a .byte sequence instead of the instruction
  // directly because older assemblers do not include support for xgetbv and
  // there is no easy way to conditionally compile based on the assembler used.
  __asm__(".byte 0x0f, 0x01, 0xd0" : "=a"(*rEAX), "=d"(*rEDX) : "c"(0));
  return false;
#elif defined(_MSC_FULL_VER) && defined(_XCR_XFEATURE_ENABLED_MASK)
  unsigned long long Result = _xgetbv(_XCR_XFEATURE_ENABLED_MASK);
  *rEAX = Result;
  *rEDX = Result >> 32;
  return false;
#else
  return true;
#endif
}
284 
// Decode the display Family and Model from the CPUID leaf 1 EAX value,
// following the Intel SDM algorithm: the extended family field is added in
// only when the base family is 0xF, and the extended model field is folded
// into the high nibble of the model only for families 6 and 0xF.
static void detectX86FamilyModel(unsigned EAX, unsigned *Family,
                                 unsigned *Model) {
  *Family = (EAX >> 8) & 0xf; // Bits 8 - 11
  *Model = (EAX >> 4) & 0xf;  // Bits 4 - 7
  if (*Family == 6 || *Family == 0xf) {
    if (*Family == 0xf)
      // Examine extended family ID if family ID is F.
      *Family += (EAX >> 20) & 0xff; // Bits 20 - 27
    // Examine extended model ID if family ID is 6 or F.
    *Model += ((EAX >> 16) & 0xf) << 4; // Bits 16 - 19
  }
}
297 
298 static const char *
getIntelProcessorTypeAndSubtype(unsigned Family,unsigned Model,const unsigned * Features,unsigned * Type,unsigned * Subtype)299 getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model,
300                                 const unsigned *Features,
301                                 unsigned *Type, unsigned *Subtype) {
302 #define testFeature(F)                                                         \
303   (Features[F / 32] & (1 << (F % 32))) != 0
304 
305   // We select CPU strings to match the code in Host.cpp, but we don't use them
306   // in compiler-rt.
307   const char *CPU = 0;
308 
309   switch (Family) {
310   case 6:
311     switch (Model) {
312     case 0x0f: // Intel Core 2 Duo processor, Intel Core 2 Duo mobile
313                // processor, Intel Core 2 Quad processor, Intel Core 2 Quad
314                // mobile processor, Intel Core 2 Extreme processor, Intel
315                // Pentium Dual-Core processor, Intel Xeon processor, model
316                // 0Fh. All processors are manufactured using the 65 nm process.
317     case 0x16: // Intel Celeron processor model 16h. All processors are
318                // manufactured using the 65 nm process
319       CPU = "core2";
320       *Type = INTEL_CORE2;
321       break;
322     case 0x17: // Intel Core 2 Extreme processor, Intel Xeon processor, model
323                // 17h. All processors are manufactured using the 45 nm process.
324                //
325                // 45nm: Penryn , Wolfdale, Yorkfield (XE)
326     case 0x1d: // Intel Xeon processor MP. All processors are manufactured using
327                // the 45 nm process.
328       CPU = "penryn";
329       *Type = INTEL_CORE2;
330       break;
331     case 0x1a: // Intel Core i7 processor and Intel Xeon processor. All
332                // processors are manufactured using the 45 nm process.
333     case 0x1e: // Intel(R) Core(TM) i7 CPU         870  @ 2.93GHz.
334                // As found in a Summer 2010 model iMac.
335     case 0x1f:
336     case 0x2e:              // Nehalem EX
337       CPU = "nehalem";
338       *Type = INTEL_COREI7;
339       *Subtype = INTEL_COREI7_NEHALEM;
340       break;
341     case 0x25: // Intel Core i7, laptop version.
342     case 0x2c: // Intel Core i7 processor and Intel Xeon processor. All
343                // processors are manufactured using the 32 nm process.
344     case 0x2f: // Westmere EX
345       CPU = "westmere";
346       *Type = INTEL_COREI7;
347       *Subtype = INTEL_COREI7_WESTMERE;
348       break;
349     case 0x2a: // Intel Core i7 processor. All processors are manufactured
350                // using the 32 nm process.
351     case 0x2d:
352       CPU = "sandybridge";
353       *Type = INTEL_COREI7;
354       *Subtype = INTEL_COREI7_SANDYBRIDGE;
355       break;
356     case 0x3a:
357     case 0x3e:              // Ivy Bridge EP
358       CPU = "ivybridge";
359       *Type = INTEL_COREI7;
360       *Subtype = INTEL_COREI7_IVYBRIDGE;
361       break;
362 
363     // Haswell:
364     case 0x3c:
365     case 0x3f:
366     case 0x45:
367     case 0x46:
368       CPU = "haswell";
369       *Type = INTEL_COREI7;
370       *Subtype = INTEL_COREI7_HASWELL;
371       break;
372 
373     // Broadwell:
374     case 0x3d:
375     case 0x47:
376     case 0x4f:
377     case 0x56:
378       CPU = "broadwell";
379       *Type = INTEL_COREI7;
380       *Subtype = INTEL_COREI7_BROADWELL;
381       break;
382 
383     // Skylake:
384     case 0x4e:              // Skylake mobile
385     case 0x5e:              // Skylake desktop
386     case 0x8e:              // Kaby Lake mobile
387     case 0x9e:              // Kaby Lake desktop
388     case 0xa5:              // Comet Lake-H/S
389     case 0xa6:              // Comet Lake-U
390       CPU = "skylake";
391       *Type = INTEL_COREI7;
392       *Subtype = INTEL_COREI7_SKYLAKE;
393       break;
394 
395     // Rocketlake:
396     case 0xa7:
397       CPU = "rocketlake";
398       *Type = INTEL_COREI7;
399       *Subtype = INTEL_COREI7_ROCKETLAKE;
400       break;
401 
402     // Skylake Xeon:
403     case 0x55:
404       *Type = INTEL_COREI7;
405       if (testFeature(FEATURE_AVX512BF16)) {
406         CPU = "cooperlake";
407         *Subtype = INTEL_COREI7_COOPERLAKE;
408       } else if (testFeature(FEATURE_AVX512VNNI)) {
409         CPU = "cascadelake";
410         *Subtype = INTEL_COREI7_CASCADELAKE;
411       } else {
412         CPU = "skylake-avx512";
413         *Subtype = INTEL_COREI7_SKYLAKE_AVX512;
414       }
415       break;
416 
417     // Cannonlake:
418     case 0x66:
419       CPU = "cannonlake";
420       *Type = INTEL_COREI7;
421       *Subtype = INTEL_COREI7_CANNONLAKE;
422       break;
423 
424     // Icelake:
425     case 0x7d:
426     case 0x7e:
427       CPU = "icelake-client";
428       *Type = INTEL_COREI7;
429       *Subtype = INTEL_COREI7_ICELAKE_CLIENT;
430       break;
431 
432     // Tigerlake:
433     case 0x8c:
434     case 0x8d:
435       CPU = "tigerlake";
436       *Type = INTEL_COREI7;
437       *Subtype = INTEL_COREI7_TIGERLAKE;
438       break;
439 
440     // Alderlake:
441     case 0x97:
442     case 0x9a:
443       CPU = "alderlake";
444       *Type = INTEL_COREI7;
445       *Subtype = INTEL_COREI7_ALDERLAKE;
446       break;
447 
448     // Icelake Xeon:
449     case 0x6a:
450     case 0x6c:
451       CPU = "icelake-server";
452       *Type = INTEL_COREI7;
453       *Subtype = INTEL_COREI7_ICELAKE_SERVER;
454       break;
455 
456     // Sapphire Rapids:
457     case 0x8f:
458       CPU = "sapphirerapids";
459       *Type = INTEL_COREI7;
460       *Subtype = INTEL_COREI7_SAPPHIRERAPIDS;
461       break;
462 
463     case 0x1c: // Most 45 nm Intel Atom processors
464     case 0x26: // 45 nm Atom Lincroft
465     case 0x27: // 32 nm Atom Medfield
466     case 0x35: // 32 nm Atom Midview
467     case 0x36: // 32 nm Atom Midview
468       CPU = "bonnell";
469       *Type = INTEL_BONNELL;
470       break;
471 
472     // Atom Silvermont codes from the Intel software optimization guide.
473     case 0x37:
474     case 0x4a:
475     case 0x4d:
476     case 0x5a:
477     case 0x5d:
478     case 0x4c: // really airmont
479       CPU = "silvermont";
480       *Type = INTEL_SILVERMONT;
481       break;
482     // Goldmont:
483     case 0x5c: // Apollo Lake
484     case 0x5f: // Denverton
485       CPU = "goldmont";
486       *Type = INTEL_GOLDMONT;
487       break; // "goldmont"
488     case 0x7a:
489       CPU = "goldmont-plus";
490       *Type = INTEL_GOLDMONT_PLUS;
491       break;
492     case 0x86:
493       CPU = "tremont";
494       *Type = INTEL_TREMONT;
495       break;
496 
497     case 0x57:
498       CPU = "knl";
499       *Type = INTEL_KNL;
500       break;
501 
502     case 0x85:
503       CPU = "knm";
504       *Type = INTEL_KNM;
505       break;
506 
507     default: // Unknown family 6 CPU.
508       break;
509     }
510     break;
511   default:
512     break; // Unknown.
513   }
514 
515   return CPU;
516 }
517 
518 static const char *
getAMDProcessorTypeAndSubtype(unsigned Family,unsigned Model,const unsigned * Features,unsigned * Type,unsigned * Subtype)519 getAMDProcessorTypeAndSubtype(unsigned Family, unsigned Model,
520                               const unsigned *Features,
521                               unsigned *Type, unsigned *Subtype) {
522   // We select CPU strings to match the code in Host.cpp, but we don't use them
523   // in compiler-rt.
524   const char *CPU = 0;
525 
526   switch (Family) {
527   case 16:
528     CPU = "amdfam10";
529     *Type = AMDFAM10H;
530     switch (Model) {
531     case 2:
532       *Subtype = AMDFAM10H_BARCELONA;
533       break;
534     case 4:
535       *Subtype = AMDFAM10H_SHANGHAI;
536       break;
537     case 8:
538       *Subtype = AMDFAM10H_ISTANBUL;
539       break;
540     }
541     break;
542   case 20:
543     CPU = "btver1";
544     *Type = AMD_BTVER1;
545     break;
546   case 21:
547     CPU = "bdver1";
548     *Type = AMDFAM15H;
549     if (Model >= 0x60 && Model <= 0x7f) {
550       CPU = "bdver4";
551       *Subtype = AMDFAM15H_BDVER4;
552       break; // 60h-7Fh: Excavator
553     }
554     if (Model >= 0x30 && Model <= 0x3f) {
555       CPU = "bdver3";
556       *Subtype = AMDFAM15H_BDVER3;
557       break; // 30h-3Fh: Steamroller
558     }
559     if ((Model >= 0x10 && Model <= 0x1f) || Model == 0x02) {
560       CPU = "bdver2";
561       *Subtype = AMDFAM15H_BDVER2;
562       break; // 02h, 10h-1Fh: Piledriver
563     }
564     if (Model <= 0x0f) {
565       *Subtype = AMDFAM15H_BDVER1;
566       break; // 00h-0Fh: Bulldozer
567     }
568     break;
569   case 22:
570     CPU = "btver2";
571     *Type = AMD_BTVER2;
572     break;
573   case 23:
574     CPU = "znver1";
575     *Type = AMDFAM17H;
576     if ((Model >= 0x30 && Model <= 0x3f) || Model == 0x71) {
577       CPU = "znver2";
578       *Subtype = AMDFAM17H_ZNVER2;
579       break; // 30h-3fh, 71h: Zen2
580     }
581     if (Model <= 0x0f) {
582       *Subtype = AMDFAM17H_ZNVER1;
583       break; // 00h-0Fh: Zen1
584     }
585     break;
586   case 25:
587     CPU = "znver3";
588     *Type = AMDFAM19H;
589     if (Model <= 0x0f || Model == 0x21) {
590       *Subtype = AMDFAM19H_ZNVER3;
591       break; // 00h-0Fh, 21h: Zen3
592     }
593     break;
594   default:
595     break; // Unknown AMD CPU.
596   }
597 
598   return CPU;
599 }
600 
getAvailableFeatures(unsigned ECX,unsigned EDX,unsigned MaxLeaf,unsigned * Features)601 static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf,
602                                  unsigned *Features) {
603   unsigned EAX, EBX;
604 
605 #define setFeature(F)                                                          \
606   Features[F / 32] |= 1U << (F % 32)
607 
608   if ((EDX >> 15) & 1)
609     setFeature(FEATURE_CMOV);
610   if ((EDX >> 23) & 1)
611     setFeature(FEATURE_MMX);
612   if ((EDX >> 25) & 1)
613     setFeature(FEATURE_SSE);
614   if ((EDX >> 26) & 1)
615     setFeature(FEATURE_SSE2);
616 
617   if ((ECX >> 0) & 1)
618     setFeature(FEATURE_SSE3);
619   if ((ECX >> 1) & 1)
620     setFeature(FEATURE_PCLMUL);
621   if ((ECX >> 9) & 1)
622     setFeature(FEATURE_SSSE3);
623   if ((ECX >> 12) & 1)
624     setFeature(FEATURE_FMA);
625   if ((ECX >> 19) & 1)
626     setFeature(FEATURE_SSE4_1);
627   if ((ECX >> 20) & 1)
628     setFeature(FEATURE_SSE4_2);
629   if ((ECX >> 23) & 1)
630     setFeature(FEATURE_POPCNT);
631   if ((ECX >> 25) & 1)
632     setFeature(FEATURE_AES);
633 
634   // If CPUID indicates support for XSAVE, XRESTORE and AVX, and XGETBV
635   // indicates that the AVX registers will be saved and restored on context
636   // switch, then we have full AVX support.
637   const unsigned AVXBits = (1 << 27) | (1 << 28);
638   bool HasAVX = ((ECX & AVXBits) == AVXBits) && !getX86XCR0(&EAX, &EDX) &&
639                 ((EAX & 0x6) == 0x6);
640 #if defined(__APPLE__)
641   // Darwin lazily saves the AVX512 context on first use: trust that the OS will
642   // save the AVX512 context if we use AVX512 instructions, even the bit is not
643   // set right now.
644   bool HasAVX512Save = true;
645 #else
646   // AVX512 requires additional context to be saved by the OS.
647   bool HasAVX512Save = HasAVX && ((EAX & 0xe0) == 0xe0);
648 #endif
649 
650   if (HasAVX)
651     setFeature(FEATURE_AVX);
652 
653   bool HasLeaf7 =
654       MaxLeaf >= 0x7 && !getX86CpuIDAndInfoEx(0x7, 0x0, &EAX, &EBX, &ECX, &EDX);
655 
656   if (HasLeaf7 && ((EBX >> 3) & 1))
657     setFeature(FEATURE_BMI);
658   if (HasLeaf7 && ((EBX >> 5) & 1) && HasAVX)
659     setFeature(FEATURE_AVX2);
660   if (HasLeaf7 && ((EBX >> 8) & 1))
661     setFeature(FEATURE_BMI2);
662   if (HasLeaf7 && ((EBX >> 16) & 1) && HasAVX512Save)
663     setFeature(FEATURE_AVX512F);
664   if (HasLeaf7 && ((EBX >> 17) & 1) && HasAVX512Save)
665     setFeature(FEATURE_AVX512DQ);
666   if (HasLeaf7 && ((EBX >> 21) & 1) && HasAVX512Save)
667     setFeature(FEATURE_AVX512IFMA);
668   if (HasLeaf7 && ((EBX >> 26) & 1) && HasAVX512Save)
669     setFeature(FEATURE_AVX512PF);
670   if (HasLeaf7 && ((EBX >> 27) & 1) && HasAVX512Save)
671     setFeature(FEATURE_AVX512ER);
672   if (HasLeaf7 && ((EBX >> 28) & 1) && HasAVX512Save)
673     setFeature(FEATURE_AVX512CD);
674   if (HasLeaf7 && ((EBX >> 30) & 1) && HasAVX512Save)
675     setFeature(FEATURE_AVX512BW);
676   if (HasLeaf7 && ((EBX >> 31) & 1) && HasAVX512Save)
677     setFeature(FEATURE_AVX512VL);
678 
679   if (HasLeaf7 && ((ECX >> 1) & 1) && HasAVX512Save)
680     setFeature(FEATURE_AVX512VBMI);
681   if (HasLeaf7 && ((ECX >> 6) & 1) && HasAVX512Save)
682     setFeature(FEATURE_AVX512VBMI2);
683   if (HasLeaf7 && ((ECX >> 8) & 1))
684     setFeature(FEATURE_GFNI);
685   if (HasLeaf7 && ((ECX >> 10) & 1) && HasAVX)
686     setFeature(FEATURE_VPCLMULQDQ);
687   if (HasLeaf7 && ((ECX >> 11) & 1) && HasAVX512Save)
688     setFeature(FEATURE_AVX512VNNI);
689   if (HasLeaf7 && ((ECX >> 12) & 1) && HasAVX512Save)
690     setFeature(FEATURE_AVX512BITALG);
691   if (HasLeaf7 && ((ECX >> 14) & 1) && HasAVX512Save)
692     setFeature(FEATURE_AVX512VPOPCNTDQ);
693 
694   if (HasLeaf7 && ((EDX >> 2) & 1) && HasAVX512Save)
695     setFeature(FEATURE_AVX5124VNNIW);
696   if (HasLeaf7 && ((EDX >> 3) & 1) && HasAVX512Save)
697     setFeature(FEATURE_AVX5124FMAPS);
698   if (HasLeaf7 && ((EDX >> 8) & 1) && HasAVX512Save)
699     setFeature(FEATURE_AVX512VP2INTERSECT);
700 
701   bool HasLeaf7Subleaf1 =
702       MaxLeaf >= 0x7 && !getX86CpuIDAndInfoEx(0x7, 0x1, &EAX, &EBX, &ECX, &EDX);
703   if (HasLeaf7Subleaf1 && ((EAX >> 5) & 1) && HasAVX512Save)
704     setFeature(FEATURE_AVX512BF16);
705 
706   unsigned MaxExtLevel;
707   getX86CpuIDAndInfo(0x80000000, &MaxExtLevel, &EBX, &ECX, &EDX);
708 
709   bool HasExtLeaf1 = MaxExtLevel >= 0x80000001 &&
710                      !getX86CpuIDAndInfo(0x80000001, &EAX, &EBX, &ECX, &EDX);
711   if (HasExtLeaf1 && ((ECX >> 6) & 1))
712     setFeature(FEATURE_SSE4_A);
713   if (HasExtLeaf1 && ((ECX >> 11) & 1))
714     setFeature(FEATURE_XOP);
715   if (HasExtLeaf1 && ((ECX >> 16) & 1))
716     setFeature(FEATURE_FMA4);
717 #undef setFeature
718 }
719 
#ifndef _WIN32
__attribute__((visibility("hidden")))
#endif
// Declared ahead of the definition so the constructor priority and hidden
// visibility are attached to the symbol.
int __cpu_indicator_init(void) CONSTRUCTOR_ATTRIBUTE;

#ifndef _WIN32
__attribute__((visibility("hidden")))
#endif
// Vendor, type, subtype and the first 32 feature bits filled in by
// __cpu_indicator_init.  __cpu_vendor == 0 doubles as the "not yet
// initialized" marker (see the early-return in __cpu_indicator_init).
// NOTE(review): layout is presumably the ABI consumed by GCC/Clang's
// __builtin_cpu_is / __builtin_cpu_supports — verify before changing.
struct __processor_model {
  unsigned int __cpu_vendor;
  unsigned int __cpu_type;
  unsigned int __cpu_subtype;
  unsigned int __cpu_features[1];
} __cpu_model = {0, 0, 0, {0}};

#ifndef _WIN32
__attribute__((visibility("hidden")))
#endif
// Feature bits 32 and up (enum ProcessorFeatures values >= 32); filled from
// Features[1] in __cpu_indicator_init.
unsigned int __cpu_features2 = 0;
739 
740 // A constructor function that is sets __cpu_model and __cpu_features2 with
741 // the right values.  This needs to run only once.  This constructor is
742 // given the highest priority and it should run before constructors without
743 // the priority set.  However, it still runs after ifunc initializers and
744 // needs to be called explicitly there.
745 
__cpu_indicator_init(void)746 int CONSTRUCTOR_ATTRIBUTE __cpu_indicator_init(void) {
747   unsigned EAX, EBX, ECX, EDX;
748   unsigned MaxLeaf = 5;
749   unsigned Vendor;
750   unsigned Model, Family;
751   unsigned Features[(CPU_FEATURE_MAX + 31) / 32] = {0};
752 
753   // This function needs to run just once.
754   if (__cpu_model.__cpu_vendor)
755     return 0;
756 
757   if (!isCpuIdSupported() ||
758       getX86CpuIDAndInfo(0, &MaxLeaf, &Vendor, &ECX, &EDX) || MaxLeaf < 1) {
759     __cpu_model.__cpu_vendor = VENDOR_OTHER;
760     return -1;
761   }
762 
763   getX86CpuIDAndInfo(1, &EAX, &EBX, &ECX, &EDX);
764   detectX86FamilyModel(EAX, &Family, &Model);
765 
766   // Find available features.
767   getAvailableFeatures(ECX, EDX, MaxLeaf, &Features[0]);
768 
769   assert((sizeof(Features)/sizeof(Features[0])) == 2);
770   __cpu_model.__cpu_features[0] = Features[0];
771   __cpu_features2 = Features[1];
772 
773   if (Vendor == SIG_INTEL) {
774     // Get CPU type.
775     getIntelProcessorTypeAndSubtype(Family, Model, &Features[0],
776                                     &(__cpu_model.__cpu_type),
777                                     &(__cpu_model.__cpu_subtype));
778     __cpu_model.__cpu_vendor = VENDOR_INTEL;
779   } else if (Vendor == SIG_AMD) {
780     // Get CPU type.
781     getAMDProcessorTypeAndSubtype(Family, Model, &Features[0],
782                                   &(__cpu_model.__cpu_type),
783                                   &(__cpu_model.__cpu_subtype));
784     __cpu_model.__cpu_vendor = VENDOR_AMD;
785   } else
786     __cpu_model.__cpu_vendor = VENDOR_OTHER;
787 
788   assert(__cpu_model.__cpu_vendor < VENDOR_MAX);
789   assert(__cpu_model.__cpu_type < CPU_TYPE_MAX);
790   assert(__cpu_model.__cpu_subtype < CPU_SUBTYPE_MAX);
791 
792   return 0;
793 }
#elif defined(__aarch64__)
// LSE support detection for out-of-line atomics
// using HWCAP and Auxiliary vector

// Flag read by the __aarch64_* out-of-line atomic helpers: nonzero when the
// CPU implements the LSE atomic instructions.  Set once by
// init_have_lse_atomics below.
_Bool __aarch64_have_lse_atomics
    __attribute__((visibility("hidden"), nocommon));
#if defined(__has_include)
#if __has_include(<sys/auxv.h>)
#include <sys/auxv.h>
// Fallbacks for libcs whose <sys/auxv.h> predates these constants.
#ifndef AT_HWCAP
#define AT_HWCAP 16
#endif
#ifndef HWCAP_ATOMICS
#define HWCAP_ATOMICS (1 << 8)
#endif
#if defined(__ANDROID__)
#include <string.h>
#include <sys/system_properties.h>
#elif defined(__Fuchsia__)
#include <zircon/features.h>
#include <zircon/syscalls.h>
#endif
init_have_lse_atomics(void)815 static void CONSTRUCTOR_ATTRIBUTE init_have_lse_atomics(void) {
816 #if defined(__FreeBSD__)
817   unsigned long hwcap;
818   int result = elf_aux_info(AT_HWCAP, &hwcap, sizeof hwcap);
819   __aarch64_have_lse_atomics = result == 0 && (hwcap & HWCAP_ATOMICS) != 0;
820 #elif defined(__Fuchsia__)
821   // This ensures the vDSO is a direct link-time dependency of anything that
822   // needs this initializer code.
823 #pragma comment(lib, "zircon")
824   uint32_t features;
825   zx_status_t status = _zx_system_get_features(ZX_FEATURE_KIND_CPU, &features);
826   __aarch64_have_lse_atomics =
827       status == ZX_OK && (features & ZX_ARM64_FEATURE_ISA_ATOMICS) != 0;
828 #else
829   unsigned long hwcap = getauxval(AT_HWCAP);
830   _Bool result = (hwcap & HWCAP_ATOMICS) != 0;
831 #if defined(__ANDROID__)
832   if (result) {
833     char arch[PROP_VALUE_MAX];
834     if (__system_property_get("ro.arch", arch) > 0 &&
835         strncmp(arch, "exynos9810", sizeof("exynos9810") - 1) == 0) {
836       // Some cores in the Exynos 9810 CPU are ARMv8.2 and others are ARMv8.0;
837       // only the former support LSE atomics.  However, the kernel in the
838       // initial Android 8.0 release of Galaxy S9/S9+ devices incorrectly
839       // reported the feature as being supported.
840       //
841       // The kernel appears to have been corrected to mark it unsupported as of
842       // the Android 9.0 release on those devices, and this issue has not been
843       // observed anywhere else. Thus, this workaround may be removed if
844       // compiler-rt ever drops support for Android 8.0.
845       result = false;
846     }
847   }
848 #endif // defined(__ANDROID__)
849   __aarch64_have_lse_atomics = result;
850 #endif // defined(__FreeBSD__)
851 }
#endif // __has_include(<sys/auxv.h>)
#endif // defined(__has_include)
854 #endif // defined(__aarch64__)
855