xref: /f-stack/freebsd/arm64/arm64/identcpu.c (revision 22ce4aff)
1 /*-
2  * Copyright (c) 2014 Andrew Turner
3  * Copyright (c) 2014 The FreeBSD Foundation
4  * All rights reserved.
5  *
6  * Portions of this software were developed by Semihalf
7  * under sponsorship of the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/pcpu.h>
38 #include <sys/sbuf.h>
39 #include <sys/smp.h>
40 #include <sys/sysctl.h>
41 #include <sys/systm.h>
42 
43 #include <machine/atomic.h>
44 #include <machine/cpu.h>
45 #include <machine/cpufunc.h>
46 #include <machine/elf.h>
47 #include <machine/md_var.h>
48 #include <machine/undefined.h>
49 
50 static void print_cpu_features(u_int cpu);
51 static u_long parse_cpu_features_hwcap(void);
52 static u_long parse_cpu_features_hwcap2(void);
53 
54 char machine[] = "arm64";
55 
56 #ifdef SCTL_MASK32
57 extern int adaptive_machine_arch;
58 #endif
59 
60 static SYSCTL_NODE(_machdep, OID_AUTO, cache, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
61     "Cache management tuning");
62 
63 static int allow_dic = 1;
64 SYSCTL_INT(_machdep_cache, OID_AUTO, allow_dic, CTLFLAG_RDTUN, &allow_dic, 0,
65     "Allow optimizations based on the DIC cache bit");
66 
67 static int allow_idc = 1;
68 SYSCTL_INT(_machdep_cache, OID_AUTO, allow_idc, CTLFLAG_RDTUN, &allow_idc, 0,
69     "Allow optimizations based on the IDC cache bit");
70 
71 static void check_cpu_regs(u_int cpu);
72 
73 /*
74  * The default implementation of I-cache sync assumes we have an
75  * aliasing cache until we know otherwise.
76  */
77 void (*arm64_icache_sync_range)(vm_offset_t, vm_size_t) =
78     &arm64_aliasing_icache_sync_range;
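/*
 * identify_cpu_sysinit() below switches this pointer to
 * arm64_dic_idc_icache_sync_range once every CPU has been seen to set
 * both the DIC and IDC bits in CTR_EL0.
 */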
79 
80 static int
81 sysctl_hw_machine(SYSCTL_HANDLER_ARGS)
82 {
83 #ifdef SCTL_MASK32
84 	static const char machine32[] = "arm";
85 #endif
86 	int error;
87 
88 #ifdef SCTL_MASK32
89 	if ((req->flags & SCTL_MASK32) != 0 && adaptive_machine_arch)
90 		error = SYSCTL_OUT(req, machine32, sizeof(machine32));
91 	else
92 #endif
93 		error = SYSCTL_OUT(req, machine, sizeof(machine));
94 	return (error);
95 }
96 
97 SYSCTL_PROC(_hw, HW_MACHINE, machine, CTLTYPE_STRING | CTLFLAG_RD |
98 	CTLFLAG_MPSAFE, NULL, 0, sysctl_hw_machine, "A", "Machine class");
99 
100 static char cpu_model[64];
101 SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD,
102 	cpu_model, sizeof(cpu_model), "Machine model");
103 
104 /*
105  * Per-CPU affinity as provided in MPIDR_EL1
106  * Indexed by CPU number in logical order selected by the system.
107  * Relevant fields can be extracted using the CPU_AFFn macros;
108  * Aff3.Aff2.Aff1.Aff0 together form a unique CPU address in the system.
109  *
110  * Fields used by us:
111  * Aff1 - Cluster number
112  * Aff0 - CPU number in Aff1 cluster
113  */
114 uint64_t __cpu_affinity[MAXCPU];
115 static u_int cpu_aff_levels;
116 
117 struct cpu_desc {
118 	u_int		cpu_impl;
119 	u_int		cpu_part_num;
120 	u_int		cpu_variant;
121 	u_int		cpu_revision;
122 	const char	*cpu_impl_name;
123 	const char	*cpu_part_name;
124 
125 	uint64_t	mpidr;
126 	uint64_t	id_aa64afr0;
127 	uint64_t	id_aa64afr1;
128 	uint64_t	id_aa64dfr0;
129 	uint64_t	id_aa64dfr1;
130 	uint64_t	id_aa64isar0;
131 	uint64_t	id_aa64isar1;
132 	uint64_t	id_aa64mmfr0;
133 	uint64_t	id_aa64mmfr1;
134 	uint64_t	id_aa64mmfr2;
135 	uint64_t	id_aa64pfr0;
136 	uint64_t	id_aa64pfr1;
137 	uint64_t	ctr;
138 };
139 
140 static struct cpu_desc cpu_desc[MAXCPU];
141 static struct cpu_desc kern_cpu_desc;
142 static struct cpu_desc user_cpu_desc;
143 static u_int cpu_print_regs;
144 #define	PRINT_ID_AA64_AFR0	0x00000001
145 #define	PRINT_ID_AA64_AFR1	0x00000002
146 #define	PRINT_ID_AA64_DFR0	0x00000010
147 #define	PRINT_ID_AA64_DFR1	0x00000020
148 #define	PRINT_ID_AA64_ISAR0	0x00000100
149 #define	PRINT_ID_AA64_ISAR1	0x00000200
150 #define	PRINT_ID_AA64_MMFR0	0x00001000
151 #define	PRINT_ID_AA64_MMFR1	0x00002000
152 #define	PRINT_ID_AA64_MMFR2	0x00004000
153 #define	PRINT_ID_AA64_PFR0	0x00010000
154 #define	PRINT_ID_AA64_PFR1	0x00020000
155 #define	PRINT_CTR_EL0		0x10000000
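/*
 * check_cpu_regs() sets these PRINT_* bits when a secondary CPU's copy
 * of the corresponding ID register differs from CPU 0's, causing
 * print_cpu_features() to print that register for every CPU.
 */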
156 
157 struct cpu_parts {
158 	u_int		part_id;
159 	const char	*part_name;
160 };
161 #define	CPU_PART_NONE	{ 0, "Unknown Processor" }
162 
163 struct cpu_implementers {
164 	u_int			impl_id;
165 	const char		*impl_name;
166 	/*
167 	 * Part number is implementation defined
168 	 * so each vendor will have its own set of values and names.
169 	 */
170 	const struct cpu_parts	*cpu_parts;
171 };
172 #define	CPU_IMPLEMENTER_NONE	{ 0, "Unknown Implementer", cpu_parts_none }
173 
174 /*
175  * Per-implementer table of (PartNum, CPU Name) pairs.
176  */
177 /* ARM Ltd. */
178 static const struct cpu_parts cpu_parts_arm[] = {
179 	{ CPU_PART_FOUNDATION, "Foundation-Model" },
180 	{ CPU_PART_CORTEX_A35, "Cortex-A35" },
181 	{ CPU_PART_CORTEX_A53, "Cortex-A53" },
182 	{ CPU_PART_CORTEX_A55, "Cortex-A55" },
183 	{ CPU_PART_CORTEX_A57, "Cortex-A57" },
184 	{ CPU_PART_CORTEX_A65, "Cortex-A65" },
185 	{ CPU_PART_CORTEX_A72, "Cortex-A72" },
186 	{ CPU_PART_CORTEX_A73, "Cortex-A73" },
187 	{ CPU_PART_CORTEX_A75, "Cortex-A75" },
188 	{ CPU_PART_CORTEX_A76, "Cortex-A76" },
189 	{ CPU_PART_CORTEX_A76AE, "Cortex-A76AE" },
190 	{ CPU_PART_CORTEX_A77, "Cortex-A77" },
191 	{ CPU_PART_NEOVERSE_N1, "Neoverse-N1" },
192 	CPU_PART_NONE,
193 };
194 
195 /* Cavium */
196 static const struct cpu_parts cpu_parts_cavium[] = {
197 	{ CPU_PART_THUNDERX, "ThunderX" },
198 	{ CPU_PART_THUNDERX2, "ThunderX2" },
199 	CPU_PART_NONE,
200 };
201 
202 /* APM / Ampere */
203 static const struct cpu_parts cpu_parts_apm[] = {
204 	{ CPU_PART_EMAG8180, "eMAG 8180" },
205 	CPU_PART_NONE,
206 };
207 
208 /* Unknown */
209 static const struct cpu_parts cpu_parts_none[] = {
210 	CPU_PART_NONE,
211 };
212 
213 /*
214  * Implementers table.
215  */
216 const struct cpu_implementers cpu_implementers[] = {
217 	{ CPU_IMPL_ARM,		"ARM",		cpu_parts_arm },
218 	{ CPU_IMPL_BROADCOM,	"Broadcom",	cpu_parts_none },
219 	{ CPU_IMPL_CAVIUM,	"Cavium",	cpu_parts_cavium },
220 	{ CPU_IMPL_DEC,		"DEC",		cpu_parts_none },
221 	{ CPU_IMPL_INFINEON,	"IFX",		cpu_parts_none },
222 	{ CPU_IMPL_FREESCALE,	"Freescale",	cpu_parts_none },
223 	{ CPU_IMPL_NVIDIA,	"NVIDIA",	cpu_parts_none },
224 	{ CPU_IMPL_APM,		"APM",		cpu_parts_apm },
225 	{ CPU_IMPL_QUALCOMM,	"Qualcomm",	cpu_parts_none },
226 	{ CPU_IMPL_MARVELL,	"Marvell",	cpu_parts_none },
227 	{ CPU_IMPL_INTEL,	"Intel",	cpu_parts_none },
228 	CPU_IMPLEMENTER_NONE,
229 };
230 
231 #define	MRS_TYPE_MASK		0xf
232 #define	MRS_INVALID		0
233 #define	MRS_EXACT		1
234 #define	MRS_EXACT_VAL(x)	(MRS_EXACT | ((x) << 4))
235 #define	MRS_EXACT_FIELD(x)	((x) >> 4)
236 #define	MRS_LOWER		2
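/*
 * How update_special_regs() treats each field when building the
 * sanitized user-visible register values:
 *  - MRS_EXACT forces the field to a fixed value (0 by default, or the
 *    value given with MRS_EXACT_VAL()), hiding the hardware value.
 *  - MRS_LOWER keeps the lowest, i.e. least capable, value seen across
 *    all CPUs so only features present everywhere are advertised.
 */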
237 
238 struct mrs_field_value {
239 	uint64_t	value;
240 	const char	*desc;
241 };
242 
243 #define	MRS_FIELD_VALUE(_value, _desc)					\
244 	{								\
245 		.value = (_value),					\
246 		.desc = (_desc),					\
247 	}
248 
249 #define	MRS_FIELD_VALUE_NONE_IMPL(_reg, _field, _none, _impl)		\
250 	MRS_FIELD_VALUE(_reg ## _ ## _field ## _ ## _none, ""),		\
251 	MRS_FIELD_VALUE(_reg ## _ ## _field ## _ ## _impl, #_field)
252 
253 #define	MRS_FIELD_VALUE_COUNT(_reg, _field, _desc)			\
254 	MRS_FIELD_VALUE(0ul << _reg ## _ ## _field ## _SHIFT, "1 " _desc), \
255 	MRS_FIELD_VALUE(1ul << _reg ## _ ## _field ## _SHIFT, "2 " _desc "s"), \
256 	MRS_FIELD_VALUE(2ul << _reg ## _ ## _field ## _SHIFT, "3 " _desc "s"), \
257 	MRS_FIELD_VALUE(3ul << _reg ## _ ## _field ## _SHIFT, "4 " _desc "s"), \
258 	MRS_FIELD_VALUE(4ul << _reg ## _ ## _field ## _SHIFT, "5 " _desc "s"), \
259 	MRS_FIELD_VALUE(5ul << _reg ## _ ## _field ## _SHIFT, "6 " _desc "s"), \
260 	MRS_FIELD_VALUE(6ul << _reg ## _ ## _field ## _SHIFT, "7 " _desc "s"), \
261 	MRS_FIELD_VALUE(7ul << _reg ## _ ## _field ## _SHIFT, "8 " _desc "s"), \
262 	MRS_FIELD_VALUE(8ul << _reg ## _ ## _field ## _SHIFT, "9 " _desc "s"), \
263 	MRS_FIELD_VALUE(9ul << _reg ## _ ## _field ## _SHIFT, "10 "_desc "s"), \
264 	MRS_FIELD_VALUE(10ul<< _reg ## _ ## _field ## _SHIFT, "11 "_desc "s"), \
265 	MRS_FIELD_VALUE(11ul<< _reg ## _ ## _field ## _SHIFT, "12 "_desc "s"), \
266 	MRS_FIELD_VALUE(12ul<< _reg ## _ ## _field ## _SHIFT, "13 "_desc "s"), \
267 	MRS_FIELD_VALUE(13ul<< _reg ## _ ## _field ## _SHIFT, "14 "_desc "s"), \
268 	MRS_FIELD_VALUE(14ul<< _reg ## _ ## _field ## _SHIFT, "15 "_desc "s"), \
269 	MRS_FIELD_VALUE(15ul<< _reg ## _ ## _field ## _SHIFT, "16 "_desc "s")
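/*
 * MRS_FIELD_VALUE_COUNT() builds a table that describes a 4-bit count
 * field as "N+1 <desc>s"; for example a BRPs field value of 5 prints
 * as "6 Breakpoints".
 */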
270 
271 #define	MRS_FIELD_VALUE_END	{ .desc = NULL }
272 
273 struct mrs_field {
274 	const char	*name;
275 	struct mrs_field_value *values;
276 	uint64_t	mask;
277 	bool		sign;
278 	u_int		type;
279 	u_int		shift;
280 };
281 
282 #define	MRS_FIELD(_register, _name, _sign, _type, _values)		\
283 	{								\
284 		.name = #_name,						\
285 		.sign = (_sign),					\
286 		.type = (_type),					\
287 		.shift = _register ## _ ## _name ## _SHIFT,		\
288 		.mask = _register ## _ ## _name ## _MASK,		\
289 		.values = (_values),					\
290 	}
291 
292 #define	MRS_FIELD_END	{ .type = MRS_INVALID, }
293 
294 /* ID_AA64AFR0_EL1 */
295 static struct mrs_field id_aa64afr0_fields[] = {
296 	MRS_FIELD_END,
297 };
298 
299 
300 /* ID_AA64AFR1_EL1 */
301 static struct mrs_field id_aa64afr1_fields[] = {
302 	MRS_FIELD_END,
303 };
304 
305 
306 /* ID_AA64DFR0_EL1 */
307 static struct mrs_field_value id_aa64dfr0_pmsver[] = {
308 	MRS_FIELD_VALUE(ID_AA64DFR0_PMSVer_NONE, ""),
309 	MRS_FIELD_VALUE(ID_AA64DFR0_PMSVer_V1, "SPE"),
310 	MRS_FIELD_VALUE_END,
311 };
312 
313 static struct mrs_field_value id_aa64dfr0_ctx_cmps[] = {
314 	MRS_FIELD_VALUE_COUNT(ID_AA64DFR0, CTX_CMPs, "CTX BKPT"),
315 	MRS_FIELD_VALUE_END,
316 };
317 
318 static struct mrs_field_value id_aa64dfr0_wrps[] = {
319 	MRS_FIELD_VALUE_COUNT(ID_AA64DFR0, WRPs, "Watchpoint"),
320 	MRS_FIELD_VALUE_END,
321 };
322 
323 static struct mrs_field_value id_aa64dfr0_brps[] = {
324 	MRS_FIELD_VALUE_COUNT(ID_AA64DFR0, BRPs, "Breakpoint"),
325 	MRS_FIELD_VALUE_END,
326 };
327 
328 static struct mrs_field_value id_aa64dfr0_pmuver[] = {
329 	MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_NONE, ""),
330 	MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3, "PMUv3"),
331 	MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3_1, "PMUv3+16 bit evtCount"),
332 	MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_IMPL, "IMPL PMU"),
333 	MRS_FIELD_VALUE_END,
334 };
335 
336 static struct mrs_field_value id_aa64dfr0_tracever[] = {
337 	MRS_FIELD_VALUE(ID_AA64DFR0_TraceVer_NONE, ""),
338 	MRS_FIELD_VALUE(ID_AA64DFR0_TraceVer_IMPL, "Trace"),
339 	MRS_FIELD_VALUE_END,
340 };
341 
342 static struct mrs_field_value id_aa64dfr0_debugver[] = {
343 	MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8, "Debugv8"),
344 	MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8_VHE, "Debugv8_VHE"),
345 	MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8_2, "Debugv8.2"),
346 	MRS_FIELD_VALUE_END,
347 };
348 
349 static struct mrs_field id_aa64dfr0_fields[] = {
350 	MRS_FIELD(ID_AA64DFR0, PMSVer, false, MRS_EXACT, id_aa64dfr0_pmsver),
351 	MRS_FIELD(ID_AA64DFR0, CTX_CMPs, false, MRS_EXACT,
352 	    id_aa64dfr0_ctx_cmps),
353 	MRS_FIELD(ID_AA64DFR0, WRPs, false, MRS_LOWER, id_aa64dfr0_wrps),
354 	MRS_FIELD(ID_AA64DFR0, BRPs, false, MRS_LOWER, id_aa64dfr0_brps),
355 	MRS_FIELD(ID_AA64DFR0, PMUVer, false, MRS_EXACT, id_aa64dfr0_pmuver),
356 	MRS_FIELD(ID_AA64DFR0, TraceVer, false, MRS_EXACT,
357 	    id_aa64dfr0_tracever),
358 	MRS_FIELD(ID_AA64DFR0, DebugVer, false, MRS_EXACT_VAL(0x6),
359 	    id_aa64dfr0_debugver),
360 	MRS_FIELD_END,
361 };
362 
363 
364 /* ID_AA64DFR1 */
365 static struct mrs_field id_aa64dfr1_fields[] = {
366 	MRS_FIELD_END,
367 };
368 
369 
370 /* ID_AA64ISAR0_EL1 */
371 static struct mrs_field_value id_aa64isar0_rndr[] = {
372 	MRS_FIELD_VALUE(ID_AA64ISAR0_RNDR_NONE, ""),
373 	MRS_FIELD_VALUE(ID_AA64ISAR0_RNDR_IMPL, "RNG"),
374 	MRS_FIELD_VALUE_END,
375 };
376 
377 static struct mrs_field_value id_aa64isar0_tlb[] = {
378 	MRS_FIELD_VALUE(ID_AA64ISAR0_TLB_NONE, ""),
379 	MRS_FIELD_VALUE(ID_AA64ISAR0_TLB_TLBIOS, "TLBI-OS"),
380 	MRS_FIELD_VALUE(ID_AA64ISAR0_TLB_TLBIOSR, "TLBI-OSR"),
381 	MRS_FIELD_VALUE_END,
382 };
383 
384 static struct mrs_field_value id_aa64isar0_ts[] = {
385 	MRS_FIELD_VALUE(ID_AA64ISAR0_TS_NONE, ""),
386 	MRS_FIELD_VALUE(ID_AA64ISAR0_TS_CondM_8_4, "CondM-8.4"),
387 	MRS_FIELD_VALUE(ID_AA64ISAR0_TS_CondM_8_5, "CondM-8.5"),
388 	MRS_FIELD_VALUE_END,
389 };
390 
391 static struct mrs_field_value id_aa64isar0_fhm[] = {
392 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, FHM, NONE, IMPL),
393 	MRS_FIELD_VALUE_END,
394 };
395 
396 static struct mrs_field_value id_aa64isar0_dp[] = {
397 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, DP, NONE, IMPL),
398 	MRS_FIELD_VALUE_END,
399 };
400 
401 static struct mrs_field_value id_aa64isar0_sm4[] = {
402 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SM4, NONE, IMPL),
403 	MRS_FIELD_VALUE_END,
404 };
405 
406 static struct mrs_field_value id_aa64isar0_sm3[] = {
407 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SM3, NONE, IMPL),
408 	MRS_FIELD_VALUE_END,
409 };
410 
411 static struct mrs_field_value id_aa64isar0_sha3[] = {
412 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SHA3, NONE, IMPL),
413 	MRS_FIELD_VALUE_END,
414 };
415 
416 static struct mrs_field_value id_aa64isar0_rdm[] = {
417 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, RDM, NONE, IMPL),
418 	MRS_FIELD_VALUE_END,
419 };
420 
421 static struct mrs_field_value id_aa64isar0_atomic[] = {
422 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, Atomic, NONE, IMPL),
423 	MRS_FIELD_VALUE_END,
424 };
425 
426 static struct mrs_field_value id_aa64isar0_crc32[] = {
427 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, CRC32, NONE, BASE),
428 	MRS_FIELD_VALUE_END,
429 };
430 
431 static struct mrs_field_value id_aa64isar0_sha2[] = {
432 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SHA2, NONE, BASE),
433 	MRS_FIELD_VALUE(ID_AA64ISAR0_SHA2_512, "SHA2+SHA512"),
434 	MRS_FIELD_VALUE_END,
435 };
436 
437 static struct mrs_field_value id_aa64isar0_sha1[] = {
438 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SHA1, NONE, BASE),
439 	MRS_FIELD_VALUE_END,
440 };
441 
442 static struct mrs_field_value id_aa64isar0_aes[] = {
443 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, AES, NONE, BASE),
444 	MRS_FIELD_VALUE(ID_AA64ISAR0_AES_PMULL, "AES+PMULL"),
445 	MRS_FIELD_VALUE_END,
446 };
447 
448 static struct mrs_field id_aa64isar0_fields[] = {
449 	MRS_FIELD(ID_AA64ISAR0, RNDR, false, MRS_LOWER, id_aa64isar0_rndr),
450 	MRS_FIELD(ID_AA64ISAR0, TLB, false, MRS_LOWER, id_aa64isar0_tlb),
451 	MRS_FIELD(ID_AA64ISAR0, TS, false, MRS_LOWER, id_aa64isar0_ts),
452 	MRS_FIELD(ID_AA64ISAR0, FHM, false, MRS_LOWER, id_aa64isar0_fhm),
453 	MRS_FIELD(ID_AA64ISAR0, DP, false, MRS_LOWER, id_aa64isar0_dp),
454 	MRS_FIELD(ID_AA64ISAR0, SM4, false, MRS_LOWER, id_aa64isar0_sm4),
455 	MRS_FIELD(ID_AA64ISAR0, SM3, false, MRS_LOWER, id_aa64isar0_sm3),
456 	MRS_FIELD(ID_AA64ISAR0, SHA3, false, MRS_LOWER, id_aa64isar0_sha3),
457 	MRS_FIELD(ID_AA64ISAR0, RDM, false, MRS_LOWER, id_aa64isar0_rdm),
458 	MRS_FIELD(ID_AA64ISAR0, Atomic, false, MRS_LOWER, id_aa64isar0_atomic),
459 	MRS_FIELD(ID_AA64ISAR0, CRC32, false, MRS_LOWER, id_aa64isar0_crc32),
460 	MRS_FIELD(ID_AA64ISAR0, SHA2, false, MRS_LOWER, id_aa64isar0_sha2),
461 	MRS_FIELD(ID_AA64ISAR0, SHA1, false, MRS_LOWER, id_aa64isar0_sha1),
462 	MRS_FIELD(ID_AA64ISAR0, AES, false, MRS_LOWER, id_aa64isar0_aes),
463 	MRS_FIELD_END,
464 };
465 
466 
467 /* ID_AA64ISAR1_EL1 */
468 static struct mrs_field_value id_aa64isar1_i8mm[] = {
469 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, I8MM, NONE, IMPL),
470 	MRS_FIELD_VALUE_END,
471 };
472 
473 static struct mrs_field_value id_aa64isar1_dgh[] = {
474 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, DGH, NONE, IMPL),
475 	MRS_FIELD_VALUE_END,
476 };
477 
478 static struct mrs_field_value id_aa64isar1_bf16[] = {
479 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, BF16, NONE, IMPL),
480 	MRS_FIELD_VALUE_END,
481 };
482 
483 static struct mrs_field_value id_aa64isar1_specres[] = {
484 	MRS_FIELD_VALUE(ID_AA64ISAR1_SPECRES_NONE, ""),
485 	MRS_FIELD_VALUE(ID_AA64ISAR1_SPECRES_IMPL, "PredInv"),
486 	MRS_FIELD_VALUE_END,
487 };
488 
489 static struct mrs_field_value id_aa64isar1_sb[] = {
490 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, SB, NONE, IMPL),
491 	MRS_FIELD_VALUE_END,
492 };
493 
494 static struct mrs_field_value id_aa64isar1_frintts[] = {
495 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, FRINTTS, NONE, IMPL),
496 	MRS_FIELD_VALUE_END,
497 };
498 
499 static struct mrs_field_value id_aa64isar1_gpi[] = {
500 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, GPI, NONE, IMPL),
501 	MRS_FIELD_VALUE_END,
502 };
503 
504 static struct mrs_field_value id_aa64isar1_gpa[] = {
505 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, GPA, NONE, IMPL),
506 	MRS_FIELD_VALUE_END,
507 };
508 
509 static struct mrs_field_value id_aa64isar1_lrcpc[] = {
510 	MRS_FIELD_VALUE(ID_AA64ISAR1_LRCPC_NONE, ""),
511 	MRS_FIELD_VALUE(ID_AA64ISAR1_LRCPC_RCPC_8_3, "RCPC-8.3"),
512 	MRS_FIELD_VALUE(ID_AA64ISAR1_LRCPC_RCPC_8_4, "RCPC-8.4"),
513 	MRS_FIELD_VALUE_END,
514 };
515 
516 static struct mrs_field_value id_aa64isar1_fcma[] = {
517 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, FCMA, NONE, IMPL),
518 	MRS_FIELD_VALUE_END,
519 };
520 
521 static struct mrs_field_value id_aa64isar1_jscvt[] = {
522 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, JSCVT, NONE, IMPL),
523 	MRS_FIELD_VALUE_END,
524 };
525 
526 static struct mrs_field_value id_aa64isar1_api[] = {
527 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, API, NONE, IMPL),
528 	MRS_FIELD_VALUE_END,
529 };
530 
531 static struct mrs_field_value id_aa64isar1_apa[] = {
532 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, APA, NONE, IMPL),
533 	MRS_FIELD_VALUE_END,
534 };
535 
536 static struct mrs_field_value id_aa64isar1_dpb[] = {
537 	MRS_FIELD_VALUE(ID_AA64ISAR1_DPB_NONE, ""),
538 	MRS_FIELD_VALUE(ID_AA64ISAR1_DPB_DCCVAP, "DCPoP"),
539 	MRS_FIELD_VALUE(ID_AA64ISAR1_DPB_DCCVADP, "DCCVADP"),
540 	MRS_FIELD_VALUE_END,
541 };
542 
543 static struct mrs_field id_aa64isar1_fields[] = {
544 	MRS_FIELD(ID_AA64ISAR1, I8MM, false, MRS_LOWER, id_aa64isar1_i8mm),
545 	MRS_FIELD(ID_AA64ISAR1, DGH, false, MRS_LOWER, id_aa64isar1_dgh),
546 	MRS_FIELD(ID_AA64ISAR1, BF16, false, MRS_LOWER, id_aa64isar1_bf16),
547 	MRS_FIELD(ID_AA64ISAR1, SPECRES, false, MRS_LOWER,
548 	    id_aa64isar1_specres),
549 	MRS_FIELD(ID_AA64ISAR1, SB, false, MRS_LOWER, id_aa64isar1_sb),
550 	MRS_FIELD(ID_AA64ISAR1, FRINTTS, false, MRS_LOWER,
551 	    id_aa64isar1_frintts),
552 	MRS_FIELD(ID_AA64ISAR1, GPI, false, MRS_EXACT, id_aa64isar1_gpi),
553 	MRS_FIELD(ID_AA64ISAR1, GPA, false, MRS_EXACT, id_aa64isar1_gpa),
554 	MRS_FIELD(ID_AA64ISAR1, LRCPC, false, MRS_LOWER, id_aa64isar1_lrcpc),
555 	MRS_FIELD(ID_AA64ISAR1, FCMA, false, MRS_LOWER, id_aa64isar1_fcma),
556 	MRS_FIELD(ID_AA64ISAR1, JSCVT, false, MRS_LOWER, id_aa64isar1_jscvt),
557 	MRS_FIELD(ID_AA64ISAR1, API, false, MRS_EXACT, id_aa64isar1_api),
558 	MRS_FIELD(ID_AA64ISAR1, APA, false, MRS_EXACT, id_aa64isar1_apa),
559 	MRS_FIELD(ID_AA64ISAR1, DPB, false, MRS_LOWER, id_aa64isar1_dpb),
560 	MRS_FIELD_END,
561 };
562 
563 
564 /* ID_AA64MMFR0_EL1 */
565 static struct mrs_field_value id_aa64mmfr0_tgran4[] = {
566 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, TGran4, NONE, IMPL),
567 	MRS_FIELD_VALUE_END,
568 };
569 
570 static struct mrs_field_value id_aa64mmfr0_tgran64[] = {
571 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, TGran64, NONE, IMPL),
572 	MRS_FIELD_VALUE_END,
573 };
574 
575 static struct mrs_field_value id_aa64mmfr0_tgran16[] = {
576 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, TGran16, NONE, IMPL),
577 	MRS_FIELD_VALUE_END,
578 };
579 
580 static struct mrs_field_value id_aa64mmfr0_bigend_el0[] = {
581 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, BigEndEL0, FIXED, MIXED),
582 	MRS_FIELD_VALUE_END,
583 };
584 
585 static struct mrs_field_value id_aa64mmfr0_snsmem[] = {
586 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, SNSMem, NONE, DISTINCT),
587 	MRS_FIELD_VALUE_END,
588 };
589 
590 static struct mrs_field_value id_aa64mmfr0_bigend[] = {
591 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, BigEnd, FIXED, MIXED),
592 	MRS_FIELD_VALUE_END,
593 };
594 
595 static struct mrs_field_value id_aa64mmfr0_asid_bits[] = {
596 	MRS_FIELD_VALUE(ID_AA64MMFR0_ASIDBits_8, "8bit ASID"),
597 	MRS_FIELD_VALUE(ID_AA64MMFR0_ASIDBits_16, "16bit ASID"),
598 	MRS_FIELD_VALUE_END,
599 };
600 
601 static struct mrs_field_value id_aa64mmfr0_parange[] = {
602 	MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_4G, "4GB PA"),
603 	MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_64G, "64GB PA"),
604 	MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_1T, "1TB PA"),
605 	MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_4T, "4TB PA"),
606 	MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_16T, "16TB PA"),
607 	MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_256T, "256TB PA"),
608 	MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_4P, "4PB PA"),
609 	MRS_FIELD_VALUE_END,
610 };
611 
612 static struct mrs_field id_aa64mmfr0_fields[] = {
613 	MRS_FIELD(ID_AA64MMFR0, TGran4, false, MRS_EXACT, id_aa64mmfr0_tgran4),
614 	MRS_FIELD(ID_AA64MMFR0, TGran64, false, MRS_EXACT,
615 	    id_aa64mmfr0_tgran64),
616 	MRS_FIELD(ID_AA64MMFR0, TGran16, false, MRS_EXACT,
617 	    id_aa64mmfr0_tgran16),
618 	MRS_FIELD(ID_AA64MMFR0, BigEndEL0, false, MRS_EXACT,
619 	    id_aa64mmfr0_bigend_el0),
620 	MRS_FIELD(ID_AA64MMFR0, SNSMem, false, MRS_EXACT, id_aa64mmfr0_snsmem),
621 	MRS_FIELD(ID_AA64MMFR0, BigEnd, false, MRS_EXACT, id_aa64mmfr0_bigend),
622 	MRS_FIELD(ID_AA64MMFR0, ASIDBits, false, MRS_EXACT,
623 	    id_aa64mmfr0_asid_bits),
624 	MRS_FIELD(ID_AA64MMFR0, PARange, false, MRS_EXACT,
625 	    id_aa64mmfr0_parange),
626 	MRS_FIELD_END,
627 };
628 
629 
630 /* ID_AA64MMFR1_EL1 */
631 static struct mrs_field_value id_aa64mmfr1_xnx[] = {
632 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, XNX, NONE, IMPL),
633 	MRS_FIELD_VALUE_END,
634 };
635 
636 static struct mrs_field_value id_aa64mmfr1_specsei[] = {
637 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, SpecSEI, NONE, IMPL),
638 	MRS_FIELD_VALUE_END,
639 };
640 
641 static struct mrs_field_value id_aa64mmfr1_pan[] = {
642 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, PAN, NONE, IMPL),
643 	MRS_FIELD_VALUE(ID_AA64MMFR1_PAN_ATS1E1, "PAN+ATS1E1"),
644 	MRS_FIELD_VALUE_END,
645 };
646 
647 static struct mrs_field_value id_aa64mmfr1_lo[] = {
648 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, LO, NONE, IMPL),
649 	MRS_FIELD_VALUE_END,
650 };
651 
652 static struct mrs_field_value id_aa64mmfr1_hpds[] = {
653 	MRS_FIELD_VALUE(ID_AA64MMFR1_HPDS_NONE, ""),
654 	MRS_FIELD_VALUE(ID_AA64MMFR1_HPDS_HPD, "HPD"),
655 	MRS_FIELD_VALUE(ID_AA64MMFR1_HPDS_TTPBHA, "HPD+TTPBHA"),
656 	MRS_FIELD_VALUE_END,
657 };
658 
659 static struct mrs_field_value id_aa64mmfr1_vh[] = {
660 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, VH, NONE, IMPL),
661 	MRS_FIELD_VALUE_END,
662 };
663 
664 static struct mrs_field_value id_aa64mmfr1_vmidbits[] = {
665 	MRS_FIELD_VALUE(ID_AA64MMFR1_VMIDBits_8, "8bit VMID"),
666 	MRS_FIELD_VALUE(ID_AA64MMFR1_VMIDBits_16, "16bit VMID"),
667 	MRS_FIELD_VALUE_END,
668 };
669 
670 static struct mrs_field_value id_aa64mmfr1_hafdbs[] = {
671 	MRS_FIELD_VALUE(ID_AA64MMFR1_HAFDBS_NONE, ""),
672 	MRS_FIELD_VALUE(ID_AA64MMFR1_HAFDBS_AF, "HAF"),
673 	MRS_FIELD_VALUE(ID_AA64MMFR1_HAFDBS_AF_DBS, "HAF+DS"),
674 	MRS_FIELD_VALUE_END,
675 };
676 
677 static struct mrs_field id_aa64mmfr1_fields[] = {
678 	MRS_FIELD(ID_AA64MMFR1, XNX, false, MRS_EXACT, id_aa64mmfr1_xnx),
679 	MRS_FIELD(ID_AA64MMFR1, SpecSEI, false, MRS_EXACT,
680 	    id_aa64mmfr1_specsei),
681 	MRS_FIELD(ID_AA64MMFR1, PAN, false, MRS_EXACT, id_aa64mmfr1_pan),
682 	MRS_FIELD(ID_AA64MMFR1, LO, false, MRS_EXACT, id_aa64mmfr1_lo),
683 	MRS_FIELD(ID_AA64MMFR1, HPDS, false, MRS_EXACT, id_aa64mmfr1_hpds),
684 	MRS_FIELD(ID_AA64MMFR1, VH, false, MRS_EXACT, id_aa64mmfr1_vh),
685 	MRS_FIELD(ID_AA64MMFR1, VMIDBits, false, MRS_EXACT,
686 	    id_aa64mmfr1_vmidbits),
687 	MRS_FIELD(ID_AA64MMFR1, HAFDBS, false, MRS_EXACT, id_aa64mmfr1_hafdbs),
688 	MRS_FIELD_END,
689 };
690 
691 
692 /* ID_AA64MMFR2_EL1 */
693 static struct mrs_field_value id_aa64mmfr2_e0pd[] = {
694 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, E0PD, NONE, IMPL),
695 	MRS_FIELD_VALUE_END,
696 };
697 
698 static struct mrs_field_value id_aa64mmfr2_evt[] = {
699 	MRS_FIELD_VALUE(ID_AA64MMFR2_EVT_NONE, ""),
700 	MRS_FIELD_VALUE(ID_AA64MMFR2_EVT_8_2, "EVT-8.2"),
701 	MRS_FIELD_VALUE(ID_AA64MMFR2_EVT_8_5, "EVT-8.5"),
702 	MRS_FIELD_VALUE_END,
703 };
704 
705 static struct mrs_field_value id_aa64mmfr2_bbm[] = {
706 	MRS_FIELD_VALUE(ID_AA64MMFR2_BBM_LEVEL0, ""),
707 	MRS_FIELD_VALUE(ID_AA64MMFR2_BBM_LEVEL1, "BBM level 1"),
708 	MRS_FIELD_VALUE(ID_AA64MMFR2_BBM_LEVEL2, "BBM level 2"),
709 	MRS_FIELD_VALUE_END,
710 };
711 
712 static struct mrs_field_value id_aa64mmfr2_ttl[] = {
713 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, TTL, NONE, IMPL),
714 	MRS_FIELD_VALUE_END,
715 };
716 
717 static struct mrs_field_value id_aa64mmfr2_fwb[] = {
718 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, FWB, NONE, IMPL),
719 	MRS_FIELD_VALUE_END,
720 };
721 
722 static struct mrs_field_value id_aa64mmfr2_ids[] = {
723 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, IDS, NONE, IMPL),
724 	MRS_FIELD_VALUE_END,
725 };
726 
727 static struct mrs_field_value id_aa64mmfr2_at[] = {
728 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, AT, NONE, IMPL),
729 	MRS_FIELD_VALUE_END,
730 };
731 
732 static struct mrs_field_value id_aa64mmfr2_st[] = {
733 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, ST, NONE, IMPL),
734 	MRS_FIELD_VALUE_END,
735 };
736 
737 static struct mrs_field_value id_aa64mmfr2_nv[] = {
738 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, NV, NONE, IMPL),
739 	MRS_FIELD_VALUE_END,
740 };
741 
742 static struct mrs_field_value id_aa64mmfr2_ccidx[] = {
743 	MRS_FIELD_VALUE(ID_AA64MMFR2_CCIDX_32, "32bit CCIDX"),
744 	MRS_FIELD_VALUE(ID_AA64MMFR2_CCIDX_64, "64bit CCIDX"),
745 	MRS_FIELD_VALUE_END,
746 };
747 
748 static struct mrs_field_value id_aa64mmfr2_varange[] = {
749 	MRS_FIELD_VALUE(ID_AA64MMFR2_VARange_48, "48bit VA"),
750 	MRS_FIELD_VALUE(ID_AA64MMFR2_VARange_52, "52bit VA"),
751 	MRS_FIELD_VALUE_END,
752 };
753 
754 static struct mrs_field_value id_aa64mmfr2_iesb[] = {
755 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, IESB, NONE, IMPL),
756 	MRS_FIELD_VALUE_END,
757 };
758 
759 static struct mrs_field_value id_aa64mmfr2_lsm[] = {
760 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, LSM, NONE, IMPL),
761 	MRS_FIELD_VALUE_END,
762 };
763 
764 static struct mrs_field_value id_aa64mmfr2_uao[] = {
765 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, UAO, NONE, IMPL),
766 	MRS_FIELD_VALUE_END,
767 };
768 
769 static struct mrs_field_value id_aa64mmfr2_cnp[] = {
770 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, CnP, NONE, IMPL),
771 	MRS_FIELD_VALUE_END,
772 };
773 
774 static struct mrs_field id_aa64mmfr2_fields[] = {
775 	MRS_FIELD(ID_AA64MMFR2, E0PD, false, MRS_EXACT, id_aa64mmfr2_e0pd),
776 	MRS_FIELD(ID_AA64MMFR2, EVT, false, MRS_EXACT, id_aa64mmfr2_evt),
777 	MRS_FIELD(ID_AA64MMFR2, BBM, false, MRS_EXACT, id_aa64mmfr2_bbm),
778 	MRS_FIELD(ID_AA64MMFR2, TTL, false, MRS_EXACT, id_aa64mmfr2_ttl),
779 	MRS_FIELD(ID_AA64MMFR2, FWB, false, MRS_EXACT, id_aa64mmfr2_fwb),
780 	MRS_FIELD(ID_AA64MMFR2, IDS, false, MRS_EXACT, id_aa64mmfr2_ids),
781 	MRS_FIELD(ID_AA64MMFR2, AT, false, MRS_LOWER, id_aa64mmfr2_at),
782 	MRS_FIELD(ID_AA64MMFR2, ST, false, MRS_EXACT, id_aa64mmfr2_st),
783 	MRS_FIELD(ID_AA64MMFR2, NV, false, MRS_EXACT, id_aa64mmfr2_nv),
784 	MRS_FIELD(ID_AA64MMFR2, CCIDX, false, MRS_EXACT, id_aa64mmfr2_ccidx),
785 	MRS_FIELD(ID_AA64MMFR2, VARange, false, MRS_EXACT,
786 	    id_aa64mmfr2_varange),
787 	MRS_FIELD(ID_AA64MMFR2, IESB, false, MRS_EXACT, id_aa64mmfr2_iesb),
788 	MRS_FIELD(ID_AA64MMFR2, LSM, false, MRS_EXACT, id_aa64mmfr2_lsm),
789 	MRS_FIELD(ID_AA64MMFR2, UAO, false, MRS_EXACT, id_aa64mmfr2_uao),
790 	MRS_FIELD(ID_AA64MMFR2, CnP, false, MRS_EXACT, id_aa64mmfr2_cnp),
791 	MRS_FIELD_END,
792 };
793 
794 
795 /* ID_AA64PFR0_EL1 */
796 static struct mrs_field_value id_aa64pfr0_csv3[] = {
797 	MRS_FIELD_VALUE(ID_AA64PFR0_CSV3_NONE, ""),
798 	MRS_FIELD_VALUE(ID_AA64PFR0_CSV3_ISOLATED, "CSV3"),
799 	MRS_FIELD_VALUE_END,
800 };
801 
802 static struct mrs_field_value id_aa64pfr0_csv2[] = {
803 	MRS_FIELD_VALUE(ID_AA64PFR0_CSV2_NONE, ""),
804 	MRS_FIELD_VALUE(ID_AA64PFR0_CSV2_ISOLATED, "CSV2"),
805 	MRS_FIELD_VALUE(ID_AA64PFR0_CSV2_SCXTNUM, "SCXTNUM"),
806 	MRS_FIELD_VALUE_END,
807 };
808 
809 static struct mrs_field_value id_aa64pfr0_dit[] = {
810 	MRS_FIELD_VALUE(ID_AA64PFR0_DIT_NONE, ""),
811 	MRS_FIELD_VALUE(ID_AA64PFR0_DIT_PSTATE, "PSTATE.DIT"),
812 	MRS_FIELD_VALUE_END,
813 };
814 
815 static struct mrs_field_value id_aa64pfr0_amu[] = {
816 	MRS_FIELD_VALUE(ID_AA64PFR0_AMU_NONE, ""),
817 	MRS_FIELD_VALUE(ID_AA64PFR0_AMU_V1, "AMUv1"),
818 	MRS_FIELD_VALUE_END,
819 };
820 
821 static struct mrs_field_value id_aa64pfr0_mpam[] = {
822 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, MPAM, NONE, IMPL),
823 	MRS_FIELD_VALUE_END,
824 };
825 
826 static struct mrs_field_value id_aa64pfr0_sel2[] = {
827 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, SEL2, NONE, IMPL),
828 	MRS_FIELD_VALUE_END,
829 };
830 
831 static struct mrs_field_value id_aa64pfr0_sve[] = {
832 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, SVE, NONE, IMPL),
833 	MRS_FIELD_VALUE_END,
834 };
835 
836 static struct mrs_field_value id_aa64pfr0_ras[] = {
837 	MRS_FIELD_VALUE(ID_AA64PFR0_RAS_NONE, ""),
838 	MRS_FIELD_VALUE(ID_AA64PFR0_RAS_V1, "RASv1"),
839 	MRS_FIELD_VALUE_END,
840 };
841 
842 static struct mrs_field_value id_aa64pfr0_gic[] = {
843 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, GIC, CPUIF_NONE, CPUIF_EN),
844 	MRS_FIELD_VALUE_END,
845 };
846 
847 static struct mrs_field_value id_aa64pfr0_advsimd[] = {
848 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, AdvSIMD, NONE, IMPL),
849 	MRS_FIELD_VALUE(ID_AA64PFR0_AdvSIMD_HP, "AdvSIMD+HP"),
850 	MRS_FIELD_VALUE_END,
851 };
852 
853 static struct mrs_field_value id_aa64pfr0_fp[] = {
854 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, FP, NONE, IMPL),
855 	MRS_FIELD_VALUE(ID_AA64PFR0_FP_HP, "FP+HP"),
856 	MRS_FIELD_VALUE_END,
857 };
858 
859 static struct mrs_field_value id_aa64pfr0_el3[] = {
860 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, EL3, NONE, 64),
861 	MRS_FIELD_VALUE(ID_AA64PFR0_EL3_64_32, "EL3 32"),
862 	MRS_FIELD_VALUE_END,
863 };
864 
865 static struct mrs_field_value id_aa64pfr0_el2[] = {
866 	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, EL2, NONE, 64),
867 	MRS_FIELD_VALUE(ID_AA64PFR0_EL2_64_32, "EL2 32"),
868 	MRS_FIELD_VALUE_END,
869 };
870 
871 static struct mrs_field_value id_aa64pfr0_el1[] = {
872 	MRS_FIELD_VALUE(ID_AA64PFR0_EL1_64, "EL1"),
873 	MRS_FIELD_VALUE(ID_AA64PFR0_EL1_64_32, "EL1 32"),
874 	MRS_FIELD_VALUE_END,
875 };
876 
877 static struct mrs_field_value id_aa64pfr0_el0[] = {
878 	MRS_FIELD_VALUE(ID_AA64PFR0_EL0_64, "EL0"),
879 	MRS_FIELD_VALUE(ID_AA64PFR0_EL0_64_32, "EL0 32"),
880 	MRS_FIELD_VALUE_END,
881 };
882 
883 static struct mrs_field id_aa64pfr0_fields[] = {
884 	MRS_FIELD(ID_AA64PFR0, CSV3, false, MRS_EXACT, id_aa64pfr0_csv3),
885 	MRS_FIELD(ID_AA64PFR0, CSV2, false, MRS_EXACT, id_aa64pfr0_csv2),
886 	MRS_FIELD(ID_AA64PFR0, DIT, false, MRS_LOWER, id_aa64pfr0_dit),
887 	MRS_FIELD(ID_AA64PFR0, AMU, false, MRS_EXACT, id_aa64pfr0_amu),
888 	MRS_FIELD(ID_AA64PFR0, MPAM, false, MRS_EXACT, id_aa64pfr0_mpam),
889 	MRS_FIELD(ID_AA64PFR0, SEL2, false, MRS_EXACT, id_aa64pfr0_sel2),
890 	MRS_FIELD(ID_AA64PFR0, SVE, false, MRS_EXACT, id_aa64pfr0_sve),
891 	MRS_FIELD(ID_AA64PFR0, RAS, false, MRS_EXACT, id_aa64pfr0_ras),
892 	MRS_FIELD(ID_AA64PFR0, GIC, false, MRS_EXACT, id_aa64pfr0_gic),
893 	MRS_FIELD(ID_AA64PFR0, AdvSIMD, true, MRS_LOWER, id_aa64pfr0_advsimd),
894 	MRS_FIELD(ID_AA64PFR0, FP, true,  MRS_LOWER, id_aa64pfr0_fp),
895 	MRS_FIELD(ID_AA64PFR0, EL3, false, MRS_EXACT, id_aa64pfr0_el3),
896 	MRS_FIELD(ID_AA64PFR0, EL2, false, MRS_EXACT, id_aa64pfr0_el2),
897 	MRS_FIELD(ID_AA64PFR0, EL1, false, MRS_LOWER, id_aa64pfr0_el1),
898 	MRS_FIELD(ID_AA64PFR0, EL0, false, MRS_LOWER, id_aa64pfr0_el0),
899 	MRS_FIELD_END,
900 };
901 
902 
903 /* ID_AA64PFR1_EL1 */
904 static struct mrs_field_value id_aa64pfr1_bt[] = {
905 	MRS_FIELD_VALUE(ID_AA64PFR1_BT_NONE, ""),
906 	MRS_FIELD_VALUE(ID_AA64PFR1_BT_IMPL, "BTI"),
907 	MRS_FIELD_VALUE_END,
908 };
909 
910 static struct mrs_field_value id_aa64pfr1_ssbs[] = {
911 	MRS_FIELD_VALUE(ID_AA64PFR1_SSBS_NONE, ""),
912 	MRS_FIELD_VALUE(ID_AA64PFR1_SSBS_PSTATE, "PSTATE.SSBS"),
913 	MRS_FIELD_VALUE(ID_AA64PFR1_SSBS_PSTATE_MSR, "PSTATE.SSBS MSR"),
914 	MRS_FIELD_VALUE_END,
915 };
916 
917 static struct mrs_field_value id_aa64pfr1_mte[] = {
918 	MRS_FIELD_VALUE(ID_AA64PFR1_MTE_NONE, ""),
919 	MRS_FIELD_VALUE(ID_AA64PFR1_MTE_IMPL_EL0, "MTE EL0"),
920 	MRS_FIELD_VALUE(ID_AA64PFR1_MTE_IMPL, "MTE"),
921 	MRS_FIELD_VALUE_END,
922 };
923 
924 static struct mrs_field id_aa64pfr1_fields[] = {
925 	MRS_FIELD(ID_AA64PFR1, BT, false, MRS_EXACT, id_aa64pfr1_bt),
926 	MRS_FIELD(ID_AA64PFR1, SSBS, false, MRS_LOWER, id_aa64pfr1_ssbs),
927 	MRS_FIELD(ID_AA64PFR1, MTE, false, MRS_EXACT, id_aa64pfr1_mte),
928 	MRS_FIELD_END,
929 };
930 
931 struct mrs_user_reg {
932 	u_int		reg;
933 	u_int		CRm;
934 	u_int		Op2;
935 	size_t		offset;
936 	struct mrs_field *fields;
937 };
938 
939 static struct mrs_user_reg user_regs[] = {
940 	{	/* id_aa64isar0_el1 */
941 		.reg = ID_AA64ISAR0_EL1,
942 		.CRm = 6,
943 		.Op2 = 0,
944 		.offset = __offsetof(struct cpu_desc, id_aa64isar0),
945 		.fields = id_aa64isar0_fields,
946 	},
947 	{	/* id_aa64isar1_el1 */
948 		.reg = ID_AA64ISAR1_EL1,
949 		.CRm = 6,
950 		.Op2 = 1,
951 		.offset = __offsetof(struct cpu_desc, id_aa64isar1),
952 		.fields = id_aa64isar1_fields,
953 	},
954 	{	/* id_aa64pfr0_el1 */
955 		.reg = ID_AA64PFR0_EL1,
956 		.CRm = 4,
957 		.Op2 = 0,
958 		.offset = __offsetof(struct cpu_desc, id_aa64pfr0),
959 		.fields = id_aa64pfr0_fields,
960 	},
961 	{	/* id_aa64pfr1_el1 */
962 		.reg = ID_AA64PFR1_EL1,
963 		.CRm = 4,
964 		.Op2 = 1,
965 		.offset = __offsetof(struct cpu_desc, id_aa64pfr1),
966 		.fields = id_aa64pfr1_fields,
967 	},
968 	{	/* id_aa64dfr0_el1 */
969 		.reg = ID_AA64DFR0_EL1,
970 		.CRm = 5,
971 		.Op2 = 0,
972 		.offset = __offsetof(struct cpu_desc, id_aa64dfr0),
973 		.fields = id_aa64dfr0_fields,
974 	},
975 	{	/* id_aa64mmfr0_el1 */
976 		.reg = ID_AA64MMFR0_EL1,
977 		.CRm = 7,
978 		.Op2 = 0,
979 		.offset = __offsetof(struct cpu_desc, id_aa64mmfr0),
980 		.fields = id_aa64mmfr0_fields,
981 	},
982 };
983 
984 #define	CPU_DESC_FIELD(desc, idx)					\
985     *(uint64_t *)((char *)&(desc) + user_regs[(idx)].offset)
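/*
 * CPU_DESC_FIELD() uses the byte offset stored in user_regs[idx] to
 * access the uint64_t copy of that ID register within a struct
 * cpu_desc, letting one loop walk every emulated register.
 */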
986 
987 static int
988 user_mrs_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame,
989     uint32_t esr)
990 {
991 	uint64_t value;
992 	int CRm, Op2, i, reg;
993 
994 	if ((insn & MRS_MASK) != MRS_VALUE)
995 		return (0);
996 
997 	/*
998 	 * We only emulate Op0 == 3, Op1 == 0, CRn == 0, CRm == {0, 4-7}.
999 	 * These are in the EL1 CPU identification space.
1000 	 * CRm == 0 holds MIDR_EL1, MPIDR_EL1, and REVIDR_EL1.
1001 	 * CRm == {4-7} holds the ID_AA64 registers.
1002 	 *
1003 	 * For full details see the ARMv8 ARM (ARM DDI 0487C.a)
1004 	 * Table D9-2 System instruction encodings for non-Debug System
1005 	 * register accesses.
1006 	 */
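	/*
	 * For example, "mrs x0, id_aa64isar0_el1" encodes Op0 == 3,
	 * Op1 == 0, CRn == 0, CRm == 6, Op2 == 0 and is answered from
	 * user_cpu_desc.id_aa64isar0 below.
	 */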
1007 	if (mrs_Op0(insn) != 3 || mrs_Op1(insn) != 0 || mrs_CRn(insn) != 0)
1008 		return (0);
1009 
1010 	CRm = mrs_CRm(insn);
1011 	if (CRm > 7 || (CRm < 4 && CRm != 0))
1012 		return (0);
1013 
1014 	Op2 = mrs_Op2(insn);
1015 	value = 0;
1016 
1017 	for (i = 0; i < nitems(user_regs); i++) {
1018 		if (user_regs[i].CRm == CRm && user_regs[i].Op2 == Op2) {
1019 			value = CPU_DESC_FIELD(user_cpu_desc, i);
1020 			break;
1021 		}
1022 	}
1023 
1024 	if (CRm == 0) {
1025 		switch (Op2) {
1026 		case 0:
1027 			value = READ_SPECIALREG(midr_el1);
1028 			break;
1029 		case 5:
1030 			value = READ_SPECIALREG(mpidr_el1);
1031 			break;
1032 		case 6:
1033 			value = READ_SPECIALREG(revidr_el1);
1034 			break;
1035 		default:
1036 			return (0);
1037 		}
1038 	}
1039 
1040 	/*
1041 	 * We will handle this instruction; move to the next one so we
1042 	 * don't trap here again.
1043 	 */
1044 	frame->tf_elr += INSN_SIZE;
1045 
1046 	reg = MRS_REGISTER(insn);
1047 	/* If reg is 31 then write to xzr, i.e. do nothing */
1048 	if (reg == 31)
1049 		return (1);
1050 
1051 	if (reg < nitems(frame->tf_x))
1052 		frame->tf_x[reg] = value;
1053 	else if (reg == 30)
1054 		frame->tf_lr = value;
1055 
1056 	return (1);
1057 }
1058 
1059 bool
1060 extract_user_id_field(u_int reg, u_int field_shift, uint8_t *val)
1061 {
1062 	uint64_t value;
1063 	int i;
1064 
1065 	for (i = 0; i < nitems(user_regs); i++) {
1066 		if (user_regs[i].reg == reg) {
1067 			value = CPU_DESC_FIELD(user_cpu_desc, i);
1068 			*val = value >> field_shift;
1069 			return (true);
1070 		}
1071 	}
1072 
1073 	return (false);
1074 }
1075 
1076 bool
1077 get_kernel_reg(u_int reg, uint64_t *val)
1078 {
1079 	int i;
1080 
1081 	for (i = 0; i < nitems(user_regs); i++) {
1082 		if (user_regs[i].reg == reg) {
1083 			*val = CPU_DESC_FIELD(kern_cpu_desc, i);
1084 			return (true);
1085 		}
1086 	}
1087 
1088 	return (false);
1089 }
1090 
1091 static uint64_t
1092 update_lower_register(uint64_t val, uint64_t new_val, u_int shift,
1093     int width, bool sign)
1094 {
1095 	uint64_t mask;
1096 	uint64_t new_field, old_field;
1097 	bool update;
1098 
1099 	KASSERT(width > 0 && width < 64, ("%s: Invalid width %d", __func__,
1100 	    width));
1101 
1102 	mask = (1ul << width) - 1;
1103 	new_field = (new_val >> shift) & mask;
1104 	old_field = (val >> shift) & mask;
1105 
1106 	update = false;
1107 	if (sign) {
1108 		/*
1109 		 * The field is signed. Toggle the sign bit so the comparison
1110 		 * works on unsigned values: this makes positive numbers, i.e.
1111 		 * those with a 0 sign bit, compare larger than negative numbers,
1112 		 * i.e. those with a 1 sign bit, in an unsigned comparison.
1113 		 */
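		/*
		 * For example, with width 4 the sign bit is 0x8: a new
		 * field of 0xf (-1) against an old field of 0x2 (+2)
		 * compares as 0x7 vs 0xa after the toggle, so the
		 * negative value is correctly treated as the lower one.
		 */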
1114 		if ((new_field ^ (1ul << (width - 1))) <
1115 		    (old_field ^ (1ul << (width - 1))))
1116 			update = true;
1117 	} else {
1118 		if (new_field < old_field)
1119 			update = true;
1120 	}
1121 
1122 	if (update) {
1123 		val &= ~(mask << shift);
1124 		val |= new_field << shift;
1125 	}
1126 
1127 	return (val);
1128 }
1129 
1130 void
1131 update_special_regs(u_int cpu)
1132 {
1133 	struct mrs_field *fields;
1134 	uint64_t user_reg, kern_reg, value;
1135 	int i, j;
1136 
1137 	if (cpu == 0) {
1138 		/* Create a user-visible CPU description with safe values */
1139 		memset(&user_cpu_desc, 0, sizeof(user_cpu_desc));
1140 		/* Safe values for these registers */
1141 		user_cpu_desc.id_aa64pfr0 = ID_AA64PFR0_AdvSIMD_NONE |
1142 		    ID_AA64PFR0_FP_NONE | ID_AA64PFR0_EL1_64 |
1143 		    ID_AA64PFR0_EL0_64;
1144 		user_cpu_desc.id_aa64dfr0 = ID_AA64DFR0_DebugVer_8;
1145 	}
1146 
1147 	for (i = 0; i < nitems(user_regs); i++) {
1148 		value = CPU_DESC_FIELD(cpu_desc[cpu], i);
1149 		if (cpu == 0) {
1150 			kern_reg = value;
1151 			user_reg = value;
1152 		} else {
1153 			kern_reg = CPU_DESC_FIELD(kern_cpu_desc, i);
1154 			user_reg = CPU_DESC_FIELD(user_cpu_desc, i);
1155 		}
1156 
1157 		fields = user_regs[i].fields;
1158 		for (j = 0; fields[j].type != 0; j++) {
1159 			switch (fields[j].type & MRS_TYPE_MASK) {
1160 			case MRS_EXACT:
1161 				user_reg &= ~(0xful << fields[j].shift);
1162 				user_reg |=
1163 				    (uint64_t)MRS_EXACT_FIELD(fields[j].type) <<
1164 				    fields[j].shift;
1165 				break;
1166 			case MRS_LOWER:
1167 				user_reg = update_lower_register(user_reg,
1168 				    value, fields[j].shift, 4, fields[j].sign);
1169 				break;
1170 			default:
1171 				panic("Invalid field type: %d", fields[j].type);
1172 			}
1173 			kern_reg = update_lower_register(kern_reg, value,
1174 			    fields[j].shift, 4, fields[j].sign);
1175 		}
1176 
1177 		CPU_DESC_FIELD(kern_cpu_desc, i) = kern_reg;
1178 		CPU_DESC_FIELD(user_cpu_desc, i) = user_reg;
1179 	}
1180 }
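/*
 * Once every CPU has been processed, kern_cpu_desc holds the
 * intersection of features the kernel itself may rely on, while
 * user_cpu_desc additionally forces the MRS_EXACT fields to fixed
 * values and is what user_mrs_handler() and the HWCAP parsing below
 * report to userspace.
 */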
1181 
1182 /* HWCAP */
1183 bool __read_frequently lse_supported = false;
1184 
1185 bool __read_frequently icache_aliasing = false;
1186 bool __read_frequently icache_vmid = false;
1187 
1188 int64_t dcache_line_size;	/* The minimum D cache line size */
1189 int64_t icache_line_size;	/* The minimum I cache line size */
1190 int64_t idcache_line_size;	/* The minimum cache line size */
1191 
1192 static void
1193 identify_cpu_sysinit(void *dummy __unused)
1194 {
1195 	int cpu;
1196 	bool dic, idc;
1197 
1198 	dic = (allow_dic != 0);
1199 	idc = (allow_idc != 0);
1200 
1201 	CPU_FOREACH(cpu) {
1202 		check_cpu_regs(cpu);
1203 		if (cpu != 0)
1204 			update_special_regs(cpu);
1205 
1206 		if (CTR_DIC_VAL(cpu_desc[cpu].ctr) == 0)
1207 			dic = false;
1208 		if (CTR_IDC_VAL(cpu_desc[cpu].ctr) == 0)
1209 			idc = false;
1210 	}
1211 
1212 	/* Exposed to userspace as AT_HWCAP and AT_HWCAP2 */
1213 	elf_hwcap = parse_cpu_features_hwcap();
1214 	elf_hwcap2 = parse_cpu_features_hwcap2();
1215 
1216 	if (dic && idc) {
1217 		arm64_icache_sync_range = &arm64_dic_idc_icache_sync_range;
1218 		if (bootverbose)
1219 			printf("Enabling DIC & IDC ICache sync\n");
1220 	}
1221 
1222 	if ((elf_hwcap & HWCAP_ATOMICS) != 0) {
1223 		lse_supported = true;
1224 		if (bootverbose)
1225 			printf("Enabling LSE atomics in the kernel\n");
1226 	}
1227 #ifdef LSE_ATOMICS
1228 	if (!lse_supported)
1229 		panic("CPU does not support LSE atomic instructions");
1230 #endif
1231 
1232 	install_undef_handler(true, user_mrs_handler);
1233 }
1234 SYSINIT(identify_cpu, SI_SUB_CPU, SI_ORDER_ANY, identify_cpu_sysinit, NULL);
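/*
 * This runs at SI_SUB_CPU so the sanitized register values and the
 * HWCAPs are available early; the per-CPU feature printout is deferred
 * to the SI_SUB_SMP SYSINIT below, after all CPUs have been identified.
 */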
1235 
1236 static void
1237 cpu_features_sysinit(void *dummy __unused)
1238 {
1239 	u_int cpu;
1240 
1241 	CPU_FOREACH(cpu)
1242 		print_cpu_features(cpu);
1243 }
1244 SYSINIT(cpu_features, SI_SUB_SMP, SI_ORDER_ANY, cpu_features_sysinit, NULL);
1245 
1246 static u_long
1247 parse_cpu_features_hwcap(void)
1248 {
1249 	u_long hwcap = 0;
1250 
1251 	switch (ID_AA64ISAR0_TS_VAL(user_cpu_desc.id_aa64isar0)) {
1252 	case ID_AA64ISAR0_TS_CondM_8_4:
1253 	case ID_AA64ISAR0_TS_CondM_8_5:
1254 		hwcap |= HWCAP_FLAGM;
1255 		break;
1256 	default:
1257 		break;
1258 	}
1259 
1260 	if (ID_AA64ISAR0_FHM_VAL(user_cpu_desc.id_aa64isar0) ==
1261 	    ID_AA64ISAR0_FHM_IMPL)
1262 		hwcap |= HWCAP_ASIMDFHM;
1263 
1264 	if (ID_AA64ISAR0_DP_VAL(user_cpu_desc.id_aa64isar0) ==
1265 	    ID_AA64ISAR0_DP_IMPL)
1266 		hwcap |= HWCAP_ASIMDDP;
1267 
1268 	if (ID_AA64ISAR0_SM4_VAL(user_cpu_desc.id_aa64isar0) ==
1269 	    ID_AA64ISAR0_SM4_IMPL)
1270 		hwcap |= HWCAP_SM4;
1271 
1272 	if (ID_AA64ISAR0_SM3_VAL(user_cpu_desc.id_aa64isar0) ==
1273 	    ID_AA64ISAR0_SM3_IMPL)
1274 		hwcap |= HWCAP_SM3;
1275 
1276 	if (ID_AA64ISAR0_SHA3_VAL(user_cpu_desc.id_aa64isar0) ==
1277 	    ID_AA64ISAR0_SHA3_IMPL)
1278 		hwcap |= HWCAP_SHA3;
1279 
1280 	if (ID_AA64ISAR0_RDM_VAL(user_cpu_desc.id_aa64isar0) ==
1281 	    ID_AA64ISAR0_RDM_IMPL)
1282 		hwcap |= HWCAP_ASIMDRDM;
1283 
1284 	if (ID_AA64ISAR0_Atomic_VAL(user_cpu_desc.id_aa64isar0) ==
1285 	    ID_AA64ISAR0_Atomic_IMPL)
1286 		hwcap |= HWCAP_ATOMICS;
1287 
1288 	if (ID_AA64ISAR0_CRC32_VAL(user_cpu_desc.id_aa64isar0) ==
1289 	    ID_AA64ISAR0_CRC32_BASE)
1290 		hwcap |= HWCAP_CRC32;
1291 
1292 	switch (ID_AA64ISAR0_SHA2_VAL(user_cpu_desc.id_aa64isar0)) {
1293 	case ID_AA64ISAR0_SHA2_BASE:
1294 		hwcap |= HWCAP_SHA2;
1295 		break;
1296 	case ID_AA64ISAR0_SHA2_512:
1297 		hwcap |= HWCAP_SHA2 | HWCAP_SHA512;
1298 		break;
1299 	default:
1300 		break;
1301 	}
1302 
1303 	if (ID_AA64ISAR0_SHA1_VAL(user_cpu_desc.id_aa64isar0) ==
1304 	    ID_AA64ISAR0_SHA1_BASE)
1305 		hwcap |= HWCAP_SHA1;
1306 
1307 	switch (ID_AA64ISAR0_AES_VAL(user_cpu_desc.id_aa64isar0)) {
1308 	case ID_AA64ISAR0_AES_BASE:
1309 		hwcap |= HWCAP_AES;
1310 		break;
1311 	case ID_AA64ISAR0_AES_PMULL:
1312 		hwcap |= HWCAP_PMULL | HWCAP_AES;
1313 		break;
1314 	default:
1315 		break;
1316 	}
1317 
1318 	if (ID_AA64ISAR1_SB_VAL(user_cpu_desc.id_aa64isar1) ==
1319 	    ID_AA64ISAR1_SB_IMPL)
1320 		hwcap |= HWCAP_SB;
1321 
1322 	switch (ID_AA64ISAR1_LRCPC_VAL(user_cpu_desc.id_aa64isar1)) {
1323 	case ID_AA64ISAR1_LRCPC_RCPC_8_3:
1324 		hwcap |= HWCAP_LRCPC;
1325 		break;
1326 	case ID_AA64ISAR1_LRCPC_RCPC_8_4:
1327 		hwcap |= HWCAP_LRCPC | HWCAP_ILRCPC;
1328 		break;
1329 	default:
1330 		break;
1331 	}
1332 
1333 	if (ID_AA64ISAR1_FCMA_VAL(user_cpu_desc.id_aa64isar1) ==
1334 	    ID_AA64ISAR1_FCMA_IMPL)
1335 		hwcap |= HWCAP_FCMA;
1336 
1337 	if (ID_AA64ISAR1_JSCVT_VAL(user_cpu_desc.id_aa64isar1) ==
1338 	    ID_AA64ISAR1_JSCVT_IMPL)
1339 		hwcap |= HWCAP_JSCVT;
1340 
1341 	if (ID_AA64ISAR1_DPB_VAL(user_cpu_desc.id_aa64isar1) ==
1342 	    ID_AA64ISAR1_DPB_DCCVAP)
1343 		hwcap |= HWCAP_DCPOP;
1344 
1345 	if (ID_AA64MMFR2_AT_VAL(user_cpu_desc.id_aa64mmfr2) ==
1346 	    ID_AA64MMFR2_AT_IMPL)
1347 		hwcap |= HWCAP_USCAT;
1348 
1349 	if (ID_AA64PFR0_DIT_VAL(user_cpu_desc.id_aa64pfr0) ==
1350 	    ID_AA64PFR0_DIT_PSTATE)
1351 		hwcap |= HWCAP_DIT;
1352 
1353 	if (ID_AA64PFR0_SVE_VAL(user_cpu_desc.id_aa64pfr0) ==
1354 	    ID_AA64PFR0_SVE_IMPL)
1355 		hwcap |= HWCAP_SVE;
1356 
1357 	switch (ID_AA64PFR0_AdvSIMD_VAL(user_cpu_desc.id_aa64pfr0)) {
1358 	case ID_AA64PFR0_AdvSIMD_IMPL:
1359 		hwcap |= HWCAP_ASIMD;
1360 		break;
1361 	case ID_AA64PFR0_AdvSIMD_HP:
1362 		hwcap |= HWCAP_ASIMD | HWCAP_ASIMDHP;
1363 		break;
1364 	default:
1365 		break;
1366 	}
1367 
1368 	switch (ID_AA64PFR0_FP_VAL(user_cpu_desc.id_aa64pfr0)) {
1369 	case ID_AA64PFR0_FP_IMPL:
1370 		hwcap |= HWCAP_FP;
1371 		break;
1372 	case ID_AA64PFR0_FP_HP:
1373 		hwcap |= HWCAP_FP | HWCAP_FPHP;
1374 		break;
1375 	default:
1376 		break;
1377 	}
1378 
1379 	if (ID_AA64PFR1_SSBS_VAL(user_cpu_desc.id_aa64pfr1) ==
1380 	    ID_AA64PFR1_SSBS_PSTATE_MSR)
1381 		hwcap |= HWCAP_SSBS;
1382 
1383 	return (hwcap);
1384 }
1385 
1386 static u_long
1387 parse_cpu_features_hwcap2(void)
1388 {
1389 	u_long hwcap2 = 0;
1390 
1391 	if (ID_AA64ISAR0_RNDR_VAL(user_cpu_desc.id_aa64isar0) ==
1392 	    ID_AA64ISAR0_RNDR_IMPL)
1393 		hwcap2 |= HWCAP2_RNG;
1394 
1395 	if (ID_AA64ISAR0_TS_VAL(user_cpu_desc.id_aa64isar0) ==
1396 	    ID_AA64ISAR0_TS_CondM_8_5)
1397 		hwcap2 |= HWCAP2_FLAGM2;
1398 
1399 	if (ID_AA64ISAR1_I8MM_VAL(user_cpu_desc.id_aa64isar1) ==
1400 	    ID_AA64ISAR1_I8MM_IMPL)
1401 		hwcap2 |= HWCAP2_I8MM;
1402 
1403 	if (ID_AA64ISAR1_DGH_VAL(user_cpu_desc.id_aa64isar1) ==
1404 	    ID_AA64ISAR1_DGH_IMPL)
1405 		hwcap2 |= HWCAP2_DGH;
1406 
1407 	if (ID_AA64ISAR1_BF16_VAL(user_cpu_desc.id_aa64isar1) ==
1408 	    ID_AA64ISAR1_BF16_IMPL)
1409 		hwcap2 |= HWCAP2_BF16;
1410 
1411 	if (ID_AA64ISAR1_FRINTTS_VAL(user_cpu_desc.id_aa64isar1) ==
1412 	    ID_AA64ISAR1_FRINTTS_IMPL)
1413 		hwcap2 |= HWCAP2_FRINT;
1414 
1415 	if (ID_AA64ISAR1_DPB_VAL(user_cpu_desc.id_aa64isar1) ==
1416 	    ID_AA64ISAR1_DPB_DCCVADP)
1417 		hwcap2 |= HWCAP2_DCPODP;
1418 
1419 	if (ID_AA64PFR1_BT_VAL(user_cpu_desc.id_aa64pfr1) ==
1420 	    ID_AA64PFR1_BT_IMPL)
1421 		hwcap2 |= HWCAP2_BTI;
1422 
1423 	return (hwcap2);
1424 }
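/*
 * elf_hwcap and elf_hwcap2 are delivered to userspace in the AT_HWCAP
 * and AT_HWCAP2 auxiliary vector entries, which programs typically
 * query with elf_aux_info(3).
 */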
1425 
1426 static void
1427 print_ctr_fields(struct sbuf *sb, uint64_t reg, void *arg)
1428 {
1429 
1430 	sbuf_printf(sb, "%u byte D-cacheline,", CTR_DLINE_SIZE(reg));
1431 	sbuf_printf(sb, "%u byte I-cacheline,", CTR_ILINE_SIZE(reg));
1432 	reg &= ~(CTR_DLINE_MASK | CTR_ILINE_MASK);
1433 
1434 	switch(CTR_L1IP_VAL(reg)) {
1435 	case CTR_L1IP_VPIPT:
1436 		sbuf_printf(sb, "VPIPT");
1437 		break;
1438 	case CTR_L1IP_AIVIVT:
1439 		sbuf_printf(sb, "AIVIVT");
1440 		break;
1441 	case CTR_L1IP_VIPT:
1442 		sbuf_printf(sb, "VIPT");
1443 		break;
1444 	case CTR_L1IP_PIPT:
1445 		sbuf_printf(sb, "PIPT");
1446 		break;
1447 	}
1448 	sbuf_printf(sb, " ICache,");
1449 	reg &= ~CTR_L1IP_MASK;
1450 
1451 	sbuf_printf(sb, "%d byte ERG,", CTR_ERG_SIZE(reg));
1452 	sbuf_printf(sb, "%d byte CWG", CTR_CWG_SIZE(reg));
1453 	reg &= ~(CTR_ERG_MASK | CTR_CWG_MASK);
1454 
1455 	if (CTR_IDC_VAL(reg) != 0)
1456 		sbuf_printf(sb, ",IDC");
1457 	if (CTR_DIC_VAL(reg) != 0)
1458 		sbuf_printf(sb, ",DIC");
1459 	reg &= ~(CTR_IDC_MASK | CTR_DIC_MASK);
1460 	reg &= ~CTR_RES1;
1461 
1462 	if (reg != 0)
1463 		sbuf_printf(sb, ",%lx", reg);
1464 }
1465 
1466 static void
1467 print_register(struct sbuf *sb, const char *reg_name, uint64_t reg,
1468     void (*print_fields)(struct sbuf *, uint64_t, void *), void *arg)
1469 {
1470 
1471 	sbuf_printf(sb, "%29s = <", reg_name);
1472 
1473 	print_fields(sb, reg, arg);
1474 
1475 	sbuf_finish(sb);
1476 	printf("%s>\n", sbuf_data(sb));
1477 	sbuf_clear(sb);
1478 }
1479 
1480 static void
1481 print_id_fields(struct sbuf *sb, uint64_t reg, void *arg)
1482 {
1483 	struct mrs_field *fields = arg;
1484 	struct mrs_field_value *fv;
1485 	int field, i, j, printed;
1486 
1487 #define SEP_STR	((printed++) == 0) ? "" : ","
1488 	printed = 0;
1489 	for (i = 0; fields[i].type != 0; i++) {
1490 		fv = fields[i].values;
1491 
1492 		/* TODO: Report fields that have no value table as unknown */
1493 		if (fv == NULL)
1494 			continue;
1495 
1496 		field = (reg & fields[i].mask) >> fields[i].shift;
1497 		for (j = 0; fv[j].desc != NULL; j++) {
1498 			if ((fv[j].value >> fields[i].shift) != field)
1499 				continue;
1500 
1501 			if (fv[j].desc[0] != '\0')
1502 				sbuf_printf(sb, "%s%s", SEP_STR, fv[j].desc);
1503 			break;
1504 		}
1505 		if (fv[j].desc == NULL)
1506 			sbuf_printf(sb, "%sUnknown %s(%x)", SEP_STR,
1507 			    fields[i].name, field);
1508 
1509 		reg &= ~(0xful << fields[i].shift);
1510 	}
1511 
1512 	if (reg != 0)
1513 		sbuf_printf(sb, "%s%#lx", SEP_STR, reg);
1514 #undef SEP_STR
1515 }
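/*
 * The resulting line reads, for example, something like
 *   Instruction Set Attributes 0 = <AES+PMULL,SHA1,SHA2,CRC32>
 * with any bits left over after the known fields printed as a raw
 * hexadecimal value.
 */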
1516 
1517 static void
1518 print_id_register(struct sbuf *sb, const char *reg_name, uint64_t reg,
1519     struct mrs_field *fields)
1520 {
1521 
1522 	print_register(sb, reg_name, reg, print_id_fields, fields);
1523 }
1524 
1525 static void
1526 print_cpu_features(u_int cpu)
1527 {
1528 	struct sbuf *sb;
1529 
1530 	sb = sbuf_new_auto();
1531 	sbuf_printf(sb, "CPU%3d: %s %s r%dp%d", cpu,
1532 	    cpu_desc[cpu].cpu_impl_name, cpu_desc[cpu].cpu_part_name,
1533 	    cpu_desc[cpu].cpu_variant, cpu_desc[cpu].cpu_revision);
1534 
1535 	sbuf_cat(sb, " affinity:");
1536 	switch(cpu_aff_levels) {
1537 	default:
1538 	case 4:
1539 		sbuf_printf(sb, " %2d", CPU_AFF3(cpu_desc[cpu].mpidr));
1540 		/* FALLTHROUGH */
1541 	case 3:
1542 		sbuf_printf(sb, " %2d", CPU_AFF2(cpu_desc[cpu].mpidr));
1543 		/* FALLTHROUGH */
1544 	case 2:
1545 		sbuf_printf(sb, " %2d", CPU_AFF1(cpu_desc[cpu].mpidr));
1546 		/* FALLTHROUGH */
1547 	case 1:
1548 	case 0: /* On UP this will be zero */
1549 		sbuf_printf(sb, " %2d", CPU_AFF0(cpu_desc[cpu].mpidr));
1550 		break;
1551 	}
1552 	sbuf_finish(sb);
1553 	printf("%s\n", sbuf_data(sb));
1554 	sbuf_clear(sb);
1555 
1556 	/*
1557 	 * There is a hardware erratum where, if one CPU is performing a TLB
1558 	 * invalidation while another is performing a store-exclusive, the
1559 	 * store-exclusive may return the wrong status. A workaround seems
1560 	 * to be to use an IPI to invalidate on each CPU; however, given the
1561 	 * limited number of affected units (pass 1.1 is the evaluation
1562 	 * hardware revision) and the lack of information from Cavium,
1563 	 * this has not been implemented.
1564 	 *
1565 	 * At the time of writing this the only information is from:
1566 	 * https://lkml.org/lkml/2016/8/4/722
1567 	 */
1568 	/*
1569 	 * XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 on its own also
1570 	 * triggers on pass 2.0+.
1571 	 */
1572 	if (cpu == 0 && CPU_VAR(PCPU_GET(midr)) == 0 &&
1573 	    CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1)
1574 		printf("WARNING: ThunderX Pass 1.1 detected.\nThis has known "
1575 		    "hardware bugs that may cause atomic operations to "
1576 		    "behave incorrectly.\n");
1577 
1578 	/* Cache Type Register */
1579 	if (cpu == 0 || (cpu_print_regs & PRINT_CTR_EL0) != 0) {
1580 		print_register(sb, "Cache Type",
1581 		    cpu_desc[cpu].ctr, print_ctr_fields, NULL);
1582 	}
1583 
1584 	/* AArch64 Instruction Set Attribute Register 0 */
1585 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR0) != 0)
1586 		print_id_register(sb, "Instruction Set Attributes 0",
1587 		    cpu_desc[cpu].id_aa64isar0, id_aa64isar0_fields);
1588 
1589 	/* AArch64 Instruction Set Attribute Register 1 */
1590 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR1) != 0)
1591 		print_id_register(sb, "Instruction Set Attributes 1",
1592 		    cpu_desc[cpu].id_aa64isar1, id_aa64isar1_fields);
1593 
1594 	/* AArch64 Processor Feature Register 0 */
1595 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR0) != 0)
1596 		print_id_register(sb, "Processor Features 0",
1597 		    cpu_desc[cpu].id_aa64pfr0, id_aa64pfr0_fields);
1598 
1599 	/* AArch64 Processor Feature Register 1 */
1600 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR1) != 0)
1601 		print_id_register(sb, "Processor Features 1",
1602 		    cpu_desc[cpu].id_aa64pfr1, id_aa64pfr1_fields);
1603 
1604 	/* AArch64 Memory Model Feature Register 0 */
1605 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR0) != 0)
1606 		print_id_register(sb, "Memory Model Features 0",
1607 		    cpu_desc[cpu].id_aa64mmfr0, id_aa64mmfr0_fields);
1608 
1609 	/* AArch64 Memory Model Feature Register 1 */
1610 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR1) != 0)
1611 		print_id_register(sb, "Memory Model Features 1",
1612 		    cpu_desc[cpu].id_aa64mmfr1, id_aa64mmfr1_fields);
1613 
1614 	/* AArch64 Memory Model Feature Register 2 */
1615 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR2) != 0)
1616 		print_id_register(sb, "Memory Model Features 2",
1617 		    cpu_desc[cpu].id_aa64mmfr2, id_aa64mmfr2_fields);
1618 
1619 	/* AArch64 Debug Feature Register 0 */
1620 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR0) != 0)
1621 		print_id_register(sb, "Debug Features 0",
1622 		    cpu_desc[cpu].id_aa64dfr0, id_aa64dfr0_fields);
1623 
1624 	/* AArch64 Debug Feature Register 1 */
1625 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR1) != 0)
1626 		print_id_register(sb, "Debug Features 1",
1627 		    cpu_desc[cpu].id_aa64dfr1, id_aa64dfr1_fields);
1628 
1629 	/* AArch64 Auxiliary Feature Register 0 */
1630 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR0) != 0)
1631 		print_id_register(sb, "Auxiliary Features 0",
1632 		    cpu_desc[cpu].id_aa64afr0, id_aa64afr0_fields);
1633 
1634 	/* AArch64 Auxiliary Feature Register 1 */
1635 	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR1) != 0)
1636 		print_id_register(sb, "Auxiliary Features 1",
1637 		    cpu_desc[cpu].id_aa64afr1, id_aa64afr1_fields);
1638 
1639 	sbuf_delete(sb);
1640 	sb = NULL;
1641 #undef SEP_STR
1642 }
1643 
1644 void
1645 identify_cache(uint64_t ctr)
1646 {
1647 
1648 	/* Identify the L1 cache type */
1649 	switch (CTR_L1IP_VAL(ctr)) {
1650 	case CTR_L1IP_PIPT:
1651 		break;
1652 	case CTR_L1IP_VPIPT:
1653 		icache_vmid = true;
1654 		break;
1655 	default:
1656 	case CTR_L1IP_VIPT:
1657 		icache_aliasing = true;
1658 		break;
1659 	}
1660 
1661 	if (dcache_line_size == 0) {
1662 		KASSERT(icache_line_size == 0, ("%s: i-cacheline size set: %ld",
1663 		    __func__, icache_line_size));
1664 
1665 		/* Get the D cache line size */
1666 		dcache_line_size = CTR_DLINE_SIZE(ctr);
1667 		/* And the same for the I cache */
1668 		icache_line_size = CTR_ILINE_SIZE(ctr);
1669 
1670 		idcache_line_size = MIN(dcache_line_size, icache_line_size);
1671 	}
1672 
1673 	if (dcache_line_size != CTR_DLINE_SIZE(ctr)) {
1674 		printf("WARNING: D-cacheline size mismatch %ld != %d\n",
1675 		    dcache_line_size, CTR_DLINE_SIZE(ctr));
1676 	}
1677 
1678 	if (icache_line_size != CTR_ILINE_SIZE(ctr)) {
1679 		printf("WARNING: I-cacheline size mismatch %ld != %d\n",
1680 		    icache_line_size, CTR_ILINE_SIZE(ctr));
1681 	}
1682 }
1683 
1684 void
1685 identify_cpu(u_int cpu)
1686 {
1687 	u_int midr;
1688 	u_int impl_id;
1689 	u_int part_id;
1690 	size_t i;
1691 	const struct cpu_parts *cpu_partsp = NULL;
1692 
1693 	midr = get_midr();
1694 
1695 	impl_id = CPU_IMPL(midr);
1696 	for (i = 0; i < nitems(cpu_implementers); i++) {
1697 		if (impl_id == cpu_implementers[i].impl_id ||
1698 		    cpu_implementers[i].impl_id == 0) {
1699 			cpu_desc[cpu].cpu_impl = impl_id;
1700 			cpu_desc[cpu].cpu_impl_name =
1701 			    cpu_implementers[i].impl_name;
1702 			cpu_partsp = cpu_implementers[i].cpu_parts;
1703 			break;
1704 		}
1705 	}
1706 
1707 	part_id = CPU_PART(midr);
1708 	for (i = 0; &cpu_partsp[i] != NULL; i++) {
1709 		if (part_id == cpu_partsp[i].part_id ||
1710 		    cpu_partsp[i].part_id == 0) {
1711 			cpu_desc[cpu].cpu_part_num = part_id;
1712 			cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name;
1713 			break;
1714 		}
1715 	}
1716 
1717 	cpu_desc[cpu].cpu_revision = CPU_REV(midr);
1718 	cpu_desc[cpu].cpu_variant = CPU_VAR(midr);
1719 
1720 	snprintf(cpu_model, sizeof(cpu_model), "%s %s r%dp%d",
1721 	    cpu_desc[cpu].cpu_impl_name, cpu_desc[cpu].cpu_part_name,
1722 	    cpu_desc[cpu].cpu_variant, cpu_desc[cpu].cpu_revision);
1723 
1724 	/* Save affinity for current CPU */
1725 	cpu_desc[cpu].mpidr = get_mpidr();
1726 	CPU_AFFINITY(cpu) = cpu_desc[cpu].mpidr & CPU_AFF_MASK;
1727 
1728 	cpu_desc[cpu].ctr = READ_SPECIALREG(ctr_el0);
1729 	cpu_desc[cpu].id_aa64dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
1730 	cpu_desc[cpu].id_aa64dfr1 = READ_SPECIALREG(id_aa64dfr1_el1);
1731 	cpu_desc[cpu].id_aa64isar0 = READ_SPECIALREG(id_aa64isar0_el1);
1732 	cpu_desc[cpu].id_aa64isar1 = READ_SPECIALREG(id_aa64isar1_el1);
1733 	cpu_desc[cpu].id_aa64mmfr0 = READ_SPECIALREG(id_aa64mmfr0_el1);
1734 	cpu_desc[cpu].id_aa64mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
1735 	cpu_desc[cpu].id_aa64mmfr2 = READ_SPECIALREG(id_aa64mmfr2_el1);
1736 	cpu_desc[cpu].id_aa64pfr0 = READ_SPECIALREG(id_aa64pfr0_el1);
1737 	cpu_desc[cpu].id_aa64pfr1 = READ_SPECIALREG(id_aa64pfr1_el1);
1738 }
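/*
 * identify_cpu() is expected to run on the CPU it describes, as it
 * reads that CPU's own ID registers: once on the boot CPU and once on
 * each secondary CPU as it is brought up.
 */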
1739 
1740 static void
1741 check_cpu_regs(u_int cpu)
1742 {
1743 
1744 	switch (cpu_aff_levels) {
1745 	case 0:
1746 		if (CPU_AFF0(cpu_desc[cpu].mpidr) !=
1747 		    CPU_AFF0(cpu_desc[0].mpidr))
1748 			cpu_aff_levels = 1;
1749 		/* FALLTHROUGH */
1750 	case 1:
1751 		if (CPU_AFF1(cpu_desc[cpu].mpidr) !=
1752 		    CPU_AFF1(cpu_desc[0].mpidr))
1753 			cpu_aff_levels = 2;
1754 		/* FALLTHROUGH */
1755 	case 2:
1756 		if (CPU_AFF2(cpu_desc[cpu].mpidr) !=
1757 		    CPU_AFF2(cpu_desc[0].mpidr))
1758 			cpu_aff_levels = 3;
1759 		/* FALLTHROUGH */
1760 	case 3:
1761 		if (CPU_AFF3(cpu_desc[cpu].mpidr) !=
1762 		    CPU_AFF3(cpu_desc[0].mpidr))
1763 			cpu_aff_levels = 4;
1764 		break;
1765 	}
1766 
1767 	if (cpu_desc[cpu].id_aa64afr0 != cpu_desc[0].id_aa64afr0)
1768 		cpu_print_regs |= PRINT_ID_AA64_AFR0;
1769 	if (cpu_desc[cpu].id_aa64afr1 != cpu_desc[0].id_aa64afr1)
1770 		cpu_print_regs |= PRINT_ID_AA64_AFR1;
1771 
1772 	if (cpu_desc[cpu].id_aa64dfr0 != cpu_desc[0].id_aa64dfr0)
1773 		cpu_print_regs |= PRINT_ID_AA64_DFR0;
1774 	if (cpu_desc[cpu].id_aa64dfr1 != cpu_desc[0].id_aa64dfr1)
1775 		cpu_print_regs |= PRINT_ID_AA64_DFR1;
1776 
1777 	if (cpu_desc[cpu].id_aa64isar0 != cpu_desc[0].id_aa64isar0)
1778 		cpu_print_regs |= PRINT_ID_AA64_ISAR0;
1779 	if (cpu_desc[cpu].id_aa64isar1 != cpu_desc[0].id_aa64isar1)
1780 		cpu_print_regs |= PRINT_ID_AA64_ISAR1;
1781 
1782 	if (cpu_desc[cpu].id_aa64mmfr0 != cpu_desc[0].id_aa64mmfr0)
1783 		cpu_print_regs |= PRINT_ID_AA64_MMFR0;
1784 	if (cpu_desc[cpu].id_aa64mmfr1 != cpu_desc[0].id_aa64mmfr1)
1785 		cpu_print_regs |= PRINT_ID_AA64_MMFR1;
1786 	if (cpu_desc[cpu].id_aa64mmfr2 != cpu_desc[0].id_aa64mmfr2)
1787 		cpu_print_regs |= PRINT_ID_AA64_MMFR2;
1788 
1789 	if (cpu_desc[cpu].id_aa64pfr0 != cpu_desc[0].id_aa64pfr0)
1790 		cpu_print_regs |= PRINT_ID_AA64_PFR0;
1791 	if (cpu_desc[cpu].id_aa64pfr1 != cpu_desc[0].id_aa64pfr1)
1792 		cpu_print_regs |= PRINT_ID_AA64_PFR1;
1793 
1794 	if (cpu_desc[cpu].ctr != cpu_desc[0].ctr) {
1795 		/*
1796 		 * If the cache type register is different, we may
1797 		 * have a different L1 cache type.
1798 		 */
1799 		identify_cache(cpu_desc[cpu].ctr);
1800 		cpu_print_regs |= PRINT_CTR_EL0;
1801 	}
1802 }
1803