/*-
 * Copyright 2014 Svatopluk Kraus <[email protected]>
 * Copyright 2014 Michal Meloun <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
#include <machine/cpuinfo.h>
#include <machine/elf.h>
#include <machine/md_var.h>

void reinit_mmu(uint32_t ttb, uint32_t aux_clr, uint32_t aux_set);

int disable_bp_hardening;
int spectre_v2_safe = 1;

struct cpuinfo cpuinfo =
{
	/* Use safe defaults for start */
	.dcache_line_size = 32,
	.dcache_line_mask = 31,
	.icache_line_size = 32,
	.icache_line_mask = 31,
};
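
/*
 * Presumably, 32 bytes is the smallest L1 line size among the CPUs this
 * code supports. Assuming a line size no larger than the real one only
 * makes cache maintenance loops do extra work; it never skips a line.
 */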

static SYSCTL_NODE(_hw, OID_AUTO, cpu, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "CPU");
static SYSCTL_NODE(_hw_cpu, OID_AUTO, quirks, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "CPU quirks");

/*
 * Tunable CPU quirks.
 * Be careful: ACTLR is writable only when the CPU runs in the secure
 * mode (world), and a write to ACTLR can otherwise cause an exception!
 * These quirks are intended for optimizing CPU performance, not for
 * applying errata workarounds. A CPU with unfixed errata cannot be
 * expected to execute the kernel reliably before the quirks are applied.
 */
static uint32_t cpu_quirks_actlr_mask;
SYSCTL_INT(_hw_cpu_quirks, OID_AUTO, actlr_mask,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &cpu_quirks_actlr_mask, 0,
    "Bits to be masked in ACTLR");

static uint32_t cpu_quirks_actlr_set;
SYSCTL_INT(_hw_cpu_quirks, OID_AUTO, actlr_set,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &cpu_quirks_actlr_set, 0,
    "Bits to be set in ACTLR");

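/*
 * Example with hypothetical values: to clear and then set bit 6 of ACTLR
 * on every core, add to /boot/loader.conf:
 *
 *   hw.cpu.quirks.actlr_mask="0x40"
 *   hw.cpu.quirks.actlr_set="0x40"
 */
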
/* Read and parse CPU id scheme */
void
cpuinfo_init(void)
{
	uint32_t tmp;

	/*
	 * Prematurely fetch CPU quirks. Standard fetch for tunable
	 * sysctls is handled using SYSINIT, thus too late for boot CPU.
	 * Keep names in sync with sysctls.
	 */
	TUNABLE_INT_FETCH("hw.cpu.quirks.actlr_mask", &cpu_quirks_actlr_mask);
	TUNABLE_INT_FETCH("hw.cpu.quirks.actlr_set", &cpu_quirks_actlr_set);

	cpuinfo.midr = cp15_midr_get();
	/* Test old version id schemes first */
	if ((cpuinfo.midr & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD) {
		if (CPU_ID_ISOLD(cpuinfo.midr)) {
			/* obsolete ARMv2 or ARMv3 CPU */
			cpuinfo.midr = 0;
			return;
		}
		if (CPU_ID_IS7(cpuinfo.midr)) {
			if ((cpuinfo.midr & (1 << 23)) == 0) {
				/* obsolete ARMv3 CPU */
				cpuinfo.midr = 0;
				return;
			}
			/* ARMv4T CPU */
			cpuinfo.architecture = 1;
			cpuinfo.revision = (cpuinfo.midr >> 16) & 0x7F;
		} else {
			/* ARM new id scheme */
			cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
			cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
		}
	} else {
		/* non-ARM -> must be new id scheme */
		cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
		cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
	}
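	/*
	 * For reference, the new id scheme MIDR layout is: [31:24]
	 * implementer, [23:20] variant (revision above), [19:16]
	 * architecture, [15:4] primary part number, [3:0] revision
	 * (patch below).
	 */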
	/* Parse the rest of MIDR. */
	cpuinfo.implementer = (cpuinfo.midr >> 24) & 0xFF;
	cpuinfo.part_number = (cpuinfo.midr >> 4) & 0xFFF;
	cpuinfo.patch = cpuinfo.midr & 0x0F;

	/* CP15 c0,c0 regs 0-7 exist on all CPUs (although aliased with MIDR) */
	cpuinfo.ctr = cp15_ctr_get();
	cpuinfo.tcmtr = cp15_tcmtr_get();
	cpuinfo.tlbtr = cp15_tlbtr_get();
	cpuinfo.mpidr = cp15_mpidr_get();
	cpuinfo.revidr = cp15_revidr_get();

	/* The feature registers below exist only with the v7 CPU id scheme. */
	if (cpuinfo.architecture != 0xF)
		return;
	cpuinfo.id_pfr0 = cp15_id_pfr0_get();
	cpuinfo.id_pfr1 = cp15_id_pfr1_get();
	cpuinfo.id_dfr0 = cp15_id_dfr0_get();
	cpuinfo.id_afr0 = cp15_id_afr0_get();
	cpuinfo.id_mmfr0 = cp15_id_mmfr0_get();
	cpuinfo.id_mmfr1 = cp15_id_mmfr1_get();
	cpuinfo.id_mmfr2 = cp15_id_mmfr2_get();
	cpuinfo.id_mmfr3 = cp15_id_mmfr3_get();
	cpuinfo.id_isar0 = cp15_id_isar0_get();
	cpuinfo.id_isar1 = cp15_id_isar1_get();
	cpuinfo.id_isar2 = cp15_id_isar2_get();
	cpuinfo.id_isar3 = cp15_id_isar3_get();
	cpuinfo.id_isar4 = cp15_id_isar4_get();
	cpuinfo.id_isar5 = cp15_id_isar5_get();

/* Not yet - CBAR exists only on ARM SMP Cortex-A CPUs
	cpuinfo.cbar = cp15_cbar_get();
*/
	if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) {
		cpuinfo.ccsidr = cp15_ccsidr_get();
		cpuinfo.clidr = cp15_clidr_get();
	}

	/* Test if revidr is implemented */
	if (cpuinfo.revidr == cpuinfo.midr)
		cpuinfo.revidr = 0;

	/* parsed bits of above registers */
	/* id_mmfr0 */
	cpuinfo.outermost_shareability = (cpuinfo.id_mmfr0 >> 8) & 0xF;
	cpuinfo.shareability_levels = (cpuinfo.id_mmfr0 >> 12) & 0xF;
	cpuinfo.auxiliary_registers = (cpuinfo.id_mmfr0 >> 20) & 0xF;
	cpuinfo.innermost_shareability = (cpuinfo.id_mmfr0 >> 28) & 0xF;
	/* id_mmfr2 */
	cpuinfo.mem_barrier = (cpuinfo.id_mmfr2 >> 20) & 0xF;
	/* id_mmfr3 */
	cpuinfo.coherent_walk = (cpuinfo.id_mmfr3 >> 20) & 0xF;
	cpuinfo.maintenance_broadcast = (cpuinfo.id_mmfr3 >> 12) & 0xF;
	/* id_pfr1 */
	cpuinfo.generic_timer_ext = (cpuinfo.id_pfr1 >> 16) & 0xF;
	cpuinfo.virtualization_ext = (cpuinfo.id_pfr1 >> 12) & 0xF;
	cpuinfo.security_ext = (cpuinfo.id_pfr1 >> 4) & 0xF;
	/* mpidr */
	cpuinfo.mp_ext = (cpuinfo.mpidr >> 31u) & 0x1;

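	/*
	 * In the ARMv7 CTR format, IminLine/DminLine hold log2 of the line
	 * size in 4-byte words (hence the "+ 2" below); the older format's
	 * 3-bit LEN field encodes the line size as 8 << LEN bytes.
	 */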
	/* L1 Cache sizes */
	if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) {
		cpuinfo.dcache_line_size =
		    1 << (CPU_CT_DMINLINE(cpuinfo.ctr) + 2);
		cpuinfo.icache_line_size =
		    1 << (CPU_CT_IMINLINE(cpuinfo.ctr) + 2);
	} else {
		cpuinfo.dcache_line_size =
		    1 << (CPU_CT_xSIZE_LEN(CPU_CT_DSIZE(cpuinfo.ctr)) + 3);
		cpuinfo.icache_line_size =
		    1 << (CPU_CT_xSIZE_LEN(CPU_CT_ISIZE(cpuinfo.ctr)) + 3);
	}
	cpuinfo.dcache_line_mask = cpuinfo.dcache_line_size - 1;
	cpuinfo.icache_line_mask = cpuinfo.icache_line_size - 1;

	/* Fill AT_HWCAP bits. */
	elf_hwcap |= HWCAP_HALF | HWCAP_FAST_MULT; /* Required for all CPUs */
	elf_hwcap |= HWCAP_TLS | HWCAP_EDSP;	   /* Required for v6+ CPUs */

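	/*
	 * ID_ISAR0[27:24] (Divide_instrs): 1 means SDIV/UDIV exist in
	 * Thumb state only, 2 means they exist in ARM state as well.
	 */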
	tmp = (cpuinfo.id_isar0 >> 24) & 0xF;	/* Divide_instrs */
	if (tmp >= 1)
		elf_hwcap |= HWCAP_IDIVT;
	if (tmp >= 2)
		elf_hwcap |= HWCAP_IDIVA;

	tmp = (cpuinfo.id_pfr0 >> 4) & 0xF;	/* State1 */
	if (tmp >= 1)
		elf_hwcap |= HWCAP_THUMB;

	tmp = (cpuinfo.id_pfr0 >> 12) & 0xF;	/* State3 */
	if (tmp >= 1)
		elf_hwcap |= HWCAP_THUMBEE;

	tmp = (cpuinfo.id_mmfr0 >> 0) & 0xF;	/* VMSA */
	if (tmp >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* Fill AT_HWCAP2 bits. */
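	/*
	 * ID_ISAR5[7:4] (AES): 1 means the AESE/AESD/AESMC/AESIMC
	 * instructions are implemented, 2 adds VMULL.P64 (PMULL).
	 */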
	tmp = (cpuinfo.id_isar5 >> 4) & 0xF;	/* AES */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_AES;
	if (tmp >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;

	tmp = (cpuinfo.id_isar5 >> 8) & 0xF;	/* SHA1 */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	tmp = (cpuinfo.id_isar5 >> 12) & 0xF;	/* SHA2 */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	tmp = (cpuinfo.id_isar5 >> 16) & 0xF;	/* CRC32 */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}

/*
 * Get bits that must be set or cleared in the ACTLR register.
 * Note: the bits in the ACTLR register are IMPLEMENTATION DEFINED.
 * It's expected that the SCU is in an operational state before this
 * function is called.
 */
static void
cpuinfo_get_actlr_modifier(uint32_t *actlr_mask, uint32_t *actlr_set)
{

	*actlr_mask = 0;
	*actlr_set = 0;

	if (cpuinfo.implementer == CPU_IMPLEMENTER_ARM) {
		switch (cpuinfo.part_number) {
		case CPU_ARCH_CORTEX_A75:
		case CPU_ARCH_CORTEX_A73:
		case CPU_ARCH_CORTEX_A72:
		case CPU_ARCH_CORTEX_A57:
		case CPU_ARCH_CORTEX_A53:
			/* Nothing to do for AArch32 */
			break;
		case CPU_ARCH_CORTEX_A17:
		case CPU_ARCH_CORTEX_A12: /* A12 is merged into A17 */
			/*
			 * Enable SMP mode
			 */
			*actlr_mask = (1 << 6);
			*actlr_set = (1 << 6);
			break;
		case CPU_ARCH_CORTEX_A15:
			/*
			 * Enable snoop-delayed exclusive handling
			 * Enable SMP mode
			 */
			*actlr_mask = (1U << 31) | (1 << 6);
			*actlr_set = (1U << 31) | (1 << 6);
			break;
		case CPU_ARCH_CORTEX_A9:
			/*
			 * Disable exclusive L1/L2 cache control
			 * Enable SMP mode
			 * Enable Cache and TLB maintenance broadcast
			 */
			*actlr_mask = (1 << 7) | (1 << 6) | (1 << 0);
			*actlr_set = (1 << 6) | (1 << 0);
			break;
		case CPU_ARCH_CORTEX_A8:
			/*
			 * Enable L2 cache
			 * Enable L1 data cache hardware alias checks
			 */
			*actlr_mask = (1 << 1) | (1 << 0);
			*actlr_set = (1 << 1);
			break;
		case CPU_ARCH_CORTEX_A7:
			/*
			 * Enable SMP mode
			 */
			*actlr_mask = (1 << 6);
			*actlr_set = (1 << 6);
			break;
		case CPU_ARCH_CORTEX_A5:
			/*
			 * Disable exclusive L1/L2 cache control
			 * Enable SMP mode
			 * Enable Cache and TLB maintenance broadcast
			 */
			*actlr_mask = (1 << 7) | (1 << 6) | (1 << 0);
			*actlr_set = (1 << 6) | (1 << 0);
			break;
		case CPU_ARCH_ARM1176:
			/*
			 * Restrict cache size to 16KB
			 * Enable the return stack
			 * Enable dynamic branch prediction
			 * Enable static branch prediction
			 */
			*actlr_mask = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0);
			*actlr_set = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0);
			break;
		}
		return;
	}
}

/* Reinitialize MMU to final kernel mapping and apply all CPU quirks. */
void
cpuinfo_reinit_mmu(uint32_t ttb)
{
	uint32_t actlr_mask;
	uint32_t actlr_set;

	cpuinfo_get_actlr_modifier(&actlr_mask, &actlr_set);
	actlr_mask |= cpu_quirks_actlr_mask;
	actlr_set |= cpu_quirks_actlr_set;
	reinit_mmu(ttb, actlr_mask, actlr_set);
}

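/*
 * Modify ACTLR and verify the write by reading the register back: ACTLR
 * may not be writable from the non-secure world, in which case the only
 * way to detect the failure is that the new value did not stick.
 */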
static bool
modify_actlr(uint32_t clear, uint32_t set)
{
	uint32_t reg, newreg;

	reg = cp15_actlr_get();
	newreg = reg;
	newreg &= ~clear;
	newreg |= set;
	if (reg == newreg)
		return (true);
	cp15_actlr_set(newreg);

	reg = cp15_actlr_get();
	if (reg == newreg)
		return (true);
	return (false);
}

/* Apply/restore BP hardening on current core. */
static int
apply_bp_hardening(bool enable, int kind, bool actrl, uint32_t set_mask)
{
	if (enable) {
		if (actrl && !modify_actlr(0, set_mask))
			return (-1);
		PCPU_SET(bp_harden_kind, kind);
	} else {
		PCPU_SET(bp_harden_kind, PCPU_BP_HARDEN_KIND_NONE);
		if (actrl)
			modify_actlr(~0, PCPU_GET(original_actlr));
		spectre_v2_safe = 0;
	}
	return (0);
}

static void
handle_bp_hardening(bool enable)
{
	int kind;
	char *kind_str;

	kind = PCPU_BP_HARDEN_KIND_NONE;
	/*
	 * Note: Access to ACTLR is locked to the secure world on most boards.
	 * This means that full BP hardening depends on updated u-boot/firmware
	 * or may be impossible altogether (if the secure monitor is in on-chip
	 * ROM).
	 */
	if (cpuinfo.implementer == CPU_IMPLEMENTER_ARM) {
		switch (cpuinfo.part_number) {
		case CPU_ARCH_CORTEX_A8:
			/*
			 * For Cortex-A8, the IBE bit must be set, otherwise
			 * BPIALL is effectively a NOP.
			 * Unfortunately, Cortex-A8 is also affected by
			 * ARM erratum 687067, which makes BPIALL non-working
			 * if the IBE bit is set and 'Instruction L1 System
			 * Array Debug Register 0' is not 0.
			 * That register is not reset on power-up and is
			 * accessible only from the secure world, so we can
			 * neither fix nor even detect this issue.
			 * I'm afraid that the on-chip ROM based secure monitor
			 * on AM335x (BeagleBone) doesn't reset this debug
			 * register.
			 */
			kind = PCPU_BP_HARDEN_KIND_BPIALL;
			if (apply_bp_hardening(enable, kind, true, 1 << 6) != 0)
				goto actlr_err;
			break;

		case CPU_ARCH_CORTEX_A9:
		case CPU_ARCH_CORTEX_A12:
		case CPU_ARCH_CORTEX_A17:
		case CPU_ARCH_CORTEX_A57:
		case CPU_ARCH_CORTEX_A72:
		case CPU_ARCH_CORTEX_A73:
		case CPU_ARCH_CORTEX_A75:
			kind = PCPU_BP_HARDEN_KIND_BPIALL;
			if (apply_bp_hardening(enable, kind, false, 0) != 0)
				goto actlr_err;
			break;

		case CPU_ARCH_CORTEX_A15:
			/*
			 * For Cortex-A15, set the 'Enable invalidates of BTB'
			 * bit. Even with this bit set, BPIALL is still
			 * effectively a NOP, but ICIALLU then also flushes the
			 * branch predictor as a side effect.
			 */
			kind = PCPU_BP_HARDEN_KIND_ICIALLU;
			if (apply_bp_hardening(enable, kind, true, 1 << 0) != 0)
				goto actlr_err;
			break;

		default:
			break;
		}
	} else if (cpuinfo.implementer == CPU_IMPLEMENTER_QCOM) {
		printf("!!!WARNING!!! CPU(%d) is vulnerable to speculative "
		    "branch attacks. !!!\n"
		    "Qualcomm Krait cores are known (or believed) to be "
		    "vulnerable to\n"
		    "speculative branch attacks, no mitigation exists yet.\n",
		    PCPU_GET(cpuid));
		goto unknown_mitigation;
	} else {
		goto unknown_mitigation;
	}

	if (bootverbose) {
		switch (kind) {
		case PCPU_BP_HARDEN_KIND_NONE:
			kind_str = "not necessary";
			break;
		case PCPU_BP_HARDEN_KIND_BPIALL:
			kind_str = "BPIALL";
			break;
		case PCPU_BP_HARDEN_KIND_ICIALLU:
			kind_str = "ICIALLU";
			break;
		default:
			panic("Unknown BP hardening kind (%d).", kind);
		}
		printf("CPU(%d) applied BP hardening: %s\n", PCPU_GET(cpuid),
		    kind_str);
	}

	return;

unknown_mitigation:
	PCPU_SET(bp_harden_kind, PCPU_BP_HARDEN_KIND_NONE);
	spectre_v2_safe = 0;
	return;

actlr_err:
	PCPU_SET(bp_harden_kind, PCPU_BP_HARDEN_KIND_NONE);
	spectre_v2_safe = 0;
	printf("!!!WARNING!!! CPU(%d) is vulnerable to speculative branch "
	    "attacks. !!!\n"
	    "We cannot enable the required bit(s) in the ACTLR register\n"
	    "because it's locked by the secure monitor and/or firmware.\n",
	    PCPU_GET(cpuid));
}

void
cpuinfo_init_bp_hardening(void)
{

	/*
	 * Store the original unmodified ACTLR, so we can restore it when
	 * BP hardening is disabled by sysctl.
	 */
	PCPU_SET(original_actlr, cp15_actlr_get());
	handle_bp_hardening(true);
}

static void
bp_hardening_action(void *arg)
{

	handle_bp_hardening(disable_bp_hardening == 0);
}

static int
sysctl_disable_bp_hardening(SYSCTL_HANDLER_ARGS)
{
	int rv;

	rv = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);

	if (!rv && req->newptr) {
		spectre_v2_safe = 1;
		dmb();
#ifdef SMP
		smp_rendezvous_cpus(all_cpus, smp_no_rendezvous_barrier,
		    bp_hardening_action, NULL, NULL);
#else
		bp_hardening_action(NULL);
#endif
	}

	return (rv);
}

SYSCTL_PROC(_machdep, OID_AUTO, disable_bp_hardening,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    &disable_bp_hardening, 0, sysctl_disable_bp_hardening, "I",
    "Disable BP hardening mitigation.");
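
/*
 * For example, "sysctl machdep.disable_bp_hardening=1" disables the
 * mitigation on all cores and clears machdep.spectre_v2_safe.
 */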

SYSCTL_INT(_machdep, OID_AUTO, spectre_v2_safe, CTLFLAG_RD,
    &spectre_v2_safe, 0, "System is safe from Spectre Version 2 attacks");
529