xref: /freebsd-14.2/sys/arm/arm/cpuinfo.c (revision 685dc743)
/*-
 * Copyright 2014 Svatopluk Kraus <[email protected]>
 * Copyright 2014 Michal Meloun <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
#include <machine/cpuinfo.h>
#include <machine/elf.h>
#include <machine/md_var.h>

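/*
 * reinit_mmu() is implemented outside of this file, in the low-level MMU
 * start-up code; it switches to the given translation table and applies the
 * ACTLR clear/set masks on the way.
 */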
void reinit_mmu(uint32_t ttb, uint32_t aux_clr, uint32_t aux_set);

int disable_bp_hardening;
int spectre_v2_safe = 1;

struct cpuinfo cpuinfo =
{
	/* Use safe defaults for start */
	.dcache_line_size = 32,
	.dcache_line_mask = 31,
	.icache_line_size = 32,
	.icache_line_mask = 31,
};

static SYSCTL_NODE(_hw, OID_AUTO, cpu, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "CPU");
static SYSCTL_NODE(_hw_cpu, OID_AUTO, quirks, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "CPU quirks");

/*
 * Tunable CPU quirks.
 * Be careful: ACTLR is writable only from the secure world on many CPUs,
 * so if the CPU was not started in secure mode (world), a write to ACTLR
 * can cause an exception!
 * These quirks are intended for optimizing CPU performance, not for
 * applying errata workarounds. Nobody can expect that a CPU with unfixed
 * errata is stable enough to execute the kernel until the quirks are applied.
 */
static uint32_t cpu_quirks_actlr_mask;
SYSCTL_INT(_hw_cpu_quirks, OID_AUTO, actlr_mask,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &cpu_quirks_actlr_mask, 0,
    "Bits to be masked in ACTLR");

static uint32_t cpu_quirks_actlr_set;
SYSCTL_INT(_hw_cpu_quirks, OID_AUTO, actlr_set,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &cpu_quirks_actlr_set, 0,
    "Bits to be set in ACTLR");
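
/*
 * Both quirk values can also be preset as loader tunables, e.g. in
 * /boot/loader.conf (the value below is only an illustration):
 *
 *	hw.cpu.quirks.actlr_set="0x40"
 */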

static int
sysctl_hw_cpu_quirks_actrl_value(SYSCTL_HANDLER_ARGS)
{
	uint32_t reg;

	reg = cp15_actlr_get();
	return (SYSCTL_OUT(req, &reg, sizeof(reg)));
}
SYSCTL_PROC(_hw_cpu_quirks, OID_AUTO, actlr_value,
    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_hw_cpu_quirks_actrl_value, "IU",
    "Value of ACTLR");

/* Read and parse CPU id scheme */
void
cpuinfo_init(void)
{
	uint32_t tmp;

	/*
	 * Prematurely fetch CPU quirks. Standard fetch for tunable
	 * sysctls is handled using SYSINIT, thus too late for boot CPU.
	 * Keep names in sync with sysctls.
	 */
	TUNABLE_INT_FETCH("hw.cpu.quirks.actlr_mask", &cpu_quirks_actlr_mask);
	TUNABLE_INT_FETCH("hw.cpu.quirks.actlr_set", &cpu_quirks_actlr_set);

	cpuinfo.midr = cp15_midr_get();
	/* Test old version id schemes first */
	if ((cpuinfo.midr & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD) {
		if (CPU_ID_ISOLD(cpuinfo.midr)) {
			/* obsolete ARMv2 or ARMv3 CPU */
			cpuinfo.midr = 0;
			return;
		}
		if (CPU_ID_IS7(cpuinfo.midr)) {
			if ((cpuinfo.midr & (1 << 23)) == 0) {
				/* obsolete ARMv3 CPU */
				cpuinfo.midr = 0;
				return;
			}
			/* ARMv4T CPU */
			cpuinfo.architecture = 1;
			cpuinfo.revision = (cpuinfo.midr >> 16) & 0x7F;
		} else {
			/* ARM new id scheme */
			cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
			cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
		}
	} else {
		/* non ARM -> must be new id scheme */
		cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
		cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
	}
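
	/*
	 * In the new ID scheme the MIDR layout is: implementer [31:24],
	 * variant [23:20], architecture [19:16], primary part number [15:4]
	 * and revision [3:0].  The variant field is kept in cpuinfo.revision
	 * above and the revision field in cpuinfo.patch below.
	 */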
	/* Parse rest of MIDR */
	cpuinfo.implementer = (cpuinfo.midr >> 24) & 0xFF;
	cpuinfo.part_number = (cpuinfo.midr >> 4) & 0xFFF;
	cpuinfo.patch = cpuinfo.midr & 0x0F;

	/* CP15 c0,c0 regs 0-7 exist on all CPUs (although aliased with MIDR) */
	cpuinfo.ctr = cp15_ctr_get();
	cpuinfo.tcmtr = cp15_tcmtr_get();
	cpuinfo.tlbtr = cp15_tlbtr_get();
	cpuinfo.mpidr = cp15_mpidr_get();
	cpuinfo.revidr = cp15_revidr_get();

	/* If the CPU does not implement the ARMv7 CPUID scheme, we are done. */
	if (cpuinfo.architecture != 0xF)
		return;
	cpuinfo.id_pfr0 = cp15_id_pfr0_get();
	cpuinfo.id_pfr1 = cp15_id_pfr1_get();
	cpuinfo.id_dfr0 = cp15_id_dfr0_get();
	cpuinfo.id_afr0 = cp15_id_afr0_get();
	cpuinfo.id_mmfr0 = cp15_id_mmfr0_get();
	cpuinfo.id_mmfr1 = cp15_id_mmfr1_get();
	cpuinfo.id_mmfr2 = cp15_id_mmfr2_get();
	cpuinfo.id_mmfr3 = cp15_id_mmfr3_get();
	cpuinfo.id_isar0 = cp15_id_isar0_get();
	cpuinfo.id_isar1 = cp15_id_isar1_get();
	cpuinfo.id_isar2 = cp15_id_isar2_get();
	cpuinfo.id_isar3 = cp15_id_isar3_get();
	cpuinfo.id_isar4 = cp15_id_isar4_get();
	cpuinfo.id_isar5 = cp15_id_isar5_get();

/* Not yet - CBAR exists only on ARM SMP Cortex-A CPUs
	cpuinfo.cbar = cp15_cbar_get();
*/
	if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) {
		cpuinfo.ccsidr = cp15_ccsidr_get();
		cpuinfo.clidr = cp15_clidr_get();
	}

	/* Test if revidr is implemented */
	if (cpuinfo.revidr == cpuinfo.midr)
		cpuinfo.revidr = 0;

	/* parsed bits of above registers */
	/* id_mmfr0 */
	cpuinfo.outermost_shareability = (cpuinfo.id_mmfr0 >> 8) & 0xF;
	cpuinfo.shareability_levels = (cpuinfo.id_mmfr0 >> 12) & 0xF;
	cpuinfo.auxiliary_registers = (cpuinfo.id_mmfr0 >> 20) & 0xF;
	cpuinfo.innermost_shareability = (cpuinfo.id_mmfr0 >> 28) & 0xF;
	/* id_mmfr2 */
	cpuinfo.mem_barrier = (cpuinfo.id_mmfr2 >> 20) & 0xF;
	/* id_mmfr3 */
	cpuinfo.coherent_walk = (cpuinfo.id_mmfr3 >> 20) & 0xF;
	cpuinfo.maintenance_broadcast = (cpuinfo.id_mmfr3 >> 12) & 0xF;
	/* id_pfr1 */
	cpuinfo.generic_timer_ext = (cpuinfo.id_pfr1 >> 16) & 0xF;
	cpuinfo.virtualization_ext = (cpuinfo.id_pfr1 >> 12) & 0xF;
	cpuinfo.security_ext = (cpuinfo.id_pfr1 >> 4) & 0xF;
	/* mpidr */
	cpuinfo.mp_ext = (cpuinfo.mpidr >> 31u) & 0x1;

	/* L1 Cache sizes */
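	/*
	 * In the ARMv7 CTR format, IminLine/DminLine hold log2 of the line
	 * length in 4-byte words, so the size in bytes is 1 << (field + 2).
	 * The older CTR format encodes the line length so that it works out
	 * to 1 << (len + 3) bytes.
	 */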
	if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) {
		cpuinfo.dcache_line_size =
		    1 << (CPU_CT_DMINLINE(cpuinfo.ctr) + 2);
		cpuinfo.icache_line_size =
		    1 << (CPU_CT_IMINLINE(cpuinfo.ctr) + 2);
	} else {
		cpuinfo.dcache_line_size =
		    1 << (CPU_CT_xSIZE_LEN(CPU_CT_DSIZE(cpuinfo.ctr)) + 3);
		cpuinfo.icache_line_size =
		    1 << (CPU_CT_xSIZE_LEN(CPU_CT_ISIZE(cpuinfo.ctr)) + 3);
	}
	cpuinfo.dcache_line_mask = cpuinfo.dcache_line_size - 1;
	cpuinfo.icache_line_mask = cpuinfo.icache_line_size - 1;

	/* Fill AT_HWCAP bits. */
	elf_hwcap |= HWCAP_HALF | HWCAP_FAST_MULT; /* Required for all CPUs */
	elf_hwcap |= HWCAP_TLS | HWCAP_EDSP;	   /* Required for v6+ CPUs */

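	/*
	 * The ID register fields tested below are 4-bit values in which a
	 * higher number always implies a superset of the features implied
	 * by the lower ones, hence the ">=" comparisons.
	 */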
	tmp = (cpuinfo.id_isar0 >> 24) & 0xF;	/* Divide_instrs */
	if (tmp >= 1)
		elf_hwcap |= HWCAP_IDIVT;
	if (tmp >= 2)
		elf_hwcap |= HWCAP_IDIVA;

	tmp = (cpuinfo.id_pfr0 >> 4) & 0xF;	/* State1 */
	if (tmp >= 1)
		elf_hwcap |= HWCAP_THUMB;

	tmp = (cpuinfo.id_pfr0 >> 12) & 0xF;	/* State3 */
	if (tmp >= 1)
		elf_hwcap |= HWCAP_THUMBEE;

	tmp = (cpuinfo.id_mmfr0 >> 0) & 0xF;	/* VMSA */
	if (tmp >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* Fill AT_HWCAP2 bits. */
	tmp = (cpuinfo.id_isar5 >> 4) & 0xF;	/* AES */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_AES;
	if (tmp >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;

	tmp = (cpuinfo.id_isar5 >> 8) & 0xF;	/* SHA1 */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	tmp = (cpuinfo.id_isar5 >> 12) & 0xF;	/* SHA2 */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	tmp = (cpuinfo.id_isar5 >> 16) & 0xF;	/* CRC32 */
	if (tmp >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}

/*
 * Get the bits that must be set or cleared in the ACTLR register.
 * Note: The bits in the ACTLR register are IMPLEMENTATION DEFINED.
 * It is expected that the SCU is in an operational state before this
 * function is called.
 */
static void
cpuinfo_get_actlr_modifier(uint32_t *actlr_mask, uint32_t *actlr_set)
{

	*actlr_mask = 0;
	*actlr_set = 0;

	if (cpuinfo.implementer == CPU_IMPLEMENTER_ARM) {
		switch (cpuinfo.part_number) {
		case CPU_ARCH_CORTEX_A75:
		case CPU_ARCH_CORTEX_A73:
		case CPU_ARCH_CORTEX_A72:
		case CPU_ARCH_CORTEX_A57:
		case CPU_ARCH_CORTEX_A53:
			/* Nothing to do for AArch32 */
			break;
		case CPU_ARCH_CORTEX_A17:
		case CPU_ARCH_CORTEX_A12: /* A12 is merged to A17 */
			/*
			 * Enable SMP mode
			 */
			*actlr_mask = (1 << 6);
			*actlr_set = (1 << 6);
			break;
		case CPU_ARCH_CORTEX_A15:
			/*
			 * Enable snoop-delayed exclusive handling
			 * Enable SMP mode
			 */
			*actlr_mask = (1U << 31) | (1 << 6);
			*actlr_set = (1U << 31) | (1 << 6);
			break;
		case CPU_ARCH_CORTEX_A9:
			/*
			 * Disable exclusive L1/L2 cache control
			 * Enable SMP mode
			 * Enable Cache and TLB maintenance broadcast
			 */
			*actlr_mask = (1 << 7) | (1 << 6) | (1 << 0);
			*actlr_set = (1 << 6) | (1 << 0);
			break;
		case CPU_ARCH_CORTEX_A8:
			/*
			 * Enable L2 cache
			 * Enable L1 data cache hardware alias checks
			 */
			*actlr_mask = (1 << 1) | (1 << 0);
			*actlr_set = (1 << 1);
			break;
		case CPU_ARCH_CORTEX_A7:
			/*
			 * Enable SMP mode
			 */
			*actlr_mask = (1 << 6);
			*actlr_set = (1 << 6);
			break;
		case CPU_ARCH_CORTEX_A5:
			/*
			 * Disable exclusive L1/L2 cache control
			 * Enable SMP mode
			 * Enable Cache and TLB maintenance broadcast
			 */
			*actlr_mask = (1 << 7) | (1 << 6) | (1 << 0);
			*actlr_set = (1 << 6) | (1 << 0);
			break;
		case CPU_ARCH_ARM1176:
			/*
			 * Restrict cache size to 16KB
			 * Enable the return stack
			 * Enable dynamic branch prediction
			 * Enable static branch prediction
			 */
			*actlr_mask = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0);
			*actlr_set = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0);
			break;
		}
		return;
	}
}

/* Reinitialize MMU to final kernel mapping and apply all CPU quirks. */
void
cpuinfo_reinit_mmu(uint32_t ttb)
{
	uint32_t actlr_mask;
	uint32_t actlr_set;

	cpuinfo_get_actlr_modifier(&actlr_mask, &actlr_set);
	actlr_mask |= cpu_quirks_actlr_mask;
	actlr_set |= cpu_quirks_actlr_set;
	reinit_mmu(ttb, actlr_mask, actlr_set);
}

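/*
 * Try to change the given ACTLR bits and report whether the change took
 * effect.  Returns true when the register already has the requested value
 * or when the write sticks; returns false when the write is silently
 * ignored, typically because ACTLR is only writable from the secure world.
 */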
static bool
modify_actlr(uint32_t clear, uint32_t set)
{
	uint32_t reg, newreg;

	reg = cp15_actlr_get();
	newreg = reg;
	newreg &= ~clear;
	newreg |= set;
	if (reg == newreg)
		return (true);
	cp15_actlr_set(newreg);

	reg = cp15_actlr_get();
	if (reg == newreg)
		return (true);
	return (false);
}

/* Apply/restore BP hardening on current core. */
static int
apply_bp_hardening(bool enable, int kind, bool actrl, uint32_t set_mask)
{
	if (enable) {
		if (actrl && !modify_actlr(0, set_mask))
			return (-1);
		PCPU_SET(bp_harden_kind, kind);
	} else {
		PCPU_SET(bp_harden_kind, PCPU_BP_HARDEN_KIND_NONE);
		if (actrl)
			modify_actlr(~0, PCPU_GET(original_actlr));
		spectre_v2_safe = 0;
	}
	return (0);
}

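/*
 * Pick and apply (or remove) the branch predictor hardening method for the
 * calling core, based on the CPU implementer and part number.  Called during
 * CPU start-up and again from the disable_bp_hardening sysctl handler below.
 */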
static void
handle_bp_hardening(bool enable)
{
	int kind;
	char *kind_str;

	kind = PCPU_BP_HARDEN_KIND_NONE;
	/*
	 * Note: Access to ACTLR is locked to the secure world on most boards.
	 * This means that full BP hardening depends on updated u-boot/firmware
	 * or is not possible at all (if the secure monitor is in on-chip ROM).
	 */
	if (cpuinfo.implementer == CPU_IMPLEMENTER_ARM) {
		switch (cpuinfo.part_number) {
		case CPU_ARCH_CORTEX_A8:
			/*
			 * For Cortex-A8, the IBE bit must be set, otherwise
			 * BPIALL is effectively a NOP.
			 * Unfortunately, Cortex-A8 is also affected by
			 * ARM erratum 687067, which makes BPIALL not work
			 * if the IBE bit is set and 'Instruction L1 System
			 * Array Debug Register 0' is not 0.
			 * That register is not reset on power-up and is
			 * accessible only from the secure world, so there is
			 * nothing we can do to fix (or even detect) this
			 * issue.  It is feared that the on-chip ROM based
			 * secure monitor on AM335x (BeagleBone) does not
			 * reset this debug register.
			 */
			kind = PCPU_BP_HARDEN_KIND_BPIALL;
			if (apply_bp_hardening(enable, kind, true, 1 << 6) != 0)
				goto actlr_err;
			break;

		case CPU_ARCH_CORTEX_A9:
		case CPU_ARCH_CORTEX_A12:
		case CPU_ARCH_CORTEX_A17:
		case CPU_ARCH_CORTEX_A57:
		case CPU_ARCH_CORTEX_A72:
		case CPU_ARCH_CORTEX_A73:
		case CPU_ARCH_CORTEX_A75:
			kind = PCPU_BP_HARDEN_KIND_BPIALL;
			if (apply_bp_hardening(enable, kind, false, 0) != 0)
				goto actlr_err;
			break;

		case CPU_ARCH_CORTEX_A15:
			/*
			 * For Cortex-A15, set the 'Enable invalidates of BTB'
			 * bit.  Even with this bit set, BPIALL is still
			 * effectively a NOP, but ICIALLU then also flushes the
			 * branch predictor as a side effect.
			 */
			kind = PCPU_BP_HARDEN_KIND_ICIALLU;
			if (apply_bp_hardening(enable, kind, true, 1 << 0) != 0)
				goto actlr_err;
			break;

		default:
			break;
		}
	} else if (cpuinfo.implementer == CPU_IMPLEMENTER_QCOM) {
		printf("!!!WARNING!!! CPU(%d) is vulnerable to speculative "
		    "branch attacks. !!!\n"
		    "Qualcomm Krait cores are known (or believed) to be "
		    "vulnerable to\n"
		    "speculative branch attacks; no mitigation exists yet.\n",
		    PCPU_GET(cpuid));
		goto unknown_mitigation;
	} else {
		goto unknown_mitigation;
	}

	if (bootverbose) {
		switch (kind) {
		case PCPU_BP_HARDEN_KIND_NONE:
			kind_str = "not necessary";
			break;
		case PCPU_BP_HARDEN_KIND_BPIALL:
			kind_str = "BPIALL";
			break;
		case PCPU_BP_HARDEN_KIND_ICIALLU:
			kind_str = "ICIALLU";
			break;
		default:
			panic("Unknown BP hardening kind (%d).", kind);
		}
		printf("CPU(%d) applied BP hardening: %s\n", PCPU_GET(cpuid),
		    kind_str);
	}

	return;

unknown_mitigation:
	PCPU_SET(bp_harden_kind, PCPU_BP_HARDEN_KIND_NONE);
	spectre_v2_safe = 0;
	return;

actlr_err:
	PCPU_SET(bp_harden_kind, PCPU_BP_HARDEN_KIND_NONE);
	spectre_v2_safe = 0;
	printf("!!!WARNING!!! CPU(%d) is vulnerable to speculative branch "
	    "attacks. !!!\n"
	    "We cannot enable the required bit(s) in the ACTLR register\n"
	    "because it is locked by the secure monitor and/or firmware.\n",
	    PCPU_GET(cpuid));
}

void
cpuinfo_init_bp_hardening(void)
{

	/*
	 * Store the original, unmodified ACTLR so that we can restore it when
	 * BP hardening is disabled via sysctl.
	 */
	PCPU_SET(original_actlr, cp15_actlr_get());
	handle_bp_hardening(true);
}

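/*
 * smp_rendezvous() callback: re-evaluate BP hardening on the local core
 * according to the current value of disable_bp_hardening.
 */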
static void
bp_hardening_action(void *arg)
{

	handle_bp_hardening(disable_bp_hardening == 0);
}

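/*
 * Sysctl handler for machdep.disable_bp_hardening.  When a new value is
 * written it optimistically marks the system safe again and has every core
 * re-apply (or drop) its hardening; any core that cannot apply the mitigation
 * clears spectre_v2_safe back to 0.
 */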
static int
sysctl_disable_bp_hardening(SYSCTL_HANDLER_ARGS)
{
	int rv;

	rv = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);

	if (!rv && req->newptr) {
		spectre_v2_safe = 1;
		dmb();
#ifdef SMP
		smp_rendezvous_cpus(all_cpus, smp_no_rendezvous_barrier,
		    bp_hardening_action, NULL, NULL);
#else
		bp_hardening_action(NULL);
#endif
	}

	return (rv);
}

SYSCTL_PROC(_machdep, OID_AUTO, disable_bp_hardening,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    &disable_bp_hardening, 0, sysctl_disable_bp_hardening, "I",
    "Disable BP hardening mitigation.");
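
/*
 * For example, the mitigation can be switched off at run time with
 *	sysctl machdep.disable_bp_hardening=1
 * and its status can be checked with
 *	sysctl machdep.spectre_v2_safe
 */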

SYSCTL_INT(_machdep, OID_AUTO, spectre_v2_safe, CTLFLAG_RD,
    &spectre_v2_safe, 0, "System is safe from Spectre Version 2 attacks");
540