1 /*-
2 * Copyright (c) 2003 Peter Wemm.
3 * Copyright (c) 1992 Terrence R. Lambert.
4 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * William Jolitz.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
39 */
40
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD$");
43
44 #include "opt_acpi.h"
45 #include "opt_atpic.h"
46 #include "opt_cpu.h"
47 #include "opt_ddb.h"
48 #include "opt_inet.h"
49 #include "opt_isa.h"
50 #include "opt_kdb.h"
51 #include "opt_kstack_pages.h"
52 #include "opt_maxmem.h"
53 #include "opt_mp_watchdog.h"
54 #include "opt_platform.h"
55 #ifdef __i386__
56 #include "opt_apic.h"
57 #endif
58
59 #include <sys/param.h>
60 #include <sys/proc.h>
61 #include <sys/systm.h>
62 #include <sys/bus.h>
63 #include <sys/cpu.h>
64 #include <sys/domainset.h>
65 #include <sys/kdb.h>
66 #include <sys/kernel.h>
67 #include <sys/ktr.h>
68 #include <sys/lock.h>
69 #include <sys/malloc.h>
70 #include <sys/mutex.h>
71 #include <sys/pcpu.h>
72 #include <sys/rwlock.h>
73 #include <sys/sched.h>
74 #include <sys/smp.h>
75 #include <sys/sysctl.h>
76
77 #include <machine/clock.h>
78 #include <machine/cpu.h>
79 #include <machine/cputypes.h>
80 #include <machine/specialreg.h>
81 #include <machine/md_var.h>
82 #include <machine/mp_watchdog.h>
83 #include <machine/tss.h>
84 #ifdef SMP
85 #include <machine/smp.h>
86 #endif
87 #ifdef CPU_ELAN
88 #include <machine/elan_mmcr.h>
89 #endif
90 #include <x86/acpica_machdep.h>
91
92 #include <vm/vm.h>
93 #include <vm/vm_extern.h>
94 #include <vm/vm_kern.h>
95 #include <vm/vm_page.h>
96 #include <vm/vm_map.h>
97 #include <vm/vm_object.h>
98 #include <vm/vm_pager.h>
99 #include <vm/vm_param.h>
100
101 #include <isa/isareg.h>
102
103 #include <contrib/dev/acpica/include/acpi.h>
104
105 #define STATE_RUNNING 0x0
106 #define STATE_MWAIT 0x1
107 #define STATE_SLEEPING 0x2
108
109 #ifdef SMP
110 static u_int cpu_reset_proxyid;
111 static volatile u_int cpu_reset_proxy_active;
112 #endif
113
114 struct msr_op_arg {
115 u_int msr;
116 int op;
117 uint64_t arg1;
118 };
119
120 static void
121 x86_msr_op_one(void *argp)
122 {
123 struct msr_op_arg *a;
124 uint64_t v;
125
126 a = argp;
127 switch (a->op) {
128 case MSR_OP_ANDNOT:
129 v = rdmsr(a->msr);
130 v &= ~a->arg1;
131 wrmsr(a->msr, v);
132 break;
133 case MSR_OP_OR:
134 v = rdmsr(a->msr);
135 v |= a->arg1;
136 wrmsr(a->msr, v);
137 break;
138 case MSR_OP_WRITE:
139 wrmsr(a->msr, a->arg1);
140 break;
141 }
142 }
143
144 #define MSR_OP_EXMODE_MASK 0xf0000000
145 #define MSR_OP_OP_MASK 0x000000ff
146
147 void
148 x86_msr_op(u_int msr, u_int op, uint64_t arg1)
149 {
150 struct thread *td;
151 struct msr_op_arg a;
152 u_int exmode;
153 int bound_cpu, i, is_bound;
154
155 a.op = op & MSR_OP_OP_MASK;
156 MPASS(a.op == MSR_OP_ANDNOT || a.op == MSR_OP_OR ||
157 a.op == MSR_OP_WRITE);
158 exmode = op & MSR_OP_EXMODE_MASK;
159 MPASS(exmode == MSR_OP_LOCAL || exmode == MSR_OP_SCHED ||
160 exmode == MSR_OP_RENDEZVOUS);
161 a.msr = msr;
162 a.arg1 = arg1;
163 switch (exmode) {
164 case MSR_OP_LOCAL:
165 x86_msr_op_one(&a);
166 break;
167 case MSR_OP_SCHED:
168 td = curthread;
169 thread_lock(td);
170 is_bound = sched_is_bound(td);
171 bound_cpu = td->td_oncpu;
172 CPU_FOREACH(i) {
173 sched_bind(td, i);
174 x86_msr_op_one(&a);
175 }
176 if (is_bound)
177 sched_bind(td, bound_cpu);
178 else
179 sched_unbind(td);
180 thread_unlock(td);
181 break;
182 case MSR_OP_RENDEZVOUS:
183 smp_rendezvous(NULL, x86_msr_op_one, NULL, &a);
184 break;
185 }
186 }
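/*
 * Example (a hedged sketch, not code from this file): a caller that
 * wants to set a bit in some hypothetical MSR_EXAMPLE_CTL on every
 * CPU synchronously could use the rendezvous execution mode:
 *
 *	x86_msr_op(MSR_EXAMPLE_CTL, MSR_OP_OR | MSR_OP_RENDEZVOUS, bit);
 *
 * MSR_OP_LOCAL applies the operation to the current CPU only, while
 * MSR_OP_SCHED visits every CPU by temporarily binding the calling
 * thread to each one in turn.
 */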
187
188 /*
189 * Initialized automatically based on per-CPU errata in cpu_idle_tun() below.
190 */
191 bool mwait_cpustop_broken = false;
192 SYSCTL_BOOL(_machdep, OID_AUTO, mwait_cpustop_broken, CTLFLAG_RDTUN,
193 &mwait_cpustop_broken, 0,
194 "Cannot reliably wake MONITOR/MWAIT CPUs without interrupts");
195
196 /*
197 * Flush the D-cache for non-DMA I/O so that the I-cache can
198 * be made coherent later.
199 */
200 void
201 cpu_flush_dcache(void *ptr, size_t len)
202 {
203 /* Not applicable */
204 }
205
206 void
207 acpi_cpu_c1(void)
208 {
209
210 __asm __volatile("sti; hlt");
211 }
212
213 /*
214 * Use mwait to pause execution while waiting for an interrupt or
215 * another thread to signal that there is more work.
216 *
217 * NOTE: Interrupts will cause a wakeup; however, this function does
218 * not enable interrupt handling. The caller is responsible for
219 * enabling interrupts.
220 */
221 void
222 acpi_cpu_idle_mwait(uint32_t mwait_hint)
223 {
224 int *state;
225 uint64_t v;
226
227 /*
228 * A comment in a Linux patch claims that 'CPUs run faster with
229 * speculation protection disabled. All CPU threads in a core
230 * must disable speculation protection for it to be
231 * disabled. Disable it while we are idle so the other
232 * hyperthread can run fast.'
233 *
234 * XXXKIB. Software coordination mode should be supported,
235 * but all Intel CPUs provide hardware coordination.
236 */
237
238 state = &PCPU_PTR(monitorbuf)->idle_state;
239 KASSERT(atomic_load_int(state) == STATE_SLEEPING,
240 ("cpu_mwait_cx: wrong monitorbuf state"));
241 atomic_store_int(state, STATE_MWAIT);
242 if (PCPU_GET(ibpb_set) || hw_ssb_active) {
243 v = rdmsr(MSR_IA32_SPEC_CTRL);
244 wrmsr(MSR_IA32_SPEC_CTRL, v & ~(IA32_SPEC_CTRL_IBRS |
245 IA32_SPEC_CTRL_STIBP | IA32_SPEC_CTRL_SSBD));
246 } else {
247 v = 0;
248 }
249 cpu_monitor(state, 0, 0);
250 if (atomic_load_int(state) == STATE_MWAIT)
251 cpu_mwait(MWAIT_INTRBREAK, mwait_hint);
252
253 /*
254 * SSB cannot be disabled while we sleep, or rather, if it was
255 * disabled, the sysctl thread will bind to our CPU to tweak
256 * the MSR.
257 */
258 if (v != 0)
259 wrmsr(MSR_IA32_SPEC_CTRL, v);
260
261 /*
262 * We should exit on any event that interrupts mwait, because
263 * that event might be a wanted interrupt.
264 */
265 atomic_store_int(state, STATE_RUNNING);
266 }
267
268 /* Get current clock frequency for the given cpu id. */
269 int
270 cpu_est_clockrate(int cpu_id, uint64_t *rate)
271 {
272 uint64_t tsc1, tsc2;
273 uint64_t acnt, mcnt, perf;
274 register_t reg;
275
276 if (pcpu_find(cpu_id) == NULL || rate == NULL)
277 return (EINVAL);
278 #ifdef __i386__
279 if ((cpu_feature & CPUID_TSC) == 0)
280 return (EOPNOTSUPP);
281 #endif
282
283 /*
284 * If TSC is P-state invariant and APERF/MPERF MSRs do not exist,
285 * DELAY(9) based logic fails.
286 */
287 if (tsc_is_invariant && !tsc_perf_stat)
288 return (EOPNOTSUPP);
289
290 #ifdef SMP
291 if (smp_cpus > 1) {
292 /* Schedule ourselves on the indicated cpu. */
293 thread_lock(curthread);
294 sched_bind(curthread, cpu_id);
295 thread_unlock(curthread);
296 }
297 #endif
298
299 /* Calibrate by measuring a short delay. */
300 reg = intr_disable();
301 if (tsc_is_invariant) {
302 wrmsr(MSR_MPERF, 0);
303 wrmsr(MSR_APERF, 0);
304 tsc1 = rdtsc();
305 DELAY(1000);
306 mcnt = rdmsr(MSR_MPERF);
307 acnt = rdmsr(MSR_APERF);
308 tsc2 = rdtsc();
309 intr_restore(reg);
310 perf = 1000 * acnt / mcnt;
311 *rate = (tsc2 - tsc1) * perf;
312 } else {
313 tsc1 = rdtsc();
314 DELAY(1000);
315 tsc2 = rdtsc();
316 intr_restore(reg);
317 *rate = (tsc2 - tsc1) * 1000;
318 }
319
320 #ifdef SMP
321 if (smp_cpus > 1) {
322 thread_lock(curthread);
323 sched_unbind(curthread);
324 thread_unlock(curthread);
325 }
326 #endif
327
328 return (0);
329 }
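/*
 * Worked example for the invariant-TSC path above (illustrative
 * numbers only): over the 1000us DELAY() a 3 GHz TSC advances by
 * roughly 3,000,000 counts, so (tsc2 - tsc1) * 1000 is ~3e9.  If the
 * core was throttled so that APERF/MPERF is 0.5, perf becomes 500
 * instead of 1000 and the estimate scales to ~1.5e9, i.e. the
 * effective clock rate rather than the nominal TSC rate.
 */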
330
331 /*
332 * Shutdown the CPU as much as possible
333 */
334 void
335 cpu_halt(void)
336 {
337 for (;;)
338 halt();
339 }
340
341 static void
342 cpu_reset_real(void)
343 {
344 struct region_descriptor null_idt;
345 int b;
346
347 disable_intr();
348 #ifdef CPU_ELAN
349 if (elan_mmcr != NULL)
350 elan_mmcr->RESCFG = 1;
351 #endif
352 #ifdef __i386__
353 if (cpu == CPU_GEODE1100) {
354 /* Attempt Geode's own reset */
355 outl(0xcf8, 0x80009044ul);
356 outl(0xcfc, 0xf);
357 }
358 #endif
359 #if !defined(BROKEN_KEYBOARD_RESET)
360 /*
361 * Attempt to do a CPU reset via the keyboard controller;
362 * do not turn off GateA20, as any machine that fails
363 * to do the reset here would then end up in no man's land.
364 */
365 outb(IO_KBD + 4, 0xFE);
366 DELAY(500000); /* wait 0.5 sec to see if that did it */
367 #endif
368
369 /*
370 * Attempt to force a reset via the Reset Control register at
371 * I/O port 0xcf9. Bit 2 forces a system reset when it
372 * transitions from 0 to 1. Bit 1 selects the type of reset
373 * to attempt: 0 selects a "soft" reset, and 1 selects a
374 * "hard" reset. We try a "hard" reset. The first write sets
375 * bit 1 to select a "hard" reset and clears bit 2. The
376 * second write forces a 0 -> 1 transition in bit 2 to trigger
377 * a reset.
378 */
379 outb(0xcf9, 0x2);
380 outb(0xcf9, 0x6);
381 DELAY(500000); /* wait 0.5 sec to see if that did it */
382
383 /*
384 * Attempt to force a reset via the Fast A20 and Init register
385 * at I/O port 0x92. Bit 1 serves as an alternate A20 gate.
386 * Bit 0 asserts INIT# when set to 1. We are careful to only
387 * preserve bit 1 while setting bit 0. We also must clear bit
388 * 0 before setting it if it isn't already clear.
389 */
390 b = inb(0x92);
391 if (b != 0xff) {
392 if ((b & 0x1) != 0)
393 outb(0x92, b & 0xfe);
394 outb(0x92, b | 0x1);
395 DELAY(500000); /* wait 0.5 sec to see if that did it */
396 }
397
398 printf("No known reset method worked, attempting CPU shutdown\n");
399 DELAY(1000000); /* wait 1 sec for printf to complete */
400
401 /* Wipe the IDT. */
402 null_idt.rd_limit = 0;
403 null_idt.rd_base = 0;
404 lidt(&null_idt);
405
406 /* "good night, sweet prince .... <THUNK!>" */
407 breakpoint();
408
409 /* NOTREACHED */
410 while(1);
411 }
412
413 #ifdef SMP
414 static void
415 cpu_reset_proxy(void)
416 {
417
418 cpu_reset_proxy_active = 1;
419 while (cpu_reset_proxy_active == 1)
420 ia32_pause(); /* Wait for other cpu to see that we've started */
421
422 printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
423 DELAY(1000000);
424 cpu_reset_real();
425 }
426 #endif
427
428 void
429 cpu_reset(void)
430 {
431 #ifdef SMP
432 struct monitorbuf *mb;
433 cpuset_t map;
434 u_int cnt;
435
436 if (smp_started) {
437 map = all_cpus;
438 CPU_CLR(PCPU_GET(cpuid), &map);
439 CPU_ANDNOT(&map, &stopped_cpus);
440 if (!CPU_EMPTY(&map)) {
441 printf("cpu_reset: Stopping other CPUs\n");
442 stop_cpus(map);
443 }
444
445 if (PCPU_GET(cpuid) != 0) {
446 cpu_reset_proxyid = PCPU_GET(cpuid);
447 cpustop_restartfunc = cpu_reset_proxy;
448 cpu_reset_proxy_active = 0;
449 printf("cpu_reset: Restarting BSP\n");
450
451 /* Restart CPU #0. */
452 CPU_SETOF(0, &started_cpus);
453 mb = &pcpu_find(0)->pc_monitorbuf;
454 atomic_store_int(&mb->stop_state,
455 MONITOR_STOPSTATE_RUNNING);
456
457 cnt = 0;
458 while (cpu_reset_proxy_active == 0 && cnt < 10000000) {
459 ia32_pause();
460 cnt++; /* Wait for BSP to announce restart */
461 }
462 if (cpu_reset_proxy_active == 0) {
463 printf("cpu_reset: Failed to restart BSP\n");
464 } else {
465 cpu_reset_proxy_active = 2;
466 while (1)
467 ia32_pause();
468 /* NOTREACHED */
469 }
470 }
471
472 DELAY(1000000);
473 }
474 #endif
475 cpu_reset_real();
476 /* NOTREACHED */
477 }
478
479 bool
480 cpu_mwait_usable(void)
481 {
482
483 return ((cpu_feature2 & CPUID2_MON) != 0 && ((cpu_mon_mwait_flags &
484 (CPUID5_MON_MWAIT_EXT | CPUID5_MWAIT_INTRBREAK)) ==
485 (CPUID5_MON_MWAIT_EXT | CPUID5_MWAIT_INTRBREAK)));
486 }
487
488 void (*cpu_idle_hook)(sbintime_t) = NULL; /* ACPI idle hook. */
489
490 int cpu_amdc1e_bug = 0; /* AMD C1E APIC workaround required. */
491
492 static int idle_mwait = 1; /* Use MONITOR/MWAIT for short idle. */
493 SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RWTUN, &idle_mwait,
494 0, "Use MONITOR/MWAIT for short idle");
495
496 static void
497 cpu_idle_acpi(sbintime_t sbt)
498 {
499 int *state;
500
501 state = &PCPU_PTR(monitorbuf)->idle_state;
502 atomic_store_int(state, STATE_SLEEPING);
503
504 /* See comments in cpu_idle_hlt(). */
505 disable_intr();
506 if (sched_runnable())
507 enable_intr();
508 else if (cpu_idle_hook)
509 cpu_idle_hook(sbt);
510 else
511 acpi_cpu_c1();
512 atomic_store_int(state, STATE_RUNNING);
513 }
514
515 static void
516 cpu_idle_hlt(sbintime_t sbt)
517 {
518 int *state;
519
520 state = &PCPU_PTR(monitorbuf)->idle_state;
521 atomic_store_int(state, STATE_SLEEPING);
522
523 /*
524 * Since we may be in a critical section from cpu_idle(), if
525 * an interrupt fires during that critical section we may have
526 * a pending preemption. If the CPU halts, then that thread
527 * may not execute until a later interrupt awakens the CPU.
528 * To handle this race, check for a runnable thread after
529 * disabling interrupts and immediately return if one is
530 * found. Also, we must absolutely guarantee that hlt is
531 * the next instruction after sti. This ensures that any
532 * interrupt that fires after the call to disable_intr() will
533 * immediately awaken the CPU from hlt. Finally, note that on
534 * x86 this works because interrupt delivery is deferred until
535 * after the instruction following sti, while IF is set to 1
536 * immediately, allowing the hlt instruction to acknowledge the
537 * interrupt.
538 */
539 disable_intr();
540 if (sched_runnable())
541 enable_intr();
542 else
543 acpi_cpu_c1();
544 atomic_store_int(state, STATE_RUNNING);
545 }
546
547 static void
548 cpu_idle_mwait(sbintime_t sbt)
549 {
550 int *state;
551
552 state = &PCPU_PTR(monitorbuf)->idle_state;
553 atomic_store_int(state, STATE_MWAIT);
554
555 /* See comments in cpu_idle_hlt(). */
556 disable_intr();
557 if (sched_runnable()) {
558 atomic_store_int(state, STATE_RUNNING);
559 enable_intr();
560 return;
561 }
562
563 cpu_monitor(state, 0, 0);
564 if (atomic_load_int(state) == STATE_MWAIT)
565 __asm __volatile("sti; mwait" : : "a" (MWAIT_C1), "c" (0));
566 else
567 enable_intr();
568 atomic_store_int(state, STATE_RUNNING);
569 }
570
571 static void
572 cpu_idle_spin(sbintime_t sbt)
573 {
574 int *state;
575 int i;
576
577 state = &PCPU_PTR(monitorbuf)->idle_state;
578 atomic_store_int(state, STATE_RUNNING);
579
580 /*
581 * The sched_runnable() call is racy, but since we check it in
582 * a loop, missing it once has little impact, if any (and it is
583 * much better than not checking at all).
584 */
585 for (i = 0; i < 1000; i++) {
586 if (sched_runnable())
587 return;
588 cpu_spinwait();
589 }
590 }
591
592 void (*cpu_idle_fn)(sbintime_t) = cpu_idle_acpi;
593
594 void
595 cpu_idle(int busy)
596 {
597 uint64_t msr;
598 sbintime_t sbt = -1;
599
600 CTR2(KTR_SPARE2, "cpu_idle(%d) at %d",
601 busy, curcpu);
602 #ifdef MP_WATCHDOG
603 ap_watchdog(PCPU_GET(cpuid));
604 #endif
605
606 /* If we are busy, try to use fast methods. */
607 if (busy) {
608 if ((cpu_feature2 & CPUID2_MON) && idle_mwait) {
609 cpu_idle_mwait(busy);
610 goto out;
611 }
612 }
613
614 /* If we have time, switch timers into idle mode. */
615 if (!busy) {
616 critical_enter();
617 sbt = cpu_idleclock();
618 }
619
620 /* Apply AMD APIC timer C1E workaround. */
621 if (cpu_amdc1e_bug && cpu_disable_c3_sleep) {
622 msr = rdmsr(MSR_AMDK8_IPM);
623 if ((msr & (AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT)) != 0)
624 wrmsr(MSR_AMDK8_IPM, msr & ~(AMDK8_SMIONCMPHALT |
625 AMDK8_C1EONCMPHALT));
626 }
627
628 /* Call main idle method. */
629 cpu_idle_fn(sbt);
630
631 /* Switch timers back into active mode. */
632 if (!busy) {
633 cpu_activeclock();
634 critical_exit();
635 }
636 out:
637 CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done",
638 busy, curcpu);
639 }
640
641 static int cpu_idle_apl31_workaround;
642 SYSCTL_INT(_machdep, OID_AUTO, idle_apl31, CTLFLAG_RW,
643 &cpu_idle_apl31_workaround, 0,
644 "Apollo Lake APL31 MWAIT bug workaround");
645
646 int
647 cpu_idle_wakeup(int cpu)
648 {
649 struct monitorbuf *mb;
650 int *state;
651
652 mb = &pcpu_find(cpu)->pc_monitorbuf;
653 state = &mb->idle_state;
654 switch (atomic_load_int(state)) {
655 case STATE_SLEEPING:
656 return (0);
657 case STATE_MWAIT:
658 atomic_store_int(state, STATE_RUNNING);
659 return (cpu_idle_apl31_workaround ? 0 : 1);
660 case STATE_RUNNING:
661 return (1);
662 default:
663 panic("bad monitor state");
664 return (1);
665 }
666 }
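/*
 * The store to the monitored idle_state word above is what wakes a
 * CPU parked in cpu_idle_mwait(): mwait resumes when the armed
 * address range is written.  A zero return value tells the caller
 * (typically the scheduler) that the store alone may not be enough,
 * either because the CPU is sleeping in C1/ACPI or because the APL31
 * workaround is active, so an interrupt is still needed to wake it.
 */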
667
668 /*
669 * Ordered by speed/power consumption.
670 */
671 static struct {
672 void *id_fn;
673 char *id_name;
674 int id_cpuid2_flag;
675 } idle_tbl[] = {
676 { .id_fn = cpu_idle_spin, .id_name = "spin" },
677 { .id_fn = cpu_idle_mwait, .id_name = "mwait",
678 .id_cpuid2_flag = CPUID2_MON },
679 { .id_fn = cpu_idle_hlt, .id_name = "hlt" },
680 { .id_fn = cpu_idle_acpi, .id_name = "acpi" },
681 };
682
683 static int
684 idle_sysctl_available(SYSCTL_HANDLER_ARGS)
685 {
686 char *avail, *p;
687 int error;
688 int i;
689
690 avail = malloc(256, M_TEMP, M_WAITOK);
691 p = avail;
692 for (i = 0; i < nitems(idle_tbl); i++) {
693 if (idle_tbl[i].id_cpuid2_flag != 0 &&
694 (cpu_feature2 & idle_tbl[i].id_cpuid2_flag) == 0)
695 continue;
696 if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
697 cpu_idle_hook == NULL)
698 continue;
699 p += sprintf(p, "%s%s", p != avail ? ", " : "",
700 idle_tbl[i].id_name);
701 }
702 error = sysctl_handle_string(oidp, avail, 0, req);
703 free(avail, M_TEMP);
704 return (error);
705 }
706
707 SYSCTL_PROC(_machdep, OID_AUTO, idle_available,
708 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
709 0, 0, idle_sysctl_available, "A",
710 "list of available idle functions");
711
712 static bool
713 cpu_idle_selector(const char *new_idle_name)
714 {
715 int i;
716
717 for (i = 0; i < nitems(idle_tbl); i++) {
718 if (idle_tbl[i].id_cpuid2_flag != 0 &&
719 (cpu_feature2 & idle_tbl[i].id_cpuid2_flag) == 0)
720 continue;
721 if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
722 cpu_idle_hook == NULL)
723 continue;
724 if (strcmp(idle_tbl[i].id_name, new_idle_name))
725 continue;
726 cpu_idle_fn = idle_tbl[i].id_fn;
727 if (bootverbose)
728 printf("CPU idle set to %s\n", idle_tbl[i].id_name);
729 return (true);
730 }
731 return (false);
732 }
733
734 static int
735 cpu_idle_sysctl(SYSCTL_HANDLER_ARGS)
736 {
737 char buf[16], *p;
738 int error, i;
739
740 p = "unknown";
741 for (i = 0; i < nitems(idle_tbl); i++) {
742 if (idle_tbl[i].id_fn == cpu_idle_fn) {
743 p = idle_tbl[i].id_name;
744 break;
745 }
746 }
747 strncpy(buf, p, sizeof(buf));
748 error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
749 if (error != 0 || req->newptr == NULL)
750 return (error);
751 return (cpu_idle_selector(buf) ? 0 : EINVAL);
752 }
753
754 SYSCTL_PROC(_machdep, OID_AUTO, idle,
755 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
756 0, 0, cpu_idle_sysctl, "A",
757 "currently selected idle function");
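/*
 * Usage note (illustrative): the idle method can be inspected and
 * changed at run time, e.g.
 *
 *	sysctl machdep.idle_available
 *	sysctl machdep.idle=hlt
 *
 * or preselected from loader.conf through the machdep.idle tunable
 * that cpu_idle_tun() below fetches at boot.
 */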
758
759 static void
760 cpu_idle_tun(void *unused __unused)
761 {
762 char tunvar[16];
763
764 if (TUNABLE_STR_FETCH("machdep.idle", tunvar, sizeof(tunvar)))
765 cpu_idle_selector(tunvar);
766 else if (cpu_vendor_id == CPU_VENDOR_AMD &&
767 CPUID_TO_FAMILY(cpu_id) == 0x17 && CPUID_TO_MODEL(cpu_id) == 0x1) {
768 /* Ryzen errata 1057 and 1109. */
769 cpu_idle_selector("hlt");
770 idle_mwait = 0;
771 mwait_cpustop_broken = true;
772 }
773
774 if (cpu_vendor_id == CPU_VENDOR_INTEL && cpu_id == 0x506c9) {
775 /*
776 * Apollo Lake errata APL31 (public errata APL30).
777 * Stores to the armed address range may not trigger
778 * MWAIT to resume execution. OS needs to use
779 * interrupts to wake processors from MWAIT-induced
780 * sleep states.
781 */
782 cpu_idle_apl31_workaround = 1;
783 mwait_cpustop_broken = true;
784 }
785 TUNABLE_INT_FETCH("machdep.idle_apl31", &cpu_idle_apl31_workaround);
786 }
787 SYSINIT(cpu_idle_tun, SI_SUB_CPU, SI_ORDER_MIDDLE, cpu_idle_tun, NULL);
788
789 static int panic_on_nmi = 0xff;
790 SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RWTUN,
791 &panic_on_nmi, 0,
792 "Panic on NMI: 1 = H/W failure; 2 = unknown; 0xff = all");
793 int nmi_is_broadcast = 1;
794 SYSCTL_INT(_machdep, OID_AUTO, nmi_is_broadcast, CTLFLAG_RWTUN,
795 &nmi_is_broadcast, 0,
796 "Chipset NMI is broadcast");
797 int (*apei_nmi)(void);
798
799 void
800 nmi_call_kdb(u_int cpu, u_int type, struct trapframe *frame)
801 {
802 bool claimed = false;
803
804 #ifdef DEV_ISA
805 /* machine/parity/power fail/"kitchen sink" faults */
806 if (isa_nmi(frame->tf_err)) {
807 claimed = true;
808 if ((panic_on_nmi & 1) != 0)
809 panic("NMI indicates hardware failure");
810 }
811 #endif /* DEV_ISA */
812
813 /* ACPI Platform Error Interfaces callback. */
814 if (apei_nmi != NULL && (*apei_nmi)())
815 claimed = true;
816
817 /*
818 * NMIs can be useful for debugging. They can be hooked up to a
819 * pushbutton, usually on an ISA, PCI, or PCIe card. They can also be
820 * generated by an IPMI BMC, either manually or in response to a
821 * watchdog timeout. For example, see the "power diag" command in
822 * ports/sysutils/ipmitool. They can also be generated by a
823 * hypervisor; see "bhyvectl --inject-nmi".
824 */
825
826 #ifdef KDB
827 if (!claimed && (panic_on_nmi & 2) != 0) {
828 if (debugger_on_panic) {
829 printf("NMI/cpu%d ... going to debugger\n", cpu);
830 claimed = kdb_trap(type, 0, frame);
831 }
832 }
833 #endif /* KDB */
834
835 if (!claimed && panic_on_nmi != 0)
836 panic("NMI");
837 }
838
839 void
840 nmi_handle_intr(u_int type, struct trapframe *frame)
841 {
842
843 #ifdef SMP
844 if (nmi_is_broadcast) {
845 nmi_call_kdb_smp(type, frame);
846 return;
847 }
848 #endif
849 nmi_call_kdb(PCPU_GET(cpuid), type, frame);
850 }
851
852 static int hw_ibrs_active;
853 int hw_ibrs_ibpb_active;
854 int hw_ibrs_disable = 1;
855
856 SYSCTL_INT(_hw, OID_AUTO, ibrs_active, CTLFLAG_RD, &hw_ibrs_active, 0,
857 "Indirect Branch Restricted Speculation active");
858
859 SYSCTL_NODE(_machdep_mitigations, OID_AUTO, ibrs,
860 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
861 "Indirect Branch Restricted Speculation active");
862
863 SYSCTL_INT(_machdep_mitigations_ibrs, OID_AUTO, active, CTLFLAG_RD,
864 &hw_ibrs_active, 0, "Indirect Branch Restricted Speculation active");
865
866 void
867 hw_ibrs_recalculate(bool for_all_cpus)
868 {
869 if ((cpu_ia32_arch_caps & IA32_ARCH_CAP_IBRS_ALL) != 0) {
870 x86_msr_op(MSR_IA32_SPEC_CTRL, (for_all_cpus ?
871 MSR_OP_RENDEZVOUS : MSR_OP_LOCAL) |
872 (hw_ibrs_disable != 0 ? MSR_OP_ANDNOT : MSR_OP_OR),
873 IA32_SPEC_CTRL_IBRS);
874 hw_ibrs_active = hw_ibrs_disable == 0;
875 hw_ibrs_ibpb_active = 0;
876 } else {
877 hw_ibrs_active = hw_ibrs_ibpb_active = (cpu_stdext_feature3 &
878 CPUID_STDEXT3_IBPB) != 0 && !hw_ibrs_disable;
879 }
880 }
881
882 static int
883 hw_ibrs_disable_handler(SYSCTL_HANDLER_ARGS)
884 {
885 int error, val;
886
887 val = hw_ibrs_disable;
888 error = sysctl_handle_int(oidp, &val, 0, req);
889 if (error != 0 || req->newptr == NULL)
890 return (error);
891 hw_ibrs_disable = val != 0;
892 hw_ibrs_recalculate(true);
893 return (0);
894 }
895 SYSCTL_PROC(_hw, OID_AUTO, ibrs_disable, CTLTYPE_INT | CTLFLAG_RWTUN |
896 CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0, hw_ibrs_disable_handler, "I",
897 "Disable Indirect Branch Restricted Speculation");
898
899 SYSCTL_PROC(_machdep_mitigations_ibrs, OID_AUTO, disable, CTLTYPE_INT |
900 CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
901 hw_ibrs_disable_handler, "I",
902 "Disable Indirect Branch Restricted Speculation");
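/*
 * Usage note (illustrative): the mitigation can be toggled with
 * e.g. "sysctl machdep.mitigations.ibrs.disable=1" or set from
 * loader.conf; CTLFLAG_NOFETCH is used here presumably because the
 * tunable is fetched explicitly by the MD startup code rather than
 * at sysctl registration time.
 */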
903
904 int hw_ssb_active;
905 int hw_ssb_disable;
906
907 SYSCTL_INT(_hw, OID_AUTO, spec_store_bypass_disable_active, CTLFLAG_RD,
908 &hw_ssb_active, 0,
909 "Speculative Store Bypass Disable active");
910
911 SYSCTL_NODE(_machdep_mitigations, OID_AUTO, ssb,
912 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
913 "Speculative Store Bypass Disable active");
914
915 SYSCTL_INT(_machdep_mitigations_ssb, OID_AUTO, active, CTLFLAG_RD,
916 &hw_ssb_active, 0, "Speculative Store Bypass Disable active");
917
918 static void
919 hw_ssb_set(bool enable, bool for_all_cpus)
920 {
921
922 if ((cpu_stdext_feature3 & CPUID_STDEXT3_SSBD) == 0) {
923 hw_ssb_active = 0;
924 return;
925 }
926 hw_ssb_active = enable;
927 x86_msr_op(MSR_IA32_SPEC_CTRL,
928 (enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
929 (for_all_cpus ? MSR_OP_SCHED : MSR_OP_LOCAL), IA32_SPEC_CTRL_SSBD);
930 }
931
932 void
933 hw_ssb_recalculate(bool all_cpus)
934 {
935
936 switch (hw_ssb_disable) {
937 default:
938 hw_ssb_disable = 0;
939 /* FALLTHROUGH */
940 case 0: /* off */
941 hw_ssb_set(false, all_cpus);
942 break;
943 case 1: /* on */
944 hw_ssb_set(true, all_cpus);
945 break;
946 case 2: /* auto */
947 hw_ssb_set((cpu_ia32_arch_caps & IA32_ARCH_CAP_SSB_NO) != 0 ?
948 false : true, all_cpus);
949 break;
950 }
951 }
952
953 static int
954 hw_ssb_disable_handler(SYSCTL_HANDLER_ARGS)
955 {
956 int error, val;
957
958 val = hw_ssb_disable;
959 error = sysctl_handle_int(oidp, &val, 0, req);
960 if (error != 0 || req->newptr == NULL)
961 return (error);
962 hw_ssb_disable = val;
963 hw_ssb_recalculate(true);
964 return (0);
965 }
966 SYSCTL_PROC(_hw, OID_AUTO, spec_store_bypass_disable, CTLTYPE_INT |
967 CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
968 hw_ssb_disable_handler, "I",
969 "Speculative Store Bypass Disable (0 - off, 1 - on, 2 - auto)");
970
971 SYSCTL_PROC(_machdep_mitigations_ssb, OID_AUTO, disable, CTLTYPE_INT |
972 CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
973 hw_ssb_disable_handler, "I",
974 "Speculative Store Bypass Disable (0 - off, 1 - on, 2 - auto)");
975
976 int hw_mds_disable;
977
978 /*
979 * Handler for Microarchitectural Data Sampling issues. Really not a
980 * pointer to a C function: on amd64 the code must not change any CPU
981 * architectural state except possibly %rflags. Also, it is always
982 * called with interrupts disabled.
983 */
984 void mds_handler_void(void);
985 void mds_handler_verw(void);
986 void mds_handler_ivb(void);
987 void mds_handler_bdw(void);
988 void mds_handler_skl_sse(void);
989 void mds_handler_skl_avx(void);
990 void mds_handler_skl_avx512(void);
991 void mds_handler_silvermont(void);
992 void (*mds_handler)(void) = mds_handler_void;
993
994 static int
995 sysctl_hw_mds_disable_state_handler(SYSCTL_HANDLER_ARGS)
996 {
997 const char *state;
998
999 if (mds_handler == mds_handler_void)
1000 state = "inactive";
1001 else if (mds_handler == mds_handler_verw)
1002 state = "VERW";
1003 else if (mds_handler == mds_handler_ivb)
1004 state = "software IvyBridge";
1005 else if (mds_handler == mds_handler_bdw)
1006 state = "software Broadwell";
1007 else if (mds_handler == mds_handler_skl_sse)
1008 state = "software Skylake SSE";
1009 else if (mds_handler == mds_handler_skl_avx)
1010 state = "software Skylake AVX";
1011 else if (mds_handler == mds_handler_skl_avx512)
1012 state = "software Skylake AVX512";
1013 else if (mds_handler == mds_handler_silvermont)
1014 state = "software Silvermont";
1015 else
1016 state = "unknown";
1017 return (SYSCTL_OUT(req, state, strlen(state)));
1018 }
1019
1020 SYSCTL_PROC(_hw, OID_AUTO, mds_disable_state,
1021 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1022 sysctl_hw_mds_disable_state_handler, "A",
1023 "Microarchitectural Data Sampling Mitigation state");
1024
1025 SYSCTL_NODE(_machdep_mitigations, OID_AUTO, mds,
1026 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1027 "Microarchitectural Data Sampling Mitigation state");
1028
1029 SYSCTL_PROC(_machdep_mitigations_mds, OID_AUTO, state,
1030 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1031 sysctl_hw_mds_disable_state_handler, "A",
1032 "Microarchitectural Data Sampling Mitigation state");
1033
1034 _Static_assert(__offsetof(struct pcpu, pc_mds_tmp) % 64 == 0, "MDS AVX512");
1035
1036 void
1037 hw_mds_recalculate(void)
1038 {
1039 struct pcpu *pc;
1040 vm_offset_t b64;
1041 u_long xcr0;
1042 int i;
1043
1044 /*
1045 * Allow the user to force the VERW variant even if MD_CLEAR is not
1046 * reported. For instance, a hypervisor might unknowingly
1047 * filter the cap out.
1048 * For similar reasons, and for testing, allow enabling the
1049 * mitigation even when the MDS_NO cap is set.
1050 */
1051 if (cpu_vendor_id != CPU_VENDOR_INTEL || hw_mds_disable == 0 ||
1052 ((cpu_ia32_arch_caps & IA32_ARCH_CAP_MDS_NO) != 0 &&
1053 hw_mds_disable == 3)) {
1054 mds_handler = mds_handler_void;
1055 } else if (((cpu_stdext_feature3 & CPUID_STDEXT3_MD_CLEAR) != 0 &&
1056 hw_mds_disable == 3) || hw_mds_disable == 1) {
1057 mds_handler = mds_handler_verw;
1058 } else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
1059 (CPUID_TO_MODEL(cpu_id) == 0x2e || CPUID_TO_MODEL(cpu_id) == 0x1e ||
1060 CPUID_TO_MODEL(cpu_id) == 0x1f || CPUID_TO_MODEL(cpu_id) == 0x1a ||
1061 CPUID_TO_MODEL(cpu_id) == 0x2f || CPUID_TO_MODEL(cpu_id) == 0x25 ||
1062 CPUID_TO_MODEL(cpu_id) == 0x2c || CPUID_TO_MODEL(cpu_id) == 0x2d ||
1063 CPUID_TO_MODEL(cpu_id) == 0x2a || CPUID_TO_MODEL(cpu_id) == 0x3e ||
1064 CPUID_TO_MODEL(cpu_id) == 0x3a) &&
1065 (hw_mds_disable == 2 || hw_mds_disable == 3)) {
1066 /*
1067 * Nehalem, SandyBridge, IvyBridge
1068 */
1069 CPU_FOREACH(i) {
1070 pc = pcpu_find(i);
1071 if (pc->pc_mds_buf == NULL) {
1072 pc->pc_mds_buf = malloc_domainset(672, M_TEMP,
1073 DOMAINSET_PREF(pc->pc_domain), M_WAITOK);
1074 bzero(pc->pc_mds_buf, 16);
1075 }
1076 }
1077 mds_handler = mds_handler_ivb;
1078 } else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
1079 (CPUID_TO_MODEL(cpu_id) == 0x3f || CPUID_TO_MODEL(cpu_id) == 0x3c ||
1080 CPUID_TO_MODEL(cpu_id) == 0x45 || CPUID_TO_MODEL(cpu_id) == 0x46 ||
1081 CPUID_TO_MODEL(cpu_id) == 0x56 || CPUID_TO_MODEL(cpu_id) == 0x4f ||
1082 CPUID_TO_MODEL(cpu_id) == 0x47 || CPUID_TO_MODEL(cpu_id) == 0x3d) &&
1083 (hw_mds_disable == 2 || hw_mds_disable == 3)) {
1084 /*
1085 * Haswell, Broadwell
1086 */
1087 CPU_FOREACH(i) {
1088 pc = pcpu_find(i);
1089 if (pc->pc_mds_buf == NULL) {
1090 pc->pc_mds_buf = malloc_domainset(1536, M_TEMP,
1091 DOMAINSET_PREF(pc->pc_domain), M_WAITOK);
1092 bzero(pc->pc_mds_buf, 16);
1093 }
1094 }
1095 mds_handler = mds_handler_bdw;
1096 } else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
1097 ((CPUID_TO_MODEL(cpu_id) == 0x55 && (cpu_id &
1098 CPUID_STEPPING) <= 5) ||
1099 CPUID_TO_MODEL(cpu_id) == 0x4e || CPUID_TO_MODEL(cpu_id) == 0x5e ||
1100 (CPUID_TO_MODEL(cpu_id) == 0x8e && (cpu_id &
1101 CPUID_STEPPING) <= 0xb) ||
1102 (CPUID_TO_MODEL(cpu_id) == 0x9e && (cpu_id &
1103 CPUID_STEPPING) <= 0xc)) &&
1104 (hw_mds_disable == 2 || hw_mds_disable == 3)) {
1105 /*
1106 * Skylake, KabyLake, CoffeeLake, WhiskeyLake,
1107 * CascadeLake
1108 */
1109 CPU_FOREACH(i) {
1110 pc = pcpu_find(i);
1111 if (pc->pc_mds_buf == NULL) {
1112 pc->pc_mds_buf = malloc_domainset(6 * 1024,
1113 M_TEMP, DOMAINSET_PREF(pc->pc_domain),
1114 M_WAITOK);
1115 b64 = (vm_offset_t)malloc_domainset(64 + 63,
1116 M_TEMP, DOMAINSET_PREF(pc->pc_domain),
1117 M_WAITOK);
1118 pc->pc_mds_buf64 = (void *)roundup2(b64, 64);
1119 bzero(pc->pc_mds_buf64, 64);
1120 }
1121 }
1122 xcr0 = rxcr(0);
1123 if ((xcr0 & XFEATURE_ENABLED_ZMM_HI256) != 0 &&
1124 (cpu_stdext_feature & CPUID_STDEXT_AVX512DQ) != 0)
1125 mds_handler = mds_handler_skl_avx512;
1126 else if ((xcr0 & XFEATURE_ENABLED_AVX) != 0 &&
1127 (cpu_feature2 & CPUID2_AVX) != 0)
1128 mds_handler = mds_handler_skl_avx;
1129 else
1130 mds_handler = mds_handler_skl_sse;
1131 } else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
1132 ((CPUID_TO_MODEL(cpu_id) == 0x37 ||
1133 CPUID_TO_MODEL(cpu_id) == 0x4a ||
1134 CPUID_TO_MODEL(cpu_id) == 0x4c ||
1135 CPUID_TO_MODEL(cpu_id) == 0x4d ||
1136 CPUID_TO_MODEL(cpu_id) == 0x5a ||
1137 CPUID_TO_MODEL(cpu_id) == 0x5d ||
1138 CPUID_TO_MODEL(cpu_id) == 0x6e ||
1139 CPUID_TO_MODEL(cpu_id) == 0x65 ||
1140 CPUID_TO_MODEL(cpu_id) == 0x75 ||
1141 CPUID_TO_MODEL(cpu_id) == 0x1c ||
1142 CPUID_TO_MODEL(cpu_id) == 0x26 ||
1143 CPUID_TO_MODEL(cpu_id) == 0x27 ||
1144 CPUID_TO_MODEL(cpu_id) == 0x35 ||
1145 CPUID_TO_MODEL(cpu_id) == 0x36 ||
1146 CPUID_TO_MODEL(cpu_id) == 0x7a))) {
1147 /* Silvermont, Airmont */
1148 CPU_FOREACH(i) {
1149 pc = pcpu_find(i);
1150 if (pc->pc_mds_buf == NULL)
1151 pc->pc_mds_buf = malloc(256, M_TEMP, M_WAITOK);
1152 }
1153 mds_handler = mds_handler_silvermont;
1154 } else {
1155 hw_mds_disable = 0;
1156 mds_handler = mds_handler_void;
1157 }
1158 }
1159
1160 static void
1161 hw_mds_recalculate_boot(void *arg __unused)
1162 {
1163
1164 hw_mds_recalculate();
1165 }
1166 SYSINIT(mds_recalc, SI_SUB_SMP, SI_ORDER_ANY, hw_mds_recalculate_boot, NULL);
1167
1168 static int
1169 sysctl_mds_disable_handler(SYSCTL_HANDLER_ARGS)
1170 {
1171 int error, val;
1172
1173 val = hw_mds_disable;
1174 error = sysctl_handle_int(oidp, &val, 0, req);
1175 if (error != 0 || req->newptr == NULL)
1176 return (error);
1177 if (val < 0 || val > 3)
1178 return (EINVAL);
1179 hw_mds_disable = val;
1180 hw_mds_recalculate();
1181 return (0);
1182 }
1183
1184 SYSCTL_PROC(_hw, OID_AUTO, mds_disable, CTLTYPE_INT |
1185 CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1186 sysctl_mds_disable_handler, "I",
1187 "Microarchitectural Data Sampling Mitigation "
1188 "(0 - off, 1 - on VERW, 2 - on SW, 3 - on AUTO)");
1189
1190 SYSCTL_PROC(_machdep_mitigations_mds, OID_AUTO, disable, CTLTYPE_INT |
1191 CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1192 sysctl_mds_disable_handler, "I",
1193 "Microarchitectural Data Sampling Mitigation "
1194 "(0 - off, 1 - on VERW, 2 - on SW, 3 - on AUTO)");
1195
1196 /*
1197 * Intel Transactional Memory Asynchronous Abort Mitigation
1198 * CVE-2019-11135
1199 */
1200 int x86_taa_enable;
1201 int x86_taa_state;
1202 enum {
1203 TAA_NONE = 0, /* No mitigation enabled */
1204 TAA_TSX_DISABLE = 1, /* Disable TSX via MSR */
1205 TAA_VERW = 2, /* Use VERW mitigation */
1206 TAA_AUTO = 3, /* Automatically select the mitigation */
1207
1208 /* The states below are not selectable by the operator */
1209
1210 TAA_TAA_UC = 4, /* Mitigation present in microcode */
1211 TAA_NOT_PRESENT = 5 /* TSX is not present */
1212 };
1213
1214 static void
1215 taa_set(bool enable, bool all)
1216 {
1217
1218 x86_msr_op(MSR_IA32_TSX_CTRL,
1219 (enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
1220 (all ? MSR_OP_RENDEZVOUS : MSR_OP_LOCAL),
1221 IA32_TSX_CTRL_RTM_DISABLE | IA32_TSX_CTRL_TSX_CPUID_CLEAR);
1222 }
1223
1224 void
1225 x86_taa_recalculate(void)
1226 {
1227 static int taa_saved_mds_disable = 0;
1228 int taa_need = 0, taa_state = 0;
1229 int mds_disable = 0, need_mds_recalc = 0;
1230
1231 /* Check CPUID.07h.EBX.HLE and RTM for the presence of TSX */
1232 if ((cpu_stdext_feature & CPUID_STDEXT_HLE) == 0 ||
1233 (cpu_stdext_feature & CPUID_STDEXT_RTM) == 0) {
1234 /* TSX is not present */
1235 x86_taa_state = TAA_NOT_PRESENT;
1236 return;
1237 }
1238
1239 /* Check to see what mitigation options the CPU gives us */
1240 if (cpu_ia32_arch_caps & IA32_ARCH_CAP_TAA_NO) {
1241 /* CPU is not susceptible to TAA */
1242 taa_need = TAA_TAA_UC;
1243 } else if (cpu_ia32_arch_caps & IA32_ARCH_CAP_TSX_CTRL) {
1244 /*
1245 * CPU can turn off TSX. This is the next best option
1246 * if TAA_NO hardware mitigation isn't present.
1247 */
1248 taa_need = TAA_TSX_DISABLE;
1249 } else {
1250 /* No TSX/TAA specific remedies are available. */
1251 if (x86_taa_enable == TAA_TSX_DISABLE) {
1252 if (bootverbose)
1253 printf("TSX control not available\n");
1254 return;
1255 } else
1256 taa_need = TAA_VERW;
1257 }
1258
1259 /* Can we automatically take action, or are we being forced? */
1260 if (x86_taa_enable == TAA_AUTO)
1261 taa_state = taa_need;
1262 else
1263 taa_state = x86_taa_enable;
1264
1265 /* No state change, nothing to do */
1266 if (taa_state == x86_taa_state) {
1267 if (bootverbose)
1268 printf("No TSX change made\n");
1269 return;
1270 }
1271
1272 /* Does the MSR need to be turned on or off? */
1273 if (taa_state == TAA_TSX_DISABLE)
1274 taa_set(true, true);
1275 else if (x86_taa_state == TAA_TSX_DISABLE)
1276 taa_set(false, true);
1277
1278 /* Does MDS need to be set to turn on VERW? */
1279 if (taa_state == TAA_VERW) {
1280 taa_saved_mds_disable = hw_mds_disable;
1281 mds_disable = hw_mds_disable = 1;
1282 need_mds_recalc = 1;
1283 } else if (x86_taa_state == TAA_VERW) {
1284 mds_disable = hw_mds_disable = taa_saved_mds_disable;
1285 need_mds_recalc = 1;
1286 }
1287 if (need_mds_recalc) {
1288 hw_mds_recalculate();
1289 if (mds_disable != hw_mds_disable) {
1290 if (bootverbose)
1291 printf("Cannot change MDS state for TAA\n");
1292 /* Don't update our state */
1293 return;
1294 }
1295 }
1296
1297 x86_taa_state = taa_state;
1298 return;
1299 }
1300
1301 static void
1302 taa_recalculate_boot(void *arg __unused)
1303 {
1304
1305 x86_taa_recalculate();
1306 }
1307 SYSINIT(taa_recalc, SI_SUB_SMP, SI_ORDER_ANY, taa_recalculate_boot, NULL);
1308
1309 SYSCTL_NODE(_machdep_mitigations, OID_AUTO, taa,
1310 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1311 "TSX Asynchronous Abort Mitigation");
1312
1313 static int
1314 sysctl_taa_handler(SYSCTL_HANDLER_ARGS)
1315 {
1316 int error, val;
1317
1318 val = x86_taa_enable;
1319 error = sysctl_handle_int(oidp, &val, 0, req);
1320 if (error != 0 || req->newptr == NULL)
1321 return (error);
1322 if (val < TAA_NONE || val > TAA_AUTO)
1323 return (EINVAL);
1324 x86_taa_enable = val;
1325 x86_taa_recalculate();
1326 return (0);
1327 }
1328
1329 SYSCTL_PROC(_machdep_mitigations_taa, OID_AUTO, enable, CTLTYPE_INT |
1330 CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1331 sysctl_taa_handler, "I",
1332 "TAA Mitigation enablement control "
1333 "(0 - off, 1 - disable TSX, 2 - VERW, 3 - on AUTO)");
1334
1335 static int
1336 sysctl_taa_state_handler(SYSCTL_HANDLER_ARGS)
1337 {
1338 const char *state;
1339
1340 switch (x86_taa_state) {
1341 case TAA_NONE:
1342 state = "inactive";
1343 break;
1344 case TAA_TSX_DISABLE:
1345 state = "TSX disabled";
1346 break;
1347 case TAA_VERW:
1348 state = "VERW";
1349 break;
1350 case TAA_TAA_UC:
1351 state = "Mitigated in microcode";
1352 break;
1353 case TAA_NOT_PRESENT:
1354 state = "TSX not present";
1355 break;
1356 default:
1357 state = "unknown";
1358 }
1359
1360 return (SYSCTL_OUT(req, state, strlen(state)));
1361 }
1362
1363 SYSCTL_PROC(_machdep_mitigations_taa, OID_AUTO, state,
1364 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1365 sysctl_taa_state_handler, "A",
1366 "TAA Mitigation state");
1367
1368 int __read_frequently cpu_flush_rsb_ctxsw;
1369 SYSCTL_INT(_machdep_mitigations, OID_AUTO, flush_rsb_ctxsw,
1370 CTLFLAG_RW | CTLFLAG_NOFETCH, &cpu_flush_rsb_ctxsw, 0,
1371 "Flush Return Stack Buffer on context switch");
1372
1373 SYSCTL_NODE(_machdep_mitigations, OID_AUTO, rngds,
1374 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1375 "MCU Optimization, disable RDSEED mitigation");
1376
1377 int x86_rngds_mitg_enable = 1;
1378 void
1379 x86_rngds_mitg_recalculate(bool all_cpus)
1380 {
1381 if ((cpu_stdext_feature3 & CPUID_STDEXT3_MCUOPT) == 0)
1382 return;
1383 x86_msr_op(MSR_IA32_MCU_OPT_CTRL,
1384 (x86_rngds_mitg_enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
1385 (all_cpus ? MSR_OP_RENDEZVOUS : MSR_OP_LOCAL),
1386 IA32_RNGDS_MITG_DIS);
1387 }
1388
1389 static int
1390 sysctl_rngds_mitg_enable_handler(SYSCTL_HANDLER_ARGS)
1391 {
1392 int error, val;
1393
1394 val = x86_rngds_mitg_enable;
1395 error = sysctl_handle_int(oidp, &val, 0, req);
1396 if (error != 0 || req->newptr == NULL)
1397 return (error);
1398 x86_rngds_mitg_enable = val;
1399 x86_rngds_mitg_recalculate(true);
1400 return (0);
1401 }
1402 SYSCTL_PROC(_machdep_mitigations_rngds, OID_AUTO, enable, CTLTYPE_INT |
1403 CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1404 sysctl_rngds_mitg_enable_handler, "I",
1405 "MCU Optimization, disabling RDSEED mitigation control "
1406 "(0 - mitigation disabled (RDSEED optimized), 1 - mitigation enabled)");
1407
1408 static int
1409 sysctl_rngds_state_handler(SYSCTL_HANDLER_ARGS)
1410 {
1411 const char *state;
1412
1413 if ((cpu_stdext_feature3 & CPUID_STDEXT3_MCUOPT) == 0) {
1414 state = "Not applicable";
1415 } else if (x86_rngds_mitg_enable == 0) {
1416 state = "RDSEED not serialized";
1417 } else {
1418 state = "Mitigated";
1419 }
1420 return (SYSCTL_OUT(req, state, strlen(state)));
1421 }
1422 SYSCTL_PROC(_machdep_mitigations_rngds, OID_AUTO, state,
1423 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1424 sysctl_rngds_state_handler, "A",
1425 "MCU Optimization state");
1426
1427 /*
1428 * Enable and restore kernel text write permissions.
1429 * Callers must ensure that disable_wp()/restore_wp() pairs are
1430 * executed on the same core without rescheduling in between.
1431 */
1432 bool
1433 disable_wp(void)
1434 {
1435 u_int cr0;
1436
1437 cr0 = rcr0();
1438 if ((cr0 & CR0_WP) == 0)
1439 return (false);
1440 load_cr0(cr0 & ~CR0_WP);
1441 return (true);
1442 }
1443
1444 void
1445 restore_wp(bool old_wp)
1446 {
1447
1448 if (old_wp)
1449 load_cr0(rcr0() | CR0_WP);
1450 }
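/*
 * A minimal sketch of the intended pairing (assuming a caller that
 * patches one byte of kernel text and is already in a critical
 * section or bound to a CPU, so it cannot migrate between the two
 * calls; patch_addr and new_byte are hypothetical):
 *
 *	bool wp_set;
 *
 *	wp_set = disable_wp();
 *	*(uint8_t *)patch_addr = new_byte;
 *	restore_wp(wp_set);
 */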
1451
1452 bool
1453 acpi_get_fadt_bootflags(uint16_t *flagsp)
1454 {
1455 #ifdef DEV_ACPI
1456 ACPI_TABLE_FADT *fadt;
1457 vm_paddr_t physaddr;
1458
1459 physaddr = acpi_find_table(ACPI_SIG_FADT);
1460 if (physaddr == 0)
1461 return (false);
1462 fadt = acpi_map_table(physaddr, ACPI_SIG_FADT);
1463 if (fadt == NULL)
1464 return (false);
1465 *flagsp = fadt->BootFlags;
1466 acpi_unmap_table(fadt);
1467 return (true);
1468 #else
1469 return (false);
1470 #endif
1471 }
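/*
 * Illustrative use (a sketch, not code from this file): early MD
 * code can consult the FADT boot architecture flags before probing
 * legacy hardware, e.g.
 *
 *	uint16_t bootflags;
 *
 *	if (acpi_get_fadt_bootflags(&bootflags) &&
 *	    (bootflags & ACPI_FADT_8042) == 0)
 *		skip_atkbdc_probe();
 *
 * where skip_atkbdc_probe() stands in for whatever the caller does
 * when firmware reports that no i8042 controller is present.
 */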
1472