1 /*-
2 * Copyright (c) 2003 Peter Wemm.
3 * Copyright (c) 1992 Terrence R. Lambert.
4 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * William Jolitz.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
39 */
40
41 #include <sys/cdefs.h>
42 #include "opt_acpi.h"
43 #include "opt_atpic.h"
44 #include "opt_cpu.h"
45 #include "opt_ddb.h"
46 #include "opt_inet.h"
47 #include "opt_isa.h"
48 #include "opt_kdb.h"
49 #include "opt_kstack_pages.h"
50 #include "opt_maxmem.h"
51 #include "opt_platform.h"
52 #include "opt_sched.h"
53 #ifdef __i386__
54 #include "opt_apic.h"
55 #endif
56
57 #include <sys/param.h>
58 #include <sys/proc.h>
59 #include <sys/systm.h>
60 #include <sys/bus.h>
61 #include <sys/cpu.h>
62 #include <sys/domainset.h>
63 #include <sys/kdb.h>
64 #include <sys/kernel.h>
65 #include <sys/ktr.h>
66 #include <sys/lock.h>
67 #include <sys/malloc.h>
68 #include <sys/mutex.h>
69 #include <sys/pcpu.h>
70 #include <sys/rwlock.h>
71 #include <sys/sched.h>
72 #include <sys/smp.h>
73 #include <sys/sysctl.h>
74
75 #include <machine/clock.h>
76 #include <machine/cpu.h>
77 #include <machine/cpufunc.h>
78 #include <machine/cputypes.h>
79 #include <machine/specialreg.h>
80 #include <machine/md_var.h>
81 #include <machine/tss.h>
82 #ifdef SMP
83 #include <machine/smp.h>
84 #endif
85 #ifdef CPU_ELAN
86 #include <machine/elan_mmcr.h>
87 #endif
88 #include <x86/acpica_machdep.h>
89 #include <x86/ifunc.h>
90
91 #include <vm/vm.h>
92 #include <vm/vm_extern.h>
93 #include <vm/vm_kern.h>
94 #include <vm/vm_page.h>
95 #include <vm/vm_map.h>
96 #include <vm/vm_object.h>
97 #include <vm/vm_pager.h>
98 #include <vm/vm_param.h>
99
100 #include <isa/isareg.h>
101
102 #include <contrib/dev/acpica/include/acpi.h>
103
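/*
 * Per-CPU idle state, kept in the monitorbuf.  A CPU advertising
 * STATE_MWAIT is parked in MONITOR/MWAIT on its idle_state word, so
 * cpu_idle_wakeup() can release it with a plain store; STATE_SLEEPING
 * means the CPU is halted and needs an interrupt to wake up.
 */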
104 #define STATE_RUNNING 0x0
105 #define STATE_MWAIT 0x1
106 #define STATE_SLEEPING 0x2
107
108 #ifdef SMP
109 static u_int cpu_reset_proxyid;
110 static volatile u_int cpu_reset_proxy_active;
111 #endif
112
113 char bootmethod[16];
114 SYSCTL_STRING(_machdep, OID_AUTO, bootmethod, CTLFLAG_RD, bootmethod, 0,
115 "System firmware boot method");
116
117 struct msr_op_arg {
118 u_int msr;
119 int op;
120 uint64_t arg1;
121 uint64_t *res;
122 };
123
124 static void
125 x86_msr_op_one(void *argp)
126 {
127 struct msr_op_arg *a;
128 uint64_t v;
129
130 a = argp;
131 switch (a->op) {
132 case MSR_OP_ANDNOT:
133 v = rdmsr(a->msr);
134 v &= ~a->arg1;
135 wrmsr(a->msr, v);
136 break;
137 case MSR_OP_OR:
138 v = rdmsr(a->msr);
139 v |= a->arg1;
140 wrmsr(a->msr, v);
141 break;
142 case MSR_OP_WRITE:
143 wrmsr(a->msr, a->arg1);
144 break;
145 case MSR_OP_READ:
146 v = rdmsr(a->msr);
147 *a->res = v;
148 break;
149 }
150 }
151
152 #define MSR_OP_EXMODE_MASK 0xf0000000
153 #define MSR_OP_OP_MASK 0x000000ff
154 #define MSR_OP_GET_CPUID(x) (((x) & ~MSR_OP_EXMODE_MASK) >> 8)
155
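/*
 * Perform an MSR operation locally, on one CPU, or on all CPUs.  The
 * 'op' word encodes the operation in its low byte, the execution mode
 * (local, sched_bind or rendezvous, one CPU or all) in its top nibble,
 * and, for the *_ONE modes, the target CPU id in the bits in between
 * (extracted by MSR_OP_GET_CPUID() above).
 */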
156 void
157 x86_msr_op(u_int msr, u_int op, uint64_t arg1, uint64_t *res)
158 {
159 struct thread *td;
160 struct msr_op_arg a;
161 cpuset_t set;
162 u_int exmode;
163 int bound_cpu, cpu, i, is_bound;
164
165 a.op = op & MSR_OP_OP_MASK;
166 MPASS(a.op == MSR_OP_ANDNOT || a.op == MSR_OP_OR ||
167 a.op == MSR_OP_WRITE || a.op == MSR_OP_READ);
168 exmode = op & MSR_OP_EXMODE_MASK;
169 MPASS(exmode == MSR_OP_LOCAL || exmode == MSR_OP_SCHED_ALL ||
170 exmode == MSR_OP_SCHED_ONE || exmode == MSR_OP_RENDEZVOUS_ALL ||
171 exmode == MSR_OP_RENDEZVOUS_ONE);
172 a.msr = msr;
173 a.arg1 = arg1;
174 a.res = res;
175 switch (exmode) {
176 case MSR_OP_LOCAL:
177 x86_msr_op_one(&a);
178 break;
179 case MSR_OP_SCHED_ALL:
180 td = curthread;
181 thread_lock(td);
182 is_bound = sched_is_bound(td);
183 bound_cpu = td->td_oncpu;
184 CPU_FOREACH(i) {
185 sched_bind(td, i);
186 x86_msr_op_one(&a);
187 }
188 if (is_bound)
189 sched_bind(td, bound_cpu);
190 else
191 sched_unbind(td);
192 thread_unlock(td);
193 break;
194 case MSR_OP_SCHED_ONE:
195 td = curthread;
196 cpu = MSR_OP_GET_CPUID(op);
197 thread_lock(td);
198 is_bound = sched_is_bound(td);
199 bound_cpu = td->td_oncpu;
200 if (!is_bound || bound_cpu != cpu)
201 sched_bind(td, cpu);
202 x86_msr_op_one(&a);
203 if (is_bound) {
204 if (bound_cpu != cpu)
205 sched_bind(td, bound_cpu);
206 } else {
207 sched_unbind(td);
208 }
209 thread_unlock(td);
210 break;
211 case MSR_OP_RENDEZVOUS_ALL:
212 smp_rendezvous(smp_no_rendezvous_barrier, x86_msr_op_one,
213 smp_no_rendezvous_barrier, &a);
214 break;
215 case MSR_OP_RENDEZVOUS_ONE:
216 cpu = MSR_OP_GET_CPUID(op);
217 CPU_SETOF(cpu, &set);
218 smp_rendezvous_cpus(set, smp_no_rendezvous_barrier,
219 x86_msr_op_one, smp_no_rendezvous_barrier, &a);
220 break;
221 }
222 }
223
224 /*
225 * Automatically initialized according to CPU errata in cpu_idle_tun() below.
226 */
227 bool mwait_cpustop_broken = false;
228 SYSCTL_BOOL(_machdep, OID_AUTO, mwait_cpustop_broken, CTLFLAG_RDTUN,
229 &mwait_cpustop_broken, 0,
230 "Can not reliably wake MONITOR/MWAIT cpus without interrupts");
231
232 /*
233 * Flush the D-cache for non-DMA I/O so that the I-cache can
234 * be made coherent later.
235 */
236 void
237 cpu_flush_dcache(void *ptr, size_t len)
238 {
239 /* Not applicable */
240 }
241
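/*
 * Enter C1 with "sti; hlt".  Since the effect of sti is deferred until
 * after the next instruction, no interrupt can be delivered between
 * enabling interrupts and halting, so a pending interrupt is guaranteed
 * to wake the hlt.
 */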
242 void
243 acpi_cpu_c1(void)
244 {
245
246 __asm __volatile("sti; hlt");
247 }
248
249 /*
250 * Use mwait to pause execution while waiting for an interrupt or
251 * another thread to signal that there is more work.
252 *
253 * NOTE: Interrupts will cause a wakeup; however, this function does
254 * not enable interrupt handling. The caller is responsible to enable
255 * interrupts.
256 */
257 void
258 acpi_cpu_idle_mwait(uint32_t mwait_hint)
259 {
260 int *state;
261 uint64_t v;
262
263 /*
264 * A comment in a Linux patch claims that 'CPUs run faster with
265 * speculation protection disabled. All CPU threads in a core
266 * must disable speculation protection for it to be
267 * disabled. Disable it while we are idle so the other
268 * hyperthread can run fast.'
269 *
270 * XXXKIB. Software coordination mode should be supported,
271 * but all Intel CPUs provide hardware coordination.
272 */
273
274 state = &PCPU_PTR(monitorbuf)->idle_state;
275 KASSERT(atomic_load_int(state) == STATE_SLEEPING,
276 ("cpu_mwait_cx: wrong monitorbuf state"));
277 atomic_store_int(state, STATE_MWAIT);
278 if (PCPU_GET(ibpb_set) || hw_ssb_active) {
279 v = rdmsr(MSR_IA32_SPEC_CTRL);
280 wrmsr(MSR_IA32_SPEC_CTRL, v & ~(IA32_SPEC_CTRL_IBRS |
281 IA32_SPEC_CTRL_STIBP | IA32_SPEC_CTRL_SSBD));
282 } else {
283 v = 0;
284 }
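/*
 * Arm the monitor on the idle_state word before re-checking it; a
 * racing wakeup either fails the check below or triggers the monitor
 * so that mwait returns immediately.  A non-zero 'v' also records
 * that IA32_SPEC_CTRL was modified and must be restored afterwards.
 */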
285 cpu_monitor(state, 0, 0);
286 if (atomic_load_int(state) == STATE_MWAIT)
287 cpu_mwait(MWAIT_INTRBREAK, mwait_hint);
288
289 /*
290 * SSB cannot be disabled while we sleep, or rather, if it was
291 * disabled, the sysctl thread will bind to our cpu to tweak
292 * MSR.
293 */
294 if (v != 0)
295 wrmsr(MSR_IA32_SPEC_CTRL, v);
296
297 /*
298 * We should exit on any event that interrupts mwait, because
299 * that event might be a wanted interrupt.
300 */
301 atomic_store_int(state, STATE_RUNNING);
302 }
303
304 /* Get current clock frequency for the given cpu id. */
305 int
306 cpu_est_clockrate(int cpu_id, uint64_t *rate)
307 {
308 uint64_t tsc1, tsc2;
309 uint64_t acnt, mcnt, perf;
310 register_t reg;
311
312 if (pcpu_find(cpu_id) == NULL || rate == NULL)
313 return (EINVAL);
314 #ifdef __i386__
315 if ((cpu_feature & CPUID_TSC) == 0)
316 return (EOPNOTSUPP);
317 #endif
318
319 /*
320 * If TSC is P-state invariant and APERF/MPERF MSRs do not exist,
321 * DELAY(9) based logic fails.
322 */
323 if (tsc_is_invariant && !tsc_perf_stat)
324 return (EOPNOTSUPP);
325
326 #ifdef SMP
327 if (smp_cpus > 1) {
328 /* Schedule ourselves on the indicated cpu. */
329 thread_lock(curthread);
330 sched_bind(curthread, cpu_id);
331 thread_unlock(curthread);
332 }
333 #endif
334
335 /* Calibrate by measuring a short delay. */
336 reg = intr_disable();
337 if (tsc_is_invariant) {
338 wrmsr(MSR_MPERF, 0);
339 wrmsr(MSR_APERF, 0);
340 tsc1 = rdtsc();
341 DELAY(1000);
342 mcnt = rdmsr(MSR_MPERF);
343 acnt = rdmsr(MSR_APERF);
344 tsc2 = rdtsc();
345 intr_restore(reg);
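/*
 * Scale the TSC delta by the APERF/MPERF ratio to account for the
 * actual (throttled or boosted) frequency during the sample; the
 * factor of 1000 converts the 1 ms measurement interval to Hz.
 */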
346 perf = 1000 * acnt / mcnt;
347 *rate = (tsc2 - tsc1) * perf;
348 } else {
349 tsc1 = rdtsc();
350 DELAY(1000);
351 tsc2 = rdtsc();
352 intr_restore(reg);
353 *rate = (tsc2 - tsc1) * 1000;
354 }
355
356 #ifdef SMP
357 if (smp_cpus > 1) {
358 thread_lock(curthread);
359 sched_unbind(curthread);
360 thread_unlock(curthread);
361 }
362 #endif
363
364 return (0);
365 }
366
367 /*
368 * Shutdown the CPU as much as possible
369 */
370 void
371 cpu_halt(void)
372 {
373 for (;;)
374 halt();
375 }
376
377 static void
378 cpu_reset_real(void)
379 {
380 struct region_descriptor null_idt;
381 int b;
382
383 disable_intr();
384 #ifdef CPU_ELAN
385 if (elan_mmcr != NULL)
386 elan_mmcr->RESCFG = 1;
387 #endif
388 #ifdef __i386__
389 if (cpu == CPU_GEODE1100) {
390 /* Attempt Geode's own reset */
391 outl(0xcf8, 0x80009044ul);
392 outl(0xcfc, 0xf);
393 }
394 #endif
395 #if !defined(BROKEN_KEYBOARD_RESET)
396 /*
397 * Attempt to do a CPU reset via the keyboard controller,
398 * do not turn off GateA20, as any machine that fails
399 * to do the reset here would then end up in no man's land.
400 */
401 outb(IO_KBD + 4, 0xFE);
402 DELAY(500000); /* wait 0.5 sec to see if that did it */
403 #endif
404
405 /*
406 * Attempt to force a reset via the Reset Control register at
407 * I/O port 0xcf9. Bit 2 forces a system reset when it
408 * transitions from 0 to 1. Bit 1 selects the type of reset
409 * to attempt: 0 selects a "soft" reset, and 1 selects a
410 * "hard" reset. We try a "hard" reset. The first write sets
411 * bit 1 to select a "hard" reset and clears bit 2. The
412 * second write forces a 0 -> 1 transition in bit 2 to trigger
413 * a reset.
414 */
415 outb(0xcf9, 0x2);
416 outb(0xcf9, 0x6);
417 DELAY(500000); /* wait 0.5 sec to see if that did it */
418
419 /*
420 * Attempt to force a reset via the Fast A20 and Init register
421 * at I/O port 0x92. Bit 1 serves as an alternate A20 gate.
422 * Bit 0 asserts INIT# when set to 1. We are careful to only
423 * preserve bit 1 while setting bit 0. We also must clear bit
424 * 0 before setting it if it isn't already clear.
425 */
426 b = inb(0x92);
427 if (b != 0xff) {
428 if ((b & 0x1) != 0)
429 outb(0x92, b & 0xfe);
430 outb(0x92, b | 0x1);
431 DELAY(500000); /* wait 0.5 sec to see if that did it */
432 }
433
434 printf("No known reset method worked, attempting CPU shutdown\n");
435 DELAY(1000000); /* wait 1 sec for printf to complete */
436
437 /* Wipe the IDT. */
438 null_idt.rd_limit = 0;
439 null_idt.rd_base = 0;
440 lidt(&null_idt);
441
442 /* "good night, sweet prince .... <THUNK!>" */
443 breakpoint();
444
445 /* NOTREACHED */
446 while(1);
447 }
448
449 #ifdef SMP
450 static void
451 cpu_reset_proxy(void)
452 {
453
454 cpu_reset_proxy_active = 1;
455 while (cpu_reset_proxy_active == 1)
456 ia32_pause(); /* Wait for other cpu to see that we've started */
457
458 printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
459 DELAY(1000000);
460 cpu_reset_real();
461 }
462 #endif
463
464 void
465 cpu_reset(void)
466 {
467 #ifdef SMP
468 struct monitorbuf *mb;
469 cpuset_t map;
470 u_int cnt;
471
472 if (smp_started) {
473 map = all_cpus;
474 CPU_CLR(PCPU_GET(cpuid), &map);
475 CPU_ANDNOT(&map, &map, &stopped_cpus);
476 if (!CPU_EMPTY(&map)) {
477 printf("cpu_reset: Stopping other CPUs\n");
478 stop_cpus(map);
479 }
480
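/*
 * If we are not running on the BSP, hand the reset off to a proxy:
 * restart CPU 0 with cpustop_restartfunc pointing at cpu_reset_proxy()
 * and park this CPU, so that the actual reset is always issued from
 * the BSP.
 */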
481 if (PCPU_GET(cpuid) != 0) {
482 cpu_reset_proxyid = PCPU_GET(cpuid);
483 cpustop_restartfunc = cpu_reset_proxy;
484 cpu_reset_proxy_active = 0;
485 printf("cpu_reset: Restarting BSP\n");
486
487 /* Restart CPU #0. */
488 CPU_SETOF(0, &started_cpus);
489 mb = &pcpu_find(0)->pc_monitorbuf;
490 atomic_store_int(&mb->stop_state,
491 MONITOR_STOPSTATE_RUNNING);
492
493 cnt = 0;
494 while (cpu_reset_proxy_active == 0 && cnt < 10000000) {
495 ia32_pause();
496 cnt++; /* Wait for BSP to announce restart */
497 }
498 if (cpu_reset_proxy_active == 0) {
499 printf("cpu_reset: Failed to restart BSP\n");
500 } else {
501 cpu_reset_proxy_active = 2;
502 while (1)
503 ia32_pause();
504 /* NOTREACHED */
505 }
506 }
507 }
508 #endif
509 cpu_reset_real();
510 /* NOTREACHED */
511 }
512
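/*
 * MONITOR/MWAIT is considered usable for idling only if CPUID leaf 5
 * advertises both the MWAIT extensions and the "interrupts break MWAIT
 * even when disabled" capability relied upon by acpi_cpu_idle_mwait().
 */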
513 bool
514 cpu_mwait_usable(void)
515 {
516
517 return ((cpu_feature2 & CPUID2_MON) != 0 && ((cpu_mon_mwait_flags &
518 (CPUID5_MON_MWAIT_EXT | CPUID5_MWAIT_INTRBREAK)) ==
519 (CPUID5_MON_MWAIT_EXT | CPUID5_MWAIT_INTRBREAK)));
520 }
521
522 void (*cpu_idle_hook)(sbintime_t) = NULL; /* ACPI idle hook. */
523
524 int cpu_amdc1e_bug = 0; /* AMD C1E APIC workaround required. */
525
526 static int idle_mwait = 1; /* Use MONITOR/MWAIT for short idle. */
527 SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RWTUN, &idle_mwait,
528 0, "Use MONITOR/MWAIT for short idle");
529
530 static bool
531 cpu_idle_enter(int *statep, int newstate)
532 {
533 KASSERT(atomic_load_int(statep) == STATE_RUNNING,
534 ("%s: state %d", __func__, atomic_load_int(statep)));
535
536 /*
537 * A fence is needed to prevent reordering of the load in
538 * sched_runnable() with this store to the idle state word. Without it,
539 * cpu_idle_wakeup() can observe the state as STATE_RUNNING after having
540 * added load to the queue, and elide an IPI. Then, sched_runnable()
541 * can observe tdq_load == 0, so the CPU ends up idling with pending
542 * work. tdq_notify() similarly ensures that a prior update to tdq_load
543 * is visible before calling cpu_idle_wakeup().
544 */
545 atomic_store_int(statep, newstate);
546 #if defined(SCHED_ULE) && defined(SMP)
547 atomic_thread_fence_seq_cst();
548 #endif
549
550 /*
551 * Since we may be in a critical section from cpu_idle(), if
552 * an interrupt fires during that critical section we may have
553 * a pending preemption. If the CPU halts, then that thread
554 * may not execute until a later interrupt awakens the CPU.
555 * To handle this race, check for a runnable thread after
556 * disabling interrupts and immediately return if one is
557 * found. Also, we must absolutely guarantee that hlt is
558 * the next instruction after sti. This ensures that any
559 * interrupt that fires after the call to disable_intr() will
560 * immediately awaken the CPU from hlt. Finally, note that on
561 * x86 this works because interrupt delivery is deferred until
562 * after the instruction following sti, even though IF is set
563 * to 1 immediately, allowing the hlt instruction to acknowledge
564 * the interrupt.
565 */
566 disable_intr();
567 if (sched_runnable()) {
568 enable_intr();
569 atomic_store_int(statep, STATE_RUNNING);
570 return (false);
571 } else {
572 return (true);
573 }
574 }
575
576 static void
577 cpu_idle_exit(int *statep)
578 {
579 atomic_store_int(statep, STATE_RUNNING);
580 }
581
582 static void
583 cpu_idle_acpi(sbintime_t sbt)
584 {
585 int *state;
586
587 state = &PCPU_PTR(monitorbuf)->idle_state;
588 if (cpu_idle_enter(state, STATE_SLEEPING)) {
589 if (cpu_idle_hook)
590 cpu_idle_hook(sbt);
591 else
592 acpi_cpu_c1();
593 cpu_idle_exit(state);
594 }
595 }
596
597 static void
598 cpu_idle_hlt(sbintime_t sbt)
599 {
600 int *state;
601
602 state = &PCPU_PTR(monitorbuf)->idle_state;
603 if (cpu_idle_enter(state, STATE_SLEEPING)) {
604 acpi_cpu_c1();
605 atomic_store_int(state, STATE_RUNNING);
606 }
607 }
608
609 static void
610 cpu_idle_mwait(sbintime_t sbt)
611 {
612 int *state;
613
614 state = &PCPU_PTR(monitorbuf)->idle_state;
615 if (cpu_idle_enter(state, STATE_MWAIT)) {
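/*
 * Arm the monitor before re-checking the idle state: a concurrent
 * cpu_idle_wakeup() stores STATE_RUNNING, which either fails the
 * check below or triggers the armed monitor so mwait returns at
 * once.  The sti takes effect only after the following instruction,
 * so mwait is reached before any interrupt can be delivered,
 * mirroring the "sti; hlt" idiom.
 */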
616 cpu_monitor(state, 0, 0);
617 if (atomic_load_int(state) == STATE_MWAIT)
618 __asm __volatile("sti; mwait" : : "a" (MWAIT_C1), "c" (0));
619 else
620 enable_intr();
621 cpu_idle_exit(state);
622 }
623 }
624
625 static void
626 cpu_idle_spin(sbintime_t sbt)
627 {
628 int *state;
629 int i;
630
631 state = &PCPU_PTR(monitorbuf)->idle_state;
632 atomic_store_int(state, STATE_RUNNING);
633
634 /*
635 * The sched_runnable() call is racy, but since it is made inside
636 * a loop, missing it once has little impact, if any (and it is
637 * much better than not checking at all).
638 */
639 for (i = 0; i < 1000; i++) {
640 if (sched_runnable())
641 return;
642 cpu_spinwait();
643 }
644 }
645
646 void (*cpu_idle_fn)(sbintime_t) = cpu_idle_acpi;
647
648 void
649 cpu_idle(int busy)
650 {
651 uint64_t msr;
652 sbintime_t sbt = -1;
653
654 CTR1(KTR_SPARE2, "cpu_idle(%d)", busy);
655
656 /* If we are busy - try to use fast methods. */
657 if (busy) {
658 if ((cpu_feature2 & CPUID2_MON) && idle_mwait) {
659 cpu_idle_mwait(busy);
660 goto out;
661 }
662 }
663
664 /* If we have time - switch timers into idle mode. */
665 if (!busy) {
666 critical_enter();
667 sbt = cpu_idleclock();
668 }
669
670 /* Apply AMD APIC timer C1E workaround. */
671 if (cpu_amdc1e_bug && cpu_disable_c3_sleep) {
672 msr = rdmsr(MSR_AMDK8_IPM);
673 if ((msr & (AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT)) != 0)
674 wrmsr(MSR_AMDK8_IPM, msr & ~(AMDK8_SMIONCMPHALT |
675 AMDK8_C1EONCMPHALT));
676 }
677
678 /* Call main idle method. */
679 cpu_idle_fn(sbt);
680
681 /* Switch timers back into active mode. */
682 if (!busy) {
683 cpu_activeclock();
684 critical_exit();
685 }
686 out:
687 CTR1(KTR_SPARE2, "cpu_idle(%d) done", busy);
688 }
689
690 static int cpu_idle_apl31_workaround;
691 SYSCTL_INT(_machdep, OID_AUTO, idle_apl31, CTLFLAG_RWTUN | CTLFLAG_NOFETCH,
692 &cpu_idle_apl31_workaround, 0,
693 "Apollo Lake APL31 MWAIT bug workaround");
694
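/*
 * Try to wake an idle CPU without an IPI.  Returns 1 if the CPU was
 * already running or was parked in MWAIT and is released by the store
 * to its idle_state word; returns 0 if the caller must still send an
 * IPI, i.e. the CPU is halted or the APL31 workaround forbids relying
 * on the monitor.
 */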
695 int
696 cpu_idle_wakeup(int cpu)
697 {
698 struct monitorbuf *mb;
699 int *state;
700
701 mb = &pcpu_find(cpu)->pc_monitorbuf;
702 state = &mb->idle_state;
703 switch (atomic_load_int(state)) {
704 case STATE_SLEEPING:
705 return (0);
706 case STATE_MWAIT:
707 atomic_store_int(state, STATE_RUNNING);
708 return (cpu_idle_apl31_workaround ? 0 : 1);
709 case STATE_RUNNING:
710 return (1);
711 default:
712 panic("bad monitor state");
713 return (1);
714 }
715 }
716
717 /*
718 * Ordered by speed/power consumption.
719 */
720 static const struct {
721 void *id_fn;
722 const char *id_name;
723 int id_cpuid2_flag;
724 } idle_tbl[] = {
725 { .id_fn = cpu_idle_spin, .id_name = "spin" },
726 { .id_fn = cpu_idle_mwait, .id_name = "mwait",
727 .id_cpuid2_flag = CPUID2_MON },
728 { .id_fn = cpu_idle_hlt, .id_name = "hlt" },
729 { .id_fn = cpu_idle_acpi, .id_name = "acpi" },
730 };
731
732 static int
733 idle_sysctl_available(SYSCTL_HANDLER_ARGS)
734 {
735 char *avail, *p;
736 int error;
737 int i;
738
739 avail = malloc(256, M_TEMP, M_WAITOK);
740 p = avail;
741 for (i = 0; i < nitems(idle_tbl); i++) {
742 if (idle_tbl[i].id_cpuid2_flag != 0 &&
743 (cpu_feature2 & idle_tbl[i].id_cpuid2_flag) == 0)
744 continue;
745 if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
746 cpu_idle_hook == NULL)
747 continue;
748 p += sprintf(p, "%s%s", p != avail ? ", " : "",
749 idle_tbl[i].id_name);
750 }
751 error = sysctl_handle_string(oidp, avail, 0, req);
752 free(avail, M_TEMP);
753 return (error);
754 }
755
756 SYSCTL_PROC(_machdep, OID_AUTO, idle_available,
757 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
758 0, 0, idle_sysctl_available, "A",
759 "list of available idle functions");
760
761 static bool
762 cpu_idle_selector(const char *new_idle_name)
763 {
764 int i;
765
766 for (i = 0; i < nitems(idle_tbl); i++) {
767 if (idle_tbl[i].id_cpuid2_flag != 0 &&
768 (cpu_feature2 & idle_tbl[i].id_cpuid2_flag) == 0)
769 continue;
770 if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
771 cpu_idle_hook == NULL)
772 continue;
773 if (strcmp(idle_tbl[i].id_name, new_idle_name))
774 continue;
775 cpu_idle_fn = idle_tbl[i].id_fn;
776 if (bootverbose)
777 printf("CPU idle set to %s\n", idle_tbl[i].id_name);
778 return (true);
779 }
780 return (false);
781 }
782
783 static int
784 cpu_idle_sysctl(SYSCTL_HANDLER_ARGS)
785 {
786 char buf[16];
787 const char *p;
788 int error, i;
789
790 p = "unknown";
791 for (i = 0; i < nitems(idle_tbl); i++) {
792 if (idle_tbl[i].id_fn == cpu_idle_fn) {
793 p = idle_tbl[i].id_name;
794 break;
795 }
796 }
797 strncpy(buf, p, sizeof(buf));
798 error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
799 if (error != 0 || req->newptr == NULL)
800 return (error);
801 return (cpu_idle_selector(buf) ? 0 : EINVAL);
802 }
803
804 SYSCTL_PROC(_machdep, OID_AUTO, idle,
805 CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE,
806 0, 0, cpu_idle_sysctl, "A",
807 "currently selected idle function");
808
809 static void
810 cpu_idle_tun(void *unused __unused)
811 {
812 char tunvar[16];
813
814 if (TUNABLE_STR_FETCH("machdep.idle", tunvar, sizeof(tunvar)))
815 cpu_idle_selector(tunvar);
816 else if (cpu_vendor_id == CPU_VENDOR_AMD &&
817 CPUID_TO_FAMILY(cpu_id) == 0x17 && CPUID_TO_MODEL(cpu_id) == 0x1) {
818 /* Ryzen errata 1057, 1109. */
819 cpu_idle_selector("hlt");
820 idle_mwait = 0;
821 mwait_cpustop_broken = true;
822 }
823
824 if (cpu_vendor_id == CPU_VENDOR_INTEL &&
825 CPUID_TO_FAMILY(cpu_id) == 0x6 && CPUID_TO_MODEL(cpu_id) == 0x5c) {
826 /*
827 * Apollo Lake errata APL31 (public errata APL30).
828 * Stores to the armed address range may not trigger
829 * MWAIT to resume execution. OS needs to use
830 * interrupts to wake processors from MWAIT-induced
831 * sleep states.
832 */
833 cpu_idle_apl31_workaround = 1;
834 mwait_cpustop_broken = true;
835 }
836 TUNABLE_INT_FETCH("machdep.idle_apl31", &cpu_idle_apl31_workaround);
837 }
838 SYSINIT(cpu_idle_tun, SI_SUB_CPU, SI_ORDER_MIDDLE, cpu_idle_tun, NULL);
839
840 static int panic_on_nmi = 0xff;
841 SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RWTUN,
842 &panic_on_nmi, 0,
843 "Panic on NMI: 1 = H/W failure; 2 = unknown; 0xff = all");
844 int nmi_is_broadcast = 1;
845 SYSCTL_INT(_machdep, OID_AUTO, nmi_is_broadcast, CTLFLAG_RWTUN,
846 &nmi_is_broadcast, 0,
847 "Chipset NMI is broadcast");
848 int (*apei_nmi)(void);
849
850 void
851 nmi_call_kdb(u_int cpu, u_int type, struct trapframe *frame)
852 {
853 bool claimed = false;
854
855 #ifdef DEV_ISA
856 /* machine/parity/power fail/"kitchen sink" faults */
857 if (isa_nmi(frame->tf_err)) {
858 claimed = true;
859 if ((panic_on_nmi & 1) != 0)
860 panic("NMI indicates hardware failure");
861 }
862 #endif /* DEV_ISA */
863
864 /* ACPI Platform Error Interfaces callback. */
865 if (apei_nmi != NULL && (*apei_nmi)())
866 claimed = true;
867
868 /*
869 * NMIs can be useful for debugging. They can be hooked up to a
870 * pushbutton, usually on an ISA, PCI, or PCIe card. They can also be
871 * generated by an IPMI BMC, either manually or in response to a
872 * watchdog timeout. For example, see the "power diag" command in
873 * ports/sysutils/ipmitool. They can also be generated by a
874 * hypervisor; see "bhyvectl --inject-nmi".
875 */
876
877 #ifdef KDB
878 if (!claimed && (panic_on_nmi & 2) != 0) {
879 if (debugger_on_panic) {
880 printf("NMI/cpu%d ... going to debugger\n", cpu);
881 claimed = kdb_trap(type, 0, frame);
882 }
883 }
884 #endif /* KDB */
885
886 if (!claimed && panic_on_nmi != 0)
887 panic("NMI");
888 }
889
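/*
 * NMI dispatch.  When the chipset delivers NMIs to every CPU
 * (nmi_is_broadcast), nmi_call_kdb_smp() coordinates handling across
 * the CPUs; otherwise the interrupted CPU handles the NMI directly.
 */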
890 void
891 nmi_handle_intr(u_int type, struct trapframe *frame)
892 {
893
894 #ifdef SMP
895 if (nmi_is_broadcast) {
896 nmi_call_kdb_smp(type, frame);
897 return;
898 }
899 #endif
900 nmi_call_kdb(PCPU_GET(cpuid), type, frame);
901 }
902
903 static int hw_ibrs_active;
904 int hw_ibrs_ibpb_active;
905 int hw_ibrs_disable = 1;
906
907 SYSCTL_INT(_hw, OID_AUTO, ibrs_active, CTLFLAG_RD, &hw_ibrs_active, 0,
908 "Indirect Branch Restricted Speculation active");
909
910 SYSCTL_NODE(_machdep_mitigations, OID_AUTO, ibrs,
911 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
912 "Indirect Branch Restricted Speculation active");
913
914 SYSCTL_INT(_machdep_mitigations_ibrs, OID_AUTO, active, CTLFLAG_RD,
915 &hw_ibrs_active, 0, "Indirect Branch Restricted Speculation active");
916
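/*
 * Recompute the IBRS mitigation.  With IBRS_ALL ("enhanced IBRS") the
 * IBRS bit of IA32_SPEC_CTRL is simply set or cleared, locally or on
 * all CPUs.  Without it, the mitigation falls back to issuing IBPB
 * barriers (hw_ibrs_ibpb_active), provided the CPU advertises IBPB.
 */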
917 void
918 hw_ibrs_recalculate(bool for_all_cpus)
919 {
920 if ((cpu_ia32_arch_caps & IA32_ARCH_CAP_IBRS_ALL) != 0) {
921 x86_msr_op(MSR_IA32_SPEC_CTRL, (for_all_cpus ?
922 MSR_OP_RENDEZVOUS_ALL : MSR_OP_LOCAL) |
923 (hw_ibrs_disable != 0 ? MSR_OP_ANDNOT : MSR_OP_OR),
924 IA32_SPEC_CTRL_IBRS, NULL);
925 hw_ibrs_active = hw_ibrs_disable == 0;
926 hw_ibrs_ibpb_active = 0;
927 } else {
928 hw_ibrs_active = hw_ibrs_ibpb_active = (cpu_stdext_feature3 &
929 CPUID_STDEXT3_IBPB) != 0 && !hw_ibrs_disable;
930 }
931 }
932
933 static int
934 hw_ibrs_disable_handler(SYSCTL_HANDLER_ARGS)
935 {
936 int error, val;
937
938 val = hw_ibrs_disable;
939 error = sysctl_handle_int(oidp, &val, 0, req);
940 if (error != 0 || req->newptr == NULL)
941 return (error);
942 hw_ibrs_disable = val != 0;
943 hw_ibrs_recalculate(true);
944 return (0);
945 }
946 SYSCTL_PROC(_hw, OID_AUTO, ibrs_disable, CTLTYPE_INT | CTLFLAG_RWTUN |
947 CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0, hw_ibrs_disable_handler, "I",
948 "Disable Indirect Branch Restricted Speculation");
949
950 SYSCTL_PROC(_machdep_mitigations_ibrs, OID_AUTO, disable, CTLTYPE_INT |
951 CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
952 hw_ibrs_disable_handler, "I",
953 "Disable Indirect Branch Restricted Speculation");
954
955 int hw_ssb_active;
956 int hw_ssb_disable;
957
958 SYSCTL_INT(_hw, OID_AUTO, spec_store_bypass_disable_active, CTLFLAG_RD,
959 &hw_ssb_active, 0,
960 "Speculative Store Bypass Disable active");
961
962 SYSCTL_NODE(_machdep_mitigations, OID_AUTO, ssb,
963 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
964 "Speculative Store Bypass Disable active");
965
966 SYSCTL_INT(_machdep_mitigations_ssb, OID_AUTO, active, CTLFLAG_RD,
967 &hw_ssb_active, 0, "Speculative Store Bypass Disable active");
968
969 static void
970 hw_ssb_set(bool enable, bool for_all_cpus)
971 {
972
973 if ((cpu_stdext_feature3 & CPUID_STDEXT3_SSBD) == 0) {
974 hw_ssb_active = 0;
975 return;
976 }
977 hw_ssb_active = enable;
978 x86_msr_op(MSR_IA32_SPEC_CTRL,
979 (enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
980 (for_all_cpus ? MSR_OP_SCHED_ALL : MSR_OP_LOCAL),
981 IA32_SPEC_CTRL_SSBD, NULL);
982 }
983
984 void
985 hw_ssb_recalculate(bool all_cpus)
986 {
987
988 switch (hw_ssb_disable) {
989 default:
990 hw_ssb_disable = 0;
991 /* FALLTHROUGH */
992 case 0: /* off */
993 hw_ssb_set(false, all_cpus);
994 break;
995 case 1: /* on */
996 hw_ssb_set(true, all_cpus);
997 break;
998 case 2: /* auto */
999 hw_ssb_set((cpu_ia32_arch_caps & IA32_ARCH_CAP_SSB_NO) != 0 ?
1000 false : true, all_cpus);
1001 break;
1002 }
1003 }
1004
1005 static int
1006 hw_ssb_disable_handler(SYSCTL_HANDLER_ARGS)
1007 {
1008 int error, val;
1009
1010 val = hw_ssb_disable;
1011 error = sysctl_handle_int(oidp, &val, 0, req);
1012 if (error != 0 || req->newptr == NULL)
1013 return (error);
1014 hw_ssb_disable = val;
1015 hw_ssb_recalculate(true);
1016 return (0);
1017 }
1018 SYSCTL_PROC(_hw, OID_AUTO, spec_store_bypass_disable, CTLTYPE_INT |
1019 CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1020 hw_ssb_disable_handler, "I",
1021 "Speculative Store Bypass Disable (0 - off, 1 - on, 2 - auto)");
1022
1023 SYSCTL_PROC(_machdep_mitigations_ssb, OID_AUTO, disable, CTLTYPE_INT |
1024 CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1025 hw_ssb_disable_handler, "I",
1026 "Speculative Store Bypass Disable (0 - off, 1 - on, 2 - auto)");
1027
1028 int hw_mds_disable;
1029
1030 /*
1031 * Handler for Microarchitectural Data Sampling issues. Really not a
1032 * pointer to a C function: on amd64 the code must not change any CPU
1033 * architectural state except possibly %rflags. Also, it is always
1034 * called with interrupts disabled.
1035 */
1036 void mds_handler_void(void);
1037 void mds_handler_verw(void);
1038 void mds_handler_ivb(void);
1039 void mds_handler_bdw(void);
1040 void mds_handler_skl_sse(void);
1041 void mds_handler_skl_avx(void);
1042 void mds_handler_skl_avx512(void);
1043 void mds_handler_silvermont(void);
1044 void (*mds_handler)(void) = mds_handler_void;
1045
1046 static int
1047 sysctl_hw_mds_disable_state_handler(SYSCTL_HANDLER_ARGS)
1048 {
1049 const char *state;
1050
1051 if (mds_handler == mds_handler_void)
1052 state = "inactive";
1053 else if (mds_handler == mds_handler_verw)
1054 state = "VERW";
1055 else if (mds_handler == mds_handler_ivb)
1056 state = "software IvyBridge";
1057 else if (mds_handler == mds_handler_bdw)
1058 state = "software Broadwell";
1059 else if (mds_handler == mds_handler_skl_sse)
1060 state = "software Skylake SSE";
1061 else if (mds_handler == mds_handler_skl_avx)
1062 state = "software Skylake AVX";
1063 else if (mds_handler == mds_handler_skl_avx512)
1064 state = "software Skylake AVX512";
1065 else if (mds_handler == mds_handler_silvermont)
1066 state = "software Silvermont";
1067 else
1068 state = "unknown";
1069 return (SYSCTL_OUT(req, state, strlen(state)));
1070 }
1071
1072 SYSCTL_PROC(_hw, OID_AUTO, mds_disable_state,
1073 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1074 sysctl_hw_mds_disable_state_handler, "A",
1075 "Microarchitectural Data Sampling Mitigation state");
1076
1077 SYSCTL_NODE(_machdep_mitigations, OID_AUTO, mds,
1078 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1079 "Microarchitectural Data Sampling Mitigation state");
1080
1081 SYSCTL_PROC(_machdep_mitigations_mds, OID_AUTO, state,
1082 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1083 sysctl_hw_mds_disable_state_handler, "A",
1084 "Microarchitectural Data Sampling Mitigation state");
1085
1086 _Static_assert(__offsetof(struct pcpu, pc_mds_tmp) % 64 == 0, "MDS AVX512");
1087
1088 void
1089 hw_mds_recalculate(void)
1090 {
1091 struct pcpu *pc;
1092 vm_offset_t b64;
1093 u_long xcr0;
1094 int i;
1095
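/*
 * hw_mds_disable: 0 - off, 1 - force VERW, 2 - force the software
 * sequence, 3 - automatic.  The per-CPU scratch buffers allocated
 * below are sized for the corresponding assembly flush sequences
 * (mds_handler_*).
 */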
1096 /*
1097 * Allow the user to force the VERW variant even if MD_CLEAR is
1098 * not reported; for instance, a hypervisor might unknowingly
1099 * filter the capability out.
1100 * For similar reasons, and for testing, allow enabling the
1101 * mitigation even when the MDS_NO cap is set.
1102 */
1103 if (cpu_vendor_id != CPU_VENDOR_INTEL || hw_mds_disable == 0 ||
1104 ((cpu_ia32_arch_caps & IA32_ARCH_CAP_MDS_NO) != 0 &&
1105 hw_mds_disable == 3)) {
1106 mds_handler = mds_handler_void;
1107 } else if (((cpu_stdext_feature3 & CPUID_STDEXT3_MD_CLEAR) != 0 &&
1108 hw_mds_disable == 3) || hw_mds_disable == 1) {
1109 mds_handler = mds_handler_verw;
1110 } else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
1111 (CPUID_TO_MODEL(cpu_id) == 0x2e || CPUID_TO_MODEL(cpu_id) == 0x1e ||
1112 CPUID_TO_MODEL(cpu_id) == 0x1f || CPUID_TO_MODEL(cpu_id) == 0x1a ||
1113 CPUID_TO_MODEL(cpu_id) == 0x2f || CPUID_TO_MODEL(cpu_id) == 0x25 ||
1114 CPUID_TO_MODEL(cpu_id) == 0x2c || CPUID_TO_MODEL(cpu_id) == 0x2d ||
1115 CPUID_TO_MODEL(cpu_id) == 0x2a || CPUID_TO_MODEL(cpu_id) == 0x3e ||
1116 CPUID_TO_MODEL(cpu_id) == 0x3a) &&
1117 (hw_mds_disable == 2 || hw_mds_disable == 3)) {
1118 /*
1119 * Nehalem, SandyBridge, IvyBridge
1120 */
1121 CPU_FOREACH(i) {
1122 pc = pcpu_find(i);
1123 if (pc->pc_mds_buf == NULL) {
1124 pc->pc_mds_buf = malloc_domainset(672, M_TEMP,
1125 DOMAINSET_PREF(pc->pc_domain), M_WAITOK);
1126 bzero(pc->pc_mds_buf, 16);
1127 }
1128 }
1129 mds_handler = mds_handler_ivb;
1130 } else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
1131 (CPUID_TO_MODEL(cpu_id) == 0x3f || CPUID_TO_MODEL(cpu_id) == 0x3c ||
1132 CPUID_TO_MODEL(cpu_id) == 0x45 || CPUID_TO_MODEL(cpu_id) == 0x46 ||
1133 CPUID_TO_MODEL(cpu_id) == 0x56 || CPUID_TO_MODEL(cpu_id) == 0x4f ||
1134 CPUID_TO_MODEL(cpu_id) == 0x47 || CPUID_TO_MODEL(cpu_id) == 0x3d) &&
1135 (hw_mds_disable == 2 || hw_mds_disable == 3)) {
1136 /*
1137 * Haswell, Broadwell
1138 */
1139 CPU_FOREACH(i) {
1140 pc = pcpu_find(i);
1141 if (pc->pc_mds_buf == NULL) {
1142 pc->pc_mds_buf = malloc_domainset(1536, M_TEMP,
1143 DOMAINSET_PREF(pc->pc_domain), M_WAITOK);
1144 bzero(pc->pc_mds_buf, 16);
1145 }
1146 }
1147 mds_handler = mds_handler_bdw;
1148 } else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
1149 ((CPUID_TO_MODEL(cpu_id) == 0x55 && (cpu_id &
1150 CPUID_STEPPING) <= 5) ||
1151 CPUID_TO_MODEL(cpu_id) == 0x4e || CPUID_TO_MODEL(cpu_id) == 0x5e ||
1152 (CPUID_TO_MODEL(cpu_id) == 0x8e && (cpu_id &
1153 CPUID_STEPPING) <= 0xb) ||
1154 (CPUID_TO_MODEL(cpu_id) == 0x9e && (cpu_id &
1155 CPUID_STEPPING) <= 0xc)) &&
1156 (hw_mds_disable == 2 || hw_mds_disable == 3)) {
1157 /*
1158 * Skylake, KabyLake, CoffeeLake, WhiskeyLake,
1159 * CascadeLake
1160 */
1161 CPU_FOREACH(i) {
1162 pc = pcpu_find(i);
1163 if (pc->pc_mds_buf == NULL) {
1164 pc->pc_mds_buf = malloc_domainset(6 * 1024,
1165 M_TEMP, DOMAINSET_PREF(pc->pc_domain),
1166 M_WAITOK);
1167 b64 = (vm_offset_t)malloc_domainset(64 + 63,
1168 M_TEMP, DOMAINSET_PREF(pc->pc_domain),
1169 M_WAITOK);
1170 pc->pc_mds_buf64 = (void *)roundup2(b64, 64);
1171 bzero(pc->pc_mds_buf64, 64);
1172 }
1173 }
1174 xcr0 = rxcr(0);
1175 if ((xcr0 & XFEATURE_ENABLED_ZMM_HI256) != 0 &&
1176 (cpu_stdext_feature & CPUID_STDEXT_AVX512DQ) != 0)
1177 mds_handler = mds_handler_skl_avx512;
1178 else if ((xcr0 & XFEATURE_ENABLED_AVX) != 0 &&
1179 (cpu_feature2 & CPUID2_AVX) != 0)
1180 mds_handler = mds_handler_skl_avx;
1181 else
1182 mds_handler = mds_handler_skl_sse;
1183 } else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
1184 ((CPUID_TO_MODEL(cpu_id) == 0x37 ||
1185 CPUID_TO_MODEL(cpu_id) == 0x4a ||
1186 CPUID_TO_MODEL(cpu_id) == 0x4c ||
1187 CPUID_TO_MODEL(cpu_id) == 0x4d ||
1188 CPUID_TO_MODEL(cpu_id) == 0x5a ||
1189 CPUID_TO_MODEL(cpu_id) == 0x5d ||
1190 CPUID_TO_MODEL(cpu_id) == 0x6e ||
1191 CPUID_TO_MODEL(cpu_id) == 0x65 ||
1192 CPUID_TO_MODEL(cpu_id) == 0x75 ||
1193 CPUID_TO_MODEL(cpu_id) == 0x1c ||
1194 CPUID_TO_MODEL(cpu_id) == 0x26 ||
1195 CPUID_TO_MODEL(cpu_id) == 0x27 ||
1196 CPUID_TO_MODEL(cpu_id) == 0x35 ||
1197 CPUID_TO_MODEL(cpu_id) == 0x36 ||
1198 CPUID_TO_MODEL(cpu_id) == 0x7a))) {
1199 /* Silvermont, Airmont */
1200 CPU_FOREACH(i) {
1201 pc = pcpu_find(i);
1202 if (pc->pc_mds_buf == NULL)
1203 pc->pc_mds_buf = malloc(256, M_TEMP, M_WAITOK);
1204 }
1205 mds_handler = mds_handler_silvermont;
1206 } else {
1207 hw_mds_disable = 0;
1208 mds_handler = mds_handler_void;
1209 }
1210 }
1211
1212 static void
1213 hw_mds_recalculate_boot(void *arg __unused)
1214 {
1215
1216 hw_mds_recalculate();
1217 }
1218 SYSINIT(mds_recalc, SI_SUB_SMP, SI_ORDER_ANY, hw_mds_recalculate_boot, NULL);
1219
1220 static int
1221 sysctl_mds_disable_handler(SYSCTL_HANDLER_ARGS)
1222 {
1223 int error, val;
1224
1225 val = hw_mds_disable;
1226 error = sysctl_handle_int(oidp, &val, 0, req);
1227 if (error != 0 || req->newptr == NULL)
1228 return (error);
1229 if (val < 0 || val > 3)
1230 return (EINVAL);
1231 hw_mds_disable = val;
1232 hw_mds_recalculate();
1233 return (0);
1234 }
1235
1236 SYSCTL_PROC(_hw, OID_AUTO, mds_disable, CTLTYPE_INT |
1237 CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1238 sysctl_mds_disable_handler, "I",
1239 "Microarchitectural Data Sampling Mitigation "
1240 "(0 - off, 1 - on VERW, 2 - on SW, 3 - on AUTO)");
1241
1242 SYSCTL_PROC(_machdep_mitigations_mds, OID_AUTO, disable, CTLTYPE_INT |
1243 CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1244 sysctl_mds_disable_handler, "I",
1245 "Microarchitectural Data Sampling Mitigation "
1246 "(0 - off, 1 - on VERW, 2 - on SW, 3 - on AUTO)");
1247
1248 /*
1249 * Intel Transactional Memory Asynchronous Abort Mitigation
1250 * CVE-2019-11135
1251 */
1252 int x86_taa_enable;
1253 int x86_taa_state;
1254 enum {
1255 TAA_NONE = 0, /* No mitigation enabled */
1256 TAA_TSX_DISABLE = 1, /* Disable TSX via MSR */
1257 TAA_VERW = 2, /* Use VERW mitigation */
1258 TAA_AUTO = 3, /* Automatically select the mitigation */
1259
1260 /* The states below are not selectable by the operator */
1261
1262 TAA_TAA_UC = 4, /* Mitigation present in microcode */
1263 TAA_NOT_PRESENT = 5 /* TSX is not present */
1264 };
1265
1266 static void
1267 taa_set(bool enable, bool all)
1268 {
1269
1270 x86_msr_op(MSR_IA32_TSX_CTRL,
1271 (enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
1272 (all ? MSR_OP_RENDEZVOUS_ALL : MSR_OP_LOCAL),
1273 IA32_TSX_CTRL_RTM_DISABLE | IA32_TSX_CTRL_TSX_CPUID_CLEAR,
1274 NULL);
1275 }
1276
1277 void
1278 x86_taa_recalculate(void)
1279 {
1280 static int taa_saved_mds_disable = 0;
1281 int taa_need = 0, taa_state = 0;
1282 int mds_disable = 0, need_mds_recalc = 0;
1283
1284 /* Check CPUID.07h.EBX.HLE and RTM for the presence of TSX */
1285 if ((cpu_stdext_feature & CPUID_STDEXT_HLE) == 0 ||
1286 (cpu_stdext_feature & CPUID_STDEXT_RTM) == 0) {
1287 /* TSX is not present */
1288 x86_taa_state = TAA_NOT_PRESENT;
1289 return;
1290 }
1291
1292 /* Check to see what mitigation options the CPU gives us */
1293 if (cpu_ia32_arch_caps & IA32_ARCH_CAP_TAA_NO) {
1294 /* CPU is not susceptible to TAA */
1295 taa_need = TAA_TAA_UC;
1296 } else if (cpu_ia32_arch_caps & IA32_ARCH_CAP_TSX_CTRL) {
1297 /*
1298 * CPU can turn off TSX. This is the next best option
1299 * if TAA_NO hardware mitigation isn't present
1300 */
1301 taa_need = TAA_TSX_DISABLE;
1302 } else {
1303 /* No TSX/TAA specific remedies are available. */
1304 if (x86_taa_enable == TAA_TSX_DISABLE) {
1305 if (bootverbose)
1306 printf("TSX control not available\n");
1307 return;
1308 } else
1309 taa_need = TAA_VERW;
1310 }
1311
1312 /* Can we automatically take action, or are we being forced? */
1313 if (x86_taa_enable == TAA_AUTO)
1314 taa_state = taa_need;
1315 else
1316 taa_state = x86_taa_enable;
1317
1318 /* No state change, nothing to do */
1319 if (taa_state == x86_taa_state) {
1320 if (bootverbose)
1321 printf("No TSX change made\n");
1322 return;
1323 }
1324
1325 /* Does the MSR need to be turned on or off? */
1326 if (taa_state == TAA_TSX_DISABLE)
1327 taa_set(true, true);
1328 else if (x86_taa_state == TAA_TSX_DISABLE)
1329 taa_set(false, true);
1330
1331 /* Does MDS need to be set to turn on VERW? */
1332 if (taa_state == TAA_VERW) {
1333 taa_saved_mds_disable = hw_mds_disable;
1334 mds_disable = hw_mds_disable = 1;
1335 need_mds_recalc = 1;
1336 } else if (x86_taa_state == TAA_VERW) {
1337 mds_disable = hw_mds_disable = taa_saved_mds_disable;
1338 need_mds_recalc = 1;
1339 }
1340 if (need_mds_recalc) {
1341 hw_mds_recalculate();
1342 if (mds_disable != hw_mds_disable) {
1343 if (bootverbose)
1344 printf("Cannot change MDS state for TAA\n");
1345 /* Don't update our state */
1346 return;
1347 }
1348 }
1349
1350 x86_taa_state = taa_state;
1351 return;
1352 }
1353
1354 static void
1355 taa_recalculate_boot(void *arg __unused)
1356 {
1357
1358 x86_taa_recalculate();
1359 }
1360 SYSINIT(taa_recalc, SI_SUB_SMP, SI_ORDER_ANY, taa_recalculate_boot, NULL);
1361
1362 SYSCTL_NODE(_machdep_mitigations, OID_AUTO, taa,
1363 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1364 "TSX Asynchronous Abort Mitigation");
1365
1366 static int
1367 sysctl_taa_handler(SYSCTL_HANDLER_ARGS)
1368 {
1369 int error, val;
1370
1371 val = x86_taa_enable;
1372 error = sysctl_handle_int(oidp, &val, 0, req);
1373 if (error != 0 || req->newptr == NULL)
1374 return (error);
1375 if (val < TAA_NONE || val > TAA_AUTO)
1376 return (EINVAL);
1377 x86_taa_enable = val;
1378 x86_taa_recalculate();
1379 return (0);
1380 }
1381
1382 SYSCTL_PROC(_machdep_mitigations_taa, OID_AUTO, enable, CTLTYPE_INT |
1383 CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1384 sysctl_taa_handler, "I",
1385 "TAA Mitigation enablement control "
1386 "(0 - off, 1 - disable TSX, 2 - VERW, 3 - on AUTO)");
1387
1388 static int
1389 sysctl_taa_state_handler(SYSCTL_HANDLER_ARGS)
1390 {
1391 const char *state;
1392
1393 switch (x86_taa_state) {
1394 case TAA_NONE:
1395 state = "inactive";
1396 break;
1397 case TAA_TSX_DISABLE:
1398 state = "TSX disabled";
1399 break;
1400 case TAA_VERW:
1401 state = "VERW";
1402 break;
1403 case TAA_TAA_UC:
1404 state = "Mitigated in microcode";
1405 break;
1406 case TAA_NOT_PRESENT:
1407 state = "TSX not present";
1408 break;
1409 default:
1410 state = "unknown";
1411 }
1412
1413 return (SYSCTL_OUT(req, state, strlen(state)));
1414 }
1415
1416 SYSCTL_PROC(_machdep_mitigations_taa, OID_AUTO, state,
1417 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1418 sysctl_taa_state_handler, "A",
1419 "TAA Mitigation state");
1420
1421 int __read_frequently cpu_flush_rsb_ctxsw;
1422 SYSCTL_INT(_machdep_mitigations, OID_AUTO, flush_rsb_ctxsw,
1423 CTLFLAG_RW | CTLFLAG_NOFETCH, &cpu_flush_rsb_ctxsw, 0,
1424 "Flush Return Stack Buffer on context switch");
1425
1426 SYSCTL_NODE(_machdep_mitigations, OID_AUTO, rngds,
1427 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1428 "MCU Optimization, disable RDSEED mitigation");
1429
1430 int x86_rngds_mitg_enable = 1;
1431 void
1432 x86_rngds_mitg_recalculate(bool all_cpus)
1433 {
1434 if ((cpu_stdext_feature3 & CPUID_STDEXT3_MCUOPT) == 0)
1435 return;
1436 x86_msr_op(MSR_IA32_MCU_OPT_CTRL,
1437 (x86_rngds_mitg_enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
1438 (all_cpus ? MSR_OP_RENDEZVOUS_ALL : MSR_OP_LOCAL),
1439 IA32_RNGDS_MITG_DIS, NULL);
1440 }
1441
1442 static int
1443 sysctl_rngds_mitg_enable_handler(SYSCTL_HANDLER_ARGS)
1444 {
1445 int error, val;
1446
1447 val = x86_rngds_mitg_enable;
1448 error = sysctl_handle_int(oidp, &val, 0, req);
1449 if (error != 0 || req->newptr == NULL)
1450 return (error);
1451 x86_rngds_mitg_enable = val;
1452 x86_rngds_mitg_recalculate(true);
1453 return (0);
1454 }
1455 SYSCTL_PROC(_machdep_mitigations_rngds, OID_AUTO, enable, CTLTYPE_INT |
1456 CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1457 sysctl_rngds_mitg_enable_handler, "I",
1458 "MCU Optimization, disabling RDSEED mitigation control "
1459 "(0 - mitigation disabled (RDSEED optimized), 1 - mitigation enabled)");
1460
1461 static int
1462 sysctl_rngds_state_handler(SYSCTL_HANDLER_ARGS)
1463 {
1464 const char *state;
1465
1466 if ((cpu_stdext_feature3 & CPUID_STDEXT3_MCUOPT) == 0) {
1467 state = "Not applicable";
1468 } else if (x86_rngds_mitg_enable == 0) {
1469 state = "RDSEED not serialized";
1470 } else {
1471 state = "Mitigated";
1472 }
1473 return (SYSCTL_OUT(req, state, strlen(state)));
1474 }
1475 SYSCTL_PROC(_machdep_mitigations_rngds, OID_AUTO, state,
1476 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1477 sysctl_rngds_state_handler, "A",
1478 "MCU Optimization state");
1479
1480
1481 /*
1482 * Zenbleed.
1483 *
1484 * No corresponding errata is publicly listed. AMD has issued a security
1485 * bulletin (AMD-SB-7008), entitled "Cross-Process Information Leak". This
1486 * document lists (as of August 2023) platform firmware's availability target
1487 * dates, with most being November/December 2023. It will then be up to
1488 * motherboard manufacturers to produce corresponding BIOS updates, which will
1489 * happen with an inevitable lag. Additionally, for a variety of reasons,
1490 * operators might not be able to apply them everywhere. On the side of
1491 * standalone CPU microcodes, no plans for availability have been published so
1492 * far. However, a developer appearing to be an AMD employee has hardcoded in
1493 * Linux the revision numbers of future microcodes that are presumed to fix the
1494 * vulnerability.
1495 *
1496 * Given the stability issues encountered with early microcode releases for Rome
1497 * (the only microcode publicly released so far) and the absence of official
1498 * communication on standalone CPU microcodes, we have opted instead for
1499 * matching by default all AMD Zen2 processors which, according to the
1500 * vulnerability's discoverer, are all affected (see
1501 * https://lock.cmpxchg8b.com/zenbleed.html). This policy, also adopted by
1502 * OpenBSD, may be overridden using the tunable/sysctl
1503 * 'machdep.mitigations.zenbleed.enable'. We might revise it later depending on
1504 * official statements, microcode updates' public availability and community
1505 * assessment that they actually fix the vulnerability without any instability
1506 * side effects.
1507 */
1508
1509 SYSCTL_NODE(_machdep_mitigations, OID_AUTO, zenbleed,
1510 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1511 "Zenbleed OS-triggered prevention (via chicken bit)");
1512
1513 /* 2 is auto, see below. */
1514 int zenbleed_enable = 2;
1515
1516 void
1517 zenbleed_sanitize_enable(void)
1518 {
1519 /* Default to auto (2). */
1520 if (zenbleed_enable < 0 || zenbleed_enable > 2)
1521 zenbleed_enable = 2;
1522 }
1523
1524 static bool
1525 zenbleed_chicken_bit_applicable(void)
1526 {
1527 /* Concerns only bare-metal AMD Zen2 processors. */
1528 return (cpu_vendor_id == CPU_VENDOR_AMD &&
1529 CPUID_TO_FAMILY(cpu_id) == 0x17 &&
1530 CPUID_TO_MODEL(cpu_id) >= 0x30 &&
1531 vm_guest == VM_GUEST_NO);
1532 }
1533
1534 static bool
1535 zenbleed_chicken_bit_should_enable(void)
1536 {
1537 /*
1538 * Obey tunable/sysctl.
1539 *
1540 * As explained above, currently, the automatic setting (2) and the "on"
1541 * one (1) have the same effect. In the future, we might additionally
1542 * check for specific microcode revisions as part of the automatic
1543 * determination.
1544 */
1545 return (zenbleed_enable != 0);
1546 }
1547
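/*
 * Apply the current Zenbleed policy: set or clear the FP backup fix
 * "chicken bit" in MSR DE_CFG, either on the local CPU only or on all
 * CPUs via rendezvous.
 */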
1548 void
1549 zenbleed_check_and_apply(bool all_cpus)
1550 {
1551 bool set;
1552
1553 if (!zenbleed_chicken_bit_applicable())
1554 return;
1555
1556 set = zenbleed_chicken_bit_should_enable();
1557
1558 x86_msr_op(MSR_DE_CFG,
1559 (set ? MSR_OP_OR : MSR_OP_ANDNOT) |
1560 (all_cpus ? MSR_OP_RENDEZVOUS_ALL : MSR_OP_LOCAL),
1561 DE_CFG_ZEN2_FP_BACKUP_FIX_BIT, NULL);
1562 }
1563
1564 static int
1565 sysctl_zenbleed_enable_handler(SYSCTL_HANDLER_ARGS)
1566 {
1567 int error, val;
1568
1569 val = zenbleed_enable;
1570 error = sysctl_handle_int(oidp, &val, 0, req);
1571 if (error != 0 || req->newptr == NULL)
1572 return (error);
1573 zenbleed_enable = val;
1574 zenbleed_sanitize_enable();
1575 zenbleed_check_and_apply(true);
1576 return (0);
1577 }
1578 SYSCTL_PROC(_machdep_mitigations_zenbleed, OID_AUTO, enable, CTLTYPE_INT |
1579 CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
1580 sysctl_zenbleed_enable_handler, "I",
1581 "Enable Zenbleed OS-triggered mitigation (chicken bit) "
1582 "(0: Force disable, 1: Force enable, 2: Automatic determination)");
1583
1584 static int
1585 sysctl_zenbleed_state_handler(SYSCTL_HANDLER_ARGS)
1586 {
1587 const char *state;
1588
1589 if (!zenbleed_chicken_bit_applicable())
1590 state = "Not applicable";
1591 else if (zenbleed_chicken_bit_should_enable())
1592 state = "Mitigation enabled";
1593 else
1594 state = "Mitigation disabled";
1595 return (SYSCTL_OUT(req, state, strlen(state)));
1596 }
1597 SYSCTL_PROC(_machdep_mitigations_zenbleed, OID_AUTO, state,
1598 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1599 sysctl_zenbleed_state_handler, "A",
1600 "Zenbleed OS-triggered mitigation (chicken bit) state");
1601
1602
1603 /*
1604 * Enable and restore kernel text write permissions.
1605 * Callers must ensure that disable_wp()/restore_wp() are executed
1606 * without rescheduling on the same core.
1607 */
1608 bool
1609 disable_wp(void)
1610 {
1611 u_int cr0;
1612
1613 cr0 = rcr0();
1614 if ((cr0 & CR0_WP) == 0)
1615 return (false);
1616 load_cr0(cr0 & ~CR0_WP);
1617 return (true);
1618 }
1619
1620 void
1621 restore_wp(bool old_wp)
1622 {
1623
1624 if (old_wp)
1625 load_cr0(rcr0() | CR0_WP);
1626 }
1627
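/*
 * Fetch the IA-PC Boot Architecture Flags from the ACPI FADT by
 * locating and mapping the table directly; returns false if ACPI
 * support is not compiled in or no FADT is present.
 */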
1628 bool
1629 acpi_get_fadt_bootflags(uint16_t *flagsp)
1630 {
1631 #ifdef DEV_ACPI
1632 ACPI_TABLE_FADT *fadt;
1633 vm_paddr_t physaddr;
1634
1635 physaddr = acpi_find_table(ACPI_SIG_FADT);
1636 if (physaddr == 0)
1637 return (false);
1638 fadt = acpi_map_table(physaddr, ACPI_SIG_FADT);
1639 if (fadt == NULL)
1640 return (false);
1641 *flagsp = fadt->BootFlags;
1642 acpi_unmap_table(fadt);
1643 return (true);
1644 #else
1645 return (false);
1646 #endif
1647 }
1648
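/*
 * Resolve rdtsc_ordered() to the cheapest serialized TSC read the CPU
 * supports: RDTSCP when available, otherwise RDTSC behind the fence
 * each vendor documents as ordering it (MFENCE on AMD/Hygon, LFENCE on
 * Intel), and a plain RDTSC on CPUs without SSE2.
 */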
1649 DEFINE_IFUNC(, uint64_t, rdtsc_ordered, (void))
1650 {
1651 bool cpu_is_amd = cpu_vendor_id == CPU_VENDOR_AMD ||
1652 cpu_vendor_id == CPU_VENDOR_HYGON;
1653
1654 if ((amd_feature & AMDID_RDTSCP) != 0)
1655 return (rdtscp);
1656 else if ((cpu_feature & CPUID_SSE2) != 0)
1657 return (cpu_is_amd ? rdtsc_ordered_mfence :
1658 rdtsc_ordered_lfence);
1659 else
1660 return (rdtsc);
1661 }
1662