/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009 Hudson River Trading LLC
 * Written by: John H. Baldwin <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for x86 machine check architecture.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef __amd64__
#define	DEV_APIC
#else
#include "opt_apic.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>
#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

/* Modes for mca_scan() */
enum scan_mode {
	POLLED,
	MCE,
	CMCI,
};
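
/*
 * POLLED scans are initiated from the periodic timer or the
 * hw.mca.force_scan sysctl; MCE scans run from the machine check
 * exception handler (mca_intr()); CMCI scans run from the corrected
 * machine check interrupt handler (cmc_intr()).
 */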

#ifdef DEV_APIC
/*
 * State maintained for each monitored MCx bank to control the
 * corrected machine check interrupt threshold.
 */
struct cmc_state {
	int	max_threshold;
	time_t	last_intr;
};

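/*
 * Analogous state for AMD error thresholding.  cur_threshold is the
 * number of further errors needed to raise an interrupt; the hardware
 * counter is seeded with MC_MISC_AMD_CNT_MAX - cur_threshold and fires
 * when it overflows (see amd_thresholding_start() below).
 */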
struct amd_et_state {
	int	cur_threshold;
	time_t	last_intr;
};
#endif

struct mca_internal {
	struct mca_record rec;
	int	logged;
	STAILQ_ENTRY(mca_internal) link;
};

static MALLOC_DEFINE(M_MCA, "MCA", "Machine Check Architecture");

static volatile int mca_count;	/* Number of records stored. */
static int mca_banks;		/* Number of per-CPU register banks. */

static SYSCTL_NODE(_hw, OID_AUTO, mca, CTLFLAG_RD, NULL,
    "Machine Check Architecture");

static int mca_enabled = 1;
SYSCTL_INT(_hw_mca, OID_AUTO, enabled, CTLFLAG_RDTUN, &mca_enabled, 0,
    "Administrative toggle for machine check support");

static int amd10h_L1TP = 1;
SYSCTL_INT(_hw_mca, OID_AUTO, amd10h_L1TP, CTLFLAG_RDTUN, &amd10h_L1TP, 0,
    "Administrative toggle for logging of level one TLB parity (L1TP) errors");

static int intel6h_HSD131;
SYSCTL_INT(_hw_mca, OID_AUTO, intel6h_HSD131, CTLFLAG_RDTUN, &intel6h_HSD131, 0,
    "Administrative toggle for logging of spurious corrected errors");

int workaround_erratum383;
SYSCTL_INT(_hw_mca, OID_AUTO, erratum383, CTLFLAG_RDTUN,
    &workaround_erratum383, 0,
    "Is the workaround for Erratum 383 on AMD Family 10h processors enabled?");

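/*
 * The record list, freelist, and counters below are shared with the
 * machine check and CMCI handlers, so they are protected by mca_lock,
 * a spin mutex.
 */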
static STAILQ_HEAD(, mca_internal) mca_freelist;
static int mca_freecount;
static STAILQ_HEAD(, mca_internal) mca_records;
static struct callout mca_timer;
static int mca_ticks = 3600;	/* Check hourly by default. */
static struct taskqueue *mca_tq;
static struct task mca_refill_task, mca_scan_task;
static struct mtx mca_lock;

#ifdef DEV_APIC
static struct cmc_state **cmc_state;		/* Indexed by cpuid, bank. */
static struct amd_et_state **amd_et_state;	/* Indexed by cpuid, bank. */
static int cmc_throttle = 60;	/* Time in seconds to throttle CMCI. */

static int amd_elvt = -1;

static inline bool
amd_thresholding_supported(void)
{
	if (cpu_vendor_id != CPU_VENDOR_AMD)
		return (false);
	/*
	 * The RASCap register is wholly reserved in families 0x10-0x15
	 * (through model 1F).
	 *
	 * It begins to be documented in family 0x15 model 30 and family 0x16,
	 * but neither of these families documents the ScalableMca bit, which
	 * supposedly defines the presence of this feature on family 0x17.
	 */
	if (CPUID_TO_FAMILY(cpu_id) >= 0x10 && CPUID_TO_FAMILY(cpu_id) <= 0x16)
		return (true);
	if (CPUID_TO_FAMILY(cpu_id) >= 0x17)
		return ((amd_rascap & AMDRAS_SCALABLE_MCA) != 0);
	return (false);
}
#endif

static inline bool
cmci_supported(uint64_t mcg_cap)
{
	/*
	 * MCG_CAP_CMCI_P bit is reserved in AMD documentation.  Until
	 * it is defined, do not use it to check for CMCI support.
	 */
	if (cpu_vendor_id != CPU_VENDOR_INTEL)
		return (false);
	return ((mcg_cap & MCG_CAP_CMCI_P) != 0);
}

static int
sysctl_positive_int(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (value <= 0)
		return (EINVAL);
	*(int *)arg1 = value;
	return (0);
}

static int
sysctl_mca_records(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	u_int namelen = arg2;
	struct mca_record record;
	struct mca_internal *rec;
	int i;

	if (namelen != 1)
		return (EINVAL);

	if (name[0] < 0 || name[0] >= mca_count)
		return (EINVAL);

	mtx_lock_spin(&mca_lock);
	if (name[0] >= mca_count) {
		mtx_unlock_spin(&mca_lock);
		return (EINVAL);
	}
	i = 0;
	STAILQ_FOREACH(rec, &mca_records, link) {
		if (i == name[0]) {
			record = rec->rec;
			break;
		}
		i++;
	}
	mtx_unlock_spin(&mca_lock);
	return (SYSCTL_OUT(req, &record, sizeof(record)));
}

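/*
 * Helpers to decode the sub-fields of a compound MCA error code; the
 * layout follows the Intel SDM: bits 1:0 select the cache level, bits
 * 3:2 the transaction type, bits 7:4 the request type, and, for memory
 * controller errors, bits 6:4 the memory transaction type.
 */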
static const char *
mca_error_ttype(uint16_t mca_error)
{

	switch ((mca_error & 0x000c) >> 2) {
	case 0:
		return ("I");
	case 1:
		return ("D");
	case 2:
		return ("G");
	}
	return ("?");
}

static const char *
mca_error_level(uint16_t mca_error)
{

	switch (mca_error & 0x0003) {
	case 0:
		return ("L0");
	case 1:
		return ("L1");
	case 2:
		return ("L2");
	case 3:
		return ("LG");
	}
	return ("L?");
}

static const char *
mca_error_request(uint16_t mca_error)
{

	switch ((mca_error & 0x00f0) >> 4) {
	case 0x0:
		return ("ERR");
	case 0x1:
		return ("RD");
	case 0x2:
		return ("WR");
	case 0x3:
		return ("DRD");
	case 0x4:
		return ("DWR");
	case 0x5:
		return ("IRD");
	case 0x6:
		return ("PREFETCH");
	case 0x7:
		return ("EVICT");
	case 0x8:
		return ("SNOOP");
	}
	return ("???");
}

static const char *
mca_error_mmtype(uint16_t mca_error)
{

	switch ((mca_error & 0x70) >> 4) {
	case 0x0:
		return ("GEN");
	case 0x1:
		return ("RD");
	case 0x2:
		return ("WR");
	case 0x3:
		return ("AC");
	case 0x4:
		return ("MS");
	}
	return ("???");
}

static int
mca_mute(const struct mca_record *rec)
{

	/*
	 * Skip spurious corrected parity errors generated by Intel Haswell-
	 * and Broadwell-based CPUs (see the HSD131, HSM142, HSW131 and BDM48
	 * errata, respectively), unless reporting is enabled.
	 * Note that these errors also have been observed with the D0-stepping
	 * of Haswell, while at least initially the CPU specification updates
	 * suggested only the C0-stepping to be affected.  Similarly, the
	 * Celeron 2955U with a CPU ID of 0x45 apparently is also affected by
	 * the same problem, although HSM142 refers only to 0x3c and 0x46.
	 */
	if (cpu_vendor_id == CPU_VENDOR_INTEL &&
	    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    (CPUID_TO_MODEL(cpu_id) == 0x3c ||	/* HSD131, HSM142, HSW131 */
	    CPUID_TO_MODEL(cpu_id) == 0x3d ||	/* BDM48 */
	    CPUID_TO_MODEL(cpu_id) == 0x45 ||
	    CPUID_TO_MODEL(cpu_id) == 0x46) &&	/* HSM142 */
	    rec->mr_bank == 0 &&
	    (rec->mr_status & 0xa0000000ffffffff) == 0x80000000000f0005 &&
	    !intel6h_HSD131)
		return (1);

	return (0);
}

/* Dump details about a single machine check. */
static void
mca_log(const struct mca_record *rec)
{
	uint16_t mca_error;

	if (mca_mute(rec))
		return;

	printf("MCA: Bank %d, Status 0x%016llx\n", rec->mr_bank,
	    (long long)rec->mr_status);
	printf("MCA: Global Cap 0x%016llx, Status 0x%016llx\n",
	    (long long)rec->mr_mcg_cap, (long long)rec->mr_mcg_status);
	printf("MCA: Vendor \"%s\", ID 0x%x, APIC ID %d\n", cpu_vendor,
	    rec->mr_cpu_id, rec->mr_apic_id);
	printf("MCA: CPU %d ", rec->mr_cpu);
	if (rec->mr_status & MC_STATUS_UC)
		printf("UNCOR ");
	else {
		printf("COR ");
		if (cmci_supported(rec->mr_mcg_cap))
			printf("(%lld) ", ((long long)rec->mr_status &
			    MC_STATUS_COR_COUNT) >> 38);
	}
	if (rec->mr_status & MC_STATUS_PCC)
		printf("PCC ");
	if (rec->mr_status & MC_STATUS_OVER)
		printf("OVER ");
	mca_error = rec->mr_status & MC_STATUS_MCA_ERROR;
	switch (mca_error) {
		/* Simple error codes. */
	case 0x0000:
		printf("no error");
		break;
	case 0x0001:
		printf("unclassified error");
		break;
	case 0x0002:
		printf("ucode ROM parity error");
		break;
	case 0x0003:
		printf("external error");
		break;
	case 0x0004:
		printf("FRC error");
		break;
	case 0x0005:
		printf("internal parity error");
		break;
	case 0x0400:
		printf("internal timer error");
		break;
	default:
		if ((mca_error & 0xfc00) == 0x0400) {
			printf("internal error %x", mca_error & 0x03ff);
			break;
		}

		/* Compound error codes. */

		/* Memory hierarchy error. */
		if ((mca_error & 0xeffc) == 0x000c) {
			printf("%s memory error", mca_error_level(mca_error));
			break;
		}

		/* TLB error. */
		if ((mca_error & 0xeff0) == 0x0010) {
			printf("%sTLB %s error", mca_error_ttype(mca_error),
			    mca_error_level(mca_error));
			break;
		}

		/* Memory controller error. */
		if ((mca_error & 0xef80) == 0x0080) {
			printf("%s channel ", mca_error_mmtype(mca_error));
			if ((mca_error & 0x000f) != 0x000f)
				printf("%d", mca_error & 0x000f);
			else
				printf("??");
			printf(" memory error");
			break;
		}

		/* Cache error. */
		if ((mca_error & 0xef00) == 0x0100) {
			printf("%sCACHE %s %s error",
			    mca_error_ttype(mca_error),
			    mca_error_level(mca_error),
			    mca_error_request(mca_error));
			break;
		}

		/* Bus and/or Interconnect error. */
		if ((mca_error & 0xe800) == 0x0800) {
			printf("BUS%s ", mca_error_level(mca_error));
			switch ((mca_error & 0x0600) >> 9) {
			case 0:
				printf("Source");
				break;
			case 1:
				printf("Responder");
				break;
			case 2:
				printf("Observer");
				break;
			default:
				printf("???");
				break;
			}
			printf(" %s ", mca_error_request(mca_error));
			switch ((mca_error & 0x000c) >> 2) {
			case 0:
				printf("Memory");
				break;
			case 2:
				printf("I/O");
				break;
			case 3:
				printf("Other");
				break;
			default:
				printf("???");
				break;
			}
			if (mca_error & 0x0100)
				printf(" timed out");
			break;
		}

		printf("unknown error %x", mca_error);
		break;
	}
	printf("\n");
	if (rec->mr_status & MC_STATUS_ADDRV)
		printf("MCA: Address 0x%llx\n", (long long)rec->mr_addr);
	if (rec->mr_status & MC_STATUS_MISCV)
		printf("MCA: Misc 0x%llx\n", (long long)rec->mr_misc);
}

static int
mca_check_status(int bank, struct mca_record *rec)
{
	uint64_t status;
	u_int p[4];

	status = rdmsr(MSR_MC_STATUS(bank));
	if (!(status & MC_STATUS_VAL))
		return (0);

	/* Save exception information. */
	rec->mr_status = status;
	rec->mr_bank = bank;
	rec->mr_addr = 0;
	if (status & MC_STATUS_ADDRV)
		rec->mr_addr = rdmsr(MSR_MC_ADDR(bank));
	rec->mr_misc = 0;
	if (status & MC_STATUS_MISCV)
		rec->mr_misc = rdmsr(MSR_MC_MISC(bank));
	rec->mr_tsc = rdtsc();
	rec->mr_apic_id = PCPU_GET(apic_id);
	rec->mr_mcg_cap = rdmsr(MSR_MCG_CAP);
	rec->mr_mcg_status = rdmsr(MSR_MCG_STATUS);
	rec->mr_cpu_id = cpu_id;
	rec->mr_cpu_vendor_id = cpu_vendor_id;
	rec->mr_cpu = PCPU_GET(cpuid);

	/*
	 * Clear machine check.  Don't do this for uncorrectable
	 * errors so that the BIOS can see them.
	 */
	if (!(rec->mr_status & (MC_STATUS_PCC | MC_STATUS_UC))) {
		wrmsr(MSR_MC_STATUS(bank), 0);
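		/*
		 * CPUID is a serializing instruction; executing it here
		 * presumably follows the vendor recommendation to
		 * serialize after clearing the status register so the
		 * write is known to have taken effect.
		 */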
		do_cpuid(0, p);
	}
	return (1);
}

static void
mca_fill_freelist(void)
{
	struct mca_internal *rec;
	int desired;

	/*
	 * Ensure we have at least one record for each bank and one
	 * record per CPU.
	 */
	desired = imax(mp_ncpus, mca_banks);
	mtx_lock_spin(&mca_lock);
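	/*
	 * malloc() with M_WAITOK may sleep, which is not allowed while
	 * holding a spin mutex, so the lock is dropped around each
	 * allocation and retaken to update the list and count.
	 */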
	while (mca_freecount < desired) {
		mtx_unlock_spin(&mca_lock);
		rec = malloc(sizeof(*rec), M_MCA, M_WAITOK);
		mtx_lock_spin(&mca_lock);
		STAILQ_INSERT_TAIL(&mca_freelist, rec, link);
		mca_freecount++;
	}
	mtx_unlock_spin(&mca_lock);
}

static void
mca_refill(void *context, int pending)
{

	mca_fill_freelist();
}

static void
mca_record_entry(enum scan_mode mode, const struct mca_record *record)
{
	struct mca_internal *rec;

	if (mode == POLLED) {
		rec = malloc(sizeof(*rec), M_MCA, M_WAITOK);
		mtx_lock_spin(&mca_lock);
	} else {
		mtx_lock_spin(&mca_lock);
		rec = STAILQ_FIRST(&mca_freelist);
		if (rec == NULL) {
			printf("MCA: Unable to allocate space for an event.\n");
			mca_log(record);
			mtx_unlock_spin(&mca_lock);
			return;
		}
		STAILQ_REMOVE_HEAD(&mca_freelist, link);
		mca_freecount--;
	}

	rec->rec = *record;
	rec->logged = 0;
	STAILQ_INSERT_TAIL(&mca_records, rec, link);
	mca_count++;
	mtx_unlock_spin(&mca_lock);
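	/*
	 * A CMCI arrives in interrupt context, where the freelist cannot
	 * be replenished with a sleeping allocation, so ask the taskqueue
	 * to refill it instead.  During early boot ("cold") the taskqueue
	 * does not exist yet; mca_createtq() refills the list later.
	 */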
	if (mode == CMCI && !cold)
		taskqueue_enqueue(mca_tq, &mca_refill_task);
}

#ifdef DEV_APIC
/*
 * Update the interrupt threshold for a CMCI.  The strategy is to use
 * a low trigger that interrupts as soon as the first event occurs.
 * However, if a steady stream of events arrives, the threshold is
 * increased until the interrupts are throttled to once every
 * cmc_throttle seconds or the periodic scan.  If a periodic scan
 * finds that the threshold is too high, it is lowered.
 */
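
/*
 * For example, with the default cmc_throttle of 60 seconds: a steady
 * stream of CMCIs doubles the threshold (1, 2, 4, ...) up to
 * max_threshold, so a continuously erroring bank interrupts at most
 * roughly once per minute.  A later polled scan that saw, say, 30
 * events in the 120 seconds since the last interrupt would lower the
 * limit to 30 * 60 / 120 = 15.
 */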
static int
update_threshold(enum scan_mode mode, int valid, int last_intr, int count,
    int cur_threshold, int max_threshold)
{
	u_int delta;
	int limit;

	delta = (u_int)(time_uptime - last_intr);
	limit = cur_threshold;

	/*
	 * If an interrupt was received less than cmc_throttle seconds
	 * since the previous interrupt and the count from the current
	 * event is greater than or equal to the current threshold,
	 * double the threshold up to the max.
	 */
	if (mode == CMCI && valid) {
		if (delta < cmc_throttle && count >= limit &&
		    limit < max_threshold) {
			limit = min(limit << 1, max_threshold);
		}
		return (limit);
	}

	/*
	 * When the banks are polled, check to see if the threshold
	 * should be lowered.
	 */
	if (mode != POLLED)
		return (limit);

	/* If a CMCI occurred recently, do nothing for now. */
	if (delta < cmc_throttle)
		return (limit);

	/*
	 * Compute a new limit based on the average rate of events per
	 * cmc_throttle seconds since the last interrupt.
	 */
	if (valid) {
		limit = count * cmc_throttle / delta;
		if (limit <= 0)
			limit = 1;
		else if (limit > max_threshold)
			limit = max_threshold;
	} else {
		limit = 1;
	}
	return (limit);
}

static void
cmci_update(enum scan_mode mode, int bank, int valid, struct mca_record *rec)
{
	struct cmc_state *cc;
	uint64_t ctl;
	int cur_threshold, new_threshold;
	int count;

	/* Fetch the current limit for this bank. */
	cc = &cmc_state[PCPU_GET(cpuid)][bank];
	ctl = rdmsr(MSR_MC_CTL2(bank));
	count = (rec->mr_status & MC_STATUS_COR_COUNT) >> 38;
	cur_threshold = ctl & MC_CTL2_THRESHOLD;

	new_threshold = update_threshold(mode, valid, cc->last_intr, count,
	    cur_threshold, cc->max_threshold);

	if (mode == CMCI && valid)
		cc->last_intr = time_uptime;
	if (new_threshold != cur_threshold) {
		ctl &= ~MC_CTL2_THRESHOLD;
		ctl |= new_threshold;
		wrmsr(MSR_MC_CTL2(bank), ctl);
	}
}

static void
amd_thresholding_update(enum scan_mode mode, int bank, int valid)
{
	struct amd_et_state *cc;
	uint64_t misc;
	int new_threshold;
	int count;

	cc = &amd_et_state[PCPU_GET(cpuid)][bank];
	misc = rdmsr(MSR_MC_MISC(bank));
	count = (misc & MC_MISC_AMD_CNT_MASK) >> MC_MISC_AMD_CNT_SHIFT;
	count = count - (MC_MISC_AMD_CNT_MAX - cc->cur_threshold);

	new_threshold = update_threshold(mode, valid, cc->last_intr, count,
	    cc->cur_threshold, MC_MISC_AMD_CNT_MAX);

	cc->cur_threshold = new_threshold;
	misc &= ~MC_MISC_AMD_CNT_MASK;
	misc |= (uint64_t)(MC_MISC_AMD_CNT_MAX - cc->cur_threshold)
	    << MC_MISC_AMD_CNT_SHIFT;
	misc &= ~MC_MISC_AMD_OVERFLOW;
	wrmsr(MSR_MC_MISC(bank), misc);
	if (mode == CMCI && valid)
		cc->last_intr = time_uptime;
}
#endif

/*
 * This scans all the machine check banks of the current CPU to see if
 * there are any machine checks.  Any non-recoverable errors are
 * reported immediately via mca_log().  The current thread must be
 * pinned when this is called.  The 'mode' parameter indicates if we
 * are being called from the MC exception handler, the CMCI handler,
 * or the periodic poller.  The function returns a count of the number
 * of valid MC records found; in the MC exception case, whether the
 * system is restartable is additionally reported via 'recoverablep'.
 */
static int
mca_scan(enum scan_mode mode, int *recoverablep)
{
	struct mca_record rec;
	uint64_t mcg_cap, ucmask;
	int count, i, recoverable, valid;

	count = 0;
	recoverable = 1;
	ucmask = MC_STATUS_UC | MC_STATUS_PCC;

	/* When handling an MCE#, treat the OVER flag as non-restartable. */
	if (mode == MCE)
		ucmask |= MC_STATUS_OVER;
	mcg_cap = rdmsr(MSR_MCG_CAP);
	for (i = 0; i < (mcg_cap & MCG_CAP_COUNT); i++) {
#ifdef DEV_APIC
		/*
		 * For a CMCI, only check banks this CPU is
		 * responsible for.
		 */
		if (mode == CMCI && !(PCPU_GET(cmci_mask) & 1 << i))
			continue;
#endif

		valid = mca_check_status(i, &rec);
		if (valid) {
			count++;
			if (rec.mr_status & ucmask) {
				recoverable = 0;
				mtx_lock_spin(&mca_lock);
				mca_log(&rec);
				mtx_unlock_spin(&mca_lock);
			}
			mca_record_entry(mode, &rec);
		}

#ifdef DEV_APIC
		/*
		 * If this is a bank this CPU monitors via CMCI,
		 * update the threshold.
		 */
		if (PCPU_GET(cmci_mask) & 1 << i) {
			if (cmc_state != NULL)
				cmci_update(mode, i, valid, &rec);
			else
				amd_thresholding_update(mode, i, valid);
		}
#endif
	}
	if (mode == POLLED)
		mca_fill_freelist();
	if (recoverablep != NULL)
		*recoverablep = recoverable;
	return (count);
}

/*
 * Scan the machine check banks on all CPUs by binding to each CPU in
 * turn.  If any of the CPUs contained new machine check records, log
 * them to the console.
 */
static void
mca_scan_cpus(void *context, int pending)
{
	struct mca_internal *mca;
	struct thread *td;
	int count, cpu;

	mca_fill_freelist();
	td = curthread;
	count = 0;
	thread_lock(td);
	CPU_FOREACH(cpu) {
		sched_bind(td, cpu);
		thread_unlock(td);
		count += mca_scan(POLLED, NULL);
		thread_lock(td);
		sched_unbind(td);
	}
	thread_unlock(td);
	if (count != 0) {
		mtx_lock_spin(&mca_lock);
		STAILQ_FOREACH(mca, &mca_records, link) {
			if (!mca->logged) {
				mca->logged = 1;
				mca_log(&mca->rec);
			}
		}
		mtx_unlock_spin(&mca_lock);
	}
}

static void
mca_periodic_scan(void *arg)
{

	taskqueue_enqueue(mca_tq, &mca_scan_task);
	callout_reset(&mca_timer, mca_ticks * hz, mca_periodic_scan, NULL);
}

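/*
 * Handler for the hw.mca.force_scan sysctl: writing a non-zero value
 * (e.g. "sysctl hw.mca.force_scan=1") queues an immediate scan of all
 * banks on the taskqueue.
 */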
static int
sysctl_mca_scan(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = 0;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error)
		return (error);
	if (i)
		taskqueue_enqueue(mca_tq, &mca_scan_task);
	return (0);
}

static void
mca_createtq(void *dummy)
{
	if (mca_banks <= 0)
		return;

	mca_tq = taskqueue_create_fast("mca", M_WAITOK,
	    taskqueue_thread_enqueue, &mca_tq);
	taskqueue_start_threads(&mca_tq, 1, PI_SWI(SWI_TQ), "mca taskq");

	/* CMCIs during boot may have claimed items from the freelist. */
	mca_fill_freelist();
}
SYSINIT(mca_createtq, SI_SUB_CONFIGURE, SI_ORDER_ANY, mca_createtq, NULL);

static void
mca_startup(void *dummy)
{

	if (mca_banks <= 0)
		return;

	callout_reset(&mca_timer, mca_ticks * hz, mca_periodic_scan, NULL);
}
#ifdef EARLY_AP_STARTUP
SYSINIT(mca_startup, SI_SUB_KICK_SCHEDULER, SI_ORDER_ANY, mca_startup, NULL);
#else
SYSINIT(mca_startup, SI_SUB_SMP, SI_ORDER_ANY, mca_startup, NULL);
#endif

#ifdef DEV_APIC
static void
cmci_setup(void)
{
	int i;

	cmc_state = malloc((mp_maxid + 1) * sizeof(struct cmc_state *), M_MCA,
	    M_WAITOK);
	for (i = 0; i <= mp_maxid; i++)
		cmc_state[i] = malloc(sizeof(struct cmc_state) * mca_banks,
		    M_MCA, M_WAITOK | M_ZERO);
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "cmc_throttle", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    &cmc_throttle, 0, sysctl_positive_int, "I",
	    "Interval in seconds to throttle corrected MC interrupts");
}

static void
amd_thresholding_setup(void)
{
	u_int i;

	amd_et_state = malloc((mp_maxid + 1) * sizeof(struct amd_et_state *),
	    M_MCA, M_WAITOK);
	for (i = 0; i <= mp_maxid; i++)
		amd_et_state[i] = malloc(sizeof(struct amd_et_state) *
		    mca_banks, M_MCA, M_WAITOK | M_ZERO);
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "cmc_throttle", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    &cmc_throttle, 0, sysctl_positive_int, "I",
	    "Interval in seconds to throttle corrected MC interrupts");
}
#endif

static void
mca_setup(uint64_t mcg_cap)
{

	/*
	 * On AMD Family 10h processors, unless logging of level one TLB
	 * parity (L1TP) errors is disabled, enable the recommended workaround
	 * for Erratum 383.
	 */
	if (cpu_vendor_id == CPU_VENDOR_AMD &&
	    CPUID_TO_FAMILY(cpu_id) == 0x10 && amd10h_L1TP)
		workaround_erratum383 = 1;

	mca_banks = mcg_cap & MCG_CAP_COUNT;
	mtx_init(&mca_lock, "mca", NULL, MTX_SPIN);
	STAILQ_INIT(&mca_records);
	TASK_INIT(&mca_scan_task, 0, mca_scan_cpus, NULL);
	callout_init(&mca_timer, 1);
	STAILQ_INIT(&mca_freelist);
	TASK_INIT(&mca_refill_task, 0, mca_refill, NULL);
	mca_fill_freelist();
	SYSCTL_ADD_INT(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "count", CTLFLAG_RD, (int *)(uintptr_t)&mca_count, 0,
	    "Record count");
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "interval", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &mca_ticks,
	    0, sysctl_positive_int, "I",
	    "Periodic interval in seconds to scan for machine checks");
	SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "records", CTLFLAG_RD, sysctl_mca_records, "Machine check records");
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "force_scan", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
	    sysctl_mca_scan, "I", "Force an immediate scan for machine checks");
#ifdef DEV_APIC
	if (cmci_supported(mcg_cap))
		cmci_setup();
	else if (amd_thresholding_supported())
		amd_thresholding_setup();
#endif
}

#ifdef DEV_APIC
/*
 * See if we should monitor CMCI for this bank.  If CMCI_EN is already
 * set in MC_CTL2, then another CPU is responsible for this bank, so
 * ignore it.  If CMCI_EN returns zero after being set, then this bank
 * does not support CMCI.  If this CPU sets CMCI_EN, then it should
 * now monitor this bank.
 */
static void
cmci_monitor(int i)
{
	struct cmc_state *cc;
	uint64_t ctl;

	KASSERT(i < mca_banks, ("CPU %d has more MC banks", PCPU_GET(cpuid)));

	ctl = rdmsr(MSR_MC_CTL2(i));
	if (ctl & MC_CTL2_CMCI_EN)
		/* Already monitored by another CPU. */
		return;

	/* Set the threshold to one event for now. */
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= MC_CTL2_CMCI_EN | 1;
	wrmsr(MSR_MC_CTL2(i), ctl);
	ctl = rdmsr(MSR_MC_CTL2(i));
	if (!(ctl & MC_CTL2_CMCI_EN))
		/* This bank does not support CMCI. */
		return;

	cc = &cmc_state[PCPU_GET(cpuid)][i];

	/* Determine maximum threshold. */
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= 0x7fff;
	wrmsr(MSR_MC_CTL2(i), ctl);
	ctl = rdmsr(MSR_MC_CTL2(i));
	cc->max_threshold = ctl & MC_CTL2_THRESHOLD;
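
	/*
	 * Note that unimplemented threshold bits do not stick, so the
	 * value just read back after writing the all-ones pattern
	 * reflects the largest threshold this bank actually supports.
	 */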

	/* Start off with a threshold of 1. */
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= 1;
	wrmsr(MSR_MC_CTL2(i), ctl);

	/* Mark this bank as monitored. */
	PCPU_SET(cmci_mask, PCPU_GET(cmci_mask) | 1 << i);
}

/*
 * For resume, reset the threshold for any banks we monitor back to
 * one and throw away the timestamp of the last interrupt.
 */
static void
cmci_resume(int i)
{
	struct cmc_state *cc;
	uint64_t ctl;

	KASSERT(i < mca_banks, ("CPU %d has more MC banks", PCPU_GET(cpuid)));

	/* Ignore banks not monitored by this CPU. */
	if (!(PCPU_GET(cmci_mask) & 1 << i))
		return;

	cc = &cmc_state[PCPU_GET(cpuid)][i];
	cc->last_intr = 0;
	ctl = rdmsr(MSR_MC_CTL2(i));
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= MC_CTL2_CMCI_EN | 1;
	wrmsr(MSR_MC_CTL2(i), ctl);
}

/*
 * Apply an AMD ET configuration to the corresponding MSR.
 */
static void
amd_thresholding_start(struct amd_et_state *cc, int bank)
{
	uint64_t misc;

	KASSERT(amd_elvt >= 0, ("ELVT offset is not set"));

	misc = rdmsr(MSR_MC_MISC(bank));

	misc &= ~MC_MISC_AMD_INT_MASK;
	misc |= MC_MISC_AMD_INT_LVT;

	misc &= ~MC_MISC_AMD_LVT_MASK;
	misc |= (uint64_t)amd_elvt << MC_MISC_AMD_LVT_SHIFT;

	misc &= ~MC_MISC_AMD_CNT_MASK;
	misc |= (uint64_t)(MC_MISC_AMD_CNT_MAX - cc->cur_threshold)
	    << MC_MISC_AMD_CNT_SHIFT;

	misc &= ~MC_MISC_AMD_OVERFLOW;
	misc |= MC_MISC_AMD_CNTEN;

	wrmsr(MSR_MC_MISC(bank), misc);
}

static void
amd_thresholding_monitor(int i)
{
	struct amd_et_state *cc;
	uint64_t misc;

	/*
	 * Kludge: On 10h, banks past bank 4 do not implement thresholding
	 * and may also have bogus Valid bits.  Skip them.  This is
	 * definitely fixed in 15h, but I have not investigated whether it
	 * is fixed in earlier models.
	 */
	if (CPUID_TO_FAMILY(cpu_id) < 0x15 && i >= 5)
		return;

	/* The counter must be valid and present. */
	misc = rdmsr(MSR_MC_MISC(i));
	if ((misc & (MC_MISC_AMD_VAL | MC_MISC_AMD_CNTP)) !=
	    (MC_MISC_AMD_VAL | MC_MISC_AMD_CNTP))
		return;

	/* The register should not be locked. */
	if ((misc & MC_MISC_AMD_LOCK) != 0) {
		if (bootverbose)
			printf("%s: 0x%jx: Bank %d: locked\n", __func__,
			    (uintmax_t)misc, i);
		return;
	}

	/*
	 * If the counter is enabled then either the firmware or another
	 * CPU has already claimed it.
	 */
	if ((misc & MC_MISC_AMD_CNTEN) != 0) {
		if (bootverbose)
			printf("%s: 0x%jx: Bank %d: already enabled\n",
			    __func__, (uintmax_t)misc, i);
		return;
	}

	/*
	 * Configure an Extended Interrupt LVT register for reporting
	 * counter overflows if that feature is supported and the first
	 * extended register is available.
	 */
	amd_elvt = lapic_enable_mca_elvt();
	if (amd_elvt < 0) {
		printf("%s: Bank %d: lapic enable mca elvt failed: %d\n",
		    __func__, i, amd_elvt);
		return;
	}

	/* Re-use Intel CMC support infrastructure. */
	if (bootverbose)
		printf("%s: Starting AMD thresholding on bank %d\n", __func__,
		    i);

	cc = &amd_et_state[PCPU_GET(cpuid)][i];
	cc->cur_threshold = 1;
	amd_thresholding_start(cc, i);

	/* Mark this bank as monitored. */
	PCPU_SET(cmci_mask, PCPU_GET(cmci_mask) | 1 << i);
}

static void
amd_thresholding_resume(int i)
{
	struct amd_et_state *cc;

	KASSERT(i < mca_banks, ("CPU %d has more MC banks", PCPU_GET(cpuid)));

	/* Ignore banks not monitored by this CPU. */
	if (!(PCPU_GET(cmci_mask) & 1 << i))
		return;

	cc = &amd_et_state[PCPU_GET(cpuid)][i];
	cc->last_intr = 0;
	cc->cur_threshold = 1;
	amd_thresholding_start(cc, i);
}
#endif

/*
 * Initializes per-CPU machine check registers and enables corrected
 * machine check interrupts.
 */
static void
_mca_init(int boot)
{
	uint64_t mcg_cap;
	uint64_t ctl, mask;
	int i, skip, family;

	family = CPUID_TO_FAMILY(cpu_id);

	/* MCE is required. */
	if (!mca_enabled || !(cpu_feature & CPUID_MCE))
		return;

	if (cpu_feature & CPUID_MCA) {
		if (boot)
			PCPU_SET(cmci_mask, 0);

		mcg_cap = rdmsr(MSR_MCG_CAP);
		if (mcg_cap & MCG_CAP_CTL_P)
			/* Enable MCA features. */
			wrmsr(MSR_MCG_CTL, MCG_CTL_ENABLE);
		if (IS_BSP() && boot)
			mca_setup(mcg_cap);

		/*
		 * Disable logging of level one TLB parity (L1TP) errors by
		 * the data cache as an alternative workaround for AMD Family
		 * 10h Erratum 383.  Unlike the recommended workaround, there
		 * is no performance penalty to this workaround.  However,
		 * L1TP errors will go unreported.
		 */
		if (cpu_vendor_id == CPU_VENDOR_AMD && family == 0x10 &&
		    !amd10h_L1TP) {
			mask = rdmsr(MSR_MC0_CTL_MASK);
			if ((mask & (1UL << 5)) == 0)
				wrmsr(MSR_MC0_CTL_MASK, mask | (1UL << 5));
		}

		/*
		 * cmci_monitor() must not be executed simultaneously
		 * by several CPUs.
		 */
		if (boot)
			mtx_lock_spin(&mca_lock);

		for (i = 0; i < (mcg_cap & MCG_CAP_COUNT); i++) {
			/* By default enable logging of all errors. */
			ctl = 0xffffffffffffffffUL;
			skip = 0;

			if (cpu_vendor_id == CPU_VENDOR_INTEL) {
				/*
				 * For P6 models before Nehalem MC0_CTL is
				 * always enabled and reserved.
				 */
				if (i == 0 && family == 0x6 &&
				    CPUID_TO_MODEL(cpu_id) < 0x1a)
					skip = 1;
			} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
				/* BKDG for Family 10h: unset GartTblWkEn. */
				if (i == MC_AMDNB_BANK && family >= 0xf)
					ctl &= ~(1UL << 10);
			}

			if (!skip)
				wrmsr(MSR_MC_CTL(i), ctl);

#ifdef DEV_APIC
			if (cmci_supported(mcg_cap)) {
				if (boot)
					cmci_monitor(i);
				else
					cmci_resume(i);
			} else if (amd_thresholding_supported()) {
				if (boot)
					amd_thresholding_monitor(i);
				else
					amd_thresholding_resume(i);
			}
#endif

			/* Clear all errors. */
			wrmsr(MSR_MC_STATUS(i), 0);
		}
		if (boot)
			mtx_unlock_spin(&mca_lock);

#ifdef DEV_APIC
		if (!amd_thresholding_supported() &&
		    PCPU_GET(cmci_mask) != 0 && boot)
			lapic_enable_cmc();
#endif
	}

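	/*
	 * Finally enable machine check exception delivery; with CR4.MCE
	 * clear, a machine check condition would instead shut the
	 * processor down.
	 */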
	load_cr4(rcr4() | CR4_MCE);
}

/* Must be executed on each CPU during boot. */
void
mca_init(void)
{

	_mca_init(1);
}

/* Must be executed on each CPU during resume. */
void
mca_resume(void)
{

	_mca_init(0);
}

/*
 * The machine check registers for the BSP cannot be initialized until
 * the local APIC is initialized.  This happens at SI_SUB_CPU,
 * SI_ORDER_SECOND.
 */
static void
mca_init_bsp(void *arg __unused)
{

	mca_init();
}
SYSINIT(mca_init_bsp, SI_SUB_CPU, SI_ORDER_ANY, mca_init_bsp, NULL);

/* Called when a machine check exception fires. */
void
mca_intr(void)
{
	uint64_t mcg_status;
	int recoverable, count;

	if (!(cpu_feature & CPUID_MCA)) {
		/*
		 * Just print the values of the old Pentium registers
		 * and panic.
		 */
		printf("MC Type: 0x%jx Address: 0x%jx\n",
		    (uintmax_t)rdmsr(MSR_P5_MC_TYPE),
		    (uintmax_t)rdmsr(MSR_P5_MC_ADDR));
		panic("Machine check");
	}

	/* Scan the banks and check for any non-recoverable errors. */
	count = mca_scan(MCE, &recoverable);
	mcg_status = rdmsr(MSR_MCG_STATUS);
	if (!(mcg_status & MCG_STATUS_RIPV))
		recoverable = 0;

	if (!recoverable) {
		/*
		 * Only panic if the error was detected local to this CPU.
		 * Some errors will assert a machine check on all CPUs, but
		 * only certain CPUs will find a valid bank to log.
		 */
		while (count == 0)
			cpu_spinwait();

		panic("Unrecoverable machine check exception");
	}

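	/*
	 * The hardware sets MCG_STATUS.MCIP when delivering a machine
	 * check; a second machine check while the flag is still set
	 * shuts the processor down, so it must be cleared once the
	 * event has been handled.
	 */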
	/* Clear MCIP. */
	wrmsr(MSR_MCG_STATUS, mcg_status & ~MCG_STATUS_MCIP);
}

#ifdef DEV_APIC
/* Called for a CMCI (correctable machine check interrupt). */
void
cmc_intr(void)
{
	struct mca_internal *mca;
	int count;

	/*
	 * MCA bank scanning is serialized by bank ownership: each bank
	 * appears in the cmci_mask of only one CPU, so sibling threads
	 * never scan the same bank concurrently.
	 */
	count = mca_scan(CMCI, NULL);

	/* If we found anything, log them to the console. */
	if (count != 0) {
		mtx_lock_spin(&mca_lock);
		STAILQ_FOREACH(mca, &mca_records, link) {
			if (!mca->logged) {
				mca->logged = 1;
				mca_log(&mca->rec);
			}
		}
		mtx_unlock_spin(&mca_lock);
	}
}
#endif