/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 * Copyright (c) 2003 John Baldwin <[email protected]>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Local APIC support on Pentium and later processors.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_atpic.h"
#include "opt_hwpmc_hooks.h"

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/timeet.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <x86/apicreg.h>
#include <machine/clock.h>
#include <machine/cpufunc.h>
#include <machine/cputypes.h>
#include <machine/frame.h>
#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <x86/init.h>

#ifdef DDB
#include <sys/interrupt.h>
#include <ddb/ddb.h>
#endif

#ifdef __amd64__
#define	SDT_APIC	SDT_SYSIGT
#define	GSEL_APIC	0
#else
#define	SDT_APIC	SDT_SYS386IGT
#define	GSEL_APIC	GSEL(GCODE_SEL, SEL_KPL)
#endif

static MALLOC_DEFINE(M_LAPIC, "local_apic", "Local APIC items");

/* Sanity checks on IDT vectors. */
CTASSERT(APIC_IO_INTS + APIC_NUM_IOINTS == APIC_TIMER_INT);
CTASSERT(APIC_TIMER_INT < APIC_LOCAL_INTS);
CTASSERT(APIC_LOCAL_INTS == 240);
CTASSERT(IPI_STOP < APIC_SPURIOUS_INT);

/*
 * I/O interrupts use non-negative IRQ values.  The negative values
 * below are used to mark unused IDT entries or IDT entries reserved
 * for a non-I/O interrupt.
 */
#define	IRQ_FREE	-1
#define	IRQ_TIMER	-2
#define	IRQ_SYSCALL	-3
#define	IRQ_DTRACE_RET	-4
#define	IRQ_EVTCHN	-5

enum lat_timer_mode {
	LAT_MODE_UNDEF = 0,
	LAT_MODE_PERIODIC = 1,
	LAT_MODE_ONESHOT = 2,
	LAT_MODE_DEADLINE = 3,
};

/*
 * Support for local APICs.  Local APICs manage interrupts on each
 * individual processor as opposed to I/O APICs which receive interrupts
 * from I/O devices and then forward them on to the local APICs.
 *
 * Local APICs can also send interrupts to each other thus providing the
 * mechanism for IPIs.
 */

struct lvt {
	u_int lvt_edgetrigger:1;
	u_int lvt_activehi:1;
	u_int lvt_masked:1;
	u_int lvt_active:1;
	u_int lvt_mode:16;
	u_int lvt_vector:8;
};

struct lapic {
	struct lvt la_lvts[APIC_LVT_MAX + 1];
	struct lvt la_elvts[APIC_ELVT_MAX + 1];
	u_int la_id:8;
	u_int la_cluster:4;
	u_int la_cluster_id:2;
	u_int la_present:1;
	u_long *la_timer_count;
	uint64_t la_timer_period;
	enum lat_timer_mode la_timer_mode;
	uint32_t lvt_timer_base;
	uint32_t lvt_timer_last;
	/* Include IDT_SYSCALL to make indexing easier. */
	int la_ioint_irqs[APIC_NUM_IOINTS + 1];
} static *lapics;

/* Global defaults for local APIC LVT entries. */
static struct lvt lvts[APIC_LVT_MAX + 1] = {
	{ 1, 1, 1, 1, APIC_LVT_DM_EXTINT, 0 },	/* LINT0: masked ExtINT */
	{ 1, 1, 0, 1, APIC_LVT_DM_NMI, 0 },	/* LINT1: NMI */
	{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_TIMER_INT },	/* Timer */
	{ 1, 1, 0, 1, APIC_LVT_DM_FIXED, APIC_ERROR_INT },	/* Error */
	{ 1, 1, 1, 1, APIC_LVT_DM_NMI, 0 },	/* PMC */
	{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_THERMAL_INT },	/* Thermal */
	{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_CMC_INT },	/* CMCI */
};

/* Global defaults for AMD local APIC ELVT entries. */
static struct lvt elvts[APIC_ELVT_MAX + 1] = {
	{ 1, 1, 1, 0, APIC_LVT_DM_FIXED, 0 },
	{ 1, 1, 1, 0, APIC_LVT_DM_FIXED, APIC_CMC_INT },
	{ 1, 1, 1, 0, APIC_LVT_DM_FIXED, 0 },
	{ 1, 1, 1, 0, APIC_LVT_DM_FIXED, 0 },
};

static inthand_t *ioint_handlers[] = {
	NULL,			/* 0 - 31 */
	IDTVEC(apic_isr1),	/* 32 - 63 */
	IDTVEC(apic_isr2),	/* 64 - 95 */
	IDTVEC(apic_isr3),	/* 96 - 127 */
	IDTVEC(apic_isr4),	/* 128 - 159 */
	IDTVEC(apic_isr5),	/* 160 - 191 */
	IDTVEC(apic_isr6),	/* 192 - 223 */
	IDTVEC(apic_isr7),	/* 224 - 255 */
};

static inthand_t *ioint_pti_handlers[] = {
	NULL,			/* 0 - 31 */
	IDTVEC(apic_isr1_pti),	/* 32 - 63 */
	IDTVEC(apic_isr2_pti),	/* 64 - 95 */
	IDTVEC(apic_isr3_pti),	/* 96 - 127 */
	IDTVEC(apic_isr4_pti),	/* 128 - 159 */
	IDTVEC(apic_isr5_pti),	/* 160 - 191 */
	IDTVEC(apic_isr6_pti),	/* 192 - 223 */
	IDTVEC(apic_isr7_pti),	/* 224 - 255 */
};

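/*
 * Timer divide configuration register encodings, indexed so that
 * entry i selects a divisor of 1 << i.  lapic_timer_set_divisor()
 * looks entries up via ffs(divisor) - 1, which is why the divisor
 * must be a power of two no larger than 128.
 */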
static u_int32_t lapic_timer_divisors[] = {
	APIC_TDCR_1, APIC_TDCR_2, APIC_TDCR_4, APIC_TDCR_8, APIC_TDCR_16,
	APIC_TDCR_32, APIC_TDCR_64, APIC_TDCR_128
};

extern inthand_t IDTVEC(rsvd_pti), IDTVEC(rsvd);

volatile char *lapic_map;
vm_paddr_t lapic_paddr;
int x2apic_mode;
int lapic_eoi_suppression;
static int lapic_timer_tsc_deadline;
static u_long lapic_timer_divisor, count_freq;
static struct eventtimer lapic_et;
#ifdef SMP
static uint64_t lapic_ipi_wait_mult;
#endif
unsigned int max_apic_id;

SYSCTL_NODE(_hw, OID_AUTO, apic, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "APIC options");
SYSCTL_INT(_hw_apic, OID_AUTO, x2apic_mode, CTLFLAG_RD, &x2apic_mode, 0, "");
SYSCTL_INT(_hw_apic, OID_AUTO, eoi_suppression, CTLFLAG_RD,
    &lapic_eoi_suppression, 0, "");
SYSCTL_INT(_hw_apic, OID_AUTO, timer_tsc_deadline, CTLFLAG_RD,
    &lapic_timer_tsc_deadline, 0, "");

static void lapic_calibrate_initcount(struct lapic *la);
static void lapic_calibrate_deadline(struct lapic *la);

/*
 * Use __nosanitizethread to exempt the LAPIC I/O accessors from KCSan
 * instrumentation.  Otherwise, if x2APIC is not available, use of the global
 * lapic_map will generate a KCSan false positive.  While the mapping is
 * shared among all CPUs, the physical access will always take place on the
 * local CPU's APIC, so there isn't in fact a race here.  Furthermore, the
 * KCSan warning printf can cause a panic if issued during LAPIC access,
 * due to attempted recursive use of event timer resources.
 */

static uint32_t __nosanitizethread
lapic_read32(enum LAPIC_REGISTERS reg)
{
	uint32_t res;

	if (x2apic_mode) {
		res = rdmsr32(MSR_APIC_000 + reg);
	} else {
		res = *(volatile uint32_t *)(lapic_map + reg * LAPIC_MEM_MUL);
	}
	return (res);
}

static void __nosanitizethread
lapic_write32(enum LAPIC_REGISTERS reg, uint32_t val)
{

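	/*
	 * In x2APIC mode a WRMSR to an APIC register is not serializing
	 * and may become visible before earlier memory operations; the
	 * MFENCE;LFENCE pair recommended by the Intel SDM keeps the APIC
	 * write ordered behind prior stores and loads.
	 */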
	if (x2apic_mode) {
		mfence();
		lfence();
		wrmsr(MSR_APIC_000 + reg, val);
	} else {
		*(volatile uint32_t *)(lapic_map + reg * LAPIC_MEM_MUL) = val;
	}
}

static void __nosanitizethread
lapic_write32_nofence(enum LAPIC_REGISTERS reg, uint32_t val)
{

	if (x2apic_mode) {
		wrmsr(MSR_APIC_000 + reg, val);
	} else {
		*(volatile uint32_t *)(lapic_map + reg * LAPIC_MEM_MUL) = val;
	}
}

#ifdef SMP
static uint64_t
lapic_read_icr_lo(void)
{

	return (lapic_read32(LAPIC_ICR_LO));
}

static void
lapic_write_icr(uint32_t vhi, uint32_t vlo)
{
	register_t saveintr;
	uint64_t v;

	if (x2apic_mode) {
		v = ((uint64_t)vhi << 32) | vlo;
		mfence();
		wrmsr(MSR_APIC_000 + LAPIC_ICR_LO, v);
	} else {
		saveintr = intr_disable();
		lapic_write32(LAPIC_ICR_HI, vhi);
		lapic_write32(LAPIC_ICR_LO, vlo);
		intr_restore(saveintr);
	}
}

static void
lapic_write_icr_lo(uint32_t vlo)
{

	if (x2apic_mode) {
		mfence();
		wrmsr(MSR_APIC_000 + LAPIC_ICR_LO, vlo);
	} else {
		lapic_write32(LAPIC_ICR_LO, vlo);
	}
}

static void
lapic_write_self_ipi(uint32_t vector)
{

	KASSERT(x2apic_mode, ("SELF IPI write in xAPIC mode"));
	wrmsr(MSR_APIC_000 + LAPIC_SELF_IPI, vector);
}
#endif /* SMP */

static void
native_lapic_enable_x2apic(void)
{
	uint64_t apic_base;

	apic_base = rdmsr(MSR_APICBASE);
	apic_base |= APICBASE_X2APIC | APICBASE_ENABLED;
	wrmsr(MSR_APICBASE, apic_base);
}

static bool
native_lapic_is_x2apic(void)
{
	uint64_t apic_base;

	apic_base = rdmsr(MSR_APICBASE);
	return ((apic_base & (APICBASE_X2APIC | APICBASE_ENABLED)) ==
	    (APICBASE_X2APIC | APICBASE_ENABLED));
}

static void	lapic_enable(void);
static void	lapic_resume(struct pic *pic, bool suspend_cancelled);
static void	lapic_timer_oneshot(struct lapic *);
static void	lapic_timer_oneshot_nointr(struct lapic *, uint32_t);
static void	lapic_timer_periodic(struct lapic *);
static void	lapic_timer_deadline(struct lapic *);
static void	lapic_timer_stop(struct lapic *);
static void	lapic_timer_set_divisor(u_int divisor);
static uint32_t	lvt_mode(struct lapic *la, u_int pin, uint32_t value);
static int	lapic_et_start(struct eventtimer *et,
    sbintime_t first, sbintime_t period);
static int	lapic_et_stop(struct eventtimer *et);
static u_int	apic_idt_to_irq(u_int apic_id, u_int vector);
static void	lapic_set_tpr(u_int vector);

struct pic lapic_pic = { .pic_resume = lapic_resume };

/* Forward declarations for apic_ops */
static void	native_lapic_create(u_int apic_id, int boot_cpu);
static void	native_lapic_init(vm_paddr_t addr);
static void	native_lapic_xapic_mode(void);
static void	native_lapic_setup(int boot);
static void	native_lapic_dump(const char *str);
static void	native_lapic_disable(void);
static void	native_lapic_eoi(void);
static int	native_lapic_id(void);
static int	native_lapic_intr_pending(u_int vector);
static u_int	native_apic_cpuid(u_int apic_id);
static u_int	native_apic_alloc_vector(u_int apic_id, u_int irq);
static u_int	native_apic_alloc_vectors(u_int apic_id, u_int *irqs,
    u_int count, u_int align);
static void	native_apic_disable_vector(u_int apic_id, u_int vector);
static void	native_apic_enable_vector(u_int apic_id, u_int vector);
static void	native_apic_free_vector(u_int apic_id, u_int vector, u_int irq);
static void	native_lapic_set_logical_id(u_int apic_id, u_int cluster,
    u_int cluster_id);
static int	native_lapic_enable_pmc(void);
static void	native_lapic_disable_pmc(void);
static void	native_lapic_reenable_pmc(void);
static void	native_lapic_enable_cmc(void);
static int	native_lapic_enable_mca_elvt(void);
static int	native_lapic_set_lvt_mask(u_int apic_id, u_int lvt,
    u_char masked);
static int	native_lapic_set_lvt_mode(u_int apic_id, u_int lvt,
    uint32_t mode);
static int	native_lapic_set_lvt_polarity(u_int apic_id, u_int lvt,
    enum intr_polarity pol);
static int	native_lapic_set_lvt_triggermode(u_int apic_id, u_int lvt,
    enum intr_trigger trigger);
#ifdef SMP
static void	native_lapic_ipi_raw(register_t icrlo, u_int dest);
static void	native_lapic_ipi_vectored(u_int vector, int dest);
static int	native_lapic_ipi_wait(int delay);
#endif /* SMP */
static int	native_lapic_ipi_alloc(inthand_t *ipifunc);
static void	native_lapic_ipi_free(int vector);

struct apic_ops apic_ops = {
	.create = native_lapic_create,
	.init = native_lapic_init,
	.xapic_mode = native_lapic_xapic_mode,
	.is_x2apic = native_lapic_is_x2apic,
	.setup = native_lapic_setup,
	.dump = native_lapic_dump,
	.disable = native_lapic_disable,
	.eoi = native_lapic_eoi,
	.id = native_lapic_id,
	.intr_pending = native_lapic_intr_pending,
	.set_logical_id = native_lapic_set_logical_id,
	.cpuid = native_apic_cpuid,
	.alloc_vector = native_apic_alloc_vector,
	.alloc_vectors = native_apic_alloc_vectors,
	.enable_vector = native_apic_enable_vector,
	.disable_vector = native_apic_disable_vector,
	.free_vector = native_apic_free_vector,
	.enable_pmc = native_lapic_enable_pmc,
	.disable_pmc = native_lapic_disable_pmc,
	.reenable_pmc = native_lapic_reenable_pmc,
	.enable_cmc = native_lapic_enable_cmc,
	.enable_mca_elvt = native_lapic_enable_mca_elvt,
#ifdef SMP
	.ipi_raw = native_lapic_ipi_raw,
	.ipi_vectored = native_lapic_ipi_vectored,
	.ipi_wait = native_lapic_ipi_wait,
#endif
	.ipi_alloc = native_lapic_ipi_alloc,
	.ipi_free = native_lapic_ipi_free,
	.set_lvt_mask = native_lapic_set_lvt_mask,
	.set_lvt_mode = native_lapic_set_lvt_mode,
	.set_lvt_polarity = native_lapic_set_lvt_polarity,
	.set_lvt_triggermode = native_lapic_set_lvt_triggermode,
};
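
/*
 * apic_ops is the indirection layer used by the apic_* wrappers; the
 * native implementations above are the defaults and, presumably, an
 * alternative backend (a para-virtualized platform such as Xen, for
 * instance) can override individual entries early in boot before the
 * local APIC is used.
 */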

static uint32_t
lvt_mode_impl(struct lapic *la, struct lvt *lvt, u_int pin, uint32_t value)
{

	value &= ~(APIC_LVT_M | APIC_LVT_TM | APIC_LVT_IIPP | APIC_LVT_DM |
	    APIC_LVT_VECTOR);
	if (lvt->lvt_edgetrigger == 0)
		value |= APIC_LVT_TM;
	if (lvt->lvt_activehi == 0)
		value |= APIC_LVT_IIPP_INTALO;
	if (lvt->lvt_masked)
		value |= APIC_LVT_M;
	value |= lvt->lvt_mode;
	switch (lvt->lvt_mode) {
	case APIC_LVT_DM_NMI:
	case APIC_LVT_DM_SMI:
	case APIC_LVT_DM_INIT:
	case APIC_LVT_DM_EXTINT:
		if (!lvt->lvt_edgetrigger && bootverbose) {
			printf("lapic%u: Forcing LINT%u to edge trigger\n",
			    la->la_id, pin);
			value &= ~APIC_LVT_TM;
		}
		/* Use a vector of 0. */
		break;
	case APIC_LVT_DM_FIXED:
		value |= lvt->lvt_vector;
		break;
	default:
		panic("bad APIC LVT delivery mode: %#x\n", value);
	}
	return (value);
}

static uint32_t
lvt_mode(struct lapic *la, u_int pin, uint32_t value)
{
	struct lvt *lvt;

	KASSERT(pin <= APIC_LVT_MAX,
	    ("%s: pin %u out of range", __func__, pin));
	if (la->la_lvts[pin].lvt_active)
		lvt = &la->la_lvts[pin];
	else
		lvt = &lvts[pin];

	return (lvt_mode_impl(la, lvt, pin, value));
}

static uint32_t
elvt_mode(struct lapic *la, u_int idx, uint32_t value)
{
	struct lvt *elvt;

	KASSERT(idx <= APIC_ELVT_MAX,
	    ("%s: idx %u out of range", __func__, idx));

	elvt = &la->la_elvts[idx];
	KASSERT(elvt->lvt_active, ("%s: ELVT%u is not active", __func__, idx));
	KASSERT(elvt->lvt_edgetrigger,
	    ("%s: ELVT%u is not edge triggered", __func__, idx));
	KASSERT(elvt->lvt_activehi,
	    ("%s: ELVT%u is not active high", __func__, idx));
	return (lvt_mode_impl(la, elvt, idx, value));
}

/*
 * Map the local APIC and setup necessary interrupt vectors.
 */
static void
native_lapic_init(vm_paddr_t addr)
{
#ifdef SMP
	uint64_t r, r1, r2, rx;
#endif
	uint32_t ver;
	int i;
	bool arat;

	/*
	 * Enable x2APIC mode if possible.  Map the local APIC
	 * registers page.
	 *
	 * Keep the LAPIC registers page mapped uncached for x2APIC
	 * mode too, to have direct map page attribute set to
	 * uncached.  This is needed to work around CPU errata present
	 * on all Intel processors.
	 */
	KASSERT(trunc_page(addr) == addr,
	    ("local APIC not aligned on a page boundary"));
	lapic_paddr = addr;
	lapic_map = pmap_mapdev(addr, PAGE_SIZE);
	if (x2apic_mode) {
		native_lapic_enable_x2apic();
		lapic_map = NULL;
	}

	/* Setup the spurious interrupt handler. */
	setidt(APIC_SPURIOUS_INT, IDTVEC(spuriousint), SDT_APIC, SEL_KPL,
	    GSEL_APIC);

	/* Perform basic initialization of the BSP's local APIC. */
	lapic_enable();

	/* Set BSP's per-CPU local APIC ID. */
	PCPU_SET(apic_id, lapic_id());

	/* Local APIC timer interrupt. */
	setidt(APIC_TIMER_INT, pti ? IDTVEC(timerint_pti) : IDTVEC(timerint),
	    SDT_APIC, SEL_KPL, GSEL_APIC);

	/* Local APIC error interrupt. */
	setidt(APIC_ERROR_INT, pti ? IDTVEC(errorint_pti) : IDTVEC(errorint),
	    SDT_APIC, SEL_KPL, GSEL_APIC);

	/* XXX: Thermal interrupt */

	/* Local APIC CMCI. */
	setidt(APIC_CMC_INT, pti ? IDTVEC(cmcint_pti) : IDTVEC(cmcint),
	    SDT_APIC, SEL_KPL, GSEL_APIC);

	if ((resource_int_value("apic", 0, "clock", &i) != 0 || i != 0)) {
		/* Set if APIC timer runs in C3. */
		arat = (cpu_power_eax & CPUTPM1_ARAT);

		bzero(&lapic_et, sizeof(lapic_et));
		lapic_et.et_name = "LAPIC";
		lapic_et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT |
		    ET_FLAGS_PERCPU;
		lapic_et.et_quality = 600;
		if (!arat) {
			lapic_et.et_flags |= ET_FLAGS_C3STOP;
			lapic_et.et_quality = 100;
		}
		if ((cpu_feature & CPUID_TSC) != 0 &&
		    (cpu_feature2 & CPUID2_TSCDLT) != 0 &&
		    tsc_is_invariant && tsc_freq != 0) {
			lapic_timer_tsc_deadline = 1;
			TUNABLE_INT_FETCH("hw.lapic_tsc_deadline",
			    &lapic_timer_tsc_deadline);
		}

		lapic_et.et_frequency = 0;
		/* We don't know the frequency yet, so guess at the limits. */
		lapic_et.et_min_period = 0x00001000LL;
		lapic_et.et_max_period = SBT_1S;
		lapic_et.et_start = lapic_et_start;
		lapic_et.et_stop = lapic_et_stop;
		lapic_et.et_priv = NULL;
		et_register(&lapic_et);
	}

	/*
	 * Set lapic_eoi_suppression after lapic_enable(), so as not to
	 * enable suppression in the hardware prematurely.  Note that we
	 * enable suppression by default even when the system has only
	 * one I/O APIC, since otherwise EOIs are broadcast to all APIC
	 * agents, including CPUs.
	 *
	 * At least some KVM versions report the EOI_SUPPRESSION bit,
	 * but auto-EOI does not actually work there.
	 */
	ver = lapic_read32(LAPIC_VERSION);
	if ((ver & APIC_VER_EOI_SUPPRESSION) != 0) {
		lapic_eoi_suppression = 1;
		if (vm_guest == VM_GUEST_KVM) {
			if (bootverbose)
				printf(
			    "KVM -- disabling lapic eoi suppression\n");
			lapic_eoi_suppression = 0;
		}
		TUNABLE_INT_FETCH("hw.lapic_eoi_suppression",
		    &lapic_eoi_suppression);
	}

#ifdef SMP
#define	LOOPS	100000
	/*
	 * Calibrate the busy loop waiting for IPI ack in xAPIC mode.
	 * lapic_ipi_wait_mult contains the number of iterations which
	 * approximately delay execution for 1 microsecond (the
	 * argument to native_lapic_ipi_wait() is in microseconds).
	 *
	 * We assume that TSC is present and already measured.
	 * Possible TSC frequency jumps are irrelevant to the
	 * calibration loop below, the CPU clock management code is
	 * not yet started, and we do not enter sleep states.
	 */
	KASSERT((cpu_feature & CPUID_TSC) != 0 && tsc_freq != 0,
	    ("TSC not initialized"));
	if (!x2apic_mode) {
		r = rdtsc();
		for (rx = 0; rx < LOOPS; rx++) {
			(void)lapic_read_icr_lo();
			ia32_pause();
		}
		r = rdtsc() - r;
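		/*
		 * r is the number of TSC ticks consumed by LOOPS
		 * iterations, so iterations per microsecond is
		 * LOOPS / (r / tsc_freq) / 1e6 = (tsc_freq * LOOPS) /
		 * (r * 1e6), i.e. r1 / r2 below, clamped to at least 1.
		 */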
		r1 = tsc_freq * LOOPS;
		r2 = r * 1000000;
		lapic_ipi_wait_mult = r1 >= r2 ? r1 / r2 : 1;
		if (bootverbose) {
			printf("LAPIC: ipi_wait() us multiplier %ju (r %ju "
			    "tsc %ju)\n", (uintmax_t)lapic_ipi_wait_mult,
			    (uintmax_t)r, (uintmax_t)tsc_freq);
		}
	}
#undef LOOPS
#endif /* SMP */
}

/*
 * Create a local APIC instance.
 */
static void
native_lapic_create(u_int apic_id, int boot_cpu)
{
	int i;

	if (apic_id > max_apic_id) {
		printf("APIC: Ignoring local APIC with ID %d\n", apic_id);
		if (boot_cpu)
			panic("Can't ignore BSP");
		return;
	}
	KASSERT(!lapics[apic_id].la_present, ("duplicate local APIC %u",
	    apic_id));

	/*
	 * Assume no local LVT overrides and a cluster of 0 and
	 * intra-cluster ID of 0.
	 */
	lapics[apic_id].la_present = 1;
	lapics[apic_id].la_id = apic_id;
	for (i = 0; i <= APIC_LVT_MAX; i++) {
		lapics[apic_id].la_lvts[i] = lvts[i];
		lapics[apic_id].la_lvts[i].lvt_active = 0;
	}
	for (i = 0; i <= APIC_ELVT_MAX; i++) {
		lapics[apic_id].la_elvts[i] = elvts[i];
		lapics[apic_id].la_elvts[i].lvt_active = 0;
	}
	for (i = 0; i <= APIC_NUM_IOINTS; i++)
		lapics[apic_id].la_ioint_irqs[i] = IRQ_FREE;
	lapics[apic_id].la_ioint_irqs[IDT_SYSCALL - APIC_IO_INTS] = IRQ_SYSCALL;
	lapics[apic_id].la_ioint_irqs[APIC_TIMER_INT - APIC_IO_INTS] =
	    IRQ_TIMER;
#ifdef KDTRACE_HOOKS
	lapics[apic_id].la_ioint_irqs[IDT_DTRACE_RET - APIC_IO_INTS] =
	    IRQ_DTRACE_RET;
#endif
#ifdef XENHVM
	lapics[apic_id].la_ioint_irqs[IDT_EVTCHN - APIC_IO_INTS] = IRQ_EVTCHN;
#endif

#ifdef SMP
	cpu_add(apic_id, boot_cpu);
#endif
}

static inline uint32_t
amd_read_ext_features(void)
{
	uint32_t version;

	if (cpu_vendor_id != CPU_VENDOR_AMD &&
	    cpu_vendor_id != CPU_VENDOR_HYGON)
		return (0);
	version = lapic_read32(LAPIC_VERSION);
	if ((version & APIC_VER_AMD_EXT_SPACE) != 0)
		return (lapic_read32(LAPIC_EXT_FEATURES));
	else
		return (0);
}

static inline uint32_t
amd_read_elvt_count(void)
{
	uint32_t extf;
	uint32_t count;

	extf = amd_read_ext_features();
	count = (extf & APIC_EXTF_ELVT_MASK) >> APIC_EXTF_ELVT_SHIFT;
	count = min(count, APIC_ELVT_MAX + 1);
	return (count);
}

/*
 * Dump contents of local APIC registers
 */
static void
native_lapic_dump(const char *str)
{
	uint32_t version;
	uint32_t maxlvt;
	uint32_t extf;
	int elvt_count;
	int i;

	version = lapic_read32(LAPIC_VERSION);
	maxlvt = (version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
	printf("cpu%d %s:\n", PCPU_GET(cpuid), str);
	printf(" ID: 0x%08x VER: 0x%08x LDR: 0x%08x DFR: 0x%08x",
	    lapic_read32(LAPIC_ID), version,
	    lapic_read32(LAPIC_LDR), x2apic_mode ? 0 : lapic_read32(LAPIC_DFR));
	if ((cpu_feature2 & CPUID2_X2APIC) != 0)
		printf(" x2APIC: %d", x2apic_mode);
	printf("\n lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
	    lapic_read32(LAPIC_LVT_LINT0), lapic_read32(LAPIC_LVT_LINT1),
	    lapic_read32(LAPIC_TPR), lapic_read32(LAPIC_SVR));
	printf(" timer: 0x%08x therm: 0x%08x err: 0x%08x",
	    lapic_read32(LAPIC_LVT_TIMER), lapic_read32(LAPIC_LVT_THERMAL),
	    lapic_read32(LAPIC_LVT_ERROR));
	if (maxlvt >= APIC_LVT_PMC)
		printf(" pmc: 0x%08x", lapic_read32(LAPIC_LVT_PCINT));
	printf("\n");
	if (maxlvt >= APIC_LVT_CMCI)
		printf(" cmci: 0x%08x\n", lapic_read32(LAPIC_LVT_CMCI));
	extf = amd_read_ext_features();
	if (extf != 0) {
		printf(" AMD ext features: 0x%08x\n", extf);
		elvt_count = amd_read_elvt_count();
		for (i = 0; i < elvt_count; i++)
			printf(" AMD elvt%d: 0x%08x\n", i,
			    lapic_read32(LAPIC_EXT_LVT0 + i));
	}
}

static void
native_lapic_xapic_mode(void)
{
	register_t saveintr;

	saveintr = intr_disable();
	if (x2apic_mode)
		native_lapic_enable_x2apic();
	intr_restore(saveintr);
}

static void
native_lapic_setup(int boot)
{
	struct lapic *la;
	uint32_t version;
	uint32_t maxlvt;
	register_t saveintr;
	int elvt_count;
	int i;

	saveintr = intr_disable();

	la = &lapics[lapic_id()];
	KASSERT(la->la_present, ("missing APIC structure"));
	version = lapic_read32(LAPIC_VERSION);
	maxlvt = (version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;

	/* Initialize the TPR to allow all interrupts. */
	lapic_set_tpr(0);

	/* Setup spurious vector and enable the local APIC. */
	lapic_enable();

	/* Program LINT[01] LVT entries. */
	lapic_write32(LAPIC_LVT_LINT0, lvt_mode(la, APIC_LVT_LINT0,
	    lapic_read32(LAPIC_LVT_LINT0)));
	lapic_write32(LAPIC_LVT_LINT1, lvt_mode(la, APIC_LVT_LINT1,
	    lapic_read32(LAPIC_LVT_LINT1)));

	/* Program the PMC LVT entry if present. */
	if (maxlvt >= APIC_LVT_PMC) {
		lapic_write32(LAPIC_LVT_PCINT, lvt_mode(la, APIC_LVT_PMC,
		    LAPIC_LVT_PCINT));
	}

	/* Program timer LVT. */
	la->lvt_timer_base = lvt_mode(la, APIC_LVT_TIMER,
	    lapic_read32(LAPIC_LVT_TIMER));
	la->lvt_timer_last = la->lvt_timer_base;
	lapic_write32(LAPIC_LVT_TIMER, la->lvt_timer_base);

	/* Calibrate the timer parameters using BSP. */
	if (boot && IS_BSP()) {
		lapic_calibrate_initcount(la);
		if (lapic_timer_tsc_deadline)
			lapic_calibrate_deadline(la);
	}

	/* Setup the timer if configured. */
	if (la->la_timer_mode != LAT_MODE_UNDEF) {
		KASSERT(la->la_timer_period != 0, ("lapic%u: zero divisor",
		    lapic_id()));
		switch (la->la_timer_mode) {
		case LAT_MODE_PERIODIC:
			lapic_timer_set_divisor(lapic_timer_divisor);
			lapic_timer_periodic(la);
			break;
		case LAT_MODE_ONESHOT:
			lapic_timer_set_divisor(lapic_timer_divisor);
			lapic_timer_oneshot(la);
			break;
		case LAT_MODE_DEADLINE:
			lapic_timer_deadline(la);
			break;
		default:
			panic("corrupted la_timer_mode %p %d", la,
			    la->la_timer_mode);
		}
	}

	/* Program error LVT and clear any existing errors. */
	lapic_write32(LAPIC_LVT_ERROR, lvt_mode(la, APIC_LVT_ERROR,
	    lapic_read32(LAPIC_LVT_ERROR)));
	lapic_write32(LAPIC_ESR, 0);

	/* XXX: Thermal LVT */

	/* Program the CMCI LVT entry if present. */
	if (maxlvt >= APIC_LVT_CMCI) {
		lapic_write32(LAPIC_LVT_CMCI, lvt_mode(la, APIC_LVT_CMCI,
		    lapic_read32(LAPIC_LVT_CMCI)));
	}

	elvt_count = amd_read_elvt_count();
	for (i = 0; i < elvt_count; i++) {
		if (la->la_elvts[i].lvt_active)
			lapic_write32(LAPIC_EXT_LVT0 + i,
			    elvt_mode(la, i, lapic_read32(LAPIC_EXT_LVT0 + i)));
	}

	intr_restore(saveintr);
}

static void
native_lapic_intrcnt(void *dummy __unused)
{
	struct pcpu *pc;
	struct lapic *la;
	char buf[MAXCOMLEN + 1];

	/* If there are no APICs, skip this function. */
	if (lapics == NULL)
		return;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		la = &lapics[pc->pc_apic_id];
		if (!la->la_present)
			continue;

		snprintf(buf, sizeof(buf), "cpu%d:timer", pc->pc_cpuid);
		intrcnt_add(buf, &la->la_timer_count);
	}
}
SYSINIT(native_lapic_intrcnt, SI_SUB_INTR, SI_ORDER_MIDDLE, native_lapic_intrcnt,
    NULL);

static void
native_lapic_reenable_pmc(void)
{
#ifdef HWPMC_HOOKS
	uint32_t value;

	value = lapic_read32(LAPIC_LVT_PCINT);
	value &= ~APIC_LVT_M;
	lapic_write32(LAPIC_LVT_PCINT, value);
#endif
}

#ifdef HWPMC_HOOKS
static void
lapic_update_pmc(void *dummy)
{
	struct lapic *la;

	la = &lapics[lapic_id()];
	lapic_write32(LAPIC_LVT_PCINT, lvt_mode(la, APIC_LVT_PMC,
	    lapic_read32(LAPIC_LVT_PCINT)));
}
#endif

static int
native_lapic_enable_pmc(void)
{
#ifdef HWPMC_HOOKS
	u_int32_t maxlvt;

	/* Fail if the local APIC is not present. */
	if (!x2apic_mode && lapic_map == NULL)
		return (0);

	/* Fail if the PMC LVT is not present. */
	maxlvt = (lapic_read32(LAPIC_VERSION) & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
	if (maxlvt < APIC_LVT_PMC)
		return (0);

	lvts[APIC_LVT_PMC].lvt_masked = 0;

#ifdef EARLY_AP_STARTUP
	MPASS(mp_ncpus == 1 || smp_started);
	smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
#else
#ifdef SMP
	/*
	 * If hwpmc was loaded at boot time then the APs may not be
	 * started yet.  In that case, don't forward the request to
	 * them as they will program the lvt when they start.
	 */
	if (smp_started)
		smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
	else
#endif
		lapic_update_pmc(NULL);
#endif
	return (1);
#else
	return (0);
#endif
}

static void
native_lapic_disable_pmc(void)
{
#ifdef HWPMC_HOOKS
	u_int32_t maxlvt;

	/* Fail if the local APIC is not present. */
	if (!x2apic_mode && lapic_map == NULL)
		return;

	/* Fail if the PMC LVT is not present. */
	maxlvt = (lapic_read32(LAPIC_VERSION) & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
	if (maxlvt < APIC_LVT_PMC)
		return;

	lvts[APIC_LVT_PMC].lvt_masked = 1;

#ifdef SMP
	/* The APs should always be started when hwpmc is unloaded. */
	KASSERT(mp_ncpus == 1 || smp_started, ("hwpmc unloaded too early"));
#endif
	smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
#endif
}

static void
lapic_calibrate_initcount(struct lapic *la)
{
	u_long value;

	/* Start off with a divisor of 2 (power on reset default). */
	lapic_timer_divisor = 2;
	/* Try to calibrate the local APIC timer. */
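	/*
	 * The timer counts down from APIC_TIMER_MAX_COUNT; after the
	 * one second delay the consumed count equals the timer
	 * frequency in Hz at the current divisor.  If the full count
	 * was consumed the counter may have hit zero before the second
	 * elapsed, so retry with the next larger divisor.
	 */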
	do {
		lapic_timer_set_divisor(lapic_timer_divisor);
		lapic_timer_oneshot_nointr(la, APIC_TIMER_MAX_COUNT);
		DELAY(1000000);
		value = APIC_TIMER_MAX_COUNT - lapic_read32(LAPIC_CCR_TIMER);
		if (value != APIC_TIMER_MAX_COUNT)
			break;
		lapic_timer_divisor <<= 1;
	} while (lapic_timer_divisor <= 128);
	if (lapic_timer_divisor > 128)
		panic("lapic: Divisor too big");
	if (bootverbose) {
		printf("lapic: Divisor %lu, Frequency %lu Hz\n",
		    lapic_timer_divisor, value);
	}
	count_freq = value;
}

static void
lapic_calibrate_deadline(struct lapic *la __unused)
{

	if (bootverbose) {
		printf("lapic: deadline tsc mode, Frequency %ju Hz\n",
		    (uintmax_t)tsc_freq);
	}
}

static void
lapic_change_mode(struct eventtimer *et, struct lapic *la,
    enum lat_timer_mode newmode)
{

	if (la->la_timer_mode == newmode)
		return;
	switch (newmode) {
	case LAT_MODE_PERIODIC:
		lapic_timer_set_divisor(lapic_timer_divisor);
		et->et_frequency = count_freq;
		break;
	case LAT_MODE_DEADLINE:
		et->et_frequency = tsc_freq;
		break;
	case LAT_MODE_ONESHOT:
		lapic_timer_set_divisor(lapic_timer_divisor);
		et->et_frequency = count_freq;
		break;
	default:
		panic("lapic_change_mode %d", newmode);
	}
	la->la_timer_mode = newmode;
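	/*
	 * sbintime_t is a 32.32 fixed-point count of seconds, so one
	 * timer tick is (1 << 32) / et_frequency in sbintime units;
	 * the usable range is 2 to 0xfffffffe ticks.
	 */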
	et->et_min_period = (0x00000002LLU << 32) / et->et_frequency;
	et->et_max_period = (0xfffffffeLLU << 32) / et->et_frequency;
}

static int
lapic_et_start(struct eventtimer *et, sbintime_t first, sbintime_t period)
{
	struct lapic *la;

	la = &lapics[PCPU_GET(apic_id)];
	if (period != 0) {
		lapic_change_mode(et, la, LAT_MODE_PERIODIC);
		la->la_timer_period = ((uint32_t)et->et_frequency * period) >>
		    32;
		lapic_timer_periodic(la);
	} else if (lapic_timer_tsc_deadline) {
		lapic_change_mode(et, la, LAT_MODE_DEADLINE);
		la->la_timer_period = (et->et_frequency * first) >> 32;
		lapic_timer_deadline(la);
	} else {
		lapic_change_mode(et, la, LAT_MODE_ONESHOT);
		la->la_timer_period = ((uint32_t)et->et_frequency * first) >>
		    32;
		lapic_timer_oneshot(la);
	}
	return (0);
}

static int
lapic_et_stop(struct eventtimer *et)
{
	struct lapic *la;

	la = &lapics[PCPU_GET(apic_id)];
	lapic_timer_stop(la);
	la->la_timer_mode = LAT_MODE_UNDEF;
	return (0);
}

static void
native_lapic_disable(void)
{
	uint32_t value;

	/* Software disable the local APIC. */
	value = lapic_read32(LAPIC_SVR);
	value &= ~APIC_SVR_SWEN;
	lapic_write32(LAPIC_SVR, value);
}

static void
lapic_enable(void)
{
	uint32_t value;

	/* Program the spurious vector to enable the local APIC. */
	value = lapic_read32(LAPIC_SVR);
	value &= ~(APIC_SVR_VECTOR | APIC_SVR_FOCUS);
	value |= APIC_SVR_FEN | APIC_SVR_SWEN | APIC_SPURIOUS_INT;
	if (lapic_eoi_suppression)
		value |= APIC_SVR_EOI_SUPPRESSION;
	lapic_write32(LAPIC_SVR, value);
}

/* Reset the local APIC on the BSP during resume. */
static void
lapic_resume(struct pic *pic, bool suspend_cancelled)
{

	lapic_setup(0);
}

static int
native_lapic_id(void)
{
	uint32_t v;

	KASSERT(x2apic_mode || lapic_map != NULL, ("local APIC is not mapped"));
	v = lapic_read32(LAPIC_ID);
	if (!x2apic_mode)
		v >>= APIC_ID_SHIFT;
	return (v);
}

static int
native_lapic_intr_pending(u_int vector)
{
	uint32_t irr;

	/*
	 * The IRR registers are an array of registers, each of which
	 * describes 32 interrupts in its low 32 bits.  Thus, we divide
	 * the vector by 32 to get the register index and take the
	 * vector modulo 32 to determine the individual bit to test.
	 */
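	/* The LAPIC_IRRx register indices are contiguous, one per 32 vectors. */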
	irr = lapic_read32(LAPIC_IRR0 + vector / 32);
	return (irr & 1 << (vector % 32));
}

static void
native_lapic_set_logical_id(u_int apic_id, u_int cluster, u_int cluster_id)
{
	struct lapic *la;

	KASSERT(lapics[apic_id].la_present, ("%s: APIC %u doesn't exist",
	    __func__, apic_id));
	KASSERT(cluster <= APIC_MAX_CLUSTER, ("%s: cluster %u too big",
	    __func__, cluster));
	KASSERT(cluster_id <= APIC_MAX_INTRACLUSTER_ID,
	    ("%s: intra cluster id %u too big", __func__, cluster_id));
	la = &lapics[apic_id];
	la->la_cluster = cluster;
	la->la_cluster_id = cluster_id;
}

static int
native_lapic_set_lvt_mask(u_int apic_id, u_int pin, u_char masked)
{

	if (pin > APIC_LVT_MAX)
		return (EINVAL);
	if (apic_id == APIC_ID_ALL) {
		lvts[pin].lvt_masked = masked;
		if (bootverbose)
			printf("lapic:");
	} else {
		KASSERT(lapics[apic_id].la_present,
		    ("%s: missing APIC %u", __func__, apic_id));
		lapics[apic_id].la_lvts[pin].lvt_masked = masked;
		lapics[apic_id].la_lvts[pin].lvt_active = 1;
		if (bootverbose)
			printf("lapic%u:", apic_id);
	}
	if (bootverbose)
		printf(" LINT%u %s\n", pin, masked ? "masked" : "unmasked");
	return (0);
}

static int
native_lapic_set_lvt_mode(u_int apic_id, u_int pin, u_int32_t mode)
{
	struct lvt *lvt;

	if (pin > APIC_LVT_MAX)
		return (EINVAL);
	if (apic_id == APIC_ID_ALL) {
		lvt = &lvts[pin];
		if (bootverbose)
			printf("lapic:");
	} else {
		KASSERT(lapics[apic_id].la_present,
		    ("%s: missing APIC %u", __func__, apic_id));
		lvt = &lapics[apic_id].la_lvts[pin];
		lvt->lvt_active = 1;
		if (bootverbose)
			printf("lapic%u:", apic_id);
	}
	lvt->lvt_mode = mode;
	switch (mode) {
	case APIC_LVT_DM_NMI:
	case APIC_LVT_DM_SMI:
	case APIC_LVT_DM_INIT:
	case APIC_LVT_DM_EXTINT:
		lvt->lvt_edgetrigger = 1;
		lvt->lvt_activehi = 1;
		if (mode == APIC_LVT_DM_EXTINT)
			lvt->lvt_masked = 1;
		else
			lvt->lvt_masked = 0;
		break;
	default:
		panic("Unsupported delivery mode: 0x%x\n", mode);
	}
	if (bootverbose) {
		printf(" Routing ");
		switch (mode) {
		case APIC_LVT_DM_NMI:
			printf("NMI");
			break;
		case APIC_LVT_DM_SMI:
			printf("SMI");
			break;
		case APIC_LVT_DM_INIT:
			printf("INIT");
			break;
		case APIC_LVT_DM_EXTINT:
			printf("ExtINT");
			break;
		}
		printf(" -> LINT%u\n", pin);
	}
	return (0);
}

static int
native_lapic_set_lvt_polarity(u_int apic_id, u_int pin, enum intr_polarity pol)
{

	if (pin > APIC_LVT_MAX || pol == INTR_POLARITY_CONFORM)
		return (EINVAL);
	if (apic_id == APIC_ID_ALL) {
		lvts[pin].lvt_activehi = (pol == INTR_POLARITY_HIGH);
		if (bootverbose)
			printf("lapic:");
	} else {
		KASSERT(lapics[apic_id].la_present,
		    ("%s: missing APIC %u", __func__, apic_id));
		lapics[apic_id].la_lvts[pin].lvt_active = 1;
		lapics[apic_id].la_lvts[pin].lvt_activehi =
		    (pol == INTR_POLARITY_HIGH);
		if (bootverbose)
			printf("lapic%u:", apic_id);
	}
	if (bootverbose)
		printf(" LINT%u polarity: %s\n", pin,
		    pol == INTR_POLARITY_HIGH ? "high" : "low");
	return (0);
}

static int
native_lapic_set_lvt_triggermode(u_int apic_id, u_int pin,
    enum intr_trigger trigger)
{

	if (pin > APIC_LVT_MAX || trigger == INTR_TRIGGER_CONFORM)
		return (EINVAL);
	if (apic_id == APIC_ID_ALL) {
		lvts[pin].lvt_edgetrigger = (trigger == INTR_TRIGGER_EDGE);
		if (bootverbose)
			printf("lapic:");
	} else {
		KASSERT(lapics[apic_id].la_present,
		    ("%s: missing APIC %u", __func__, apic_id));
		lapics[apic_id].la_lvts[pin].lvt_edgetrigger =
		    (trigger == INTR_TRIGGER_EDGE);
		lapics[apic_id].la_lvts[pin].lvt_active = 1;
		if (bootverbose)
			printf("lapic%u:", apic_id);
	}
	if (bootverbose)
		printf(" LINT%u trigger: %s\n", pin,
		    trigger == INTR_TRIGGER_EDGE ? "edge" : "level");
	return (0);
}

/*
 * Adjust the TPR of the current CPU so that it blocks all interrupts below
 * the passed in vector.
 */
static void
lapic_set_tpr(u_int vector)
{
#ifdef CHEAP_TPR
	lapic_write32(LAPIC_TPR, vector);
#else
	uint32_t tpr;

	tpr = lapic_read32(LAPIC_TPR) & ~APIC_TPR_PRIO;
	tpr |= vector;
	lapic_write32(LAPIC_TPR, tpr);
#endif
}

static void
native_lapic_eoi(void)
{

	lapic_write32_nofence(LAPIC_EOI, 0);
}

void
lapic_handle_intr(int vector, struct trapframe *frame)
{
	struct intsrc *isrc;

	isrc = intr_lookup_source(apic_idt_to_irq(PCPU_GET(apic_id),
	    vector));
	intr_execute_handlers(isrc, frame);
}

void
lapic_handle_timer(struct trapframe *frame)
{
	struct lapic *la;
	struct trapframe *oldframe;
	struct thread *td;

	/* Send EOI first thing. */
	lapic_eoi();

#if defined(SMP) && !defined(SCHED_ULE)
	/*
	 * Don't do any accounting for the disabled HTT cores, since it
	 * will provide misleading numbers for the userland.
	 *
	 * No locking is necessary here, since even if we lose the race
	 * when hlt_cpus_mask changes it is not a big deal, really.
	 *
	 * Don't do that for ULE, since ULE doesn't consider hlt_cpus_mask
	 * and unlike other schedulers it actually schedules threads to
	 * those CPUs.
	 */
	if (CPU_ISSET(PCPU_GET(cpuid), &hlt_cpus_mask))
		return;
#endif

	/* Look up our local APIC structure for the tick counters. */
	la = &lapics[PCPU_GET(apic_id)];
	(*la->la_timer_count)++;
	critical_enter();
	if (lapic_et.et_active) {
		td = curthread;
		td->td_intr_nesting_level++;
		oldframe = td->td_intr_frame;
		td->td_intr_frame = frame;
		lapic_et.et_event_cb(&lapic_et, lapic_et.et_arg);
		td->td_intr_frame = oldframe;
		td->td_intr_nesting_level--;
	}
	critical_exit();
}

static void
lapic_timer_set_divisor(u_int divisor)
{

	KASSERT(powerof2(divisor), ("lapic: invalid divisor %u", divisor));
	KASSERT(ffs(divisor) <= nitems(lapic_timer_divisors),
	    ("lapic: invalid divisor %u", divisor));
	lapic_write32(LAPIC_DCR_TIMER, lapic_timer_divisors[ffs(divisor) - 1]);
}

static void
lapic_timer_oneshot(struct lapic *la)
{
	uint32_t value;

	value = la->lvt_timer_base;
	value &= ~(APIC_LVTT_TM | APIC_LVT_M);
	value |= APIC_LVTT_TM_ONE_SHOT;
	la->lvt_timer_last = value;
	lapic_write32(LAPIC_LVT_TIMER, value);
	lapic_write32(LAPIC_ICR_TIMER, la->la_timer_period);
}

static void
lapic_timer_oneshot_nointr(struct lapic *la, uint32_t count)
{
	uint32_t value;

	value = la->lvt_timer_base;
	value &= ~APIC_LVTT_TM;
	value |= APIC_LVTT_TM_ONE_SHOT | APIC_LVT_M;
	la->lvt_timer_last = value;
	lapic_write32(LAPIC_LVT_TIMER, value);
	lapic_write32(LAPIC_ICR_TIMER, count);
}

static void
lapic_timer_periodic(struct lapic *la)
{
	uint32_t value;

	value = la->lvt_timer_base;
	value &= ~(APIC_LVTT_TM | APIC_LVT_M);
	value |= APIC_LVTT_TM_PERIODIC;
	la->lvt_timer_last = value;
	lapic_write32(LAPIC_LVT_TIMER, value);
	lapic_write32(LAPIC_ICR_TIMER, la->la_timer_period);
}

static void
lapic_timer_deadline(struct lapic *la)
{
	uint32_t value;

	value = la->lvt_timer_base;
	value &= ~(APIC_LVTT_TM | APIC_LVT_M);
	value |= APIC_LVTT_TM_TSCDLT;
	if (value != la->lvt_timer_last) {
		la->lvt_timer_last = value;
		lapic_write32_nofence(LAPIC_LVT_TIMER, value);
		if (!x2apic_mode)
			mfence();
	}
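	/*
	 * Arm the timer by writing an absolute TSC value; the interrupt
	 * fires once the TSC advances la_timer_period ticks past now.
	 */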
	wrmsr(MSR_TSC_DEADLINE, la->la_timer_period + rdtsc());
}

static void
lapic_timer_stop(struct lapic *la)
{
	uint32_t value;

	if (la->la_timer_mode == LAT_MODE_DEADLINE) {
		wrmsr(MSR_TSC_DEADLINE, 0);
		mfence();
	} else {
		value = la->lvt_timer_base;
		value &= ~APIC_LVTT_TM;
		value |= APIC_LVT_M;
		la->lvt_timer_last = value;
		lapic_write32(LAPIC_LVT_TIMER, value);
	}
}

void
lapic_handle_cmc(void)
{

	lapic_eoi();
	cmc_intr();
}

/*
 * Called from the mca_init() to activate the CMC interrupt if this CPU is
 * responsible for monitoring any MC banks for CMC events.  Since mca_init()
 * is called prior to lapic_setup() during boot, this just needs to unmask
 * this CPU's LVT_CMCI entry.
 */
static void
native_lapic_enable_cmc(void)
{
	u_int apic_id;

#ifdef DEV_ATPIC
	if (!x2apic_mode && lapic_map == NULL)
		return;
#endif
	apic_id = PCPU_GET(apic_id);
	KASSERT(lapics[apic_id].la_present,
	    ("%s: missing APIC %u", __func__, apic_id));
	lapics[apic_id].la_lvts[APIC_LVT_CMCI].lvt_masked = 0;
	lapics[apic_id].la_lvts[APIC_LVT_CMCI].lvt_active = 1;
	if (bootverbose)
		printf("lapic%u: CMCI unmasked\n", apic_id);
}

static int
native_lapic_enable_mca_elvt(void)
{
	u_int apic_id;
	uint32_t value;
	int elvt_count;

#ifdef DEV_ATPIC
	if (lapic_map == NULL)
		return (-1);
#endif

	apic_id = PCPU_GET(apic_id);
	KASSERT(lapics[apic_id].la_present,
	    ("%s: missing APIC %u", __func__, apic_id));
	elvt_count = amd_read_elvt_count();
	if (elvt_count <= APIC_ELVT_MCA)
		return (-1);

	value = lapic_read32(LAPIC_EXT_LVT0 + APIC_ELVT_MCA);
	if ((value & APIC_LVT_M) == 0) {
		if (bootverbose)
			printf("AMD MCE Thresholding Extended LVT is already active\n");
		return (APIC_ELVT_MCA);
	}
	lapics[apic_id].la_elvts[APIC_ELVT_MCA].lvt_masked = 0;
	lapics[apic_id].la_elvts[APIC_ELVT_MCA].lvt_active = 1;
	if (bootverbose)
		printf("lapic%u: MCE Thresholding ELVT unmasked\n", apic_id);
	return (APIC_ELVT_MCA);
}

void
lapic_handle_error(void)
{
	uint32_t esr;

	/*
	 * Read the contents of the error status register.  Write to
	 * the register first before reading from it to force the APIC
	 * to update its value to indicate any errors that have
	 * occurred since the previous write to the register.
	 */
	lapic_write32(LAPIC_ESR, 0);
	esr = lapic_read32(LAPIC_ESR);

	printf("CPU%d: local APIC error 0x%x\n", PCPU_GET(cpuid), esr);
	lapic_eoi();
}

static u_int
native_apic_cpuid(u_int apic_id)
{
#ifdef SMP
	return apic_cpuids[apic_id];
#else
	return 0;
#endif
}

/* Request a free IDT vector to be used by the specified IRQ. */
static u_int
native_apic_alloc_vector(u_int apic_id, u_int irq)
{
	u_int vector;

	KASSERT(irq < num_io_irqs, ("Invalid IRQ %u", irq));

	/*
	 * Search for a free vector.  Currently we just use a very simple
	 * algorithm to find the first free vector.
	 */
	mtx_lock_spin(&icu_lock);
	for (vector = 0; vector < APIC_NUM_IOINTS; vector++) {
		if (lapics[apic_id].la_ioint_irqs[vector] != IRQ_FREE)
			continue;
		lapics[apic_id].la_ioint_irqs[vector] = irq;
		mtx_unlock_spin(&icu_lock);
		return (vector + APIC_IO_INTS);
	}
	mtx_unlock_spin(&icu_lock);
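	/* No free vectors: 0 is never a valid I/O vector, so it signals failure. */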
1545 return (0);
1546 }
1547
1548 /*
1549 * Request 'count' free contiguous IDT vectors to be used by 'count'
1550 * IRQs. 'count' must be a power of two and the vectors will be
1551 * aligned on a boundary of 'align'. If the request cannot be
1552 * satisfied, 0 is returned.
1553 */
1554 static u_int
native_apic_alloc_vectors(u_int apic_id,u_int * irqs,u_int count,u_int align)1555 native_apic_alloc_vectors(u_int apic_id, u_int *irqs, u_int count, u_int align)
1556 {
1557 u_int first, run, vector;
1558
1559 KASSERT(powerof2(count), ("bad count"));
1560 KASSERT(powerof2(align), ("bad align"));
1561 KASSERT(align >= count, ("align < count"));
1562 #ifdef INVARIANTS
1563 for (run = 0; run < count; run++)
1564 KASSERT(irqs[run] < num_io_irqs, ("Invalid IRQ %u at index %u",
1565 irqs[run], run));
1566 #endif
1567
1568 /*
1569 * Search for 'count' free vectors. As with apic_alloc_vector(),
1570 * this just uses a simple first fit algorithm.
1571 */
1572 run = 0;
1573 first = 0;
1574 mtx_lock_spin(&icu_lock);
1575 for (vector = 0; vector < APIC_NUM_IOINTS; vector++) {
1576 /* Vector is in use, end run. */
1577 if (lapics[apic_id].la_ioint_irqs[vector] != IRQ_FREE) {
1578 run = 0;
1579 first = 0;
1580 continue;
1581 }
1582
1583 /* Start a new run if run == 0 and vector is aligned. */
1584 if (run == 0) {
1585 if ((vector & (align - 1)) != 0)
1586 continue;
1587 first = vector;
1588 }
1589 run++;
1590
1591 /* Keep looping if the run isn't long enough yet. */
1592 if (run < count)
1593 continue;
1594
1595 /* Found a run, assign IRQs and return the first vector. */
1596 for (vector = 0; vector < count; vector++)
1597 lapics[apic_id].la_ioint_irqs[first + vector] =
1598 irqs[vector];
1599 mtx_unlock_spin(&icu_lock);
1600 return (first + APIC_IO_INTS);
1601 }
1602 mtx_unlock_spin(&icu_lock);
1603 printf("APIC: Couldn't find APIC vectors for %u IRQs\n", count);
1604 return (0);
1605 }
1606
1607 /*
1608 * Enable a vector for a particular apic_id. Since all lapics share idt
1609 * entries and ioint_handlers this enables the vector on all lapics. lapics
1610 * which do not have the vector configured would report spurious interrupts
1611 * should it fire.
1612 */
1613 static void
native_apic_enable_vector(u_int apic_id,u_int vector)1614 native_apic_enable_vector(u_int apic_id, u_int vector)
1615 {
1616
1617 KASSERT(vector != IDT_SYSCALL, ("Attempt to overwrite syscall entry"));
1618 KASSERT(ioint_handlers[vector / 32] != NULL,
1619 ("No ISR handler for vector %u", vector));
1620 #ifdef KDTRACE_HOOKS
1621 KASSERT(vector != IDT_DTRACE_RET,
1622 ("Attempt to overwrite DTrace entry"));
1623 #endif
1624 setidt(vector, (pti ? ioint_pti_handlers : ioint_handlers)[vector / 32],
1625 SDT_APIC, SEL_KPL, GSEL_APIC);
1626 }
1627
1628 static void
native_apic_disable_vector(u_int apic_id,u_int vector)1629 native_apic_disable_vector(u_int apic_id, u_int vector)
1630 {
1631
1632 KASSERT(vector != IDT_SYSCALL, ("Attempt to overwrite syscall entry"));
1633 #ifdef KDTRACE_HOOKS
1634 KASSERT(vector != IDT_DTRACE_RET,
1635 ("Attempt to overwrite DTrace entry"));
1636 #endif
1637 KASSERT(ioint_handlers[vector / 32] != NULL,
1638 ("No ISR handler for vector %u", vector));
1639 #ifdef notyet
1640 /*
1641 * We can not currently clear the idt entry because other cpus
1642 * may have a valid vector at this offset.
1643 */
1644 setidt(vector, pti ? &IDTVEC(rsvd_pti) : &IDTVEC(rsvd), SDT_APIC,
1645 SEL_KPL, GSEL_APIC);
1646 #endif
1647 }
1648
1649 /* Release an APIC vector when it's no longer in use. */
1650 static void
native_apic_free_vector(u_int apic_id,u_int vector,u_int irq)1651 native_apic_free_vector(u_int apic_id, u_int vector, u_int irq)
1652 {
1653 struct thread *td;
1654
1655 KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL &&
1656 vector <= APIC_IO_INTS + APIC_NUM_IOINTS,
1657 ("Vector %u does not map to an IRQ line", vector));
1658 KASSERT(irq < num_io_irqs, ("Invalid IRQ %u", irq));
1659 KASSERT(lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS] ==
1660 irq, ("IRQ mismatch"));
1661 #ifdef KDTRACE_HOOKS
1662 KASSERT(vector != IDT_DTRACE_RET,
1663 ("Attempt to overwrite DTrace entry"));
1664 #endif
1665
1666 /*
1667 * Bind us to the cpu that owned the vector before freeing it so
1668 * we don't lose an interrupt delivery race.
1669 */
1670 td = curthread;
1671 if (!rebooting) {
1672 thread_lock(td);
1673 if (sched_is_bound(td))
1674 panic("apic_free_vector: Thread already bound.\n");
1675 sched_bind(td, apic_cpuid(apic_id));
1676 thread_unlock(td);
1677 }
1678 mtx_lock_spin(&icu_lock);
1679 lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS] = IRQ_FREE;
1680 mtx_unlock_spin(&icu_lock);
1681 if (!rebooting) {
1682 thread_lock(td);
1683 sched_unbind(td);
1684 thread_unlock(td);
1685 }
1686 }
1687
1688 /* Map an IDT vector (APIC) to an IRQ (interrupt source). */
1689 static u_int
apic_idt_to_irq(u_int apic_id,u_int vector)1690 apic_idt_to_irq(u_int apic_id, u_int vector)
1691 {
1692 int irq;
1693
1694 KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL &&
1695 vector <= APIC_IO_INTS + APIC_NUM_IOINTS,
1696 ("Vector %u does not map to an IRQ line", vector));
1697 #ifdef KDTRACE_HOOKS
1698 KASSERT(vector != IDT_DTRACE_RET,
1699 ("Attempt to overwrite DTrace entry"));
1700 #endif
1701 irq = lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS];
1702 if (irq < 0)
1703 irq = 0;
1704 return (irq);
1705 }
1706
1707 #ifdef DDB
1708 /*
1709 * Dump data about APIC IDT vector mappings.
1710 */
DB_SHOW_COMMAND(apic,db_show_apic)1711 DB_SHOW_COMMAND(apic, db_show_apic)
1712 {
1713 struct intsrc *isrc;
1714 int i, verbose;
1715 u_int apic_id;
1716 u_int irq;
1717
1718 if (strcmp(modif, "vv") == 0)
1719 verbose = 2;
1720 else if (strcmp(modif, "v") == 0)
1721 verbose = 1;
1722 else
1723 verbose = 0;
1724 for (apic_id = 0; apic_id <= max_apic_id; apic_id++) {
1725 if (lapics[apic_id].la_present == 0)
1726 continue;
1727 db_printf("Interrupts bound to lapic %u\n", apic_id);
1728 for (i = 0; i < APIC_NUM_IOINTS + 1 && !db_pager_quit; i++) {
1729 irq = lapics[apic_id].la_ioint_irqs[i];
1730 if (irq == IRQ_FREE || irq == IRQ_SYSCALL)
1731 continue;
1732 #ifdef KDTRACE_HOOKS
1733 if (irq == IRQ_DTRACE_RET)
1734 continue;
1735 #endif
1736 #ifdef XENHVM
1737 if (irq == IRQ_EVTCHN)
1738 continue;
1739 #endif
1740 db_printf("vec 0x%2x -> ", i + APIC_IO_INTS);
1741 if (irq == IRQ_TIMER)
1742 db_printf("lapic timer\n");
1743 else if (irq < num_io_irqs) {
1744 isrc = intr_lookup_source(irq);
1745 if (isrc == NULL || verbose == 0)
1746 db_printf("IRQ %u\n", irq);
1747 else
1748 db_dump_intr_event(isrc->is_event,
1749 verbose == 2);
1750 } else
1751 db_printf("IRQ %u ???\n", irq);
1752 }
1753 }
1754 }
1755
1756 static void
dump_mask(const char * prefix,uint32_t v,int base)1757 dump_mask(const char *prefix, uint32_t v, int base)
1758 {
1759 int i, first;
1760
1761 first = 1;
1762 for (i = 0; i < 32; i++)
1763 if (v & (1 << i)) {
1764 if (first) {
1765 db_printf("%s:", prefix);
1766 first = 0;
1767 }
1768 db_printf(" %02x", base + i);
1769 }
1770 if (!first)
1771 db_printf("\n");
1772 }
1773
1774 /* Show info from the lapic regs for this CPU. */
DB_SHOW_COMMAND(lapic,db_show_lapic)1775 DB_SHOW_COMMAND(lapic, db_show_lapic)
1776 {
1777 uint32_t v;
1778
1779 db_printf("lapic ID = %d\n", lapic_id());
1780 v = lapic_read32(LAPIC_VERSION);
1781 db_printf("version = %d.%d\n", (v & APIC_VER_VERSION) >> 4,
1782 v & 0xf);
1783 db_printf("max LVT = %d\n", (v & APIC_VER_MAXLVT) >> MAXLVTSHIFT);
1784 v = lapic_read32(LAPIC_SVR);
1785 db_printf("SVR = %02x (%s)\n", v & APIC_SVR_VECTOR,
1786 v & APIC_SVR_ENABLE ? "enabled" : "disabled");
1787 db_printf("TPR = %02x\n", lapic_read32(LAPIC_TPR));
1788
1789 #define dump_field(prefix, regn, index) \
1790 dump_mask(__XSTRING(prefix ## index), \
1791 lapic_read32(LAPIC_ ## regn ## index), \
1792 index * 32)
1793
1794 db_printf("In-service Interrupts:\n");
1795 dump_field(isr, ISR, 0);
1796 dump_field(isr, ISR, 1);
1797 dump_field(isr, ISR, 2);
1798 dump_field(isr, ISR, 3);
1799 dump_field(isr, ISR, 4);
1800 dump_field(isr, ISR, 5);
1801 dump_field(isr, ISR, 6);
1802 dump_field(isr, ISR, 7);
1803
1804 db_printf("TMR Interrupts:\n");
1805 dump_field(tmr, TMR, 0);
1806 dump_field(tmr, TMR, 1);
1807 dump_field(tmr, TMR, 2);
1808 dump_field(tmr, TMR, 3);
1809 dump_field(tmr, TMR, 4);
1810 dump_field(tmr, TMR, 5);
1811 dump_field(tmr, TMR, 6);
1812 dump_field(tmr, TMR, 7);
1813
1814 db_printf("IRR Interrupts:\n");
1815 dump_field(irr, IRR, 0);
1816 dump_field(irr, IRR, 1);
1817 dump_field(irr, IRR, 2);
1818 dump_field(irr, IRR, 3);
1819 dump_field(irr, IRR, 4);
1820 dump_field(irr, IRR, 5);
1821 dump_field(irr, IRR, 6);
1822 dump_field(irr, IRR, 7);
1823
1824 #undef dump_field
1825 }
1826 #endif
1827
/*
 * APIC probing support code.  This includes code to manage enumerators.
 */

static SLIST_HEAD(, apic_enumerator) enumerators =
	SLIST_HEAD_INITIALIZER(enumerators);
static struct apic_enumerator *best_enum;

void
apic_register_enumerator(struct apic_enumerator *enumerator)
{
#ifdef INVARIANTS
	struct apic_enumerator *apic_enum;

	SLIST_FOREACH(apic_enum, &enumerators, apic_next) {
		if (apic_enum == enumerator)
			panic("%s: Duplicate register of %s", __func__,
			    enumerator->apic_name);
	}
#endif
	SLIST_INSERT_HEAD(&enumerators, enumerator, apic_next);
}
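
/*
 * A minimal registration sketch (a hypothetical "foo" enumerator; the
 * MP Table and ACPI MADT enumerators follow this pattern).  The
 * registration SYSINIT must run before apic_init() below, e.g. at
 * SI_SUB_TUNABLES - 1, SI_ORDER_FIRST:
 *
 *	static struct apic_enumerator foo_enumerator = {
 *		.apic_name = "FOO",
 *		.apic_probe = foo_probe,
 *		.apic_probe_cpus = foo_probe_cpus,
 *		.apic_setup_local = foo_setup_local,
 *		.apic_setup_io = foo_setup_io,
 *	};
 *
 *	static void
 *	foo_register(void *dummy __unused)
 *	{
 *
 *		apic_register_enumerator(&foo_enumerator);
 *	}
 *	SYSINIT(foo_register, SI_SUB_TUNABLES - 1, SI_ORDER_FIRST,
 *	    foo_register, NULL);
 */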

/*
 * We have to look for CPUs very early because certain subsystems
 * want to know how many CPUs we have during the earliest stages of
 * the boot process.
 */
static void
apic_init(void *dummy __unused)
{
	struct apic_enumerator *enumerator;
	int retval, best;

	/* We only support built in local APICs. */
	if (!(cpu_feature & CPUID_APIC))
		return;

	/* Don't probe if APIC mode is disabled. */
	if (resource_disabled("apic", 0))
		return;

	/*
	 * Probe all the enumerators to find the best match.  A probe
	 * routine returns a positive value on failure and a value less
	 * than or equal to zero on success; among successful probes,
	 * the value closest to zero wins.
	 */
	best_enum = NULL;
	best = 0;
	SLIST_FOREACH(enumerator, &enumerators, apic_next) {
		retval = enumerator->apic_probe();
		if (retval > 0)
			continue;
		if (best_enum == NULL || best < retval) {
			best_enum = enumerator;
			best = retval;
		}
	}
	if (best_enum == NULL) {
		if (bootverbose)
			printf("APIC: Could not find any APICs.\n");
#ifndef DEV_ATPIC
		panic("running without device atpic requires a local APIC");
#endif
		return;
	}

	if (bootverbose)
		printf("APIC: Using the %s enumerator.\n",
		    best_enum->apic_name);

#ifdef I686_CPU
	/*
	 * To work around an erratum, we disable the local APIC on some
	 * CPUs during early startup.  We need to turn the local APIC
	 * back on for such CPUs now.
	 */
	ppro_reenable_apic();
#endif

	/* Probe the CPUs in the system. */
	retval = best_enum->apic_probe_cpus();
	if (retval != 0)
		printf("%s: Failed to probe CPUs: returned %d\n",
		    best_enum->apic_name, retval);
}
SYSINIT(apic_init, SI_SUB_TUNABLES - 1, SI_ORDER_SECOND, apic_init, NULL);

/*
 * Set up the local APIC.  We have to do this prior to starting up the
 * APs in the SMP case.
 */
static void
apic_setup_local(void *dummy __unused)
{
	int retval;

	if (best_enum == NULL)
		return;

	lapics = malloc(sizeof(*lapics) * (max_apic_id + 1), M_LAPIC,
	    M_WAITOK | M_ZERO);

	/* Initialize the local APIC. */
	retval = best_enum->apic_setup_local();
	if (retval != 0)
		printf("%s: Failed to setup the local APIC: returned %d\n",
		    best_enum->apic_name, retval);
}
SYSINIT(apic_setup_local, SI_SUB_CPU, SI_ORDER_SECOND, apic_setup_local, NULL);

/*
 * Set up the I/O APICs.
 */
static void
apic_setup_io(void *dummy __unused)
{
	int retval;

	if (best_enum == NULL)
		return;

	/*
	 * The local APIC must be registered before other PICs and
	 * pseudo PICs for proper suspend/resume order.
	 */
	intr_register_pic(&lapic_pic);

	retval = best_enum->apic_setup_io();
	if (retval != 0)
		printf("%s: Failed to setup I/O APICs: returned %d\n",
		    best_enum->apic_name, retval);

	/*
	 * Finish setting up the local APIC on the BSP once we know
	 * how to properly program the LINT pins.  In particular, this
	 * enables EOI suppression mode, if the LAPIC supports it and
	 * the user did not disable it.
	 */
	lapic_setup(1);
	if (bootverbose)
		lapic_dump("BSP");

	/* Enable the MSI "pic". */
	init_ops.msi_init();

#ifdef XENHVM
	xen_intr_alloc_irqs();
#endif
}
SYSINIT(apic_setup_io, SI_SUB_INTR, SI_ORDER_THIRD, apic_setup_io, NULL);

#ifdef SMP
/*
 * Inter Processor Interrupt functions.  The lapic_ipi_*() functions are
 * private to the MD code.  The public interface for the rest of the
 * kernel is defined in mp_machdep.c.
 */

/*
 * Wait delay microseconds for an IPI to be sent.  If delay is -1, we
 * wait forever.
 */
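/*
 * lapic_ipi_wait_mult converts the requested delay in microseconds
 * into iterations of the pause loop below; the multiplier is
 * calibrated at startup, during local APIC initialization.
 */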
static int
native_lapic_ipi_wait(int delay)
{
	uint64_t rx;

	/* LAPIC_ICR.APIC_DELSTAT_MASK is undefined in x2APIC mode */
	if (x2apic_mode)
		return (1);

	for (rx = 0; delay == -1 || rx < lapic_ipi_wait_mult * delay; rx++) {
		if ((lapic_read_icr_lo() & APIC_DELSTAT_MASK) ==
		    APIC_DELSTAT_IDLE)
			return (1);
		ia32_pause();
	}
	return (0);
}

static void
native_lapic_ipi_raw(register_t icrlo, u_int dest)
{
	uint32_t icrhi;

	/* XXX: Need more sanity checking of icrlo? */
	KASSERT(x2apic_mode || lapic_map != NULL,
	    ("%s called too early", __func__));
	KASSERT(x2apic_mode ||
	    (dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
	    ("%s: invalid dest field", __func__));
	KASSERT((icrlo & APIC_ICRLO_RESV_MASK) == 0,
	    ("%s: reserved bits set in ICR LO register", __func__));

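	/*
	 * In xAPIC mode the destination is an 8-bit APIC ID stored in
	 * bits 24:31 of ICR_HI, hence the shift by APIC_ID_SHIFT; in
	 * x2APIC mode the full 32-bit destination is written directly
	 * to the upper half of the ICR MSR.
	 */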
	if ((icrlo & APIC_DEST_MASK) == APIC_DEST_DESTFLD) {
		if (x2apic_mode)
			icrhi = dest;
		else
			icrhi = dest << APIC_ID_SHIFT;
		lapic_write_icr(icrhi, icrlo);
	} else {
		lapic_write_icr_lo(icrlo);
	}
}

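/*
 * Spin for up to BEFORE_SPIN microseconds waiting for any previously
 * sent IPI to be accepted before issuing a new one, and, when deadlock
 * detection is enabled, for AFTER_SPIN microseconds for the new IPI
 * itself to be accepted.
 */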
#define	BEFORE_SPIN	50000
#ifdef DETECT_DEADLOCK
#define	AFTER_SPIN	50
#endif

static void
native_lapic_ipi_vectored(u_int vector, int dest)
{
	register_t icrlo, destfield;

	KASSERT((vector & ~APIC_VECTOR_MASK) == 0,
	    ("%s: invalid vector %d", __func__, vector));

	destfield = 0;
	switch (dest) {
	case APIC_IPI_DEST_SELF:
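		/*
		 * In x2APIC mode a fixed-vector IPI to self can use the
		 * dedicated SELF IPI register instead of going through
		 * the ICR.
		 */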
		if (x2apic_mode && vector < IPI_NMI_FIRST) {
			lapic_write_self_ipi(vector);
			return;
		}
		icrlo = APIC_DEST_SELF;
		break;
	case APIC_IPI_DEST_ALL:
		icrlo = APIC_DEST_ALLISELF;
		break;
	case APIC_IPI_DEST_OTHERS:
		icrlo = APIC_DEST_ALLESELF;
		break;
	default:
		icrlo = 0;
		KASSERT(x2apic_mode ||
		    (dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
		    ("%s: invalid destination 0x%x", __func__, dest));
		destfield = dest;
	}

	/*
	 * NMI IPIs are just fake vectors used to send an NMI.  Use
	 * special rules regarding NMIs if passed, otherwise specify
	 * the vector.
	 */
	if (vector >= IPI_NMI_FIRST)
		icrlo |= APIC_DELMODE_NMI;
	else
		icrlo |= vector | APIC_DELMODE_FIXED;
	icrlo |= APIC_DESTMODE_PHY | APIC_TRIGMOD_EDGE | APIC_LEVEL_ASSERT;

	/* Wait for an earlier IPI to finish. */
	if (!lapic_ipi_wait(BEFORE_SPIN)) {
		if (KERNEL_PANICKED())
			return;
		else
			panic("APIC: Previous IPI is stuck");
	}

	lapic_ipi_raw(icrlo, destfield);

#ifdef DETECT_DEADLOCK
	/* Wait for IPI to be delivered. */
	if (!lapic_ipi_wait(AFTER_SPIN)) {
#ifdef needsattention
		/*
		 * XXX FIXME:
		 *
		 * The above function waits for the message to actually be
		 * delivered.  It breaks out after an arbitrary timeout
		 * since the message should eventually be delivered (at
		 * least in theory), and if it wasn't we would catch the
		 * failure with the check above when the next IPI is sent.
		 *
		 * We could skip this wait entirely, EXCEPT it probably
		 * protects us from other routines that assume that the
		 * message was delivered and acted upon when this function
		 * returns.
		 */
		printf("APIC: IPI might be stuck\n");
#else /* !needsattention */
		/* Wait until the message is sent without a timeout. */
		while (lapic_read_icr_lo() & APIC_DELSTAT_PEND)
			ia32_pause();
#endif /* needsattention */
	}
#endif /* DETECT_DEADLOCK */
}

#endif /* SMP */

/*
 * Since the IDT is shared by all CPUs the IPI slot update needs to be
 * globally visible.
 *
 * Consider the case where an IPI is generated immediately after allocation:
 *     vector = lapic_ipi_alloc(ipifunc);
 *     ipi_selected(other_cpus, vector);
 *
 * In xAPIC mode a write to ICR_LO has serializing semantics because the
 * APIC page is mapped as an uncached region.  In x2APIC mode there is an
 * explicit 'mfence' before the ICR MSR is written.  Therefore in both cases
 * the IDT slot update is globally visible before the IPI is delivered.
 */
static int
native_lapic_ipi_alloc(inthand_t *ipifunc)
{
	struct gate_descriptor *ip;
	long func;
	int idx, vector;

	KASSERT(ipifunc != &IDTVEC(rsvd) && ipifunc != &IDTVEC(rsvd_pti),
	    ("invalid ipifunc %p", ipifunc));

	vector = -1;
	mtx_lock_spin(&icu_lock);
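	/*
	 * Scan the dynamic IPI range for a free slot: an IDT entry is
	 * free if it still points at the reserved handler (IDTVEC(rsvd),
	 * or IDTVEC(rsvd_pti) when PTI is enabled).
	 */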
	for (idx = IPI_DYN_FIRST; idx <= IPI_DYN_LAST; idx++) {
		ip = &idt[idx];
		func = (ip->gd_hioffset << 16) | ip->gd_looffset;
		if ((!pti && func == (uintptr_t)&IDTVEC(rsvd)) ||
		    (pti && func == (uintptr_t)&IDTVEC(rsvd_pti))) {
			vector = idx;
			setidt(vector, ipifunc, SDT_APIC, SEL_KPL, GSEL_APIC);
			break;
		}
	}
	mtx_unlock_spin(&icu_lock);
	return (vector);
}

static void
native_lapic_ipi_free(int vector)
{
	struct gate_descriptor *ip;
	long func;

	KASSERT(vector >= IPI_DYN_FIRST && vector <= IPI_DYN_LAST,
	    ("%s: invalid vector %d", __func__, vector));

	mtx_lock_spin(&icu_lock);
	ip = &idt[vector];
	func = (ip->gd_hioffset << 16) | ip->gd_looffset;
	KASSERT(func != (uintptr_t)&IDTVEC(rsvd) &&
	    func != (uintptr_t)&IDTVEC(rsvd_pti),
	    ("invalid idtfunc %#lx", func));
	setidt(vector, pti ? &IDTVEC(rsvd_pti) : &IDTVEC(rsvd), SDT_APIC,
	    SEL_KPL, GSEL_APIC);
	mtx_unlock_spin(&icu_lock);
}
