/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_sched.h"
#include "opt_smp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

#include <x86/apicreg.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/cpufunc.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>
#include <x86/ucode.h>
#include <machine/cpu.h>
#include <x86/init.h>

#ifdef DEV_ACPI
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
#endif
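/*
 * The APs are started via the BIOS warm-boot path: CMOS register 0x0f
 * is the shutdown status byte, and writing 0x0a ("warm start") there
 * makes a CPU coming out of INIT jump through the real-mode vector
 * stored at physical address 0x467 (offset) / 0x469 (segment), which
 * is pointed at the AP trampoline below.
 */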
#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)

#define GiB(v)			(v ## ULL << 30)

#define AP_BOOTPT_SZ		(PAGE_SIZE * 4)

/* Temporary variables for init_secondary() */
char *doublefault_stack;
char *mce_stack;
char *nmi_stack;
char *dbg_stack;

extern u_int mptramp_la57;

/*
 * Local data and functions.
 */

static int	start_ap(int apic_id);

static bool
is_kernel_paddr(vm_paddr_t pa)
{

	return (pa >= trunc_2mpage(btext - KERNBASE) &&
	    pa < round_page(_end - KERNBASE));
}

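/*
 * The trampoline page tables must live below 4 GB because the AP
 * trampoline loads %cr3 while still running in 32-bit protected mode,
 * and they must fall within Maxmem so the region is covered by the
 * direct map.
 */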
static bool
is_mpboot_good(vm_paddr_t start, vm_paddr_t end)
{

	return (start + AP_BOOTPT_SZ <= GiB(4) && atop(end) < Maxmem);
}

/*
 * Calculate usable address in base memory for AP trampoline code.
 */
void
mp_bootaddress(vm_paddr_t *physmap, unsigned int *physmap_idx)
{
	vm_paddr_t start, end;
	unsigned int i;
	bool allocated;

	alloc_ap_trampoline(physmap, physmap_idx);

	/*
	 * Find a memory region big enough below the 4GB boundary to
	 * store the initial page tables.  Region must be mapped by
	 * the direct map.
	 *
	 * Note that it needs to be aligned to a page boundary.
	 */
	allocated = false;
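	/*
	 * Walk the physmap entries from the top down.  'i' is unsigned,
	 * so once it wraps past zero the "i <= *physmap_idx" test fails
	 * and the loop terminates.
	 */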
	for (i = *physmap_idx; i <= *physmap_idx; i -= 2) {
		/*
		 * First, try to chomp at the start of the physmap region.
		 * Kernel binary might claim it already.
		 */
		start = round_page(physmap[i]);
		end = start + AP_BOOTPT_SZ;
		if (start < end && end <= physmap[i + 1] &&
		    is_mpboot_good(start, end) &&
		    !is_kernel_paddr(start) && !is_kernel_paddr(end - 1)) {
			allocated = true;
			physmap[i] = end;
			break;
		}

		/*
		 * Second, try to chomp at the end.  Again, check
		 * against kernel.
		 */
		end = trunc_page(physmap[i + 1]);
		start = end - AP_BOOTPT_SZ;
		if (start < end && start >= physmap[i] &&
		    is_mpboot_good(start, end) &&
		    !is_kernel_paddr(start) && !is_kernel_paddr(end - 1)) {
			allocated = true;
			physmap[i + 1] = start;
			break;
		}
	}
	if (allocated) {
		mptramp_pagetables = start;
		if (physmap[i] == physmap[i + 1] && *physmap_idx != 0) {
			memmove(&physmap[i], &physmap[i + 2],
			    sizeof(*physmap) * (*physmap_idx - i + 2));
			*physmap_idx -= 2;
		}
	} else {
		mptramp_pagetables = trunc_page(boot_address) - AP_BOOTPT_SZ;
		if (bootverbose)
			printf(
			    "Cannot find enough space for the initial AP page tables, placing them at %#x\n",
			    mptramp_pagetables);
	}
}

/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
	int i;

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
	}

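	/*
	 * Each IPI below has a regular entry point and a _pti variant;
	 * the _pti handlers are installed when kernel page-table
	 * isolation (PTI) is enabled, so the vectors can switch off the
	 * user page tables on entry.
	 */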
	/* Install an inter-CPU IPI for cache and TLB invalidations. */
	setidt(IPI_INVLOP, pti ? IDTVEC(invlop_pti) : IDTVEC(invlop),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, pti ? IDTVEC(rendezvous_pti) :
	    IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, pti ? IDTVEC(ipi_intr_bitmap_handler_pti) :
	    IDTVEC(ipi_intr_bitmap_handler), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, pti ? IDTVEC(cpustop_pti) : IDTVEC(cpustop),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for CPU suspend/resume */
	setidt(IPI_SUSPEND, pti ? IDTVEC(cpususpend_pti) : IDTVEC(cpususpend),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Install an IPI for calling delayed SWI */
	setidt(IPI_SWI, pti ? IDTVEC(ipi_swi_pti) : IDTVEC(ipi_swi),
	    SDT_SYSIGT, SEL_KPL, 0);

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));

	/* Probe logical/physical core configuration. */
	topo_probe();

	assign_cpu_ids();

	mptramp_la57 = la57;

	/* Start each Application Processor */
	init_ops.start_all_aps();

	set_interrupt_apic_ids();

#if defined(DEV_ACPI) && MAXMEMDOM > 1
	acpi_pxm_set_cpu_locality();
#endif
}

/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
	struct pcpu *pc;
	struct nmi_pcpu *np;
	struct user_segment_descriptor *gdt;
	struct region_descriptor ap_gdt;
	u_int64_t cr0;
	int cpu, gsel_tss, x;

	/* Set by the startup code for us to use */
	cpu = bootAP;

	/* Update microcode before doing anything else. */
	ucode_load_ap(cpu);

	/* Get per-cpu data and save */
	pc = &__pcpu[cpu];

	/* prime data page for it to use */
	pcpu_init(pc, cpu, sizeof(struct pcpu));
	dpcpu_init(dpcpu, cpu);
	pc->pc_apic_id = cpu_apic_ids[cpu];
	pc->pc_prvspace = pc;
	pc->pc_curthread = 0;
	pc->pc_tssp = &pc->pc_common_tss;
	pc->pc_rsp0 = 0;
	pc->pc_pti_rsp0 = (((vm_offset_t)&pc->pc_pti_stack +
	    PC_PTI_STACK_SZ * sizeof(uint64_t)) & ~0xful);
	gdt = pc->pc_gdt;
	pc->pc_tss = (struct system_segment_descriptor *)&gdt[GPROC0_SEL];
	pc->pc_fs32p = &gdt[GUFS32_SEL];
	pc->pc_gs32p = &gdt[GUGS32_SEL];
	pc->pc_ldt = (struct system_segment_descriptor *)&gdt[GUSERLDT_SEL];
	pc->pc_ucr3_load_mask = PMAP_UCR3_NOMASK;
	/* See comment in pmap_bootstrap(). */
	pc->pc_pcid_next = PMAP_PCID_KERN + 2;
	pc->pc_pcid_gen = 1;

	pc->pc_smp_tlb_gen = 1;

	/* Init tss */
	pc->pc_common_tss = __pcpu[0].pc_common_tss;
	pc->pc_common_tss.tss_iobase = sizeof(struct amd64tss) +
	    IOPERM_BITMAP_SIZE;
	pc->pc_common_tss.tss_rsp0 = 0;

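	/*
	 * Each IST stack below gets a struct nmi_pcpu at its top; its
	 * np_pcpu field records this CPU's pcpu pointer so the NMI,
	 * MC#, DB# and double-fault handlers can find their per-CPU
	 * data even when %gs cannot be trusted on entry.
	 */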
	/* The doublefault stack runs on IST1. */
	np = ((struct nmi_pcpu *)&doublefault_stack[DBLFAULT_STACK_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist1 = (long)np;

	/* The NMI stack runs on IST2. */
	np = ((struct nmi_pcpu *)&nmi_stack[NMI_STACK_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist2 = (long)np;

	/* The MC# stack runs on IST3. */
	np = ((struct nmi_pcpu *)&mce_stack[MCE_STACK_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist3 = (long)np;

	/* The DB# stack runs on IST4. */
	np = ((struct nmi_pcpu *)&dbg_stack[DBG_STACK_SIZE]) - 1;
	np->np_pcpu = (register_t)pc;
	pc->pc_common_tss.tss_ist4 = (long)np;

	/* Prepare private GDT */
	gdt_segs[GPROC0_SEL].ssd_base = (long)&pc->pc_common_tss;
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != GPROC0_SEL + 1 &&
		    x != GUSERLDT_SEL && x != GUSERLDT_SEL + 1)
			ssdtosd(&gdt_segs[x], &gdt[x]);
	}
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
	ap_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	ap_gdt.rd_base = (u_long)gdt;
	lgdt(&ap_gdt);			/* does magic intra-segment return */

	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)pc);
	wrmsr(MSR_KGSBASE, (u_int64_t)pc); /* XXX User value while we're in the kernel */
	fix_cpuid();

	lidt(&r_idt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	amd64_conf_fast_syscall();

	/* signal our startup to the BSP. */
	mp_naps++;

	/* Spin until the BSP releases the APs. */
	while (atomic_load_acq_int(&aps_ready) == 0)
		ia32_pause();

	init_secondary_tail();
}

/*******************************************************************
 * local functions and data
 */

#ifdef NUMA
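/*
 * Move the pcpu page for 'cpuid' into memory that belongs to the CPU's
 * NUMA domain, copying the current contents over.  The original page is
 * leaked (see the XXX note below); this only happens once per AP at
 * boot, so the waste is a single page per relocated CPU.
 */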
static void
mp_realloc_pcpu(int cpuid, int domain)
{
	vm_page_t m;
	vm_offset_t oa, na;

	oa = (vm_offset_t)&__pcpu[cpuid];
	if (vm_phys_domain(pmap_kextract(oa)) == domain)
		return;
	m = vm_page_alloc_domain(NULL, 0, domain,
	    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ);
	if (m == NULL)
		return;
	na = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	pagecopy((void *)oa, (void *)na);
	pmap_qenter((vm_offset_t)&__pcpu[cpuid], &m, 1);
	/* XXX old pcpu page leaked. */
}
#endif

/*
 * start each AP in our list
 */
int
native_start_all_aps(void)
{
	u_int64_t *pt5, *pt4, *pt3, *pt2;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, domain, i, xo;
	u_char mpbiosreason;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* copy the AP 1st level boot code */
	bcopy(mptramp_start, (void *)PHYS_TO_DMAP(boot_address), bootMP_size);

	/* Locate the page tables, they'll be below the trampoline */
	if (la57) {
		pt5 = (uint64_t *)PHYS_TO_DMAP(mptramp_pagetables);
		xo = 1;
	} else {
		xo = 0;
	}
	pt4 = (uint64_t *)PHYS_TO_DMAP(mptramp_pagetables + xo * PAGE_SIZE);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);
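	/*
	 * The AP_BOOTPT_SZ region at mptramp_pagetables holds one page
	 * per paging level: with la57 it is PML5, PML4, PDP, PD in
	 * consecutive pages; without la57 only three pages (PML4, PDP,
	 * PD) are used, starting at offset zero, and 'xo' accounts for
	 * the optional PML5 page.
	 */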

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		if (la57) {
			pt5[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables +
			    PAGE_SIZE);
			pt5[i] |= PG_V | PG_RW | PG_U;
		}

		/*
		 * Each slot of the level 4 pages points to the same
		 * level 3 page.
		 */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables +
		    (xo + 1) * PAGE_SIZE);
		pt4[i] |= PG_V | PG_RW | PG_U;

		/*
		 * Each slot of the level 3 pages points to the same
		 * level 2 page.
		 */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables +
		    ((xo + 2) * PAGE_SIZE));
		pt3[i] |= PG_V | PG_RW | PG_U;

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
	}

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* setup a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/* Relocate pcpu areas to the correct domain. */
#ifdef NUMA
	if (vm_ndomains > 1)
		for (cpu = 1; cpu < mp_ncpus; cpu++) {
			apic_id = cpu_apic_ids[cpu];
			domain = acpi_pxm_get_cpu_locality(apic_id);
			mp_realloc_pcpu(cpu, domain);
		}
#endif

	/* start each AP */
	domain = 0;
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		apic_id = cpu_apic_ids[cpu];
#ifdef NUMA
		if (vm_ndomains > 1)
			domain = acpi_pxm_get_cpu_locality(apic_id);
#endif
		/* allocate and set up an idle stack data page */
		bootstacks[cpu] = (void *)kmem_malloc(kstack_pages * PAGE_SIZE,
		    M_WAITOK | M_ZERO);
		doublefault_stack = (char *)kmem_malloc(DBLFAULT_STACK_SIZE,
		    M_WAITOK | M_ZERO);
		mce_stack = (char *)kmem_malloc(MCE_STACK_SIZE,
		    M_WAITOK | M_ZERO);
		nmi_stack = (char *)kmem_malloc_domainset(
		    DOMAINSET_PREF(domain), NMI_STACK_SIZE, M_WAITOK | M_ZERO);
		dbg_stack = (char *)kmem_malloc_domainset(
		    DOMAINSET_PREF(domain), DBG_STACK_SIZE, M_WAITOK | M_ZERO);
		dpcpu = (void *)kmem_malloc_domainset(DOMAINSET_PREF(domain),
		    DPCPU_SIZE, M_WAITOK | M_ZERO);

		bootSTK = (char *)bootstacks[cpu] +
		    kstack_pages * PAGE_SIZE - 8;
		bootAP = cpu;

		/* attempt to start the Application Processor */
		if (!start_ap(apic_id)) {
			/* restore the warmstart vector */
			*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
		}

		CPU_SET(cpu, &all_cpus);	/* record AP in CPU map */
	}

	/* restore the warmstart vector */
	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* number of APs actually started */
	return (mp_naps);
}

/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'apic_id'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It isn't pretty,
 * but it seems to work.
 */
static int
start_ap(int apic_id)
{
	int vector, ms;
	int cpus;

	/* calculate the vector */
	vector = (boot_address >> 12) & 0xff;
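	/*
	 * The STARTUP IPI carries the 8-bit vector as the physical page
	 * number at which the AP begins executing in real mode, i.e.
	 * boot_address >> 12, so the trampoline must sit on a page
	 * boundary below 1MB.
	 */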

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	ipi_startup(apic_id, vector);

	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(1000);
	}
	return 0;		/* return FAILURE */
}

/*
 * Flush the TLB on other CPUs.
 */

/*
 * Invalidation request.  PCPU pc_smp_tlb_op uses u_int instead of the
 * enum to avoid both namespace and ABI issues (with enums).
 */
enum invl_op_codes {
	INVL_OP_TLB		= 1,
	INVL_OP_TLB_INVPCID	= 2,
	INVL_OP_TLB_INVPCID_PTI	= 3,
	INVL_OP_TLB_PCID	= 4,
	INVL_OP_PGRNG		= 5,
	INVL_OP_PGRNG_INVPCID	= 6,
	INVL_OP_PGRNG_PCID	= 7,
	INVL_OP_PG		= 8,
	INVL_OP_PG_INVPCID	= 9,
	INVL_OP_PG_PCID		= 10,
	INVL_OP_CACHE		= 11,
};

/*
 * These variables are initialized at startup to reflect how each of
 * the different kinds of invalidations should be performed on the
 * current machine and environment.
 */
static enum invl_op_codes invl_op_tlb;
static enum invl_op_codes invl_op_pgrng;
static enum invl_op_codes invl_op_pg;

/*
 * Scoreboard of IPI completion notifications from target to IPI initiator.
 *
 * Each CPU can initiate a shootdown IPI independently of other CPUs.
 * The initiator enters a critical section, fills its local PCPU
 * shootdown info (the pc_smp_tlb_* variables), and then clears the
 * scoreboard generation at location (cpu, my_cpuid) for each target
 * cpu.  After that, the IPI is sent to all targets, which scan for
 * zeroed scoreboard generation words.  Upon finding such a word, a
 * target reads the shootdown data from the initiator's pcpu and sets
 * the generation.  Meanwhile, the initiator loops until every zeroed
 * generation in the scoreboard has been updated.
 */
static uint32_t *invl_scoreboard;
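/*
 * The scoreboard is a (mp_maxid + 1) x (mp_maxid + 1) array of
 * generation words; the slot written by initiator 'i' for target 't'
 * is invl_scoreboard[t * (mp_maxid + 1) + i], as encoded by
 * invl_scoreboard_getcpu() and invl_scoreboard_slot() below.
 */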

static void
invl_scoreboard_init(void *arg __unused)
{
	u_int i;

	invl_scoreboard = malloc(sizeof(uint32_t) * (mp_maxid + 1) *
	    (mp_maxid + 1), M_DEVBUF, M_WAITOK);
	for (i = 0; i < (mp_maxid + 1) * (mp_maxid + 1); i++)
		invl_scoreboard[i] = 1;

	if (pmap_pcid_enabled) {
		if (invpcid_works) {
			if (pti)
				invl_op_tlb = INVL_OP_TLB_INVPCID_PTI;
			else
				invl_op_tlb = INVL_OP_TLB_INVPCID;
			invl_op_pgrng = INVL_OP_PGRNG_INVPCID;
			invl_op_pg = INVL_OP_PG_INVPCID;
		} else {
			invl_op_tlb = INVL_OP_TLB_PCID;
			invl_op_pgrng = INVL_OP_PGRNG_PCID;
			invl_op_pg = INVL_OP_PG_PCID;
		}
	} else {
		invl_op_tlb = INVL_OP_TLB;
		invl_op_pgrng = INVL_OP_PGRNG;
		invl_op_pg = INVL_OP_PG;
	}
}
SYSINIT(invl_ops, SI_SUB_SMP, SI_ORDER_FIRST, invl_scoreboard_init, NULL);

static uint32_t *
invl_scoreboard_getcpu(u_int cpu)
{
	return (invl_scoreboard + cpu * (mp_maxid + 1));
}

static uint32_t *
invl_scoreboard_slot(u_int cpu)
{
	return (invl_scoreboard_getcpu(cpu) + PCPU_GET(cpuid));
}

/*
 * Used by the pmap to request cache or TLB invalidation on local and
 * remote processors.  Mask provides the set of remote CPUs that are
 * to be signalled with the invalidation IPI.  As an optimization, the
 * curcpu_cb callback is invoked on the calling CPU in a critical
 * section while waiting for the remote CPUs to complete the operation.
 *
 * The callback function is called unconditionally on the caller's
 * underlying processor, even when this processor is not set in the
 * mask.  So, the callback function must be prepared to handle such
 * spurious invocations.
 *
 * Interrupts must be enabled when calling the function with smp
 * started, to avoid deadlock with other IPIs that are protected with
 * smp_ipi_mtx spinlock at the initiator side.
 *
 * Function must be called with the thread pinned, and it unpins on
 * completion.
 */
static void
smp_targeted_tlb_shootdown(cpuset_t mask, pmap_t pmap, vm_offset_t addr1,
    vm_offset_t addr2, smp_invl_cb_t curcpu_cb, enum invl_op_codes op)
{
	cpuset_t other_cpus, mask1;
	uint32_t generation, *p_cpudone;
	int cpu;
	bool is_all;

	/*
	 * It is not necessary to signal other CPUs while booting or
	 * when in the debugger.
	 */
	if (kdb_active || KERNEL_PANICKED() || !smp_started)
		goto local_cb;

	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));

	/*
	 * Check for other cpus.  Return if none.
	 */
	is_all = !CPU_CMP(&mask, &all_cpus);
	CPU_CLR(PCPU_GET(cpuid), &mask);
	if (CPU_EMPTY(&mask))
		goto local_cb;

	/*
	 * Initiator must have interrupts enabled, which prevents
	 * non-invalidation IPIs that take smp_ipi_mtx spinlock,
	 * from deadlocking with us.  On the other hand, preemption
	 * must be disabled to pin initiator to the instance of the
	 * pcpu pc_smp_tlb data and scoreboard line.
	 */
	KASSERT((read_rflags() & PSL_I) != 0,
	    ("smp_targeted_tlb_shootdown: interrupts disabled"));
	critical_enter();

	PCPU_SET(smp_tlb_addr1, addr1);
	PCPU_SET(smp_tlb_addr2, addr2);
	PCPU_SET(smp_tlb_pmap, pmap);
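	/*
	 * Advance this CPU's shootdown generation.  Zero is never used
	 * as a generation value because a zeroed scoreboard slot means
	 * "request pending", so skip it on wraparound.
	 */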
	generation = PCPU_GET(smp_tlb_gen);
	if (++generation == 0)
		generation = 1;
	PCPU_SET(smp_tlb_gen, generation);
	PCPU_SET(smp_tlb_op, op);
	/* Fence between filling smp_tlb fields and clearing scoreboard. */
	atomic_thread_fence_rel();

	mask1 = mask;
	while ((cpu = CPU_FFS(&mask1)) != 0) {
		cpu--;
		CPU_CLR(cpu, &mask1);
		KASSERT(*invl_scoreboard_slot(cpu) != 0,
		    ("IPI scoreboard is zero, initiator %d target %d",
		    PCPU_GET(cpuid), cpu));
		*invl_scoreboard_slot(cpu) = 0;
	}

	/*
	 * IPI acts as a fence between writing to the scoreboard above
	 * (zeroing slot) and reading from it below (wait for
	 * acknowledgment).
	 */
	if (is_all) {
		ipi_all_but_self(IPI_INVLOP);
		other_cpus = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
	} else {
		other_cpus = mask;
		ipi_selected(mask, IPI_INVLOP);
	}
	curcpu_cb(pmap, addr1, addr2);
	while ((cpu = CPU_FFS(&other_cpus)) != 0) {
		cpu--;
		CPU_CLR(cpu, &other_cpus);
		p_cpudone = invl_scoreboard_slot(cpu);
		while (atomic_load_int(p_cpudone) != generation)
			ia32_pause();
	}

	/*
	 * Unpin before leaving critical section.  If the thread owes
	 * preemption, this allows scheduler to select thread on any
	 * CPU from its cpuset.
	 */
	sched_unpin();
	critical_exit();

	return;

local_cb:
	critical_enter();
	curcpu_cb(pmap, addr1, addr2);
	sched_unpin();
	critical_exit();
}

void
smp_masked_invltlb(cpuset_t mask, pmap_t pmap, smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(mask, pmap, 0, 0, curcpu_cb, invl_op_tlb);
#ifdef COUNT_XINVLTLB_HITS
	ipi_global++;
#endif
}

void
smp_masked_invlpg(cpuset_t mask, vm_offset_t addr, pmap_t pmap,
    smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(mask, pmap, addr, 0, curcpu_cb, invl_op_pg);
#ifdef COUNT_XINVLTLB_HITS
	ipi_page++;
#endif
}

void
smp_masked_invlpg_range(cpuset_t mask, vm_offset_t addr1, vm_offset_t addr2,
    pmap_t pmap, smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(mask, pmap, addr1, addr2, curcpu_cb,
	    invl_op_pgrng);
#ifdef COUNT_XINVLTLB_HITS
	ipi_range++;
	ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
}

void
smp_cache_flush(smp_invl_cb_t curcpu_cb)
{
	smp_targeted_tlb_shootdown(all_cpus, NULL, 0, 0, curcpu_cb,
	    INVL_OP_CACHE);
}

/*
 * Handlers for TLB related IPIs
 */
static void
invltlb_handler(pmap_t smp_tlb_pmap)
{
#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	if (smp_tlb_pmap == kernel_pmap)
		invltlb_glob();
	else
		invltlb();
}

static void
invltlb_invpcid_handler(pmap_t smp_tlb_pmap)
{
	struct invpcid_descr d;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
	d.pad = 0;
	d.addr = 0;
	invpcid(&d, smp_tlb_pmap == kernel_pmap ? INVPCID_CTXGLOB :
	    INVPCID_CTX);
}

static void
invltlb_invpcid_pti_handler(pmap_t smp_tlb_pmap)
{
	struct invpcid_descr d;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
	d.pad = 0;
	d.addr = 0;
	if (smp_tlb_pmap == kernel_pmap) {
		/*
		 * This invalidation actually needs to clear kernel
		 * mappings from the TLB in the current pmap, but
		 * since we were asked for the flush in the kernel
		 * pmap, achieve it by performing global flush.
		 */
		invpcid(&d, INVPCID_CTXGLOB);
	} else {
		invpcid(&d, INVPCID_CTX);
		if (smp_tlb_pmap == PCPU_GET(curpmap))
			PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
	}
}

static void
invltlb_pcid_handler(pmap_t smp_tlb_pmap)
{
	uint32_t pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	if (smp_tlb_pmap == kernel_pmap) {
		invltlb_glob();
	} else {
		/*
		 * The current pmap might not be equal to
		 * smp_tlb_pmap.  The clearing of the pm_gen in
		 * pmap_invalidate_all() takes care of TLB
		 * invalidation when switching to the pmap on this
		 * CPU.
		 */
		if (smp_tlb_pmap == PCPU_GET(curpmap)) {
			pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
			load_cr3(smp_tlb_pmap->pm_cr3 | pcid);
			if (smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3)
				PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
		}
	}
}

static void
invlpg_handler(vm_offset_t smp_tlb_addr1)
{
#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invlpg(smp_tlb_addr1);
}

static void
invlpg_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
{
	struct invpcid_descr d;

#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invlpg(smp_tlb_addr1);
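	/*
	 * Also invalidate the user page-table translation, but only if
	 * the user page tables are not due for a full flush anyway:
	 * when ucr3_load_mask is not PMAP_UCR3_NOMASK, the CR3_PCID_SAVE
	 * bit is cleared on return to userspace and the whole user TLB
	 * context is discarded, making the targeted flush unnecessary.
	 */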
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid |
		    PMAP_PCID_USER_PT;
		d.pad = 0;
		d.addr = smp_tlb_addr1;
		invpcid(&d, INVPCID_ADDR);
	}
}

static void
invlpg_pcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
{
	uint64_t kcr3, ucr3;
	uint32_t pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	invlpg(smp_tlb_addr1);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
		pmap_pti_pcid_invlpg(ucr3, kcr3, smp_tlb_addr1);
	}
}

static void
invlrng_handler(vm_offset_t smp_tlb_addr1, vm_offset_t smp_tlb_addr2)
{
	vm_offset_t addr, addr2;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	addr2 = smp_tlb_addr2;
	do {
		invlpg(addr);
		addr += PAGE_SIZE;
	} while (addr < addr2);
}

static void
invlrng_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
    vm_offset_t smp_tlb_addr2)
{
	struct invpcid_descr d;
	vm_offset_t addr, addr2;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	addr2 = smp_tlb_addr2;
	do {
		invlpg(addr);
		addr += PAGE_SIZE;
	} while (addr < addr2);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid |
		    PMAP_PCID_USER_PT;
		d.pad = 0;
		d.addr = smp_tlb_addr1;
		do {
			invpcid(&d, INVPCID_ADDR);
			d.addr += PAGE_SIZE;
		} while (d.addr < addr2);
	}
}

static void
invlrng_pcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
    vm_offset_t smp_tlb_addr2)
{
	vm_offset_t addr, addr2;
	uint64_t kcr3, ucr3;
	uint32_t pcid;

#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[PCPU_GET(cpuid)]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	addr = smp_tlb_addr1;
	addr2 = smp_tlb_addr2;
	do {
		invlpg(addr);
		addr += PAGE_SIZE;
	} while (addr < addr2);
	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3 &&
	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
		pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
		pmap_pti_pcid_invlrng(ucr3, kcr3, smp_tlb_addr1, addr2);
	}
}

static void
invlcache_handler(void)
{
#ifdef COUNT_IPIS
	(*ipi_invlcache_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */
	wbinvd();
}

static void
invlop_handler_one_req(enum invl_op_codes smp_tlb_op, pmap_t smp_tlb_pmap,
    vm_offset_t smp_tlb_addr1, vm_offset_t smp_tlb_addr2)
{
	switch (smp_tlb_op) {
	case INVL_OP_TLB:
		invltlb_handler(smp_tlb_pmap);
		break;
	case INVL_OP_TLB_INVPCID:
		invltlb_invpcid_handler(smp_tlb_pmap);
		break;
	case INVL_OP_TLB_INVPCID_PTI:
		invltlb_invpcid_pti_handler(smp_tlb_pmap);
		break;
	case INVL_OP_TLB_PCID:
		invltlb_pcid_handler(smp_tlb_pmap);
		break;
	case INVL_OP_PGRNG:
		invlrng_handler(smp_tlb_addr1, smp_tlb_addr2);
		break;
	case INVL_OP_PGRNG_INVPCID:
		invlrng_invpcid_handler(smp_tlb_pmap, smp_tlb_addr1,
		    smp_tlb_addr2);
		break;
	case INVL_OP_PGRNG_PCID:
		invlrng_pcid_handler(smp_tlb_pmap, smp_tlb_addr1,
		    smp_tlb_addr2);
		break;
	case INVL_OP_PG:
		invlpg_handler(smp_tlb_addr1);
		break;
	case INVL_OP_PG_INVPCID:
		invlpg_invpcid_handler(smp_tlb_pmap, smp_tlb_addr1);
		break;
	case INVL_OP_PG_PCID:
		invlpg_pcid_handler(smp_tlb_pmap, smp_tlb_addr1);
		break;
	case INVL_OP_CACHE:
		invlcache_handler();
		break;
	default:
		__assert_unreachable();
		break;
	}
}

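/*
 * IPI_INVLOP handler.  Scan this CPU's row of the scoreboard for slots
 * that initiators have zeroed, fetch the request parameters from the
 * initiator's pcpu area, acknowledge by storing the initiator's
 * generation back into the slot, and perform the invalidation.  Several
 * pending requests may be serviced by a single interrupt.
 */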
void
invlop_handler(void)
{
	struct pcpu *initiator_pc;
	pmap_t smp_tlb_pmap;
	vm_offset_t smp_tlb_addr1, smp_tlb_addr2;
	u_int initiator_cpu_id;
	enum invl_op_codes smp_tlb_op;
	uint32_t *scoreboard, smp_tlb_gen;

	scoreboard = invl_scoreboard_getcpu(PCPU_GET(cpuid));
	for (;;) {
		for (initiator_cpu_id = 0; initiator_cpu_id <= mp_maxid;
		    initiator_cpu_id++) {
			if (atomic_load_int(&scoreboard[initiator_cpu_id]) == 0)
				break;
		}
		if (initiator_cpu_id > mp_maxid)
			break;
		initiator_pc = cpuid_to_pcpu[initiator_cpu_id];

		/*
		 * This acquire fence and its corresponding release
		 * fence in smp_targeted_tlb_shootdown() is between
		 * reading zero scoreboard slot and accessing PCPU of
		 * initiator for pc_smp_tlb values.
		 */
		atomic_thread_fence_acq();
		smp_tlb_pmap = initiator_pc->pc_smp_tlb_pmap;
		smp_tlb_addr1 = initiator_pc->pc_smp_tlb_addr1;
		smp_tlb_addr2 = initiator_pc->pc_smp_tlb_addr2;
		smp_tlb_op = initiator_pc->pc_smp_tlb_op;
		smp_tlb_gen = initiator_pc->pc_smp_tlb_gen;

		/*
		 * Ensure that we do not make our scoreboard
		 * notification visible to the initiator until the
		 * pc_smp_tlb values are read.  The corresponding
		 * fence is implicitly provided by the barrier in the
		 * IPI send operation before the APIC ICR register
		 * write.
		 *
		 * As an optimization, the request is acknowledged
		 * before the actual invalidation is performed.  This
		 * is safe because the target CPU cannot return to
		 * userspace before the handler finishes.  Only an NMI
		 * can preempt the handler, and it would see the kernel
		 * handler frame and not touch the not-yet-invalidated
		 * user page tables.
		 */
		atomic_thread_fence_acq();
		atomic_store_int(&scoreboard[initiator_cpu_id], smp_tlb_gen);

		invlop_handler_one_req(smp_tlb_op, smp_tlb_pmap, smp_tlb_addr1,
		    smp_tlb_addr2);
	}
}