1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2008-2015 Nathan Whitehorn
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 #include <sys/cdefs.h>
30 /*
31 * Manages physical address maps.
32 *
33 * Since the information managed by this module is also stored by the
34 * logical address mapping module, this module may throw away valid virtual
35 * to physical mappings at almost any time. However, invalidations of
36 * mappings must be done as requested.
37 *
38 * In order to cope with hardware architectures which make virtual to
39 * physical map invalidates expensive, this module may delay invalidate
40 * or reduced protection operations until such time as they are actually
41 * necessary. This module is given full information as to which processors
42 * are currently using which maps, and to when physical maps must be made
43 * correct.
44 */
45
46 #include "opt_kstack_pages.h"
47
48 #include <sys/param.h>
49 #include <sys/kernel.h>
50 #include <sys/conf.h>
51 #include <sys/queue.h>
52 #include <sys/cpuset.h>
53 #include <sys/kerneldump.h>
54 #include <sys/ktr.h>
55 #include <sys/lock.h>
56 #include <sys/msgbuf.h>
57 #include <sys/malloc.h>
58 #include <sys/mman.h>
59 #include <sys/mutex.h>
60 #include <sys/proc.h>
61 #include <sys/rwlock.h>
62 #include <sys/sched.h>
63 #include <sys/sysctl.h>
64 #include <sys/systm.h>
65 #include <sys/vmmeter.h>
66 #include <sys/smp.h>
67 #include <sys/reboot.h>
68
69 #include <sys/kdb.h>
70
71 #include <dev/ofw/openfirm.h>
72
73 #include <vm/vm.h>
74 #include <vm/pmap.h>
75 #include <vm/vm_param.h>
76 #include <vm/vm_kern.h>
77 #include <vm/vm_page.h>
78 #include <vm/vm_phys.h>
79 #include <vm/vm_map.h>
80 #include <vm/vm_object.h>
81 #include <vm/vm_extern.h>
82 #include <vm/vm_pageout.h>
83 #include <vm/vm_dumpset.h>
84 #include <vm/vm_reserv.h>
85 #include <vm/uma.h>
86
87 #include <machine/_inttypes.h>
88 #include <machine/cpu.h>
89 #include <machine/ifunc.h>
90 #include <machine/platform.h>
91 #include <machine/frame.h>
92 #include <machine/md_var.h>
93 #include <machine/psl.h>
94 #include <machine/bat.h>
95 #include <machine/hid.h>
96 #include <machine/pte.h>
97 #include <machine/sr.h>
98 #include <machine/trap.h>
99 #include <machine/mmuvar.h>
100
101 #include "mmu_oea64.h"
102
103 void moea64_release_vsid(uint64_t vsid);
104 uintptr_t moea64_get_unique_vsid(void);
105
106 #define DISABLE_TRANS(msr) msr = mfmsr(); mtmsr(msr & ~PSL_DR)
107 #define ENABLE_TRANS(msr) mtmsr(msr)
108
109 #define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4))
110 #define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff)
111 #define VSID_HASH_MASK 0x0000007fffffffffULL
112
113 /*
114 * Locking semantics:
115 *
116 * There are two locks of interest: the page locks and the pmap locks, which
117 * protect their individual PVO lists and are locked in that order. The contents
118 * of all PVO entries are protected by the locks of their respective pmaps.
119 * The pmap of any PVO is guaranteed not to change so long as the PVO is linked
120 * into any list.
121 *
122 */
123
124 #define PV_LOCK_COUNT PA_LOCK_COUNT
125 static struct mtx_padalign pv_lock[PV_LOCK_COUNT];
126
127 /*
128 * Cheap NUMA-izing of the pv locks, to reduce contention across domains.
129 * NUMA domains on POWER9 appear to be indexed as sparse memory spaces, with the
130 * index at (N << 45).
131 */
132 #ifdef __powerpc64__
133 #define PV_LOCK_IDX(pa) ((pa_index(pa) * (((pa) >> 45) + 1)) % PV_LOCK_COUNT)
134 #else
135 #define PV_LOCK_IDX(pa) (pa_index(pa) % PV_LOCK_COUNT)
136 #endif
137 #define PV_LOCKPTR(pa) ((struct mtx *)(&pv_lock[PV_LOCK_IDX(pa)]))
138 #define PV_LOCK(pa) mtx_lock(PV_LOCKPTR(pa))
139 #define PV_UNLOCK(pa) mtx_unlock(PV_LOCKPTR(pa))
140 #define PV_LOCKASSERT(pa) mtx_assert(PV_LOCKPTR(pa), MA_OWNED)
141 #define PV_PAGE_LOCK(m) PV_LOCK(VM_PAGE_TO_PHYS(m))
142 #define PV_PAGE_UNLOCK(m) PV_UNLOCK(VM_PAGE_TO_PHYS(m))
143 #define PV_PAGE_LOCKASSERT(m) PV_LOCKASSERT(VM_PAGE_TO_PHYS(m))
144
145 /* Superpage PV lock */
146
147 #define PV_LOCK_SIZE (1<<PDRSHIFT)
148
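/*
 * Lock every PV lock covering a superpage-aligned physical range.
 * MTX_DUPOK is used because distinct addresses may hash to the same lock.
 */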
149 static __always_inline void
150 moea64_sp_pv_lock(vm_paddr_t pa)
151 {
152 vm_paddr_t pa_end;
153
154 /* Note: breaking when pa_end is reached to avoid overflows */
155 pa_end = pa + (HPT_SP_SIZE - PV_LOCK_SIZE);
156 for (;;) {
157 mtx_lock_flags(PV_LOCKPTR(pa), MTX_DUPOK);
158 if (pa == pa_end)
159 break;
160 pa += PV_LOCK_SIZE;
161 }
162 }
163
164 static __always_inline void
165 moea64_sp_pv_unlock(vm_paddr_t pa)
166 {
167 vm_paddr_t pa_end;
168
169 /* Note: breaking when pa_end is reached to avoid overflows */
170 pa_end = pa;
171 pa += HPT_SP_SIZE - PV_LOCK_SIZE;
172 for (;;) {
173 mtx_unlock_flags(PV_LOCKPTR(pa), MTX_DUPOK);
174 if (pa == pa_end)
175 break;
176 pa -= PV_LOCK_SIZE;
177 }
178 }
179
180 #define SP_PV_LOCK_ALIGNED(pa) moea64_sp_pv_lock(pa)
181 #define SP_PV_UNLOCK_ALIGNED(pa) moea64_sp_pv_unlock(pa)
182 #define SP_PV_LOCK(pa) moea64_sp_pv_lock((pa) & ~HPT_SP_MASK)
183 #define SP_PV_UNLOCK(pa) moea64_sp_pv_unlock((pa) & ~HPT_SP_MASK)
184 #define SP_PV_PAGE_LOCK(m) SP_PV_LOCK(VM_PAGE_TO_PHYS(m))
185 #define SP_PV_PAGE_UNLOCK(m) SP_PV_UNLOCK(VM_PAGE_TO_PHYS(m))
186
187 struct ofw_map {
188 cell_t om_va;
189 cell_t om_len;
190 uint64_t om_pa;
191 cell_t om_mode;
192 };
193
194 extern unsigned char _etext[];
195 extern unsigned char _end[];
196
197 extern void *slbtrap, *slbtrapend;
198
199 /*
200 * Map of physical memory regions.
201 */
202 static struct mem_region *regions;
203 static struct mem_region *pregions;
204 static struct numa_mem_region *numa_pregions;
205 static u_int phys_avail_count;
206 static int regions_sz, pregions_sz, numapregions_sz;
207
208 extern void bs_remap_earlyboot(void);
209
210 /*
211 * Lock for the SLB tables.
212 */
213 struct mtx moea64_slb_mutex;
214
215 /*
216 * PTEG data.
217 */
218 u_long moea64_pteg_count;
219 u_long moea64_pteg_mask;
220
221 /*
222 * PVO data.
223 */
224
225 uma_zone_t moea64_pvo_zone; /* zone for pvo entries */
226
227 static struct pvo_entry *moea64_bpvo_pool;
228 static int moea64_bpvo_pool_index = 0;
229 static int moea64_bpvo_pool_size = 0;
230 SYSCTL_INT(_machdep, OID_AUTO, moea64_allocated_bpvo_entries, CTLFLAG_RD,
231 &moea64_bpvo_pool_index, 0, "");
232
233 #define BPVO_POOL_SIZE 327680 /* Sensible historical default value */
234 #define BPVO_POOL_EXPANSION_FACTOR 3
235 #define VSID_NBPW (sizeof(u_int32_t) * 8)
236 #ifdef __powerpc64__
237 #define NVSIDS (NPMAPS * 16)
238 #define VSID_HASHMASK 0xffffffffUL
239 #else
240 #define NVSIDS NPMAPS
241 #define VSID_HASHMASK 0xfffffUL
242 #endif
243 static u_int moea64_vsid_bitmap[NVSIDS / VSID_NBPW];
244
245 static boolean_t moea64_initialized = FALSE;
246
247 #ifdef MOEA64_STATS
248 /*
249 * Statistics.
250 */
251 u_int moea64_pte_valid = 0;
252 u_int moea64_pte_overflow = 0;
253 u_int moea64_pvo_entries = 0;
254 u_int moea64_pvo_enter_calls = 0;
255 u_int moea64_pvo_remove_calls = 0;
256 SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
257 &moea64_pte_valid, 0, "");
258 SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
259 &moea64_pte_overflow, 0, "");
260 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
261 &moea64_pvo_entries, 0, "");
262 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
263 &moea64_pvo_enter_calls, 0, "");
264 SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
265 &moea64_pvo_remove_calls, 0, "");
266 #endif
267
268 vm_offset_t moea64_scratchpage_va[2];
269 struct pvo_entry *moea64_scratchpage_pvo[2];
270 struct mtx moea64_scratchpage_mtx;
271
272 uint64_t moea64_large_page_mask = 0;
273 uint64_t moea64_large_page_size = 0;
274 int moea64_large_page_shift = 0;
275 bool moea64_has_lp_4k_16m = false;
276
277 /*
278 * PVO calls.
279 */
280 static int moea64_pvo_enter(struct pvo_entry *pvo,
281 struct pvo_head *pvo_head, struct pvo_entry **oldpvo);
282 static void moea64_pvo_remove_from_pmap(struct pvo_entry *pvo);
283 static void moea64_pvo_remove_from_page(struct pvo_entry *pvo);
284 static void moea64_pvo_remove_from_page_locked(
285 struct pvo_entry *pvo, vm_page_t m);
286 static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);
287
288 /*
289 * Utility routines.
290 */
291 static boolean_t moea64_query_bit(vm_page_t, uint64_t);
292 static u_int moea64_clear_bit(vm_page_t, uint64_t);
293 static void moea64_kremove(vm_offset_t);
294 static void moea64_syncicache(pmap_t pmap, vm_offset_t va,
295 vm_paddr_t pa, vm_size_t sz);
296 static void moea64_pmap_init_qpages(void);
297 static void moea64_remove_locked(pmap_t, vm_offset_t,
298 vm_offset_t, struct pvo_dlist *);
299
300 /*
301 * Superpages data and routines.
302 */
303
304 /*
305 * PVO flags (in vaddr) that must match for promotion to succeed.
306 * Note that protection bits are checked separately, as they reside in
307 * another field.
308 */
309 #define PVO_FLAGS_PROMOTE (PVO_WIRED | PVO_MANAGED | PVO_PTEGIDX_VALID)
310
311 #define PVO_IS_SP(pvo) (((pvo)->pvo_vaddr & PVO_LARGE) && \
312 (pvo)->pvo_pmap != kernel_pmap)
313
314 /* Get physical address from PVO. */
315 #define PVO_PADDR(pvo) moea64_pvo_paddr(pvo)
316
317 /* MD page flag indicating that the page is a superpage. */
318 #define MDPG_ATTR_SP 0x40000000
319
320 SYSCTL_DECL(_vm_pmap);
321
322 static SYSCTL_NODE(_vm_pmap, OID_AUTO, sp, CTLFLAG_RD, 0,
323 "SP page mapping counters");
324
325 static u_long sp_demotions;
326 SYSCTL_ULONG(_vm_pmap_sp, OID_AUTO, demotions, CTLFLAG_RD,
327 &sp_demotions, 0, "SP page demotions");
328
329 static u_long sp_mappings;
330 SYSCTL_ULONG(_vm_pmap_sp, OID_AUTO, mappings, CTLFLAG_RD,
331 &sp_mappings, 0, "SP page mappings");
332
333 static u_long sp_p_failures;
334 SYSCTL_ULONG(_vm_pmap_sp, OID_AUTO, p_failures, CTLFLAG_RD,
335 &sp_p_failures, 0, "SP page promotion failures");
336
337 static u_long sp_p_fail_pa;
338 SYSCTL_ULONG(_vm_pmap_sp, OID_AUTO, p_fail_pa, CTLFLAG_RD,
339 &sp_p_fail_pa, 0, "SP page promotion failure: PAs don't match");
340
341 static u_long sp_p_fail_flags;
342 SYSCTL_ULONG(_vm_pmap_sp, OID_AUTO, p_fail_flags, CTLFLAG_RD,
343 &sp_p_fail_flags, 0, "SP page promotion failure: page flags don't match");
344
345 static u_long sp_p_fail_prot;
346 SYSCTL_ULONG(_vm_pmap_sp, OID_AUTO, p_fail_prot, CTLFLAG_RD,
347 &sp_p_fail_prot, 0,
348 "SP page promotion failure: page protections don't match");
349
350 static u_long sp_p_fail_wimg;
351 SYSCTL_ULONG(_vm_pmap_sp, OID_AUTO, p_fail_wimg, CTLFLAG_RD,
352 &sp_p_fail_wimg, 0, "SP page promotion failure: WIMG bits don't match");
353
354 static u_long sp_promotions;
355 SYSCTL_ULONG(_vm_pmap_sp, OID_AUTO, promotions, CTLFLAG_RD,
356 &sp_promotions, 0, "SP page promotions");
357
358 static bool moea64_ps_enabled(pmap_t);
359 static void moea64_align_superpage(vm_object_t, vm_ooffset_t,
360 vm_offset_t *, vm_size_t);
361
362 static int moea64_sp_enter(pmap_t pmap, vm_offset_t va,
363 vm_page_t m, vm_prot_t prot, u_int flags, int8_t psind);
364 static struct pvo_entry *moea64_sp_remove(struct pvo_entry *sp,
365 struct pvo_dlist *tofree);
366
367 static void moea64_sp_promote(pmap_t pmap, vm_offset_t va, vm_page_t m);
368 static void moea64_sp_demote_aligned(struct pvo_entry *sp);
369 static void moea64_sp_demote(struct pvo_entry *pvo);
370
371 static struct pvo_entry *moea64_sp_unwire(struct pvo_entry *sp);
372 static struct pvo_entry *moea64_sp_protect(struct pvo_entry *sp,
373 vm_prot_t prot);
374
375 static int64_t moea64_sp_query(struct pvo_entry *pvo, uint64_t ptebit);
376 static int64_t moea64_sp_clear(struct pvo_entry *pvo, vm_page_t m,
377 uint64_t ptebit);
378
379 static __inline bool moea64_sp_pvo_in_range(struct pvo_entry *pvo,
380 vm_offset_t sva, vm_offset_t eva);
381
382 /*
383 * Kernel MMU interface
384 */
385 void moea64_clear_modify(vm_page_t);
386 void moea64_copy_page(vm_page_t, vm_page_t);
387 void moea64_copy_page_dmap(vm_page_t, vm_page_t);
388 void moea64_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
389 vm_page_t *mb, vm_offset_t b_offset, int xfersize);
390 void moea64_copy_pages_dmap(vm_page_t *ma, vm_offset_t a_offset,
391 vm_page_t *mb, vm_offset_t b_offset, int xfersize);
392 int moea64_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
393 u_int flags, int8_t psind);
394 void moea64_enter_object(pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
395 vm_prot_t);
396 void moea64_enter_quick(pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
397 vm_paddr_t moea64_extract(pmap_t, vm_offset_t);
398 vm_page_t moea64_extract_and_hold(pmap_t, vm_offset_t, vm_prot_t);
399 void moea64_init(void);
400 boolean_t moea64_is_modified(vm_page_t);
401 boolean_t moea64_is_prefaultable(pmap_t, vm_offset_t);
402 boolean_t moea64_is_referenced(vm_page_t);
403 int moea64_ts_referenced(vm_page_t);
404 vm_offset_t moea64_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
405 boolean_t moea64_page_exists_quick(pmap_t, vm_page_t);
406 void moea64_page_init(vm_page_t);
407 int moea64_page_wired_mappings(vm_page_t);
408 int moea64_pinit(pmap_t);
409 void moea64_pinit0(pmap_t);
410 void moea64_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
411 void moea64_qenter(vm_offset_t, vm_page_t *, int);
412 void moea64_qremove(vm_offset_t, int);
413 void moea64_release(pmap_t);
414 void moea64_remove(pmap_t, vm_offset_t, vm_offset_t);
415 void moea64_remove_pages(pmap_t);
416 void moea64_remove_all(vm_page_t);
417 void moea64_remove_write(vm_page_t);
418 void moea64_unwire(pmap_t, vm_offset_t, vm_offset_t);
419 void moea64_zero_page(vm_page_t);
420 void moea64_zero_page_dmap(vm_page_t);
421 void moea64_zero_page_area(vm_page_t, int, int);
422 void moea64_activate(struct thread *);
423 void moea64_deactivate(struct thread *);
424 void *moea64_mapdev(vm_paddr_t, vm_size_t);
425 void *moea64_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
426 void moea64_unmapdev(void *, vm_size_t);
427 vm_paddr_t moea64_kextract(vm_offset_t);
428 void moea64_page_set_memattr(vm_page_t m, vm_memattr_t ma);
429 void moea64_kenter_attr(vm_offset_t, vm_paddr_t, vm_memattr_t ma);
430 void moea64_kenter(vm_offset_t, vm_paddr_t);
431 int moea64_dev_direct_mapped(vm_paddr_t, vm_size_t);
432 static void moea64_sync_icache(pmap_t, vm_offset_t, vm_size_t);
433 void moea64_dumpsys_map(vm_paddr_t pa, size_t sz,
434 void **va);
435 void moea64_scan_init(void);
436 vm_offset_t moea64_quick_enter_page(vm_page_t m);
437 vm_offset_t moea64_quick_enter_page_dmap(vm_page_t m);
438 void moea64_quick_remove_page(vm_offset_t addr);
439 boolean_t moea64_page_is_mapped(vm_page_t m);
440 static int moea64_map_user_ptr(pmap_t pm,
441 volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
442 static int moea64_decode_kernel_ptr(vm_offset_t addr,
443 int *is_user, vm_offset_t *decoded_addr);
444 static size_t moea64_scan_pmap(struct bitset *dump_bitset);
445 static void *moea64_dump_pmap_init(unsigned blkpgs);
446 #ifdef __powerpc64__
447 static void moea64_page_array_startup(long);
448 #endif
449 static int moea64_mincore(pmap_t, vm_offset_t, vm_paddr_t *);
450
451 static struct pmap_funcs moea64_methods = {
452 .clear_modify = moea64_clear_modify,
453 .copy_page = moea64_copy_page,
454 .copy_pages = moea64_copy_pages,
455 .enter = moea64_enter,
456 .enter_object = moea64_enter_object,
457 .enter_quick = moea64_enter_quick,
458 .extract = moea64_extract,
459 .extract_and_hold = moea64_extract_and_hold,
460 .init = moea64_init,
461 .is_modified = moea64_is_modified,
462 .is_prefaultable = moea64_is_prefaultable,
463 .is_referenced = moea64_is_referenced,
464 .ts_referenced = moea64_ts_referenced,
465 .map = moea64_map,
466 .mincore = moea64_mincore,
467 .page_exists_quick = moea64_page_exists_quick,
468 .page_init = moea64_page_init,
469 .page_wired_mappings = moea64_page_wired_mappings,
470 .pinit = moea64_pinit,
471 .pinit0 = moea64_pinit0,
472 .protect = moea64_protect,
473 .qenter = moea64_qenter,
474 .qremove = moea64_qremove,
475 .release = moea64_release,
476 .remove = moea64_remove,
477 .remove_pages = moea64_remove_pages,
478 .remove_all = moea64_remove_all,
479 .remove_write = moea64_remove_write,
480 .sync_icache = moea64_sync_icache,
481 .unwire = moea64_unwire,
482 .zero_page = moea64_zero_page,
483 .zero_page_area = moea64_zero_page_area,
484 .activate = moea64_activate,
485 .deactivate = moea64_deactivate,
486 .page_set_memattr = moea64_page_set_memattr,
487 .quick_enter_page = moea64_quick_enter_page,
488 .quick_remove_page = moea64_quick_remove_page,
489 .page_is_mapped = moea64_page_is_mapped,
490 #ifdef __powerpc64__
491 .page_array_startup = moea64_page_array_startup,
492 #endif
493 .ps_enabled = moea64_ps_enabled,
494 .align_superpage = moea64_align_superpage,
495
496 /* Internal interfaces */
497 .mapdev = moea64_mapdev,
498 .mapdev_attr = moea64_mapdev_attr,
499 .unmapdev = moea64_unmapdev,
500 .kextract = moea64_kextract,
501 .kenter = moea64_kenter,
502 .kenter_attr = moea64_kenter_attr,
503 .dev_direct_mapped = moea64_dev_direct_mapped,
504 .dumpsys_pa_init = moea64_scan_init,
505 .dumpsys_scan_pmap = moea64_scan_pmap,
506 .dumpsys_dump_pmap_init = moea64_dump_pmap_init,
507 .dumpsys_map_chunk = moea64_dumpsys_map,
508 .map_user_ptr = moea64_map_user_ptr,
509 .decode_kernel_ptr = moea64_decode_kernel_ptr,
510 };
511
512 MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods);
513
514 /*
515 * Get physical address from PVO.
516 *
517 * For superpages, the lower bits are not stored on pvo_pte.pa and must be
518 * obtained from VA.
519 */
520 static __always_inline vm_paddr_t
521 moea64_pvo_paddr(struct pvo_entry *pvo)
522 {
523 vm_paddr_t pa;
524
525 pa = (pvo)->pvo_pte.pa & LPTE_RPGN;
526
527 if (PVO_IS_SP(pvo)) {
528 pa &= ~HPT_SP_MASK; /* This is needed to clear LPTE_LP bits. */
529 pa |= PVO_VADDR(pvo) & HPT_SP_MASK;
530 }
531 return (pa);
532 }
533
534 static struct pvo_head *
535 vm_page_to_pvoh(vm_page_t m)
536 {
537
538 mtx_assert(PV_LOCKPTR(VM_PAGE_TO_PHYS(m)), MA_OWNED);
539 return (&m->md.mdpg_pvoh);
540 }
541
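/*
 * Allocate a PVO entry, drawing from the static bootstrap pool until the
 * pmap module (and its UMA zone) has been initialized.
 */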
542 static struct pvo_entry *
543 alloc_pvo_entry(int bootstrap)
544 {
545 struct pvo_entry *pvo;
546
547 if (!moea64_initialized || bootstrap) {
548 if (moea64_bpvo_pool_index >= moea64_bpvo_pool_size) {
549 panic("%s: bpvo pool exhausted, index=%d, size=%d, bytes=%zd."
550 "Try setting machdep.moea64_bpvo_pool_size tunable",
551 __func__, moea64_bpvo_pool_index,
552 moea64_bpvo_pool_size,
553 moea64_bpvo_pool_size * sizeof(struct pvo_entry));
554 }
555 pvo = &moea64_bpvo_pool[
556 atomic_fetchadd_int(&moea64_bpvo_pool_index, 1)];
557 bzero(pvo, sizeof(*pvo));
558 pvo->pvo_vaddr = PVO_BOOTSTRAP;
559 } else
560 pvo = uma_zalloc(moea64_pvo_zone, M_NOWAIT | M_ZERO);
561
562 return (pvo);
563 }
564
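/*
 * Bind a PVO to a pmap/VA pair: record the VA, compute the VPN from the
 * VSID, and derive the primary PTEG slot from the hash.
 */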
565 static void
566 init_pvo_entry(struct pvo_entry *pvo, pmap_t pmap, vm_offset_t va)
567 {
568 uint64_t vsid;
569 uint64_t hash;
570 int shift;
571
572 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
573
574 pvo->pvo_pmap = pmap;
575 va &= ~ADDR_POFF;
576 pvo->pvo_vaddr |= va;
577 vsid = va_to_vsid(pmap, va);
578 pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT)
579 | (vsid << 16);
580
581 if (pmap == kernel_pmap && (pvo->pvo_vaddr & PVO_LARGE) != 0)
582 shift = moea64_large_page_shift;
583 else
584 shift = ADDR_PIDX_SHFT;
585 hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)va & ADDR_PIDX) >> shift);
586 pvo->pvo_pte.slot = (hash & moea64_pteg_mask) << 3;
587 }
588
589 static void
590 free_pvo_entry(struct pvo_entry *pvo)
591 {
592
593 if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
594 uma_zfree(moea64_pvo_zone, pvo);
595 }
596
597 void
598 moea64_pte_from_pvo(const struct pvo_entry *pvo, struct lpte *lpte)
599 {
600
601 lpte->pte_hi = moea64_pte_vpn_from_pvo_vpn(pvo);
602 lpte->pte_hi |= LPTE_VALID;
603
604 if (pvo->pvo_vaddr & PVO_LARGE)
605 lpte->pte_hi |= LPTE_BIG;
606 if (pvo->pvo_vaddr & PVO_WIRED)
607 lpte->pte_hi |= LPTE_WIRED;
608 if (pvo->pvo_vaddr & PVO_HID)
609 lpte->pte_hi |= LPTE_HID;
610
611 lpte->pte_lo = pvo->pvo_pte.pa; /* Includes WIMG bits */
612 if (pvo->pvo_pte.prot & VM_PROT_WRITE)
613 lpte->pte_lo |= LPTE_BW;
614 else
615 lpte->pte_lo |= LPTE_BR;
616
617 if (!(pvo->pvo_pte.prot & VM_PROT_EXECUTE))
618 lpte->pte_lo |= LPTE_NOEXEC;
619 }
620
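/*
 * Compute the WIMG cache-control bits for a physical address and memory
 * attribute. Addresses outside the known memory regions are treated as
 * cache-inhibited and guarded (I/O).
 */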
621 static __inline uint64_t
622 moea64_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
623 {
624 uint64_t pte_lo;
625 int i;
626
627 if (ma != VM_MEMATTR_DEFAULT) {
628 switch (ma) {
629 case VM_MEMATTR_UNCACHEABLE:
630 return (LPTE_I | LPTE_G);
631 case VM_MEMATTR_CACHEABLE:
632 return (LPTE_M);
633 case VM_MEMATTR_WRITE_COMBINING:
634 case VM_MEMATTR_WRITE_BACK:
635 case VM_MEMATTR_PREFETCHABLE:
636 return (LPTE_I);
637 case VM_MEMATTR_WRITE_THROUGH:
638 return (LPTE_W | LPTE_M);
639 }
640 }
641
642 /*
643 * Assume the page is cache inhibited and access is guarded unless
644 * it's in our available memory array.
645 */
646 pte_lo = LPTE_I | LPTE_G;
647 for (i = 0; i < pregions_sz; i++) {
648 if ((pa >= pregions[i].mr_start) &&
649 (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
650 pte_lo &= ~(LPTE_I | LPTE_G);
651 pte_lo |= LPTE_M;
652 break;
653 }
654 }
655
656 return pte_lo;
657 }
658
659 /*
660 * Quick sort callout for comparing memory regions.
661 */
662 static int om_cmp(const void *a, const void *b);
663
664 static int
665 om_cmp(const void *a, const void *b)
666 {
667 const struct ofw_map *mapa;
668 const struct ofw_map *mapb;
669
670 mapa = a;
671 mapb = b;
672 if (mapa->om_pa < mapb->om_pa)
673 return (-1);
674 else if (mapa->om_pa > mapb->om_pa)
675 return (1);
676 else
677 return (0);
678 }
679
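/*
 * Read the Open Firmware "translations" property and re-enter its mappings
 * into the kernel pmap, skipping any that the direct map already covers.
 */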
680 static void
681 moea64_add_ofw_mappings(phandle_t mmu, size_t sz)
682 {
683 struct ofw_map translations[sz/(4*sizeof(cell_t))]; /*>= 4 cells per */
684 pcell_t acells, trans_cells[sz/sizeof(cell_t)];
685 struct pvo_entry *pvo;
686 register_t msr;
687 vm_offset_t off;
688 vm_paddr_t pa_base;
689 int i, j;
690
691 bzero(translations, sz);
692 OF_getencprop(OF_finddevice("/"), "#address-cells", &acells,
693 sizeof(acells));
694 if (OF_getencprop(mmu, "translations", trans_cells, sz) == -1)
695 panic("moea64_bootstrap: can't get ofw translations");
696
697 CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations");
698 sz /= sizeof(cell_t);
699 for (i = 0, j = 0; i < sz; j++) {
700 translations[j].om_va = trans_cells[i++];
701 translations[j].om_len = trans_cells[i++];
702 translations[j].om_pa = trans_cells[i++];
703 if (acells == 2) {
704 translations[j].om_pa <<= 32;
705 translations[j].om_pa |= trans_cells[i++];
706 }
707 translations[j].om_mode = trans_cells[i++];
708 }
709 KASSERT(i == sz, ("Translations map has incorrect cell count (%d/%zd)",
710 i, sz));
711
712 sz = j;
713 qsort(translations, sz, sizeof (*translations), om_cmp);
714
715 for (i = 0; i < sz; i++) {
716 pa_base = translations[i].om_pa;
717 #ifndef __powerpc64__
718 if ((translations[i].om_pa >> 32) != 0)
719 panic("OFW translations above 32-bit boundary!");
720 #endif
721
722 if (pa_base % PAGE_SIZE)
723 panic("OFW translation not page-aligned (phys)!");
724 if (translations[i].om_va % PAGE_SIZE)
725 panic("OFW translation not page-aligned (virt)!");
726
727 CTR3(KTR_PMAP, "translation: pa=%#zx va=%#x len=%#x",
728 pa_base, translations[i].om_va, translations[i].om_len);
729
730 /* Now enter the pages for this mapping */
731
732 DISABLE_TRANS(msr);
733 for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
734 /* If this address is direct-mapped, skip remapping */
735 if (hw_direct_map &&
736 translations[i].om_va == PHYS_TO_DMAP(pa_base) &&
737 moea64_calc_wimg(pa_base + off, VM_MEMATTR_DEFAULT)
738 == LPTE_M)
739 continue;
740
741 PMAP_LOCK(kernel_pmap);
742 pvo = moea64_pvo_find_va(kernel_pmap,
743 translations[i].om_va + off);
744 PMAP_UNLOCK(kernel_pmap);
745 if (pvo != NULL)
746 continue;
747
748 moea64_kenter(translations[i].om_va + off,
749 pa_base + off);
750 }
751 ENABLE_TRANS(msr);
752 }
753 }
754
755 #ifdef __powerpc64__
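/*
 * Determine the large page size used for the direct map; on 970-class
 * CPUs, large pages must first be enabled in HID4.
 */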
756 static void
757 moea64_probe_large_page(void)
758 {
759 uint16_t pvr = mfpvr() >> 16;
760
761 switch (pvr) {
762 case IBM970:
763 case IBM970FX:
764 case IBM970MP:
765 powerpc_sync(); isync();
766 mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG);
767 powerpc_sync(); isync();
768
769 /* FALLTHROUGH */
770 default:
771 if (moea64_large_page_size == 0) {
772 moea64_large_page_size = 0x1000000; /* 16 MB */
773 moea64_large_page_shift = 24;
774 }
775 }
776
777 moea64_large_page_mask = moea64_large_page_size - 1;
778 }
779
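/*
 * Pre-load a kernel SLB entry for the segment containing va so that
 * bootstrap code does not take an SLB fault there.
 */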
780 static void
781 moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
782 {
783 struct slb *cache;
784 struct slb entry;
785 uint64_t esid, slbe;
786 uint64_t i;
787
788 cache = PCPU_GET(aim.slb);
789 esid = va >> ADDR_SR_SHFT;
790 slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
791
792 for (i = 0; i < 64; i++) {
793 if (cache[i].slbe == (slbe | i))
794 return;
795 }
796
797 entry.slbe = slbe;
798 entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
799 if (large)
800 entry.slbv |= SLBV_L;
801
802 slb_insert_kernel(entry.slbe, entry.slbv);
803 }
804 #endif
805
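/*
 * Enter a wired, large-page kernel mapping with the given WIMG attributes,
 * panicking if the PVO cannot be inserted.
 */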
806 static int
807 moea64_kenter_large(vm_offset_t va, vm_paddr_t pa, uint64_t attr, int bootstrap)
808 {
809 struct pvo_entry *pvo;
810 uint64_t pte_lo;
811 int error;
812
813 pte_lo = LPTE_M;
814 pte_lo |= attr;
815
816 pvo = alloc_pvo_entry(bootstrap);
817 pvo->pvo_vaddr |= PVO_WIRED | PVO_LARGE;
818 init_pvo_entry(pvo, kernel_pmap, va);
819
820 pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE |
821 VM_PROT_EXECUTE;
822 pvo->pvo_pte.pa = pa | pte_lo;
823 error = moea64_pvo_enter(pvo, NULL, NULL);
824 if (error != 0)
825 panic("Error %d inserting large page\n", error);
826 return (0);
827 }
828
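/*
 * Build the physical memory direct map out of large pages (when available)
 * and make sure the kernel, the bootstrap PVO pool, and the exception
 * vectors remain mapped on configurations without a direct map.
 */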
829 static void
830 moea64_setup_direct_map(vm_offset_t kernelstart,
831 vm_offset_t kernelend)
832 {
833 register_t msr;
834 vm_paddr_t pa, pkernelstart, pkernelend;
835 vm_offset_t size, off;
836 uint64_t pte_lo;
837 int i;
838
839 if (moea64_large_page_size == 0)
840 hw_direct_map = 0;
841
842 DISABLE_TRANS(msr);
843 if (hw_direct_map) {
844 PMAP_LOCK(kernel_pmap);
845 for (i = 0; i < pregions_sz; i++) {
846 for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
847 pregions[i].mr_size; pa += moea64_large_page_size) {
848 pte_lo = LPTE_M;
849 if (pa & moea64_large_page_mask) {
850 pa &= moea64_large_page_mask;
851 pte_lo |= LPTE_G;
852 }
853 if (pa + moea64_large_page_size >
854 pregions[i].mr_start + pregions[i].mr_size)
855 pte_lo |= LPTE_G;
856
857 moea64_kenter_large(PHYS_TO_DMAP(pa), pa, pte_lo, 1);
858 }
859 }
860 PMAP_UNLOCK(kernel_pmap);
861 }
862
863 /*
864 * Make sure the kernel and BPVO pool stay mapped on systems either
865 * without a direct map or on which the kernel is not already executing
866 * out of the direct-mapped region.
867 */
868 if (kernelstart < DMAP_BASE_ADDRESS) {
869 /*
870 * For pre-dmap execution, we need to use identity mapping
871 * because we will be operating with the mmu on but in the
872 * wrong address configuration until we __restartkernel().
873 */
874 for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
875 pa += PAGE_SIZE)
876 moea64_kenter(pa, pa);
877 } else if (!hw_direct_map) {
878 pkernelstart = kernelstart & ~DMAP_BASE_ADDRESS;
879 pkernelend = kernelend & ~DMAP_BASE_ADDRESS;
880 for (pa = pkernelstart & ~PAGE_MASK; pa < pkernelend;
881 pa += PAGE_SIZE)
882 moea64_kenter(pa | DMAP_BASE_ADDRESS, pa);
883 }
884
885 if (!hw_direct_map) {
886 size = moea64_bpvo_pool_size*sizeof(struct pvo_entry);
887 off = (vm_offset_t)(moea64_bpvo_pool);
888 for (pa = off; pa < off + size; pa += PAGE_SIZE)
889 moea64_kenter(pa, pa);
890
891 /* Map exception vectors */
892 for (pa = EXC_RSVD; pa < EXC_LAST; pa += PAGE_SIZE)
893 moea64_kenter(pa | DMAP_BASE_ADDRESS, pa);
894 }
895 ENABLE_TRANS(msr);
896
897 /*
898 * Allow user to override unmapped_buf_allowed for testing.
899 * XXXKIB Only direct map implementation was tested.
900 */
901 if (!TUNABLE_INT_FETCH("vfs.unmapped_buf_allowed",
902 &unmapped_buf_allowed))
903 unmapped_buf_allowed = hw_direct_map;
904 }
905
906 /* Quick sort callout for comparing physical addresses. */
907 static int
908 pa_cmp(const void *a, const void *b)
909 {
910 const vm_paddr_t *pa = a, *pb = b;
911
912 if (*pa < *pb)
913 return (-1);
914 else if (*pa > *pb)
915 return (1);
916 else
917 return (0);
918 }
919
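/*
 * Early bootstrap: install the SLB fault handlers (64-bit), gather the
 * physical memory regions, build phys_avail/dump_avail while excluding the
 * kernel image and exception vectors, and choose the PTEG count.
 */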
920 void
921 moea64_early_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
922 {
923 int i, j;
924 vm_size_t physsz, hwphyssz;
925 vm_paddr_t kernelphysstart, kernelphysend;
926 int rm_pavail;
927
928 /* Level 0 reservations consist of 4096 pages (16MB superpage). */
929 vm_level_0_order = 12;
930
931 #ifndef __powerpc64__
932 /* We don't have a direct map since there is no BAT */
933 hw_direct_map = 0;
934
935 /* Make sure battable is zero, since we have no BAT */
936 for (i = 0; i < 16; i++) {
937 battable[i].batu = 0;
938 battable[i].batl = 0;
939 }
940 #else
941 /* Install trap handlers for SLBs */
942 bcopy(&slbtrap, (void *)EXC_DSE,(size_t)&slbtrapend - (size_t)&slbtrap);
943 bcopy(&slbtrap, (void *)EXC_ISE,(size_t)&slbtrapend - (size_t)&slbtrap);
944 __syncicache((void *)EXC_DSE, 0x80);
945 __syncicache((void *)EXC_ISE, 0x80);
946 #endif
947
948 kernelphysstart = kernelstart & ~DMAP_BASE_ADDRESS;
949 kernelphysend = kernelend & ~DMAP_BASE_ADDRESS;
950
951 /* Get physical memory regions from firmware */
952 mem_regions(&pregions, &pregions_sz, ®ions, ®ions_sz);
953 CTR0(KTR_PMAP, "moea64_bootstrap: physical memory");
954
955 if (PHYS_AVAIL_ENTRIES < regions_sz)
956 panic("moea64_bootstrap: phys_avail too small");
957
958 phys_avail_count = 0;
959 physsz = 0;
960 hwphyssz = 0;
961 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
962 for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
963 CTR3(KTR_PMAP, "region: %#zx - %#zx (%#zx)",
964 regions[i].mr_start, regions[i].mr_start +
965 regions[i].mr_size, regions[i].mr_size);
966 if (hwphyssz != 0 &&
967 (physsz + regions[i].mr_size) >= hwphyssz) {
968 if (physsz < hwphyssz) {
969 phys_avail[j] = regions[i].mr_start;
970 phys_avail[j + 1] = regions[i].mr_start +
971 hwphyssz - physsz;
972 physsz = hwphyssz;
973 phys_avail_count++;
974 dump_avail[j] = phys_avail[j];
975 dump_avail[j + 1] = phys_avail[j + 1];
976 }
977 break;
978 }
979 phys_avail[j] = regions[i].mr_start;
980 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
981 phys_avail_count++;
982 physsz += regions[i].mr_size;
983 dump_avail[j] = phys_avail[j];
984 dump_avail[j + 1] = phys_avail[j + 1];
985 }
986
987 /* Check for overlap with the kernel and exception vectors */
988 rm_pavail = 0;
989 for (j = 0; j < 2*phys_avail_count; j+=2) {
990 if (phys_avail[j] < EXC_LAST)
991 phys_avail[j] += EXC_LAST;
992
993 if (phys_avail[j] >= kernelphysstart &&
994 phys_avail[j+1] <= kernelphysend) {
995 phys_avail[j] = phys_avail[j+1] = ~0;
996 rm_pavail++;
997 continue;
998 }
999
1000 if (kernelphysstart >= phys_avail[j] &&
1001 kernelphysstart < phys_avail[j+1]) {
1002 if (kernelphysend < phys_avail[j+1]) {
1003 phys_avail[2*phys_avail_count] =
1004 (kernelphysend & ~PAGE_MASK) + PAGE_SIZE;
1005 phys_avail[2*phys_avail_count + 1] =
1006 phys_avail[j+1];
1007 phys_avail_count++;
1008 }
1009
1010 phys_avail[j+1] = kernelphysstart & ~PAGE_MASK;
1011 }
1012
1013 if (kernelphysend >= phys_avail[j] &&
1014 kernelphysend < phys_avail[j+1]) {
1015 if (kernelphysstart > phys_avail[j]) {
1016 phys_avail[2*phys_avail_count] = phys_avail[j];
1017 phys_avail[2*phys_avail_count + 1] =
1018 kernelphysstart & ~PAGE_MASK;
1019 phys_avail_count++;
1020 }
1021
1022 phys_avail[j] = (kernelphysend & ~PAGE_MASK) +
1023 PAGE_SIZE;
1024 }
1025 }
1026
1027 /* Remove physical available regions marked for removal (~0) */
1028 if (rm_pavail) {
1029 qsort(phys_avail, 2*phys_avail_count, sizeof(phys_avail[0]),
1030 pa_cmp);
1031 phys_avail_count -= rm_pavail;
1032 for (i = 2*phys_avail_count;
1033 i < 2*(phys_avail_count + rm_pavail); i+=2)
1034 phys_avail[i] = phys_avail[i+1] = 0;
1035 }
1036
1037 physmem = btoc(physsz);
1038
1039 #ifdef PTEGCOUNT
1040 moea64_pteg_count = PTEGCOUNT;
1041 #else
1042 moea64_pteg_count = 0x1000;
1043
1044 while (moea64_pteg_count < physmem)
1045 moea64_pteg_count <<= 1;
1046
1047 moea64_pteg_count >>= 1;
1048 #endif /* PTEGCOUNT */
1049 }
1050
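/*
 * Mid bootstrap: initialize the SLB and PV locks, allocate the bootstrap
 * PVO pool, reserve the kernel VSID (32-bit), set up the kernel pmap, and
 * create the direct map.
 */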
1051 void
1052 moea64_mid_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
1053 {
1054 int i;
1055
1056 /*
1057 * Set PTEG mask
1058 */
1059 moea64_pteg_mask = moea64_pteg_count - 1;
1060
1061 /*
1062 * Initialize SLB table lock and page locks
1063 */
1064 mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);
1065 for (i = 0; i < PV_LOCK_COUNT; i++)
1066 mtx_init(&pv_lock[i], "page pv", NULL, MTX_DEF);
1067
1068 /*
1069 * Initialise the bootstrap pvo pool.
1070 */
1071 TUNABLE_INT_FETCH("machdep.moea64_bpvo_pool_size", &moea64_bpvo_pool_size);
1072 if (moea64_bpvo_pool_size == 0) {
1073 if (!hw_direct_map)
1074 moea64_bpvo_pool_size = ((ptoa((uintmax_t)physmem) * sizeof(struct vm_page)) /
1075 (PAGE_SIZE * PAGE_SIZE)) * BPVO_POOL_EXPANSION_FACTOR;
1076 else
1077 moea64_bpvo_pool_size = BPVO_POOL_SIZE;
1078 }
1079
1080 if (boothowto & RB_VERBOSE) {
1081 printf("mmu_oea64: bpvo pool entries = %d, bpvo pool size = %zu MB\n",
1082 moea64_bpvo_pool_size,
1083 moea64_bpvo_pool_size*sizeof(struct pvo_entry) / 1048576);
1084 }
1085
1086 moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
1087 moea64_bpvo_pool_size*sizeof(struct pvo_entry), PAGE_SIZE);
1088 moea64_bpvo_pool_index = 0;
1089
1090 /* Place at address usable through the direct map */
1091 if (hw_direct_map)
1092 moea64_bpvo_pool = (struct pvo_entry *)
1093 PHYS_TO_DMAP((uintptr_t)moea64_bpvo_pool);
1094
1095 /*
1096 * Make sure kernel vsid is allocated as well as VSID 0.
1097 */
1098 #ifndef __powerpc64__
1099 moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW]
1100 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
1101 moea64_vsid_bitmap[0] |= 1;
1102 #endif
1103
1104 /*
1105 * Initialize the kernel pmap (which is statically allocated).
1106 */
1107 #ifdef __powerpc64__
1108 for (i = 0; i < 64; i++) {
1109 pcpup->pc_aim.slb[i].slbv = 0;
1110 pcpup->pc_aim.slb[i].slbe = 0;
1111 }
1112 #else
1113 for (i = 0; i < 16; i++)
1114 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
1115 #endif
1116
1117 kernel_pmap->pmap_phys = kernel_pmap;
1118 CPU_FILL(&kernel_pmap->pm_active);
1119 RB_INIT(&kernel_pmap->pmap_pvo);
1120
1121 PMAP_LOCK_INIT(kernel_pmap);
1122
1123 /*
1124 * Now map in all the other buffers we allocated earlier
1125 */
1126
1127 moea64_setup_direct_map(kernelstart, kernelend);
1128 }
1129
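/*
 * Late bootstrap: add the Open Firmware mappings, enable translation, set
 * up the KVA range and its SLB entries, and allocate thread0's stack, the
 * message buffer, and the dynamic per-CPU data area.
 */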
1130 void
1131 moea64_late_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
1132 {
1133 ihandle_t mmui;
1134 phandle_t chosen;
1135 phandle_t mmu;
1136 ssize_t sz;
1137 int i;
1138 vm_offset_t pa, va;
1139 void *dpcpu;
1140
1141 /*
1142 * Set up the Open Firmware pmap and add its mappings if not in real
1143 * mode.
1144 */
1145
1146 chosen = OF_finddevice("/chosen");
1147 if (chosen != -1 && OF_getencprop(chosen, "mmu", &mmui, 4) != -1) {
1148 mmu = OF_instance_to_package(mmui);
1149 if (mmu == -1 ||
1150 (sz = OF_getproplen(mmu, "translations")) == -1)
1151 sz = 0;
1152 if (sz > 6144 /* tmpstksz - 2 KB headroom */)
1153 panic("moea64_bootstrap: too many ofw translations");
1154
1155 if (sz > 0)
1156 moea64_add_ofw_mappings(mmu, sz);
1157 }
1158
1159 /*
1160 * Calculate the last available physical address.
1161 */
1162 Maxmem = 0;
1163 for (i = 0; phys_avail[i + 1] != 0; i += 2)
1164 Maxmem = MAX(Maxmem, powerpc_btop(phys_avail[i + 1]));
1165
1166 /*
1167 * Initialize MMU.
1168 */
1169 pmap_cpu_bootstrap(0);
1170 mtmsr(mfmsr() | PSL_DR | PSL_IR);
1171 pmap_bootstrapped++;
1172
1173 /*
1174 * Set the start and end of kva.
1175 */
1176 virtual_avail = VM_MIN_KERNEL_ADDRESS;
1177 virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;
1178
1179 /*
1180 * Map the entire KVA range into the SLB. We must not fault there.
1181 */
1182 #ifdef __powerpc64__
1183 for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH)
1184 moea64_bootstrap_slb_prefault(va, 0);
1185 #endif
1186
1187 /*
1188 * Remap any early IO mappings (console framebuffer, etc.)
1189 */
1190 bs_remap_earlyboot();
1191
1192 /*
1193 * Figure out how far we can extend virtual_end into segment 16
1194 * without running into existing mappings. Segment 16 is guaranteed
1195 * to contain neither RAM nor devices (at least on Apple hardware),
1196 * but will generally contain some OFW mappings we should not
1197 * step on.
1198 */
1199
1200 #ifndef __powerpc64__ /* KVA is in high memory on PPC64 */
1201 PMAP_LOCK(kernel_pmap);
1202 while (virtual_end < VM_MAX_KERNEL_ADDRESS &&
1203 moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL)
1204 virtual_end += PAGE_SIZE;
1205 PMAP_UNLOCK(kernel_pmap);
1206 #endif
1207
1208 /*
1209 * Allocate a kernel stack with a guard page for thread0 and map it
1210 * into the kernel page map.
1211 */
1212 pa = moea64_bootstrap_alloc(kstack_pages * PAGE_SIZE, PAGE_SIZE);
1213 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
1214 virtual_avail = va + kstack_pages * PAGE_SIZE;
1215 CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
1216 thread0.td_kstack = va;
1217 thread0.td_kstack_pages = kstack_pages;
1218 for (i = 0; i < kstack_pages; i++) {
1219 moea64_kenter(va, pa);
1220 pa += PAGE_SIZE;
1221 va += PAGE_SIZE;
1222 }
1223
1224 /*
1225 * Allocate virtual address space for the message buffer.
1226 */
1227 pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE);
1228 msgbufp = (struct msgbuf *)virtual_avail;
1229 va = virtual_avail;
1230 virtual_avail += round_page(msgbufsize);
1231 while (va < virtual_avail) {
1232 moea64_kenter(va, pa);
1233 pa += PAGE_SIZE;
1234 va += PAGE_SIZE;
1235 }
1236
1237 /*
1238 * Allocate virtual address space for the dynamic percpu area.
1239 */
1240 pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
1241 dpcpu = (void *)virtual_avail;
1242 va = virtual_avail;
1243 virtual_avail += DPCPU_SIZE;
1244 while (va < virtual_avail) {
1245 moea64_kenter(va, pa);
1246 pa += PAGE_SIZE;
1247 va += PAGE_SIZE;
1248 }
1249 dpcpu_init(dpcpu, curcpu);
1250
1251 crashdumpmap = (caddr_t)virtual_avail;
1252 virtual_avail += MAXDUMPPGS * PAGE_SIZE;
1253
1254 /*
1255 * Allocate some things for page zeroing. We put this directly
1256 * in the page table and use MOEA64_PTE_REPLACE to avoid any
1257 * of the PVO book-keeping or other parts of the VM system
1258 * from even knowing that this hack exists.
1259 */
1260
1261 if (!hw_direct_map) {
1262 mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL,
1263 MTX_DEF);
1264 for (i = 0; i < 2; i++) {
1265 moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
1266 virtual_end -= PAGE_SIZE;
1267
1268 moea64_kenter(moea64_scratchpage_va[i], 0);
1269
1270 PMAP_LOCK(kernel_pmap);
1271 moea64_scratchpage_pvo[i] = moea64_pvo_find_va(
1272 kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]);
1273 PMAP_UNLOCK(kernel_pmap);
1274 }
1275 }
1276
1277 numa_mem_regions(&numa_pregions, &numapregions_sz);
1278 }
1279
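/*
 * Allocate a per-CPU quick-map page (and its PVO and lock) on systems
 * without a direct map; used by moea64_quick_enter_page().
 */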
1280 static void
1281 moea64_pmap_init_qpages(void)
1282 {
1283 struct pcpu *pc;
1284 int i;
1285
1286 if (hw_direct_map)
1287 return;
1288
1289 CPU_FOREACH(i) {
1290 pc = pcpu_find(i);
1291 pc->pc_qmap_addr = kva_alloc(PAGE_SIZE);
1292 if (pc->pc_qmap_addr == 0)
1293 panic("pmap_init_qpages: unable to allocate KVA");
1294 PMAP_LOCK(kernel_pmap);
1295 pc->pc_aim.qmap_pvo =
1296 moea64_pvo_find_va(kernel_pmap, pc->pc_qmap_addr);
1297 PMAP_UNLOCK(kernel_pmap);
1298 mtx_init(&pc->pc_aim.qmap_lock, "qmap lock", NULL, MTX_DEF);
1299 }
1300 }
1301
1302 SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, moea64_pmap_init_qpages, NULL);
1303
1304 /*
1305 * Activate a user pmap. This mostly involves setting some non-CPU
1306 * state.
1307 */
1308 void
1309 moea64_activate(struct thread *td)
1310 {
1311 pmap_t pm;
1312
1313 pm = &td->td_proc->p_vmspace->vm_pmap;
1314 CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
1315
1316 #ifdef __powerpc64__
1317 PCPU_SET(aim.userslb, pm->pm_slb);
1318 __asm __volatile("slbmte %0, %1; isync" ::
1319 "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE));
1320 #else
1321 PCPU_SET(curpmap, pm->pmap_phys);
1322 mtsrin(USER_SR << ADDR_SR_SHFT, td->td_pcb->pcb_cpu.aim.usr_vsid);
1323 #endif
1324 }
1325
1326 void
1327 moea64_deactivate(struct thread *td)
1328 {
1329 pmap_t pm;
1330
1331 __asm __volatile("isync; slbie %0" :: "r"(USER_ADDR));
1332
1333 pm = &td->td_proc->p_vmspace->vm_pmap;
1334 CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
1335 #ifdef __powerpc64__
1336 PCPU_SET(aim.userslb, NULL);
1337 #else
1338 PCPU_SET(curpmap, NULL);
1339 #endif
1340 }
1341
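/*
 * Clear the wired attribute from the mappings in the given range, demoting
 * superpages when only part of one is covered, and propagate any
 * referenced/changed bits to the vm_page.
 */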
1342 void
1343 moea64_unwire(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
1344 {
1345 struct pvo_entry key, *pvo;
1346 vm_page_t m;
1347 int64_t refchg;
1348
1349 key.pvo_vaddr = sva;
1350 PMAP_LOCK(pm);
1351 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
1352 pvo != NULL && PVO_VADDR(pvo) < eva;
1353 pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
1354 if (PVO_IS_SP(pvo)) {
1355 if (moea64_sp_pvo_in_range(pvo, sva, eva)) {
1356 pvo = moea64_sp_unwire(pvo);
1357 continue;
1358 } else {
1359 CTR1(KTR_PMAP, "%s: demote before unwire",
1360 __func__);
1361 moea64_sp_demote(pvo);
1362 }
1363 }
1364
1365 if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
1366 panic("moea64_unwire: pvo %p is missing PVO_WIRED",
1367 pvo);
1368 pvo->pvo_vaddr &= ~PVO_WIRED;
1369 refchg = moea64_pte_replace(pvo, 0 /* No invalidation */);
1370 if ((pvo->pvo_vaddr & PVO_MANAGED) &&
1371 (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
1372 if (refchg < 0)
1373 refchg = LPTE_CHG;
1374 m = PHYS_TO_VM_PAGE(PVO_PADDR(pvo));
1375
1376 refchg |= atomic_readandclear_32(&m->md.mdpg_attrs);
1377 if (refchg & LPTE_CHG)
1378 vm_page_dirty(m);
1379 if (refchg & LPTE_REF)
1380 vm_page_aflag_set(m, PGA_REFERENCED);
1381 }
1382 pm->pm_stats.wired_count--;
1383 }
1384 PMAP_UNLOCK(pm);
1385 }
1386
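/*
 * mincore(2) support: report whether the address is mapped (and by a
 * superpage), and whether the backing page is modified or referenced.
 */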
1387 static int
1388 moea64_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
1389 {
1390 struct pvo_entry *pvo;
1391 vm_paddr_t pa;
1392 vm_page_t m;
1393 int val;
1394 bool managed;
1395
1396 PMAP_LOCK(pmap);
1397
1398 pvo = moea64_pvo_find_va(pmap, addr);
1399 if (pvo != NULL) {
1400 pa = PVO_PADDR(pvo);
1401 m = PHYS_TO_VM_PAGE(pa);
1402 managed = (pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED;
1403 if (PVO_IS_SP(pvo))
1404 val = MINCORE_INCORE | MINCORE_PSIND(1);
1405 else
1406 val = MINCORE_INCORE;
1407 } else {
1408 PMAP_UNLOCK(pmap);
1409 return (0);
1410 }
1411
1412 PMAP_UNLOCK(pmap);
1413
1414 if (m == NULL)
1415 return (0);
1416
1417 if (managed) {
1418 if (moea64_is_modified(m))
1419 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
1420
1421 if (moea64_is_referenced(m))
1422 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
1423 }
1424
1425 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
1426 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
1427 managed) {
1428 *pap = pa;
1429 }
1430
1431 return (val);
1432 }
1433
1434 /*
1435 * This goes through and sets the physical address of our
1436 * special scratch PTE to the PA we want to zero or copy. Because
1437 * of locking issues (this can get called in pvo_enter() by
1438 * the UMA allocator), we can't use most other utility functions here
1439 */
1440
1441 static __inline
1442 void moea64_set_scratchpage_pa(int which, vm_paddr_t pa)
1443 {
1444 struct pvo_entry *pvo;
1445
1446 KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!"));
1447 mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);
1448
1449 pvo = moea64_scratchpage_pvo[which];
1450 PMAP_LOCK(pvo->pvo_pmap);
1451 pvo->pvo_pte.pa =
1452 moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
1453 moea64_pte_replace(pvo, MOEA64_PTE_INVALIDATE);
1454 PMAP_UNLOCK(pvo->pvo_pmap);
1455 isync();
1456 }
1457
1458 void
1459 moea64_copy_page(vm_page_t msrc, vm_page_t mdst)
1460 {
1461 mtx_lock(&moea64_scratchpage_mtx);
1462
1463 moea64_set_scratchpage_pa(0, VM_PAGE_TO_PHYS(msrc));
1464 moea64_set_scratchpage_pa(1, VM_PAGE_TO_PHYS(mdst));
1465
1466 bcopy((void *)moea64_scratchpage_va[0],
1467 (void *)moea64_scratchpage_va[1], PAGE_SIZE);
1468
1469 mtx_unlock(&moea64_scratchpage_mtx);
1470 }
1471
1472 void
1473 moea64_copy_page_dmap(vm_page_t msrc, vm_page_t mdst)
1474 {
1475 vm_offset_t dst;
1476 vm_offset_t src;
1477
1478 dst = VM_PAGE_TO_PHYS(mdst);
1479 src = VM_PAGE_TO_PHYS(msrc);
1480
1481 bcopy((void *)PHYS_TO_DMAP(src), (void *)PHYS_TO_DMAP(dst),
1482 PAGE_SIZE);
1483 }
1484
1485 inline void
1486 moea64_copy_pages_dmap(vm_page_t *ma, vm_offset_t a_offset,
1487 vm_page_t *mb, vm_offset_t b_offset, int xfersize)
1488 {
1489 void *a_cp, *b_cp;
1490 vm_offset_t a_pg_offset, b_pg_offset;
1491 int cnt;
1492
1493 while (xfersize > 0) {
1494 a_pg_offset = a_offset & PAGE_MASK;
1495 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
1496 a_cp = (char *)(uintptr_t)PHYS_TO_DMAP(
1497 VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT])) +
1498 a_pg_offset;
1499 b_pg_offset = b_offset & PAGE_MASK;
1500 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
1501 b_cp = (char *)(uintptr_t)PHYS_TO_DMAP(
1502 VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT])) +
1503 b_pg_offset;
1504 bcopy(a_cp, b_cp, cnt);
1505 a_offset += cnt;
1506 b_offset += cnt;
1507 xfersize -= cnt;
1508 }
1509 }
1510
1511 void
1512 moea64_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
1513 vm_page_t *mb, vm_offset_t b_offset, int xfersize)
1514 {
1515 void *a_cp, *b_cp;
1516 vm_offset_t a_pg_offset, b_pg_offset;
1517 int cnt;
1518
1519 mtx_lock(&moea64_scratchpage_mtx);
1520 while (xfersize > 0) {
1521 a_pg_offset = a_offset & PAGE_MASK;
1522 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
1523 moea64_set_scratchpage_pa(0,
1524 VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
1525 a_cp = (char *)moea64_scratchpage_va[0] + a_pg_offset;
1526 b_pg_offset = b_offset & PAGE_MASK;
1527 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
1528 moea64_set_scratchpage_pa(1,
1529 VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
1530 b_cp = (char *)moea64_scratchpage_va[1] + b_pg_offset;
1531 bcopy(a_cp, b_cp, cnt);
1532 a_offset += cnt;
1533 b_offset += cnt;
1534 xfersize -= cnt;
1535 }
1536 mtx_unlock(&moea64_scratchpage_mtx);
1537 }
1538
1539 void
1540 moea64_zero_page_area(vm_page_t m, int off, int size)
1541 {
1542 vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
1543
1544 if (size + off > PAGE_SIZE)
1545 panic("moea64_zero_page: size + off > PAGE_SIZE");
1546
1547 if (hw_direct_map) {
1548 bzero((caddr_t)(uintptr_t)PHYS_TO_DMAP(pa) + off, size);
1549 } else {
1550 mtx_lock(&moea64_scratchpage_mtx);
1551 moea64_set_scratchpage_pa(0, pa);
1552 bzero((caddr_t)moea64_scratchpage_va[0] + off, size);
1553 mtx_unlock(&moea64_scratchpage_mtx);
1554 }
1555 }
1556
1557 /*
1558 * Zero a page of physical memory by temporarily mapping it
1559 */
1560 void
1561 moea64_zero_page(vm_page_t m)
1562 {
1563 vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
1564 vm_offset_t va, off;
1565
1566 mtx_lock(&moea64_scratchpage_mtx);
1567
1568 moea64_set_scratchpage_pa(0, pa);
1569 va = moea64_scratchpage_va[0];
1570
1571 for (off = 0; off < PAGE_SIZE; off += cacheline_size)
1572 __asm __volatile("dcbz 0,%0" :: "r"(va + off));
1573
1574 mtx_unlock(&moea64_scratchpage_mtx);
1575 }
1576
1577 void
1578 moea64_zero_page_dmap(vm_page_t m)
1579 {
1580 vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
1581 vm_offset_t va, off;
1582
1583 va = PHYS_TO_DMAP(pa);
1584 for (off = 0; off < PAGE_SIZE; off += cacheline_size)
1585 __asm __volatile("dcbz 0,%0" :: "r"(va + off));
1586 }
1587
1588 vm_offset_t
1589 moea64_quick_enter_page(vm_page_t m)
1590 {
1591 struct pvo_entry *pvo;
1592 vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
1593
1594 /*
1595 * MOEA64_PTE_REPLACE does some locking, so we can't just grab
1596 * a critical section and access the PCPU data like on i386.
1597 * Instead, pin the thread and grab the PCPU lock to prevent
1598 * a preempting thread from using the same PCPU data.
1599 */
1600 sched_pin();
1601
1602 mtx_assert(PCPU_PTR(aim.qmap_lock), MA_NOTOWNED);
1603 pvo = PCPU_GET(aim.qmap_pvo);
1604
1605 mtx_lock(PCPU_PTR(aim.qmap_lock));
1606 pvo->pvo_pte.pa = moea64_calc_wimg(pa, pmap_page_get_memattr(m)) |
1607 (uint64_t)pa;
1608 moea64_pte_replace(pvo, MOEA64_PTE_INVALIDATE);
1609 isync();
1610
1611 return (PCPU_GET(qmap_addr));
1612 }
1613
1614 vm_offset_t
1615 moea64_quick_enter_page_dmap(vm_page_t m)
1616 {
1617
1618 return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
1619 }
1620
1621 void
1622 moea64_quick_remove_page(vm_offset_t addr)
1623 {
1624
1625 mtx_assert(PCPU_PTR(aim.qmap_lock), MA_OWNED);
1626 KASSERT(PCPU_GET(qmap_addr) == addr,
1627 ("moea64_quick_remove_page: invalid address"));
1628 mtx_unlock(PCPU_PTR(aim.qmap_lock));
1629 sched_unpin();
1630 }
1631
1632 boolean_t
1633 moea64_page_is_mapped(vm_page_t m)
1634 {
1635 return (!LIST_EMPTY(&(m)->md.mdpg_pvoh));
1636 }
1637
1638 /*
1639 * Map the given physical page at the specified virtual address in the
1640 * target pmap with the protection requested. If specified the page
1641 * will be wired down.
1642 */
1643
1644 int
1645 moea64_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
1646 vm_prot_t prot, u_int flags, int8_t psind)
1647 {
1648 struct pvo_entry *pvo, *oldpvo, *tpvo;
1649 struct pvo_head *pvo_head;
1650 uint64_t pte_lo;
1651 int error;
1652 vm_paddr_t pa;
1653
1654 if ((m->oflags & VPO_UNMANAGED) == 0) {
1655 if ((flags & PMAP_ENTER_QUICK_LOCKED) == 0)
1656 VM_PAGE_OBJECT_BUSY_ASSERT(m);
1657 else
1658 VM_OBJECT_ASSERT_LOCKED(m->object);
1659 }
1660
1661 if (psind > 0)
1662 return (moea64_sp_enter(pmap, va, m, prot, flags, psind));
1663
1664 pvo = alloc_pvo_entry(0);
1665 if (pvo == NULL)
1666 return (KERN_RESOURCE_SHORTAGE);
1667 pvo->pvo_pmap = NULL; /* to be filled in later */
1668 pvo->pvo_pte.prot = prot;
1669
1670 pa = VM_PAGE_TO_PHYS(m);
1671 pte_lo = moea64_calc_wimg(pa, pmap_page_get_memattr(m));
1672 pvo->pvo_pte.pa = pa | pte_lo;
1673
1674 if ((flags & PMAP_ENTER_WIRED) != 0)
1675 pvo->pvo_vaddr |= PVO_WIRED;
1676
1677 if ((m->oflags & VPO_UNMANAGED) != 0 || !moea64_initialized) {
1678 pvo_head = NULL;
1679 } else {
1680 pvo_head = &m->md.mdpg_pvoh;
1681 pvo->pvo_vaddr |= PVO_MANAGED;
1682 }
1683
1684 PV_LOCK(pa);
1685 PMAP_LOCK(pmap);
1686 if (pvo->pvo_pmap == NULL)
1687 init_pvo_entry(pvo, pmap, va);
1688
1689 if (moea64_ps_enabled(pmap) &&
1690 (tpvo = moea64_pvo_find_va(pmap, va & ~HPT_SP_MASK)) != NULL &&
1691 PVO_IS_SP(tpvo)) {
1692 /* Demote SP before entering a regular page */
1693 CTR2(KTR_PMAP, "%s: demote before enter: va=%#jx",
1694 __func__, (uintmax_t)va);
1695 moea64_sp_demote_aligned(tpvo);
1696 }
1697
1698 if (prot & VM_PROT_WRITE)
1699 if (pmap_bootstrapped &&
1700 (m->oflags & VPO_UNMANAGED) == 0)
1701 vm_page_aflag_set(m, PGA_WRITEABLE);
1702
1703 error = moea64_pvo_enter(pvo, pvo_head, &oldpvo);
1704 if (error == EEXIST) {
1705 if (oldpvo->pvo_vaddr == pvo->pvo_vaddr &&
1706 oldpvo->pvo_pte.pa == pvo->pvo_pte.pa &&
1707 oldpvo->pvo_pte.prot == prot) {
1708 /* Identical mapping already exists */
1709 error = 0;
1710
1711 /* If not in page table, reinsert it */
1712 if (moea64_pte_synch(oldpvo) < 0) {
1713 STAT_MOEA64(moea64_pte_overflow--);
1714 moea64_pte_insert(oldpvo);
1715 }
1716
1717 /* Then just clean up and go home */
1718 PMAP_UNLOCK(pmap);
1719 PV_UNLOCK(pa);
1720 free_pvo_entry(pvo);
1721 pvo = NULL;
1722 goto out;
1723 } else {
1724 /* Otherwise, need to kill it first */
1725 KASSERT(oldpvo->pvo_pmap == pmap, ("pmap of old "
1726 "mapping does not match new mapping"));
1727 moea64_pvo_remove_from_pmap(oldpvo);
1728 moea64_pvo_enter(pvo, pvo_head, NULL);
1729 }
1730 }
1731 PMAP_UNLOCK(pmap);
1732 PV_UNLOCK(pa);
1733
1734 /* Free any dead pages */
1735 if (error == EEXIST) {
1736 moea64_pvo_remove_from_page(oldpvo);
1737 free_pvo_entry(oldpvo);
1738 }
1739
1740 out:
1741 /*
1742 * Flush the page from the instruction cache if this page is
1743 * mapped executable and cacheable.
1744 */
1745 if (pmap != kernel_pmap && (m->a.flags & PGA_EXECUTABLE) == 0 &&
1746 (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
1747 vm_page_aflag_set(m, PGA_EXECUTABLE);
1748 moea64_syncicache(pmap, va, pa, PAGE_SIZE);
1749 }
1750
1751 #if VM_NRESERVLEVEL > 0
1752 /*
1753 * Try to promote pages.
1754 *
1755 * If the VA of the entered page is not aligned with its PA,
1756 * don't try page promotion as it is not possible.
1757 * This reduces the number of promotion failures dramatically.
1758 */
1759 if (moea64_ps_enabled(pmap) && pmap != kernel_pmap && pvo != NULL &&
1760 (pvo->pvo_vaddr & PVO_MANAGED) != 0 &&
1761 (va & HPT_SP_MASK) == (pa & HPT_SP_MASK) &&
1762 (m->flags & PG_FICTITIOUS) == 0 &&
1763 vm_reserv_level_iffullpop(m) == 0)
1764 moea64_sp_promote(pmap, va, m);
1765 #endif
1766
1767 return (KERN_SUCCESS);
1768 }
1769
1770 static void
1771 moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1772 vm_size_t sz)
1773 {
1774
1775 /*
1776 * This is much trickier than on older systems because
1777 * we can't sync the icache on physical addresses directly
1778 * without a direct map. Instead we check a couple of cases
1779 * where the memory is already mapped in and, failing that,
1780 * use the same trick we use for page zeroing to create
1781 * a temporary mapping for this physical address.
1782 */
1783
1784 if (!pmap_bootstrapped) {
1785 /*
1786 * If PMAP is not bootstrapped, we are likely to be
1787 * in real mode.
1788 */
1789 __syncicache((void *)(uintptr_t)pa, sz);
1790 } else if (pmap == kernel_pmap) {
1791 __syncicache((void *)va, sz);
1792 } else if (hw_direct_map) {
1793 __syncicache((void *)(uintptr_t)PHYS_TO_DMAP(pa), sz);
1794 } else {
1795 /* Use the scratch page to set up a temp mapping */
1796
1797 mtx_lock(&moea64_scratchpage_mtx);
1798
1799 moea64_set_scratchpage_pa(1, pa & ~ADDR_POFF);
1800 __syncicache((void *)(moea64_scratchpage_va[1] +
1801 (va & ADDR_POFF)), sz);
1802
1803 mtx_unlock(&moea64_scratchpage_mtx);
1804 }
1805 }
1806
1807 /*
1808 * Maps a sequence of resident pages belonging to the same object.
1809 * The sequence begins with the given page m_start. This page is
1810 * mapped at the given virtual address start. Each subsequent page is
1811 * mapped at a virtual address that is offset from start by the same
1812 * amount as the page is offset from m_start within the object. The
1813 * last page in the sequence is the page with the largest offset from
1814 * m_start that can be mapped at a virtual address less than the given
1815 * virtual address end. Not every virtual page between start and end
1816 * is mapped; only those for which a resident page exists with the
1817 * corresponding offset from m_start are mapped.
1818 */
1819 void
1820 moea64_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end,
1821 vm_page_t m_start, vm_prot_t prot)
1822 {
1823 vm_page_t m;
1824 vm_pindex_t diff, psize;
1825 vm_offset_t va;
1826 int8_t psind;
1827
1828 VM_OBJECT_ASSERT_LOCKED(m_start->object);
1829
1830 psize = atop(end - start);
1831 m = m_start;
1832 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1833 va = start + ptoa(diff);
1834 if ((va & HPT_SP_MASK) == 0 && va + HPT_SP_SIZE <= end &&
1835 m->psind == 1 && moea64_ps_enabled(pm))
1836 psind = 1;
1837 else
1838 psind = 0;
1839 moea64_enter(pm, va, m, prot &
1840 (VM_PROT_READ | VM_PROT_EXECUTE),
1841 PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED, psind);
1842 if (psind == 1)
1843 m = &m[HPT_SP_SIZE / PAGE_SIZE - 1];
1844 m = TAILQ_NEXT(m, listq);
1845 }
1846 }
1847
1848 void
1849 moea64_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m,
1850 vm_prot_t prot)
1851 {
1852
1853 moea64_enter(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
1854 PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED, 0);
1855 }
1856
1857 vm_paddr_t
1858 moea64_extract(pmap_t pm, vm_offset_t va)
1859 {
1860 struct pvo_entry *pvo;
1861 vm_paddr_t pa;
1862
1863 PMAP_LOCK(pm);
1864 pvo = moea64_pvo_find_va(pm, va);
1865 if (pvo == NULL)
1866 pa = 0;
1867 else
1868 pa = PVO_PADDR(pvo) | (va - PVO_VADDR(pvo));
1869 PMAP_UNLOCK(pm);
1870
1871 return (pa);
1872 }
1873
1874 /*
1875 * Atomically extract and hold the physical page with the given
1876 * pmap and virtual address pair if that mapping permits the given
1877 * protection.
1878 */
1879 vm_page_t
1880 moea64_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1881 {
1882 struct pvo_entry *pvo;
1883 vm_page_t m;
1884
1885 m = NULL;
1886 PMAP_LOCK(pmap);
1887 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1888 if (pvo != NULL && (pvo->pvo_pte.prot & prot) == prot) {
1889 m = PHYS_TO_VM_PAGE(PVO_PADDR(pvo));
1890 if (!vm_page_wire_mapped(m))
1891 m = NULL;
1892 }
1893 PMAP_UNLOCK(pmap);
1894 return (m);
1895 }
1896
1897 static void *
1898 moea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
1899 uint8_t *flags, int wait)
1900 {
1901 struct pvo_entry *pvo;
1902 vm_offset_t va;
1903 vm_page_t m;
1904 int needed_lock;
1905
1906 /*
1907 * This entire routine is a horrible hack to avoid bothering kmem
1908 * for new KVA addresses. Because this can get called from inside
1909 * kmem allocation routines, calling kmem for a new address here
1910 * can lead to multiply locking non-recursive mutexes.
1911 */
1912
1913 *flags = UMA_SLAB_PRIV;
1914 needed_lock = !PMAP_LOCKED(kernel_pmap);
1915
1916 m = vm_page_alloc_noobj_domain(domain, malloc2vm_flags(wait) |
1917 VM_ALLOC_WIRED);
1918 if (m == NULL)
1919 return (NULL);
1920
1921 va = VM_PAGE_TO_PHYS(m);
1922
1923 pvo = alloc_pvo_entry(1 /* bootstrap */);
1924
1925 pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE;
1926 pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | LPTE_M;
1927
1928 if (needed_lock)
1929 PMAP_LOCK(kernel_pmap);
1930
1931 init_pvo_entry(pvo, kernel_pmap, va);
1932 pvo->pvo_vaddr |= PVO_WIRED;
1933
1934 moea64_pvo_enter(pvo, NULL, NULL);
1935
1936 if (needed_lock)
1937 PMAP_UNLOCK(kernel_pmap);
1938
1939 return (void *)va;
1940 }
1941
1942 extern int elf32_nxstack;
1943
1944 void
1945 moea64_init(void)
1946 {
1947
1948 CTR0(KTR_PMAP, "moea64_init");
1949
1950 moea64_pvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
1951 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1952 UMA_ZONE_VM | UMA_ZONE_NOFREE);
1953
1954 /*
1955 * Are large page mappings enabled?
1956 *
1957 * Until HPT superpages are better tested, leave them disabled by
1958 * default.
1959 */
1960 superpages_enabled = 0;
1961 TUNABLE_INT_FETCH("vm.pmap.superpages_enabled", &superpages_enabled);
1962 if (superpages_enabled) {
1963 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
1964 ("moea64_init: can't assign to pagesizes[1]"));
1965
1966 if (moea64_large_page_size == 0) {
1967 printf("mmu_oea64: HW does not support large pages. "
1968 "Disabling superpages...\n");
1969 superpages_enabled = 0;
1970 } else if (!moea64_has_lp_4k_16m) {
1971 printf("mmu_oea64: "
1972 "HW does not support mixed 4KB/16MB page sizes. "
1973 "Disabling superpages...\n");
1974 superpages_enabled = 0;
1975 } else
1976 pagesizes[1] = HPT_SP_SIZE;
1977 }
1978
1979 if (!hw_direct_map) {
1980 uma_zone_set_allocf(moea64_pvo_zone, moea64_uma_page_alloc);
1981 }
1982
1983 #ifdef COMPAT_FREEBSD32
1984 elf32_nxstack = 1;
1985 #endif
1986
1987 moea64_initialized = TRUE;
1988 }
1989
1990 boolean_t
1991 moea64_is_referenced(vm_page_t m)
1992 {
1993
1994 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1995 ("moea64_is_referenced: page %p is not managed", m));
1996
1997 return (moea64_query_bit(m, LPTE_REF));
1998 }
1999
2000 boolean_t
2001 moea64_is_modified(vm_page_t m)
2002 {
2003
2004 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2005 ("moea64_is_modified: page %p is not managed", m));
2006
2007 /*
2008 * If the page is not busied then this check is racy.
2009 */
2010 if (!pmap_page_is_write_mapped(m))
2011 return (FALSE);
2012
2013 return (moea64_query_bit(m, LPTE_CHG));
2014 }
2015
2016 boolean_t
2017 moea64_is_prefaultable(pmap_t pmap, vm_offset_t va)
2018 {
2019 struct pvo_entry *pvo;
2020 boolean_t rv = TRUE;
2021
2022 PMAP_LOCK(pmap);
2023 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
2024 if (pvo != NULL)
2025 rv = FALSE;
2026 PMAP_UNLOCK(pmap);
2027 return (rv);
2028 }
2029
2030 void
2031 moea64_clear_modify(vm_page_t m)
2032 {
2033
2034 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2035 ("moea64_clear_modify: page %p is not managed", m));
2036 vm_page_assert_busied(m);
2037
2038 if (!pmap_page_is_write_mapped(m))
2039 return;
2040 moea64_clear_bit(m, LPTE_CHG);
2041 }
2042
2043 /*
2044 * Clear the write and modified bits in each of the given page's mappings.
2045 */
2046 void
2047 moea64_remove_write(vm_page_t m)
2048 {
2049 struct pvo_entry *pvo;
2050 int64_t refchg, ret;
2051 pmap_t pmap;
2052
2053 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2054 ("moea64_remove_write: page %p is not managed", m));
2055 vm_page_assert_busied(m);
2056
2057 if (!pmap_page_is_write_mapped(m))
2058 return;
2059
2060 powerpc_sync();
2061 PV_PAGE_LOCK(m);
2062 refchg = 0;
2063 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2064 pmap = pvo->pvo_pmap;
2065 PMAP_LOCK(pmap);
2066 if (!(pvo->pvo_vaddr & PVO_DEAD) &&
2067 (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
2068 if (PVO_IS_SP(pvo)) {
2069 CTR1(KTR_PMAP, "%s: demote before remwr",
2070 __func__);
2071 moea64_sp_demote(pvo);
2072 }
2073 pvo->pvo_pte.prot &= ~VM_PROT_WRITE;
2074 ret = moea64_pte_replace(pvo, MOEA64_PTE_PROT_UPDATE);
2075 if (ret < 0)
2076 ret = LPTE_CHG;
2077 refchg |= ret;
2078 if (pvo->pvo_pmap == kernel_pmap)
2079 isync();
2080 }
2081 PMAP_UNLOCK(pmap);
2082 }
2083 if ((refchg | atomic_readandclear_32(&m->md.mdpg_attrs)) & LPTE_CHG)
2084 vm_page_dirty(m);
2085 vm_page_aflag_clear(m, PGA_WRITEABLE);
2086 PV_PAGE_UNLOCK(m);
2087 }
2088
2089 /*
2090 * moea64_ts_referenced:
2091 *
2092 * Return a count of reference bits for a page, clearing those bits.
2093 * It is not necessary for every reference bit to be cleared, but it
2094 * is necessary that 0 only be returned when there are truly no
2095 * reference bits set.
2096 *
2097 * XXX: The exact number of bits to check and clear is a matter that
2098 * should be tested and standardized at some point in the future for
2099 * optimal aging of shared pages.
2100 */
2101 int
2102 moea64_ts_referenced(vm_page_t m)
2103 {
2104
2105 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2106 ("moea64_ts_referenced: page %p is not managed", m));
2107 return (moea64_clear_bit(m, LPTE_REF));
2108 }
2109
2110 /*
2111 * Modify the WIMG settings of all mappings for a page.
2112 */
2113 void
2114 moea64_page_set_memattr(vm_page_t m, vm_memattr_t ma)
2115 {
2116 struct pvo_entry *pvo;
2117 int64_t refchg;
2118 pmap_t pmap;
2119 uint64_t lo;
2120
2121 CTR3(KTR_PMAP, "%s: pa=%#jx, ma=%#x",
2122 __func__, (uintmax_t)VM_PAGE_TO_PHYS(m), ma);
2123
2124 if ((m->oflags & VPO_UNMANAGED) != 0) {
2125 m->md.mdpg_cache_attrs = ma;
2126 return;
2127 }
2128
2129 lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
2130
2131 PV_PAGE_LOCK(m);
2132 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2133 pmap = pvo->pvo_pmap;
2134 PMAP_LOCK(pmap);
2135 if (!(pvo->pvo_vaddr & PVO_DEAD)) {
2136 if (PVO_IS_SP(pvo)) {
2137 CTR1(KTR_PMAP,
2138 "%s: demote before set_memattr", __func__);
2139 moea64_sp_demote(pvo);
2140 }
2141 pvo->pvo_pte.pa &= ~LPTE_WIMG;
2142 pvo->pvo_pte.pa |= lo;
2143 refchg = moea64_pte_replace(pvo, MOEA64_PTE_INVALIDATE);
2144 if (refchg < 0)
2145 refchg = (pvo->pvo_pte.prot & VM_PROT_WRITE) ?
2146 LPTE_CHG : 0;
2147 if ((pvo->pvo_vaddr & PVO_MANAGED) &&
2148 (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
2149 refchg |=
2150 atomic_readandclear_32(&m->md.mdpg_attrs);
2151 if (refchg & LPTE_CHG)
2152 vm_page_dirty(m);
2153 if (refchg & LPTE_REF)
2154 vm_page_aflag_set(m, PGA_REFERENCED);
2155 }
2156 if (pvo->pvo_pmap == kernel_pmap)
2157 isync();
2158 }
2159 PMAP_UNLOCK(pmap);
2160 }
2161 m->md.mdpg_cache_attrs = ma;
2162 PV_PAGE_UNLOCK(m);
2163 }
2164
2165 /*
2166 * Map a wired page into kernel virtual address space.
2167 */
2168 void
2169 moea64_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
2170 {
2171 int error;
2172 struct pvo_entry *pvo, *oldpvo;
2173
2174 do {
2175 pvo = alloc_pvo_entry(0);
2176 if (pvo == NULL)
2177 vm_wait(NULL);
2178 } while (pvo == NULL);
2179 pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
2180 pvo->pvo_pte.pa = (pa & ~ADDR_POFF) | moea64_calc_wimg(pa, ma);
2181 pvo->pvo_vaddr |= PVO_WIRED;
2182
2183 PMAP_LOCK(kernel_pmap);
2184 oldpvo = moea64_pvo_find_va(kernel_pmap, va);
2185 if (oldpvo != NULL)
2186 moea64_pvo_remove_from_pmap(oldpvo);
2187 init_pvo_entry(pvo, kernel_pmap, va);
2188 error = moea64_pvo_enter(pvo, NULL, NULL);
2189 PMAP_UNLOCK(kernel_pmap);
2190
2191 /* Free any dead pages */
2192 if (oldpvo != NULL) {
2193 moea64_pvo_remove_from_page(oldpvo);
2194 free_pvo_entry(oldpvo);
2195 }
2196
2197 if (error != 0)
2198 panic("moea64_kenter: failed to enter va %#zx pa %#jx: %d", va,
2199 (uintmax_t)pa, error);
2200 }
2201
2202 void
2203 moea64_kenter(vm_offset_t va, vm_paddr_t pa)
2204 {
2205
2206 moea64_kenter_attr(va, pa, VM_MEMATTR_DEFAULT);
2207 }
2208
2209 /*
2210 * Extract the physical page address associated with the given kernel virtual
2211 * address.
2212 */
2213 vm_paddr_t
2214 moea64_kextract(vm_offset_t va)
2215 {
2216 struct pvo_entry *pvo;
2217 vm_paddr_t pa;
2218
2219 /*
2220 * Shortcut the direct-mapped case when applicable. We never put
2221 * anything but 1:1 (or 62-bit aliased) mappings below
2222 * VM_MIN_KERNEL_ADDRESS.
2223 */
2224 if (va < VM_MIN_KERNEL_ADDRESS)
2225 return (va & ~DMAP_BASE_ADDRESS);
2226
2227 PMAP_LOCK(kernel_pmap);
2228 pvo = moea64_pvo_find_va(kernel_pmap, va);
2229 KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
2230 va));
2231 pa = PVO_PADDR(pvo) | (va - PVO_VADDR(pvo));
2232 PMAP_UNLOCK(kernel_pmap);
2233 return (pa);
2234 }
2235
2236 /*
2237 * Remove a wired page from kernel virtual address space.
2238 */
2239 void
2240 moea64_kremove(vm_offset_t va)
2241 {
2242 moea64_remove(kernel_pmap, va, va + PAGE_SIZE);
2243 }
2244
2245 /*
2246 * Provide a kernel pointer corresponding to a given userland pointer.
2247 * The returned pointer is valid until the next time this function is
2248 * called in this thread. This is used internally in copyin/copyout.
2249 */
2250 static int
2251 moea64_map_user_ptr(pmap_t pm, volatile const void *uaddr,
2252 void **kaddr, size_t ulen, size_t *klen)
2253 {
2254 size_t l;
2255 #ifdef __powerpc64__
2256 struct slb *slb;
2257 #endif
2258 register_t slbv;
2259
2260 *kaddr = (char *)USER_ADDR + ((uintptr_t)uaddr & ~SEGMENT_MASK);
2261 l = ((char *)USER_ADDR + SEGMENT_LENGTH) - (char *)(*kaddr);
2262 if (l > ulen)
2263 l = ulen;
2264 if (klen)
2265 *klen = l;
2266 else if (l != ulen)
2267 return (EFAULT);
2268
2269 #ifdef __powerpc64__
2270 /* Try lockless look-up first */
2271 slb = user_va_to_slb_entry(pm, (vm_offset_t)uaddr);
2272
2273 if (slb == NULL) {
2274 /* If it isn't there, we need to pre-fault the VSID */
2275 PMAP_LOCK(pm);
2276 slbv = va_to_vsid(pm, (vm_offset_t)uaddr) << SLBV_VSID_SHIFT;
2277 PMAP_UNLOCK(pm);
2278 } else {
2279 slbv = slb->slbv;
2280 }
2281
2282 /* Mark segment no-execute */
2283 slbv |= SLBV_N;
2284 #else
2285 slbv = va_to_vsid(pm, (vm_offset_t)uaddr);
2286
2287 /* Mark segment no-execute */
2288 slbv |= SR_N;
2289 #endif
2290
2291 /* If we have already set this VSID, we can just return */
2292 if (curthread->td_pcb->pcb_cpu.aim.usr_vsid == slbv)
2293 return (0);
2294
2295 __asm __volatile("isync");
2296 curthread->td_pcb->pcb_cpu.aim.usr_segm =
2297 (uintptr_t)uaddr >> ADDR_SR_SHFT;
2298 curthread->td_pcb->pcb_cpu.aim.usr_vsid = slbv;
2299 #ifdef __powerpc64__
2300 __asm __volatile ("slbie %0; slbmte %1, %2; isync" ::
2301 "r"(USER_ADDR), "r"(slbv), "r"(USER_SLB_SLBE));
2302 #else
2303 __asm __volatile("mtsr %0,%1; isync" :: "n"(USER_SR), "r"(slbv));
2304 #endif
2305
2306 return (0);
2307 }
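
/*
 * Illustrative sketch (not part of the build): how a copyin()-style loop
 * in the pmap copy routines might consume moea64_map_user_ptr() above,
 * one segment-sized window at a time.  'udata', 'kbuf' and 'len' are
 * hypothetical locals; fault handling (pcb_onfault) is omitted.
 *
 *	while (len > 0) {
 *		void *kaddr;
 *		size_t klen;
 *
 *		if (moea64_map_user_ptr(pm, udata, &kaddr, len, &klen) != 0)
 *			return (EFAULT);
 *		memcpy(kbuf, kaddr, klen);	// user data via the window
 *		udata = (const char *)udata + klen;
 *		kbuf = (char *)kbuf + klen;
 *		len -= klen;
 *	}
 */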
2308
2309 /*
2310 * Figure out where a given kernel pointer (usually in a fault) points
2311 * to from the VM's perspective, potentially remapping into userland's
2312 * address space.
2313 */
2314 static int
2315 moea64_decode_kernel_ptr(vm_offset_t addr, int *is_user,
2316 vm_offset_t *decoded_addr)
2317 {
2318 vm_offset_t user_sr;
2319
2320 if ((addr >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
2321 user_sr = curthread->td_pcb->pcb_cpu.aim.usr_segm;
2322 addr &= ADDR_PIDX | ADDR_POFF;
2323 addr |= user_sr << ADDR_SR_SHFT;
2324 *decoded_addr = addr;
2325 *is_user = 1;
2326 } else {
2327 *decoded_addr = addr;
2328 *is_user = 0;
2329 }
2330
2331 return (0);
2332 }
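
/*
 * Worked example (illustrative only): suppose a fault address falls in the
 * USER_ADDR segment window and the current thread last mapped user segment
 * number 3 there (usr_segm == 3).  moea64_decode_kernel_ptr() above then
 * rebuilds the real user virtual address as
 *
 *	addr = (addr & (ADDR_PIDX | ADDR_POFF))	   // keep page index + offset
 *	     | ((vm_offset_t)3 << ADDR_SR_SHFT);   // substitute user segment
 *	*is_user = 1;
 *
 * Every other address is passed through unchanged with *is_user = 0.
 */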
2333
2334 /*
2335 * Map a range of physical addresses into kernel virtual address space.
2336 *
2337 * The value passed in *virt is a suggested virtual address for the mapping.
2338 * Architectures which can support a direct-mapped physical to virtual region
2339 * can return the appropriate address within that region, leaving '*virt'
2340 * unchanged. Other architectures should map the pages starting at '*virt' and
2341 * update '*virt' with the first usable address after the mapped region.
2342 */
2343 vm_offset_t
2344 moea64_map(vm_offset_t *virt, vm_paddr_t pa_start,
2345 vm_paddr_t pa_end, int prot)
2346 {
2347 vm_offset_t sva, va;
2348
2349 if (hw_direct_map) {
2350 /*
2351 * Check if every page in the region is covered by the direct
2352 * map. The direct map covers all of physical memory. Use
2353 * moea64_calc_wimg() as a shortcut to check whether each page is
2354 * ordinary physical memory and therefore covered by the direct map.
2355 */
2356 for (va = pa_start; va < pa_end; va += PAGE_SIZE)
2357 if (moea64_calc_wimg(va, VM_MEMATTR_DEFAULT) != LPTE_M)
2358 break;
2359 if (va == pa_end)
2360 return (PHYS_TO_DMAP(pa_start));
2361 }
2362 sva = *virt;
2363 va = sva;
2364 /* XXX respect prot argument */
2365 for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
2366 moea64_kenter(va, pa_start);
2367 *virt = va;
2368
2369 return (sva);
2370 }
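
/*
 * Illustrative sketch (not compiled): the two behaviours of moea64_map()
 * above as seen by a caller; 'va_hint', 'pa0' and 'pa1' are hypothetical.
 *
 *	vm_offset_t va = va_hint;
 *	vm_offset_t sva = moea64_map(&va, pa0, pa1,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *
 * With hw_direct_map and ordinary memory, sva == PHYS_TO_DMAP(pa0) and
 * 'va' is left untouched.  Otherwise the range is entered page by page
 * with moea64_kenter() starting at va_hint, sva == va_hint, and 'va' is
 * advanced past the mapped region.
 */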
2371
2372 /*
2373 * Returns true if the pmap's pv is one of the first
2374 * 16 pvs linked to from this page. This count may
2375 * be changed upwards or downwards in the future; it
2376 * is only necessary that true be returned for a small
2377 * subset of pmaps for proper page aging.
2378 */
2379 boolean_t
2380 moea64_page_exists_quick(pmap_t pmap, vm_page_t m)
2381 {
2382 int loops;
2383 struct pvo_entry *pvo;
2384 boolean_t rv;
2385
2386 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2387 ("moea64_page_exists_quick: page %p is not managed", m));
2388 loops = 0;
2389 rv = FALSE;
2390 PV_PAGE_LOCK(m);
2391 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2392 if (!(pvo->pvo_vaddr & PVO_DEAD) && pvo->pvo_pmap == pmap) {
2393 rv = TRUE;
2394 break;
2395 }
2396 if (++loops >= 16)
2397 break;
2398 }
2399 PV_PAGE_UNLOCK(m);
2400 return (rv);
2401 }
2402
2403 void
2404 moea64_page_init(vm_page_t m)
2405 {
2406
2407 m->md.mdpg_attrs = 0;
2408 m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT;
2409 LIST_INIT(&m->md.mdpg_pvoh);
2410 }
2411
2412 /*
2413 * Return the number of managed mappings to the given physical page
2414 * that are wired.
2415 */
2416 int
2417 moea64_page_wired_mappings(vm_page_t m)
2418 {
2419 struct pvo_entry *pvo;
2420 int count;
2421
2422 count = 0;
2423 if ((m->oflags & VPO_UNMANAGED) != 0)
2424 return (count);
2425 PV_PAGE_LOCK(m);
2426 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
2427 if ((pvo->pvo_vaddr & (PVO_DEAD | PVO_WIRED)) == PVO_WIRED)
2428 count++;
2429 PV_PAGE_UNLOCK(m);
2430 return (count);
2431 }
2432
2433 static uintptr_t moea64_vsidcontext;
2434
2435 uintptr_t
2436 moea64_get_unique_vsid(void) {
2437 u_int entropy;
2438 register_t hash;
2439 uint32_t mask;
2440 int i;
2441
2442 entropy = 0;
2443 __asm __volatile("mftb %0" : "=r"(entropy));
2444
2445 mtx_lock(&moea64_slb_mutex);
2446 for (i = 0; i < NVSIDS; i += VSID_NBPW) {
2447 u_int n;
2448
2449 /*
2450 * Create a new value by multiplying by a prime and adding in
2451 * entropy from the timebase register. This is to make the
2452 * VSID more random so that the PT hash function collides
2453 * less often. (Note that the prime causes gcc to do shifts
2454 * instead of a multiply.)
2455 */
2456 moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy;
2457 hash = moea64_vsidcontext & (NVSIDS - 1);
2458 if (hash == 0) /* 0 is special, avoid it */
2459 continue;
2460 n = hash >> 5;
2461 mask = 1 << (hash & (VSID_NBPW - 1));
2462 hash = (moea64_vsidcontext & VSID_HASHMASK);
2463 if (moea64_vsid_bitmap[n] & mask) { /* collision? */
2464 /* anything free in this bucket? */
2465 if (moea64_vsid_bitmap[n] == 0xffffffff) {
2466 entropy = (moea64_vsidcontext >> 20);
2467 continue;
2468 }
2469 i = ffs(~moea64_vsid_bitmap[n]) - 1;
2470 mask = 1 << i;
2471 hash &= rounddown2(VSID_HASHMASK, VSID_NBPW);
2472 hash |= i;
2473 }
2474 if (hash == VSID_VRMA) /* also special, avoid this too */
2475 continue;
2476 KASSERT(!(moea64_vsid_bitmap[n] & mask),
2477 ("Allocating in-use VSID %#zx\n", hash));
2478 moea64_vsid_bitmap[n] |= mask;
2479 mtx_unlock(&moea64_slb_mutex);
2480 return (hash);
2481 }
2482
2483 mtx_unlock(&moea64_slb_mutex);
2484 panic("%s: out of segments",__func__);
2485 }
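
/*
 * Worked example (illustrative, assuming VSID_NBPW == 32): if the masked
 * hash computed above comes out as 0x2c7, then
 *
 *	n    = 0x2c7 >> 5;		// bitmap word 22
 *	mask = 1 << (0x2c7 & 31);	// bit 7 of that word
 *
 * so VSID candidate 0x2c7 is tracked by bit 7 of moea64_vsid_bitmap[22].
 * On a collision, the first clear bit of the same word is taken instead
 * and the low bits of the hash are replaced with that bit index.
 */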
2486
2487 #ifdef __powerpc64__
2488 int
2489 moea64_pinit(pmap_t pmap)
2490 {
2491
2492 RB_INIT(&pmap->pmap_pvo);
2493
2494 pmap->pm_slb_tree_root = slb_alloc_tree();
2495 pmap->pm_slb = slb_alloc_user_cache();
2496 pmap->pm_slb_len = 0;
2497
2498 return (1);
2499 }
2500 #else
2501 int
2502 moea64_pinit(pmap_t pmap)
2503 {
2504 int i;
2505 uint32_t hash;
2506
2507 RB_INIT(&pmap->pmap_pvo);
2508
2509 if (pmap_bootstrapped)
2510 pmap->pmap_phys = (pmap_t)moea64_kextract((vm_offset_t)pmap);
2511 else
2512 pmap->pmap_phys = pmap;
2513
2514 /*
2515 * Allocate some segment registers for this pmap.
2516 */
2517 hash = moea64_get_unique_vsid();
2518
2519 for (i = 0; i < 16; i++)
2520 pmap->pm_sr[i] = VSID_MAKE(i, hash);
2521
2522 KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0"));
2523
2524 return (1);
2525 }
2526 #endif
2527
2528 /*
2529 * Initialize the pmap associated with process 0.
2530 */
2531 void
2532 moea64_pinit0(pmap_t pm)
2533 {
2534
2535 PMAP_LOCK_INIT(pm);
2536 moea64_pinit(pm);
2537 bzero(&pm->pm_stats, sizeof(pm->pm_stats));
2538 }
2539
2540 /*
2541 * Set the physical protection on the specified range of this map as requested.
2542 */
2543 static void
2544 moea64_pvo_protect(pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
2545 {
2546 struct vm_page *pg;
2547 vm_prot_t oldprot;
2548 int32_t refchg;
2549
2550 PMAP_LOCK_ASSERT(pm, MA_OWNED);
2551
2552 /*
2553 * Change the protection of the page.
2554 */
2555 oldprot = pvo->pvo_pte.prot;
2556 pvo->pvo_pte.prot = prot;
2557 pg = PHYS_TO_VM_PAGE(PVO_PADDR(pvo));
2558
2559 /*
2560 * If the PVO is in the page table, update mapping
2561 */
2562 refchg = moea64_pte_replace(pvo, MOEA64_PTE_PROT_UPDATE);
2563 if (refchg < 0)
2564 refchg = (oldprot & VM_PROT_WRITE) ? LPTE_CHG : 0;
2565
2566 if (pm != kernel_pmap && pg != NULL &&
2567 (pg->a.flags & PGA_EXECUTABLE) == 0 &&
2568 (pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
2569 if ((pg->oflags & VPO_UNMANAGED) == 0)
2570 vm_page_aflag_set(pg, PGA_EXECUTABLE);
2571 moea64_syncicache(pm, PVO_VADDR(pvo),
2572 PVO_PADDR(pvo), PAGE_SIZE);
2573 }
2574
2575 /*
2576 * Update vm about the REF/CHG bits if the page is managed and we have
2577 * removed write access.
2578 */
2579 if (pg != NULL && (pvo->pvo_vaddr & PVO_MANAGED) &&
2580 (oldprot & VM_PROT_WRITE)) {
2581 refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs);
2582 if (refchg & LPTE_CHG)
2583 vm_page_dirty(pg);
2584 if (refchg & LPTE_REF)
2585 vm_page_aflag_set(pg, PGA_REFERENCED);
2586 }
2587 }
2588
2589 void
2590 moea64_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva,
2591 vm_prot_t prot)
2592 {
2593 struct pvo_entry *pvo, key;
2594
2595 CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm,
2596 sva, eva, prot);
2597
2598 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
2599 ("moea64_protect: non current pmap"));
2600
2601 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2602 moea64_remove(pm, sva, eva);
2603 return;
2604 }
2605
2606 PMAP_LOCK(pm);
2607 key.pvo_vaddr = sva;
2608 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
2609 pvo != NULL && PVO_VADDR(pvo) < eva;
2610 pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
2611 if (PVO_IS_SP(pvo)) {
2612 if (moea64_sp_pvo_in_range(pvo, sva, eva)) {
2613 pvo = moea64_sp_protect(pvo, prot);
2614 continue;
2615 } else {
2616 CTR1(KTR_PMAP, "%s: demote before protect",
2617 __func__);
2618 moea64_sp_demote(pvo);
2619 }
2620 }
2621 moea64_pvo_protect(pm, pvo, prot);
2622 }
2623 PMAP_UNLOCK(pm);
2624 }
2625
2626 /*
2627 * Map a list of wired pages into kernel virtual address space. This is
2628 * intended for temporary mappings which do not need page modification or
2629 * references recorded. Existing mappings in the region are overwritten.
2630 */
2631 void
2632 moea64_qenter(vm_offset_t va, vm_page_t *m, int count)
2633 {
2634 while (count-- > 0) {
2635 moea64_kenter(va, VM_PAGE_TO_PHYS(*m));
2636 va += PAGE_SIZE;
2637 m++;
2638 }
2639 }
2640
2641 /*
2642 * Remove page mappings from kernel virtual address space. Intended for
2643 * temporary mappings entered by moea64_qenter.
2644 */
2645 void
2646 moea64_qremove(vm_offset_t va, int count)
2647 {
2648 while (count-- > 0) {
2649 moea64_kremove(va);
2650 va += PAGE_SIZE;
2651 }
2652 }
2653
2654 void
2655 moea64_release_vsid(uint64_t vsid)
2656 {
2657 int idx, mask;
2658
2659 mtx_lock(&moea64_slb_mutex);
2660 idx = vsid & (NVSIDS-1);
2661 mask = 1 << (idx % VSID_NBPW);
2662 idx /= VSID_NBPW;
2663 KASSERT(moea64_vsid_bitmap[idx] & mask,
2664 ("Freeing unallocated VSID %#jx", vsid));
2665 moea64_vsid_bitmap[idx] &= ~mask;
2666 mtx_unlock(&moea64_slb_mutex);
2667 }
2668
2669 void
2670 moea64_release(pmap_t pmap)
2671 {
2672
2673 /*
2674 * Free segment registers' VSIDs
2675 */
2676 #ifdef __powerpc64__
2677 slb_free_tree(pmap);
2678 slb_free_user_cache(pmap->pm_slb);
2679 #else
2680 KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0"));
2681
2682 moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0]));
2683 #endif
2684 }
2685
2686 /*
2687 * Remove all pages mapped by the specified pmap
2688 */
2689 void
2690 moea64_remove_pages(pmap_t pm)
2691 {
2692 struct pvo_entry *pvo, *tpvo;
2693 struct pvo_dlist tofree;
2694
2695 SLIST_INIT(&tofree);
2696
2697 PMAP_LOCK(pm);
2698 RB_FOREACH_SAFE(pvo, pvo_tree, &pm->pmap_pvo, tpvo) {
2699 if (pvo->pvo_vaddr & PVO_WIRED)
2700 continue;
2701
2702 /*
2703 * For locking reasons, remove this from the page table and
2704 * pmap, but save delinking from the vm_page for a second
2705 * pass
2706 */
2707 moea64_pvo_remove_from_pmap(pvo);
2708 SLIST_INSERT_HEAD(&tofree, pvo, pvo_dlink);
2709 }
2710 PMAP_UNLOCK(pm);
2711
2712 while (!SLIST_EMPTY(&tofree)) {
2713 pvo = SLIST_FIRST(&tofree);
2714 SLIST_REMOVE_HEAD(&tofree, pvo_dlink);
2715 moea64_pvo_remove_from_page(pvo);
2716 free_pvo_entry(pvo);
2717 }
2718 }
2719
2720 static void
2721 moea64_remove_locked(pmap_t pm, vm_offset_t sva, vm_offset_t eva,
2722 struct pvo_dlist *tofree)
2723 {
2724 struct pvo_entry *pvo, *tpvo, key;
2725
2726 PMAP_LOCK_ASSERT(pm, MA_OWNED);
2727
2728 key.pvo_vaddr = sva;
2729 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
2730 pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
2731 if (PVO_IS_SP(pvo)) {
2732 if (moea64_sp_pvo_in_range(pvo, sva, eva)) {
2733 tpvo = moea64_sp_remove(pvo, tofree);
2734 continue;
2735 } else {
2736 CTR1(KTR_PMAP, "%s: demote before remove",
2737 __func__);
2738 moea64_sp_demote(pvo);
2739 }
2740 }
2741 tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
2742
2743 /*
2744 * For locking reasons, remove this from the page table and
2745 * pmap, but save delinking from the vm_page for a second
2746 * pass
2747 */
2748 moea64_pvo_remove_from_pmap(pvo);
2749 SLIST_INSERT_HEAD(tofree, pvo, pvo_dlink);
2750 }
2751 }
2752
2753 /*
2754 * Remove the given range of addresses from the specified map.
2755 */
2756 void
2757 moea64_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
2758 {
2759 struct pvo_entry *pvo;
2760 struct pvo_dlist tofree;
2761
2762 /*
2763 * Perform an unsynchronized read. This is, however, safe.
2764 */
2765 if (pm->pm_stats.resident_count == 0)
2766 return;
2767
2768 SLIST_INIT(&tofree);
2769 PMAP_LOCK(pm);
2770 moea64_remove_locked(pm, sva, eva, &tofree);
2771 PMAP_UNLOCK(pm);
2772
2773 while (!SLIST_EMPTY(&tofree)) {
2774 pvo = SLIST_FIRST(&tofree);
2775 SLIST_REMOVE_HEAD(&tofree, pvo_dlink);
2776 moea64_pvo_remove_from_page(pvo);
2777 free_pvo_entry(pvo);
2778 }
2779 }
2780
2781 /*
2782 * Remove physical page from all pmaps in which it resides. moea64_pvo_remove()
2783 * will reflect changes in pte's back to the vm_page.
2784 */
2785 void
2786 moea64_remove_all(vm_page_t m)
2787 {
2788 struct pvo_entry *pvo, *next_pvo;
2789 struct pvo_head freequeue;
2790 int wasdead;
2791 pmap_t pmap;
2792
2793 LIST_INIT(&freequeue);
2794
2795 PV_PAGE_LOCK(m);
2796 LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) {
2797 pmap = pvo->pvo_pmap;
2798 PMAP_LOCK(pmap);
2799 wasdead = (pvo->pvo_vaddr & PVO_DEAD);
2800 if (!wasdead) {
2801 if (PVO_IS_SP(pvo)) {
2802 CTR1(KTR_PMAP, "%s: demote before remove_all",
2803 __func__);
2804 moea64_sp_demote(pvo);
2805 }
2806 moea64_pvo_remove_from_pmap(pvo);
2807 }
2808 moea64_pvo_remove_from_page_locked(pvo, m);
2809 if (!wasdead)
2810 LIST_INSERT_HEAD(&freequeue, pvo, pvo_vlink);
2811 PMAP_UNLOCK(pmap);
2812
2813 }
2814 KASSERT(!pmap_page_is_mapped(m), ("Page still has mappings"));
2815 KASSERT((m->a.flags & PGA_WRITEABLE) == 0, ("Page still writable"));
2816 PV_PAGE_UNLOCK(m);
2817
2818 /* Clean up UMA allocations */
2819 LIST_FOREACH_SAFE(pvo, &freequeue, pvo_vlink, next_pvo)
2820 free_pvo_entry(pvo);
2821 }
2822
2823 /*
2824 * Allocate a physical page of memory directly from the phys_avail map.
2825 * Can only be called from moea64_bootstrap before avail start and end are
2826 * calculated.
2827 */
2828 vm_offset_t
2829 moea64_bootstrap_alloc(vm_size_t size, vm_size_t align)
2830 {
2831 vm_offset_t s, e;
2832 int i, j;
2833
2834 size = round_page(size);
2835 for (i = 0; phys_avail[i + 1] != 0; i += 2) {
2836 if (align != 0)
2837 s = roundup2(phys_avail[i], align);
2838 else
2839 s = phys_avail[i];
2840 e = s + size;
2841
2842 if (s < phys_avail[i] || e > phys_avail[i + 1])
2843 continue;
2844
2845 if (s + size > platform_real_maxaddr())
2846 continue;
2847
2848 if (s == phys_avail[i]) {
2849 phys_avail[i] += size;
2850 } else if (e == phys_avail[i + 1]) {
2851 phys_avail[i + 1] -= size;
2852 } else {
2853 for (j = phys_avail_count * 2; j > i; j -= 2) {
2854 phys_avail[j] = phys_avail[j - 2];
2855 phys_avail[j + 1] = phys_avail[j - 1];
2856 }
2857
2858 phys_avail[i + 3] = phys_avail[i + 1];
2859 phys_avail[i + 1] = s;
2860 phys_avail[i + 2] = e;
2861 phys_avail_count++;
2862 }
2863
2864 return (s);
2865 }
2866 panic("moea64_bootstrap_alloc: could not allocate memory");
2867 }
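
/*
 * Worked example (illustrative only): how moea64_bootstrap_alloc() above
 * carves an allocation out of phys_avail[].  Suppose one available range
 * is [0x100000, 0x900000) and 0x200000 bytes are requested with 0x200000
 * alignment, giving s = 0x200000 and e = 0x400000:
 *
 *	- s == start of the range:  shrink the range from the front;
 *	- e == end of the range:    shrink the range from the back;
 *	- otherwise (this case):    split it in two, shifting the later
 *	  phys_avail[] pairs up by one slot, leaving
 *	  [0x100000, 0x200000) and [0x400000, 0x900000).
 */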
2868
2869 static int
2870 moea64_pvo_enter(struct pvo_entry *pvo, struct pvo_head *pvo_head,
2871 struct pvo_entry **oldpvop)
2872 {
2873 struct pvo_entry *old_pvo;
2874 int err;
2875
2876 PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
2877
2878 STAT_MOEA64(moea64_pvo_enter_calls++);
2879
2880 /*
2881 * Add to pmap list
2882 */
2883 old_pvo = RB_INSERT(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);
2884
2885 if (old_pvo != NULL) {
2886 if (oldpvop != NULL)
2887 *oldpvop = old_pvo;
2888 return (EEXIST);
2889 }
2890
2891 if (pvo_head != NULL) {
2892 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
2893 }
2894
2895 if (pvo->pvo_vaddr & PVO_WIRED)
2896 pvo->pvo_pmap->pm_stats.wired_count++;
2897 pvo->pvo_pmap->pm_stats.resident_count++;
2898
2899 /*
2900 * Insert it into the hardware page table
2901 */
2902 err = moea64_pte_insert(pvo);
2903 if (err != 0) {
2904 panic("moea64_pvo_enter: overflow");
2905 }
2906
2907 STAT_MOEA64(moea64_pvo_entries++);
2908
2909 if (pvo->pvo_pmap == kernel_pmap)
2910 isync();
2911
2912 #ifdef __powerpc64__
2913 /*
2914 * Make sure all our bootstrap mappings are in the SLB as soon
2915 * as virtual memory is switched on.
2916 */
2917 if (!pmap_bootstrapped)
2918 moea64_bootstrap_slb_prefault(PVO_VADDR(pvo),
2919 pvo->pvo_vaddr & PVO_LARGE);
2920 #endif
2921
2922 return (0);
2923 }
2924
2925 static void
2926 moea64_pvo_remove_from_pmap(struct pvo_entry *pvo)
2927 {
2928 struct vm_page *pg;
2929 int32_t refchg;
2930
2931 KASSERT(pvo->pvo_pmap != NULL, ("Trying to remove PVO with no pmap"));
2932 PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
2933 KASSERT(!(pvo->pvo_vaddr & PVO_DEAD), ("Trying to remove dead PVO"));
2934
2935 /*
2936 * If there is an active pte entry, we need to deactivate it
2937 */
2938 refchg = moea64_pte_unset(pvo);
2939 if (refchg < 0) {
2940 /*
2941 * If it was evicted from the page table, be pessimistic and
2942 * dirty the page.
2943 */
2944 if (pvo->pvo_pte.prot & VM_PROT_WRITE)
2945 refchg = LPTE_CHG;
2946 else
2947 refchg = 0;
2948 }
2949
2950 /*
2951 * Update our statistics.
2952 */
2953 pvo->pvo_pmap->pm_stats.resident_count--;
2954 if (pvo->pvo_vaddr & PVO_WIRED)
2955 pvo->pvo_pmap->pm_stats.wired_count--;
2956
2957 /*
2958 * Remove this PVO from the pmap list.
2959 */
2960 RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);
2961
2962 /*
2963 * Mark this for the next sweep
2964 */
2965 pvo->pvo_vaddr |= PVO_DEAD;
2966
2967 /* Send RC bits to VM */
2968 if ((pvo->pvo_vaddr & PVO_MANAGED) &&
2969 (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
2970 pg = PHYS_TO_VM_PAGE(PVO_PADDR(pvo));
2971 if (pg != NULL) {
2972 refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs);
2973 if (refchg & LPTE_CHG)
2974 vm_page_dirty(pg);
2975 if (refchg & LPTE_REF)
2976 vm_page_aflag_set(pg, PGA_REFERENCED);
2977 }
2978 }
2979 }
2980
2981 static inline void
2982 moea64_pvo_remove_from_page_locked(struct pvo_entry *pvo,
2983 vm_page_t m)
2984 {
2985
2986 KASSERT(pvo->pvo_vaddr & PVO_DEAD, ("Trying to delink live page"));
2987
2988 /* Use NULL pmaps as a sentinel for races in page deletion */
2989 if (pvo->pvo_pmap == NULL)
2990 return;
2991 pvo->pvo_pmap = NULL;
2992
2993 /*
2994 * Update vm about page writeability/executability if managed
2995 */
2996 PV_LOCKASSERT(PVO_PADDR(pvo));
2997 if (pvo->pvo_vaddr & PVO_MANAGED) {
2998 if (m != NULL) {
2999 LIST_REMOVE(pvo, pvo_vlink);
3000 if (LIST_EMPTY(vm_page_to_pvoh(m)))
3001 vm_page_aflag_clear(m,
3002 PGA_WRITEABLE | PGA_EXECUTABLE);
3003 }
3004 }
3005
3006 STAT_MOEA64(moea64_pvo_entries--);
3007 STAT_MOEA64(moea64_pvo_remove_calls++);
3008 }
3009
3010 static void
3011 moea64_pvo_remove_from_page(struct pvo_entry *pvo)
3012 {
3013 vm_page_t pg = NULL;
3014
3015 if (pvo->pvo_vaddr & PVO_MANAGED)
3016 pg = PHYS_TO_VM_PAGE(PVO_PADDR(pvo));
3017
3018 PV_LOCK(PVO_PADDR(pvo));
3019 moea64_pvo_remove_from_page_locked(pvo, pg);
3020 PV_UNLOCK(PVO_PADDR(pvo));
3021 }
3022
3023 static struct pvo_entry *
3024 moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
3025 {
3026 struct pvo_entry key;
3027
3028 PMAP_LOCK_ASSERT(pm, MA_OWNED);
3029
3030 key.pvo_vaddr = va & ~ADDR_POFF;
3031 return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key));
3032 }
3033
3034 static boolean_t
3035 moea64_query_bit(vm_page_t m, uint64_t ptebit)
3036 {
3037 struct pvo_entry *pvo;
3038 int64_t ret;
3039 boolean_t rv;
3040 vm_page_t sp;
3041
3042 /*
3043 * See if this bit is stored in the page already.
3044 *
3045 * For superpages, the bit is stored in the first vm page.
3046 */
3047 if ((m->md.mdpg_attrs & ptebit) != 0 ||
3048 ((sp = PHYS_TO_VM_PAGE(VM_PAGE_TO_PHYS(m) & ~HPT_SP_MASK)) != NULL &&
3049 (sp->md.mdpg_attrs & (ptebit | MDPG_ATTR_SP)) ==
3050 (ptebit | MDPG_ATTR_SP)))
3051 return (TRUE);
3052
3053 /*
3054 * Examine each PTE. Sync so that any pending REF/CHG bits are
3055 * flushed to the PTEs.
3056 */
3057 rv = FALSE;
3058 powerpc_sync();
3059 PV_PAGE_LOCK(m);
3060 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
3061 if (PVO_IS_SP(pvo)) {
3062 ret = moea64_sp_query(pvo, ptebit);
3063 /*
3064 * If SP was not demoted, check its REF/CHG bits here.
3065 */
3066 if (ret != -1) {
3067 if ((ret & ptebit) != 0) {
3068 rv = TRUE;
3069 break;
3070 }
3071 continue;
3072 }
3073 /* else, fallthrough */
3074 }
3075
3076 ret = 0;
3077
3078 /*
3079 * See if this pvo has a valid PTE. If so, fetch the
3080 * REF/CHG bits from the valid PTE. If the appropriate
3081 * ptebit is set, return success.
3082 */
3083 PMAP_LOCK(pvo->pvo_pmap);
3084 if (!(pvo->pvo_vaddr & PVO_DEAD))
3085 ret = moea64_pte_synch(pvo);
3086 PMAP_UNLOCK(pvo->pvo_pmap);
3087
3088 if (ret > 0) {
3089 atomic_set_32(&m->md.mdpg_attrs,
3090 ret & (LPTE_CHG | LPTE_REF));
3091 if (ret & ptebit) {
3092 rv = TRUE;
3093 break;
3094 }
3095 }
3096 }
3097 PV_PAGE_UNLOCK(m);
3098
3099 return (rv);
3100 }
3101
3102 static u_int
3103 moea64_clear_bit(vm_page_t m, u_int64_t ptebit)
3104 {
3105 u_int count;
3106 struct pvo_entry *pvo;
3107 int64_t ret;
3108
3109 /*
3110 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
3111 * we can reset the right ones).
3112 */
3113 powerpc_sync();
3114
3115 /*
3116 * For each pvo entry, clear the pte's ptebit.
3117 */
3118 count = 0;
3119 PV_PAGE_LOCK(m);
3120 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
3121 if (PVO_IS_SP(pvo)) {
3122 if ((ret = moea64_sp_clear(pvo, m, ptebit)) != -1) {
3123 count += ret;
3124 continue;
3125 }
3126 }
3127 ret = 0;
3128
3129 PMAP_LOCK(pvo->pvo_pmap);
3130 if (!(pvo->pvo_vaddr & PVO_DEAD))
3131 ret = moea64_pte_clear(pvo, ptebit);
3132 PMAP_UNLOCK(pvo->pvo_pmap);
3133
3134 if (ret > 0 && (ret & ptebit))
3135 count++;
3136 }
3137 atomic_clear_32(&m->md.mdpg_attrs, ptebit);
3138 PV_PAGE_UNLOCK(m);
3139
3140 return (count);
3141 }
3142
3143 int
3144 moea64_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
3145 {
3146 struct pvo_entry *pvo, key;
3147 vm_offset_t ppa;
3148 int error = 0;
3149
3150 if (hw_direct_map && mem_valid(pa, size) == 0)
3151 return (0);
3152
3153 PMAP_LOCK(kernel_pmap);
3154 ppa = pa & ~ADDR_POFF;
3155 key.pvo_vaddr = DMAP_BASE_ADDRESS + ppa;
3156 for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key);
3157 ppa < pa + size; ppa += PAGE_SIZE,
3158 pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) {
3159 if (pvo == NULL || PVO_PADDR(pvo) != ppa) {
3160 error = EFAULT;
3161 break;
3162 }
3163 }
3164 PMAP_UNLOCK(kernel_pmap);
3165
3166 return (error);
3167 }
3168
3169 /*
3170 * Map a set of physical memory pages into the kernel virtual
3171 * address space. Return a pointer to where it is mapped. This
3172 * routine is intended to be used for mapping device memory,
3173 * NOT real memory.
3174 */
3175 void *
3176 moea64_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
3177 {
3178 vm_offset_t va, tmpva, ppa, offset;
3179
3180 ppa = trunc_page(pa);
3181 offset = pa & PAGE_MASK;
3182 size = roundup2(offset + size, PAGE_SIZE);
3183
3184 va = kva_alloc(size);
3185
3186 if (!va)
3187 panic("moea64_mapdev: Couldn't alloc kernel virtual memory");
3188
3189 for (tmpva = va; size > 0;) {
3190 moea64_kenter_attr(tmpva, ppa, ma);
3191 size -= PAGE_SIZE;
3192 tmpva += PAGE_SIZE;
3193 ppa += PAGE_SIZE;
3194 }
3195
3196 return ((void *)(va + offset));
3197 }
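
/*
 * Illustrative usage sketch (not part of the build): mapping a hypothetical
 * device register window uncached and tearing it down again.  'dev_pa' and
 * 'dev_size' are placeholders; drivers normally reach this code through
 * pmap_mapdev()/bus_space rather than calling it directly.
 *
 *	void *regs;
 *
 *	regs = moea64_mapdev_attr(dev_pa, dev_size, VM_MEMATTR_UNCACHEABLE);
 *	// ... access device registers through 'regs' ...
 *	moea64_unmapdev(regs, dev_size);
 */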
3198
3199 void *
3200 moea64_mapdev(vm_paddr_t pa, vm_size_t size)
3201 {
3202
3203 return moea64_mapdev_attr(pa, size, VM_MEMATTR_DEFAULT);
3204 }
3205
3206 void
3207 moea64_unmapdev(void *p, vm_size_t size)
3208 {
3209 vm_offset_t base, offset, va;
3210
3211 va = (vm_offset_t)p;
3212 base = trunc_page(va);
3213 offset = va & PAGE_MASK;
3214 size = roundup2(offset + size, PAGE_SIZE);
3215
3216 moea64_qremove(base, atop(size));
3217 kva_free(base, size);
3218 }
3219
3220 void
3221 moea64_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
3222 {
3223 struct pvo_entry *pvo;
3224 vm_offset_t lim;
3225 vm_paddr_t pa;
3226 vm_size_t len;
3227
3228 if (__predict_false(pm == NULL))
3229 pm = &curthread->td_proc->p_vmspace->vm_pmap;
3230
3231 PMAP_LOCK(pm);
3232 while (sz > 0) {
3233 lim = round_page(va+1);
3234 len = MIN(lim - va, sz);
3235 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
3236 if (pvo != NULL && !(pvo->pvo_pte.pa & LPTE_I)) {
3237 pa = PVO_PADDR(pvo) | (va & ADDR_POFF);
3238 moea64_syncicache(pm, va, pa, len);
3239 }
3240 va += len;
3241 sz -= len;
3242 }
3243 PMAP_UNLOCK(pm);
3244 }
3245
3246 void
3247 moea64_dumpsys_map(vm_paddr_t pa, size_t sz, void **va)
3248 {
3249
3250 *va = (void *)(uintptr_t)pa;
3251 }
3252
3253 extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
3254
3255 void
3256 moea64_scan_init(void)
3257 {
3258 struct pvo_entry *pvo;
3259 vm_offset_t va;
3260 int i;
3261
3262 if (!do_minidump) {
3263 /* Initialize phys. segments for dumpsys(). */
3264 memset(&dump_map, 0, sizeof(dump_map));
3265 mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
3266 for (i = 0; i < pregions_sz; i++) {
3267 dump_map[i].pa_start = pregions[i].mr_start;
3268 dump_map[i].pa_size = pregions[i].mr_size;
3269 }
3270 return;
3271 }
3272
3273 /* Virtual segments for minidumps: */
3274 memset(&dump_map, 0, sizeof(dump_map));
3275
3276 /* 1st: kernel .data and .bss. */
3277 dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
3278 dump_map[0].pa_size = round_page((uintptr_t)_end) -
3279 dump_map[0].pa_start;
3280
3281 /* 2nd: msgbuf and tables (see pmap_bootstrap()). */
3282 dump_map[1].pa_start = (vm_paddr_t)(uintptr_t)msgbufp->msg_ptr;
3283 dump_map[1].pa_size = round_page(msgbufp->msg_size);
3284
3285 /* 3rd: kernel VM. */
3286 va = dump_map[1].pa_start + dump_map[1].pa_size;
3287 /* Find start of next chunk (from va). */
3288 while (va < virtual_end) {
3289 /* Don't dump the buffer cache. */
3290 if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
3291 va = kmi.buffer_eva;
3292 continue;
3293 }
3294 pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF);
3295 if (pvo != NULL && !(pvo->pvo_vaddr & PVO_DEAD))
3296 break;
3297 va += PAGE_SIZE;
3298 }
3299 if (va < virtual_end) {
3300 dump_map[2].pa_start = va;
3301 va += PAGE_SIZE;
3302 /* Find last page in chunk. */
3303 while (va < virtual_end) {
3304 /* Don't run into the buffer cache. */
3305 if (va == kmi.buffer_sva)
3306 break;
3307 pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF);
3308 if (pvo == NULL || (pvo->pvo_vaddr & PVO_DEAD))
3309 break;
3310 va += PAGE_SIZE;
3311 }
3312 dump_map[2].pa_size = va - dump_map[2].pa_start;
3313 }
3314 }
3315
3316 #ifdef __powerpc64__
3317
3318 static size_t
3319 moea64_scan_pmap(struct bitset *dump_bitset)
3320 {
3321 struct pvo_entry *pvo;
3322 vm_paddr_t pa, pa_end;
3323 vm_offset_t va, pgva, kstart, kend, kstart_lp, kend_lp;
3324 uint64_t lpsize;
3325
3326 lpsize = moea64_large_page_size;
3327 kstart = trunc_page((vm_offset_t)_etext);
3328 kend = round_page((vm_offset_t)_end);
3329 kstart_lp = kstart & ~moea64_large_page_mask;
3330 kend_lp = (kend + moea64_large_page_mask) & ~moea64_large_page_mask;
3331
3332 CTR4(KTR_PMAP, "moea64_scan_pmap: kstart=0x%016lx, kend=0x%016lx, "
3333 "kstart_lp=0x%016lx, kend_lp=0x%016lx",
3334 kstart, kend, kstart_lp, kend_lp);
3335
3336 PMAP_LOCK(kernel_pmap);
3337 RB_FOREACH(pvo, pvo_tree, &kernel_pmap->pmap_pvo) {
3338 va = pvo->pvo_vaddr;
3339
3340 if (va & PVO_DEAD)
3341 continue;
3342
3343 /* Skip DMAP (except kernel area) */
3344 if (va >= DMAP_BASE_ADDRESS && va <= DMAP_MAX_ADDRESS) {
3345 if (va & PVO_LARGE) {
3346 pgva = va & ~moea64_large_page_mask;
3347 if (pgva < kstart_lp || pgva >= kend_lp)
3348 continue;
3349 } else {
3350 pgva = trunc_page(va);
3351 if (pgva < kstart || pgva >= kend)
3352 continue;
3353 }
3354 }
3355
3356 pa = PVO_PADDR(pvo);
3357
3358 if (va & PVO_LARGE) {
3359 pa_end = pa + lpsize;
3360 for (; pa < pa_end; pa += PAGE_SIZE) {
3361 if (vm_phys_is_dumpable(pa))
3362 vm_page_dump_add(dump_bitset, pa);
3363 }
3364 } else {
3365 if (vm_phys_is_dumpable(pa))
3366 vm_page_dump_add(dump_bitset, pa);
3367 }
3368 }
3369 PMAP_UNLOCK(kernel_pmap);
3370
3371 return (sizeof(struct lpte) * moea64_pteg_count * 8);
3372 }
3373
3374 static struct dump_context dump_ctx;
3375
3376 static void *
3377 moea64_dump_pmap_init(unsigned blkpgs)
3378 {
3379 dump_ctx.ptex = 0;
3380 dump_ctx.ptex_end = moea64_pteg_count * 8;
3381 dump_ctx.blksz = blkpgs * PAGE_SIZE;
3382 return (&dump_ctx);
3383 }
3384
3385 #else
3386
3387 static size_t
3388 moea64_scan_pmap(struct bitset *dump_bitset __unused)
3389 {
3390 return (0);
3391 }
3392
3393 static void *
3394 moea64_dump_pmap_init(unsigned blkpgs)
3395 {
3396 return (NULL);
3397 }
3398
3399 #endif
3400
3401 #ifdef __powerpc64__
3402 static void
3403 moea64_map_range(vm_offset_t va, vm_paddr_t pa, vm_size_t npages)
3404 {
3405
3406 for (; npages > 0; --npages) {
3407 if (moea64_large_page_size != 0 &&
3408 (pa & moea64_large_page_mask) == 0 &&
3409 (va & moea64_large_page_mask) == 0 &&
3410 npages >= (moea64_large_page_size >> PAGE_SHIFT)) {
3411 PMAP_LOCK(kernel_pmap);
3412 moea64_kenter_large(va, pa, 0, 0);
3413 PMAP_UNLOCK(kernel_pmap);
3414 pa += moea64_large_page_size;
3415 va += moea64_large_page_size;
3416 npages -= (moea64_large_page_size >> PAGE_SHIFT) - 1;
3417 } else {
3418 moea64_kenter(va, pa);
3419 pa += PAGE_SIZE;
3420 va += PAGE_SIZE;
3421 }
3422 }
3423 }
3424
3425 static void
3426 moea64_page_array_startup(long pages)
3427 {
3428 long dom_pages[MAXMEMDOM];
3429 vm_paddr_t pa;
3430 vm_offset_t va, vm_page_base;
3431 vm_size_t needed, size;
3432 int domain;
3433 int i;
3434
3435 vm_page_base = 0xd000000000000000ULL;
3436
3437 /* Short-circuit single-domain systems. */
3438 if (vm_ndomains == 1) {
3439 size = round_page(pages * sizeof(struct vm_page));
3440 pa = vm_phys_early_alloc(0, size);
3441 vm_page_base = moea64_map(&vm_page_base,
3442 pa, pa + size, VM_PROT_READ | VM_PROT_WRITE);
3443 vm_page_array_size = pages;
3444 vm_page_array = (vm_page_t)vm_page_base;
3445 return;
3446 }
3447
3448 for (i = 0; i < MAXMEMDOM; i++)
3449 dom_pages[i] = 0;
3450
3451 /* Now get the number of pages required per domain. */
3452 for (i = 0; i < vm_phys_nsegs; i++) {
3453 domain = vm_phys_segs[i].domain;
3454 KASSERT(domain < MAXMEMDOM,
3455 ("Invalid vm_phys_segs NUMA domain %d!\n", domain));
3456 /* Get size of vm_page_array needed for this segment. */
3457 size = btoc(vm_phys_segs[i].end - vm_phys_segs[i].start);
3458 dom_pages[domain] += size;
3459 }
3460
3461 for (i = 0; phys_avail[i + 1] != 0; i+= 2) {
3462 domain = vm_phys_domain(phys_avail[i]);
3463 KASSERT(domain < MAXMEMDOM,
3464 ("Invalid phys_avail NUMA domain %d!\n", domain));
3465 size = btoc(phys_avail[i + 1] - phys_avail[i]);
3466 dom_pages[domain] += size;
3467 }
3468
3469 /*
3470 * Map in chunks that can get us all 16MB pages. There will be some
3471 * overlap between domains, but that's acceptable for now.
3472 */
3473 vm_page_array_size = 0;
3474 va = vm_page_base;
3475 for (i = 0; i < MAXMEMDOM && vm_page_array_size < pages; i++) {
3476 if (dom_pages[i] == 0)
3477 continue;
3478 size = ulmin(pages - vm_page_array_size, dom_pages[i]);
3479 size = round_page(size * sizeof(struct vm_page));
3480 needed = size;
3481 size = roundup2(size, moea64_large_page_size);
3482 pa = vm_phys_early_alloc(i, size);
3483 vm_page_array_size += size / sizeof(struct vm_page);
3484 moea64_map_range(va, pa, size >> PAGE_SHIFT);
3485 /* Scoot up domain 0, to reduce the domain page overlap. */
3486 if (i == 0)
3487 vm_page_base += size - needed;
3488 va += size;
3489 }
3490 vm_page_array = (vm_page_t)vm_page_base;
3491 vm_page_array_size = pages;
3492 }
3493 #endif
3494
3495 static int64_t
3496 moea64_null_method(void)
3497 {
3498 return (0);
3499 }
3500
3501 static int64_t moea64_pte_replace_default(struct pvo_entry *pvo, int flags)
3502 {
3503 int64_t refchg;
3504
3505 refchg = moea64_pte_unset(pvo);
3506 moea64_pte_insert(pvo);
3507
3508 return (refchg);
3509 }
3510
3511 struct moea64_funcs *moea64_ops;
3512
3513 #define DEFINE_OEA64_IFUNC(ret, func, args, def) \
3514 DEFINE_IFUNC(, ret, moea64_##func, args) { \
3515 moea64_##func##_t f; \
3516 if (moea64_ops == NULL) \
3517 return ((moea64_##func##_t)def); \
3518 f = moea64_ops->func; \
3519 return (f != NULL ? f : (moea64_##func##_t)def);\
3520 }
3521
3522 void
3523 moea64_install(void)
3524 {
3525 #ifdef __powerpc64__
3526 if (hw_direct_map == -1) {
3527 moea64_probe_large_page();
3528
3529 /* Use a direct map if we have large page support */
3530 if (moea64_large_page_size > 0)
3531 hw_direct_map = 1;
3532 else
3533 hw_direct_map = 0;
3534 }
3535 #endif
3536
3537 /*
3538 * Default to non-DMAP, and switch over to DMAP functions once we know
3539 * we have DMAP.
3540 */
3541 if (hw_direct_map) {
3542 moea64_methods.quick_enter_page = moea64_quick_enter_page_dmap;
3543 moea64_methods.quick_remove_page = NULL;
3544 moea64_methods.copy_page = moea64_copy_page_dmap;
3545 moea64_methods.zero_page = moea64_zero_page_dmap;
3546 moea64_methods.copy_pages = moea64_copy_pages_dmap;
3547 }
3548 }
3549
3550 DEFINE_OEA64_IFUNC(int64_t, pte_replace, (struct pvo_entry *, int),
3551 moea64_pte_replace_default)
3552 DEFINE_OEA64_IFUNC(int64_t, pte_insert, (struct pvo_entry *), moea64_null_method)
3553 DEFINE_OEA64_IFUNC(int64_t, pte_unset, (struct pvo_entry *), moea64_null_method)
3554 DEFINE_OEA64_IFUNC(int64_t, pte_clear, (struct pvo_entry *, uint64_t),
3555 moea64_null_method)
3556 DEFINE_OEA64_IFUNC(int64_t, pte_synch, (struct pvo_entry *), moea64_null_method)
3557 DEFINE_OEA64_IFUNC(int64_t, pte_insert_sp, (struct pvo_entry *), moea64_null_method)
3558 DEFINE_OEA64_IFUNC(int64_t, pte_unset_sp, (struct pvo_entry *), moea64_null_method)
3559 DEFINE_OEA64_IFUNC(int64_t, pte_replace_sp, (struct pvo_entry *), moea64_null_method)
3560
3561 /* Superpage functions */
3562
3563 /* MMU interface */
3564
3565 static bool
3566 moea64_ps_enabled(pmap_t pmap)
3567 {
3568 return (superpages_enabled);
3569 }
3570
3571 static void
3572 moea64_align_superpage(vm_object_t object, vm_ooffset_t offset,
3573 vm_offset_t *addr, vm_size_t size)
3574 {
3575 vm_offset_t sp_offset;
3576
3577 if (size < HPT_SP_SIZE)
3578 return;
3579
3580 CTR4(KTR_PMAP, "%s: offs=%#jx, addr=%p, size=%#jx",
3581 __func__, (uintmax_t)offset, addr, (uintmax_t)size);
3582
3583 if (object != NULL && (object->flags & OBJ_COLORED) != 0)
3584 offset += ptoa(object->pg_color);
3585 sp_offset = offset & HPT_SP_MASK;
3586 if (size - ((HPT_SP_SIZE - sp_offset) & HPT_SP_MASK) < HPT_SP_SIZE ||
3587 (*addr & HPT_SP_MASK) == sp_offset)
3588 return;
3589 if ((*addr & HPT_SP_MASK) < sp_offset)
3590 *addr = (*addr & ~HPT_SP_MASK) + sp_offset;
3591 else
3592 *addr = ((*addr + HPT_SP_MASK) & ~HPT_SP_MASK) + sp_offset;
3593 }
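
/*
 * Worked example (illustrative, assuming 16MB superpages, so HPT_SP_MASK is
 * 0xffffff): a mapping whose colour-adjusted object offset is 0x123000
 * should start at a VA with the same position inside a 16MB block.  If the
 * hinted *addr is 0x20500000 (offset 0x500000 in its block, past 0x123000),
 * the hint is moved up to the matching slot of the next block:
 *
 *	*addr = ((0x20500000 + 0xffffff) & ~0xffffff) + 0x123000
 *	      = 0x21123000;
 *
 * Had the hint been below the desired colour (say 0x20023000), it would be
 * bumped within the same block to 0x20123000 instead.
 */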
3594
3595 /* Helpers */
3596
3597 static __inline void
3598 moea64_pvo_cleanup(struct pvo_dlist *tofree)
3599 {
3600 struct pvo_entry *pvo;
3601
3602 /* clean up */
3603 while (!SLIST_EMPTY(tofree)) {
3604 pvo = SLIST_FIRST(tofree);
3605 SLIST_REMOVE_HEAD(tofree, pvo_dlink);
3606 if (pvo->pvo_vaddr & PVO_DEAD)
3607 moea64_pvo_remove_from_page(pvo);
3608 free_pvo_entry(pvo);
3609 }
3610 }
3611
3612 static __inline uint16_t
3613 pvo_to_vmpage_flags(struct pvo_entry *pvo)
3614 {
3615 uint16_t flags;
3616
3617 flags = 0;
3618 if ((pvo->pvo_pte.prot & VM_PROT_WRITE) != 0)
3619 flags |= PGA_WRITEABLE;
3620 if ((pvo->pvo_pte.prot & VM_PROT_EXECUTE) != 0)
3621 flags |= PGA_EXECUTABLE;
3622
3623 return (flags);
3624 }
3625
3626 /*
3627 * Check if the given pvo and its superpage are in sva-eva range.
3628 */
3629 static __inline bool
3630 moea64_sp_pvo_in_range(struct pvo_entry *pvo, vm_offset_t sva, vm_offset_t eva)
3631 {
3632 vm_offset_t spva;
3633
3634 spva = PVO_VADDR(pvo) & ~HPT_SP_MASK;
3635 if (spva >= sva && spva + HPT_SP_SIZE <= eva) {
3636 /*
3637 * Because this function is intended to be called from loops
3638 * that iterate over ordered pvo entries, if the condition
3639 * above is true then the pvo must be the first of its
3640 * superpage.
3641 */
3642 KASSERT(PVO_VADDR(pvo) == spva,
3643 ("%s: unexpected unaligned superpage pvo", __func__));
3644 return (true);
3645 }
3646 return (false);
3647 }
3648
3649 /*
3650 * Update vm about the REF/CHG bits if the superpage is managed and
3651 * has (or had) write access.
3652 */
3653 static void
3654 moea64_sp_refchg_process(struct pvo_entry *sp, vm_page_t m,
3655 int64_t sp_refchg, vm_prot_t prot)
3656 {
3657 vm_page_t m_end;
3658 int64_t refchg;
3659
3660 if ((sp->pvo_vaddr & PVO_MANAGED) != 0 && (prot & VM_PROT_WRITE) != 0) {
3661 for (m_end = &m[HPT_SP_PAGES]; m < m_end; m++) {
3662 refchg = sp_refchg |
3663 atomic_readandclear_32(&m->md.mdpg_attrs);
3664 if (refchg & LPTE_CHG)
3665 vm_page_dirty(m);
3666 if (refchg & LPTE_REF)
3667 vm_page_aflag_set(m, PGA_REFERENCED);
3668 }
3669 }
3670 }
3671
3672 /* Superpage ops */
3673
3674 static int
3675 moea64_sp_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
3676 vm_prot_t prot, u_int flags, int8_t psind)
3677 {
3678 struct pvo_entry *pvo, **pvos;
3679 struct pvo_head *pvo_head;
3680 vm_offset_t sva;
3681 vm_page_t sm;
3682 vm_paddr_t pa, spa;
3683 bool sync;
3684 struct pvo_dlist tofree;
3685 int error __diagused, i;
3686 uint16_t aflags;
3687
3688 KASSERT((va & HPT_SP_MASK) == 0, ("%s: va %#jx unaligned",
3689 __func__, (uintmax_t)va));
3690 KASSERT(psind == 1, ("%s: invalid psind: %d", __func__, psind));
3691 KASSERT(m->psind == 1, ("%s: invalid m->psind: %d",
3692 __func__, m->psind));
3693 KASSERT(pmap != kernel_pmap,
3694 ("%s: function called with kernel pmap", __func__));
3695
3696 CTR5(KTR_PMAP, "%s: va=%#jx, pa=%#jx, prot=%#x, flags=%#x, psind=1",
3697 __func__, (uintmax_t)va, (uintmax_t)VM_PAGE_TO_PHYS(m),
3698 prot, flags);
3699
3700 SLIST_INIT(&tofree);
3701
3702 sva = va;
3703 sm = m;
3704 spa = pa = VM_PAGE_TO_PHYS(sm);
3705
3706 /* Try to allocate all PVOs first, to make failure handling easier. */
3707 pvos = malloc(HPT_SP_PAGES * sizeof(struct pvo_entry *), M_TEMP,
3708 M_NOWAIT);
3709 if (pvos == NULL) {
3710 CTR1(KTR_PMAP, "%s: failed to alloc pvo array", __func__);
3711 return (KERN_RESOURCE_SHORTAGE);
3712 }
3713
3714 for (i = 0; i < HPT_SP_PAGES; i++) {
3715 pvos[i] = alloc_pvo_entry(0);
3716 if (pvos[i] == NULL) {
3717 CTR1(KTR_PMAP, "%s: failed to alloc pvo", __func__);
3718 for (i = i - 1; i >= 0; i--)
3719 free_pvo_entry(pvos[i]);
3720 free(pvos, M_TEMP);
3721 return (KERN_RESOURCE_SHORTAGE);
3722 }
3723 }
3724
3725 SP_PV_LOCK_ALIGNED(spa);
3726 PMAP_LOCK(pmap);
3727
3728 /* Note: moea64_remove_locked() also clears cached REF/CHG bits. */
3729 moea64_remove_locked(pmap, va, va + HPT_SP_SIZE, &tofree);
3730
3731 /* Enter pages */
3732 for (i = 0; i < HPT_SP_PAGES;
3733 i++, va += PAGE_SIZE, pa += PAGE_SIZE, m++) {
3734 pvo = pvos[i];
3735
3736 pvo->pvo_pte.prot = prot;
3737 pvo->pvo_pte.pa = (pa & ~HPT_SP_MASK) | LPTE_LP_4K_16M |
3738 moea64_calc_wimg(pa, pmap_page_get_memattr(m));
3739
3740 if ((flags & PMAP_ENTER_WIRED) != 0)
3741 pvo->pvo_vaddr |= PVO_WIRED;
3742 pvo->pvo_vaddr |= PVO_LARGE;
3743
3744 if ((m->oflags & VPO_UNMANAGED) != 0)
3745 pvo_head = NULL;
3746 else {
3747 pvo_head = &m->md.mdpg_pvoh;
3748 pvo->pvo_vaddr |= PVO_MANAGED;
3749 }
3750
3751 init_pvo_entry(pvo, pmap, va);
3752
3753 error = moea64_pvo_enter(pvo, pvo_head, NULL);
3754 /*
3755 * All superpage PVOs were previously removed, so no errors
3756 * should occur while inserting the new ones.
3757 */
3758 KASSERT(error == 0, ("%s: unexpected error "
3759 "when inserting superpage PVO: %d",
3760 __func__, error));
3761 }
3762
3763 PMAP_UNLOCK(pmap);
3764 SP_PV_UNLOCK_ALIGNED(spa);
3765
3766 sync = (sm->a.flags & PGA_EXECUTABLE) == 0;
3767 /* Note: moea64_pvo_cleanup() also clears page prot. flags. */
3768 moea64_pvo_cleanup(&tofree);
3769 pvo = pvos[0];
3770
3771 /* Set vm page flags */
3772 aflags = pvo_to_vmpage_flags(pvo);
3773 if (aflags != 0)
3774 for (m = sm; m < &sm[HPT_SP_PAGES]; m++)
3775 vm_page_aflag_set(m, aflags);
3776
3777 /*
3778 * Flush the page from the instruction cache if this page is
3779 * mapped executable and cacheable.
3780 */
3781 if (sync && (pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0)
3782 moea64_syncicache(pmap, sva, spa, HPT_SP_SIZE);
3783
3784 atomic_add_long(&sp_mappings, 1);
3785 CTR3(KTR_PMAP, "%s: SP success for va %#jx in pmap %p",
3786 __func__, (uintmax_t)sva, pmap);
3787
3788 free(pvos, M_TEMP);
3789 return (KERN_SUCCESS);
3790 }
3791
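/*
 * Try to promote the 4KB mappings covering the superpage that contains
 * (va, m) to a single large mapping.  Every base page must be mapped, live,
 * physically contiguous and in order, and must agree with the first PVO in
 * promotion-relevant flags, protection and WIMG bits; otherwise a failure
 * counter is bumped and the mappings are left untouched.  On success the
 * PVOs are rewritten as a large mapping, the PTEs are replaced, and the
 * accumulated REF/CHG bits are pushed to the VM layer and cached on the
 * first page.
 */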
static void
moea64_sp_promote(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	struct pvo_entry *first, *pvo;
	vm_paddr_t pa, pa_end;
	vm_offset_t sva, va_end;
	int64_t sp_refchg;

	/* This CTR may generate a lot of output. */
	/* CTR2(KTR_PMAP, "%s: va=%#jx", __func__, (uintmax_t)va); */

	va &= ~HPT_SP_MASK;
	sva = va;
	/* Get superpage */
	pa = VM_PAGE_TO_PHYS(m) & ~HPT_SP_MASK;
	m = PHYS_TO_VM_PAGE(pa);

	PMAP_LOCK(pmap);

	/*
	 * Check if all pages meet promotion criteria.
	 *
	 * XXX In some cases the loop below may be executed for each or most
	 * of the entered pages of a superpage, which can be expensive
	 * (although it was not profiled) and need some optimization.
	 *
	 * Some cases where this seems to happen are:
	 * - When a superpage is first entered read-only and later becomes
	 *   read-write.
	 * - When some of the superpage's virtual addresses map to previously
	 *   wired/cached pages while others map to pages allocated from a
	 *   different physical address range. A common scenario where this
	 *   happens is when mmap'ing a file that is already present in FS
	 *   block cache and doesn't fill a superpage.
	 */
	first = pvo = moea64_pvo_find_va(pmap, sva);
	for (pa_end = pa + HPT_SP_SIZE;
	    pa < pa_end; pa += PAGE_SIZE, va += PAGE_SIZE) {
		if (pvo == NULL || (pvo->pvo_vaddr & PVO_DEAD) != 0) {
			CTR3(KTR_PMAP,
			    "%s: NULL or dead PVO: pmap=%p, va=%#jx",
			    __func__, pmap, (uintmax_t)va);
			goto error;
		}
		if (PVO_PADDR(pvo) != pa) {
			CTR5(KTR_PMAP, "%s: PAs don't match: "
			    "pmap=%p, va=%#jx, pvo_pa=%#jx, exp_pa=%#jx",
			    __func__, pmap, (uintmax_t)va,
			    (uintmax_t)PVO_PADDR(pvo), (uintmax_t)pa);
			atomic_add_long(&sp_p_fail_pa, 1);
			goto error;
		}
		if ((first->pvo_vaddr & PVO_FLAGS_PROMOTE) !=
		    (pvo->pvo_vaddr & PVO_FLAGS_PROMOTE)) {
			CTR5(KTR_PMAP, "%s: PVO flags don't match: "
			    "pmap=%p, va=%#jx, pvo_flags=%#jx, exp_flags=%#jx",
			    __func__, pmap, (uintmax_t)va,
			    (uintmax_t)(pvo->pvo_vaddr & PVO_FLAGS_PROMOTE),
			    (uintmax_t)(first->pvo_vaddr & PVO_FLAGS_PROMOTE));
			atomic_add_long(&sp_p_fail_flags, 1);
			goto error;
		}
		if (first->pvo_pte.prot != pvo->pvo_pte.prot) {
			CTR5(KTR_PMAP, "%s: PVO protections don't match: "
			    "pmap=%p, va=%#jx, pvo_prot=%#x, exp_prot=%#x",
			    __func__, pmap, (uintmax_t)va,
			    pvo->pvo_pte.prot, first->pvo_pte.prot);
			atomic_add_long(&sp_p_fail_prot, 1);
			goto error;
		}
		if ((first->pvo_pte.pa & LPTE_WIMG) !=
		    (pvo->pvo_pte.pa & LPTE_WIMG)) {
			CTR5(KTR_PMAP, "%s: WIMG bits don't match: "
			    "pmap=%p, va=%#jx, pvo_wimg=%#jx, exp_wimg=%#jx",
			    __func__, pmap, (uintmax_t)va,
			    (uintmax_t)(pvo->pvo_pte.pa & LPTE_WIMG),
			    (uintmax_t)(first->pvo_pte.pa & LPTE_WIMG));
			atomic_add_long(&sp_p_fail_wimg, 1);
			goto error;
		}

		pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo);
	}

	/* All OK, promote. */

	/*
	 * Handle superpage REF/CHG bits. If REF or CHG is set in
	 * any page, then it must be set in the superpage.
	 *
	 * Instead of querying each page, we take advantage of two facts:
	 * 1- If a page is being promoted, it was referenced.
	 * 2- If promoted pages are writable, they were modified.
	 */
	sp_refchg = LPTE_REF |
	    ((first->pvo_pte.prot & VM_PROT_WRITE) != 0 ? LPTE_CHG : 0);

	/* Promote pages */

	for (pvo = first, va_end = PVO_VADDR(pvo) + HPT_SP_SIZE;
	    pvo != NULL && PVO_VADDR(pvo) < va_end;
	    pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo)) {
		pvo->pvo_pte.pa &= ADDR_POFF | ~HPT_SP_MASK;
		pvo->pvo_pte.pa |= LPTE_LP_4K_16M;
		pvo->pvo_vaddr |= PVO_LARGE;
	}
	moea64_pte_replace_sp(first);

	/* Send REF/CHG bits to VM */
	moea64_sp_refchg_process(first, m, sp_refchg, first->pvo_pte.prot);

	/* Use first page to cache REF/CHG bits */
	atomic_set_32(&m->md.mdpg_attrs, sp_refchg | MDPG_ATTR_SP);

	PMAP_UNLOCK(pmap);

	atomic_add_long(&sp_mappings, 1);
	atomic_add_long(&sp_promotions, 1);
	CTR3(KTR_PMAP, "%s: success for va %#jx in pmap %p",
	    __func__, (uintmax_t)sva, pmap);
	return;

error:
	atomic_add_long(&sp_p_failures, 1);
	PMAP_UNLOCK(pmap);
}

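/*
 * Demote the superpage whose first (superpage-aligned) PVO is "sp" back to
 * 4KB mappings: each PVO drops PVO_LARGE and regains its own physical page
 * number, the PTEs are replaced, the cached MDPG_ATTR_SP flag is cleared,
 * and the REF/CHG bits accumulated by the large mapping are distributed to
 * all base pages.
 */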
static void
moea64_sp_demote_aligned(struct pvo_entry *sp)
{
	struct pvo_entry *pvo;
	vm_offset_t va, va_end;
	vm_paddr_t pa;
	vm_page_t m;
	pmap_t pmap __diagused;
	int64_t refchg;

	CTR2(KTR_PMAP, "%s: va=%#jx", __func__, (uintmax_t)PVO_VADDR(sp));

	pmap = sp->pvo_pmap;
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	pvo = sp;

	/* Demote pages */

	va = PVO_VADDR(pvo);
	pa = PVO_PADDR(pvo);
	m = PHYS_TO_VM_PAGE(pa);

	for (pvo = sp, va_end = va + HPT_SP_SIZE;
	    pvo != NULL && PVO_VADDR(pvo) < va_end;
	    pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo),
	    va += PAGE_SIZE, pa += PAGE_SIZE) {
		KASSERT(pvo && PVO_VADDR(pvo) == va,
		    ("%s: missing PVO for va %#jx", __func__, (uintmax_t)va));

		pvo->pvo_vaddr &= ~PVO_LARGE;
		pvo->pvo_pte.pa &= ~LPTE_RPGN;
		pvo->pvo_pte.pa |= pa;
	}
	refchg = moea64_pte_replace_sp(sp);

	/*
	 * Clear SP flag
	 *
	 * XXX It is possible that another pmap has this page mapped as
	 *     part of a superpage, but since the SP flag is used only for
	 *     caching SP REF/CHG bits, which are queried from the page
	 *     table when not cached, it should be ok to clear it here.
	 */
	atomic_clear_32(&m->md.mdpg_attrs, MDPG_ATTR_SP);

	/*
	 * Handle superpage REF/CHG bits. A bit set in the superpage
	 * means all pages should consider it set.
	 */
	moea64_sp_refchg_process(sp, m, refchg, sp->pvo_pte.prot);

	atomic_add_long(&sp_demotions, 1);
	CTR3(KTR_PMAP, "%s: success for va %#jx in pmap %p",
	    __func__, (uintmax_t)PVO_VADDR(sp), pmap);
}

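/*
 * Demote the superpage containing "pvo".  If "pvo" is not the superpage's
 * first PVO, look up the PVO at the superpage-aligned address first.  The
 * pmap must be locked.
 */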
static void
moea64_sp_demote(struct pvo_entry *pvo)
{
	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);

	if ((PVO_VADDR(pvo) & HPT_SP_MASK) != 0) {
		pvo = moea64_pvo_find_va(pvo->pvo_pmap,
		    PVO_VADDR(pvo) & ~HPT_SP_MASK);
		KASSERT(pvo != NULL, ("%s: missing PVO for va %#jx",
		    __func__, (uintmax_t)(PVO_VADDR(pvo) & ~HPT_SP_MASK)));
	}
	moea64_sp_demote_aligned(pvo);
}

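/*
 * Unwire every base-page PVO of the superpage starting at "sp": clear
 * PVO_WIRED, update the PTEs without invalidation, adjust the pmap's wired
 * count and push the accumulated REF/CHG bits to the VM layer.  Returns the
 * last PVO visited, so that callers iterating over the pmap's PVO tree can
 * resume from it.
 */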
static struct pvo_entry *
moea64_sp_unwire(struct pvo_entry *sp)
{
	struct pvo_entry *pvo, *prev;
	vm_offset_t eva;
	pmap_t pm;
	int64_t ret, refchg;

	CTR2(KTR_PMAP, "%s: va=%#jx", __func__, (uintmax_t)PVO_VADDR(sp));

	pm = sp->pvo_pmap;
	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	eva = PVO_VADDR(sp) + HPT_SP_SIZE;
	refchg = 0;
	for (pvo = sp; pvo != NULL && PVO_VADDR(pvo) < eva;
	    prev = pvo, pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
		if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
			panic("%s: pvo %p is missing PVO_WIRED",
			    __func__, pvo);
		pvo->pvo_vaddr &= ~PVO_WIRED;

		ret = moea64_pte_replace(pvo, 0 /* No invalidation */);
		if (ret < 0)
			refchg |= LPTE_CHG;
		else
			refchg |= ret;

		pm->pm_stats.wired_count--;
	}

	/* Send REF/CHG bits to VM */
	moea64_sp_refchg_process(sp, PHYS_TO_VM_PAGE(PVO_PADDR(sp)),
	    refchg, sp->pvo_pte.prot);

	return (prev);
}

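/*
 * Change the protection of every base-page PVO of the superpage starting at
 * "sp" to "prot", updating the PTEs in place.  Accumulated REF/CHG bits are
 * pushed to the VM layer using the old protection, and a superpage that
 * becomes executable and cacheable is flushed from the instruction cache.
 * Returns the last PVO visited.
 */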
static struct pvo_entry *
moea64_sp_protect(struct pvo_entry *sp, vm_prot_t prot)
{
	struct pvo_entry *pvo, *prev;
	vm_offset_t eva;
	pmap_t pm;
	vm_page_t m, m_end;
	int64_t ret, refchg;
	vm_prot_t oldprot;

	CTR3(KTR_PMAP, "%s: va=%#jx, prot=%x",
	    __func__, (uintmax_t)PVO_VADDR(sp), prot);

	pm = sp->pvo_pmap;
	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	oldprot = sp->pvo_pte.prot;
	m = PHYS_TO_VM_PAGE(PVO_PADDR(sp));
	KASSERT(m != NULL, ("%s: missing vm page for pa %#jx",
	    __func__, (uintmax_t)PVO_PADDR(sp)));
	eva = PVO_VADDR(sp) + HPT_SP_SIZE;
	refchg = 0;

	for (pvo = sp; pvo != NULL && PVO_VADDR(pvo) < eva;
	    prev = pvo, pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
		pvo->pvo_pte.prot = prot;
		/*
		 * If the PVO is in the page table, update mapping
		 */
		ret = moea64_pte_replace(pvo, MOEA64_PTE_PROT_UPDATE);
		if (ret < 0)
			refchg |= LPTE_CHG;
		else
			refchg |= ret;
	}

	/* Send REF/CHG bits to VM */
	moea64_sp_refchg_process(sp, m, refchg, oldprot);

	/* Handle pages that became executable */
	if ((m->a.flags & PGA_EXECUTABLE) == 0 &&
	    (sp->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
		if ((m->oflags & VPO_UNMANAGED) == 0)
			for (m_end = &m[HPT_SP_PAGES]; m < m_end; m++)
				vm_page_aflag_set(m, PGA_EXECUTABLE);
		moea64_syncicache(pm, PVO_VADDR(sp), PVO_PADDR(sp),
		    HPT_SP_SIZE);
	}

	return (prev);
}

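/*
 * Remove every base-page PVO of the superpage starting at "sp" from the
 * page table and the pmap, queueing the PVOs on "tofree" so the caller can
 * delink them from their vm_pages in a second pass, and clear the cached
 * MDPG_ATTR_SP flag.  Returns the PVO following the superpage, if any.
 */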
static struct pvo_entry *
moea64_sp_remove(struct pvo_entry *sp, struct pvo_dlist *tofree)
{
	struct pvo_entry *pvo, *tpvo;
	vm_offset_t eva;
	pmap_t pm __diagused;

	CTR2(KTR_PMAP, "%s: va=%#jx", __func__, (uintmax_t)PVO_VADDR(sp));

	pm = sp->pvo_pmap;
	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	eva = PVO_VADDR(sp) + HPT_SP_SIZE;
	for (pvo = sp; pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);

		/*
		 * For locking reasons, remove this from the page table and
		 * pmap, but save delinking from the vm_page for a second
		 * pass
		 */
		moea64_pvo_remove_from_pmap(pvo);
		SLIST_INSERT_HEAD(tofree, pvo, pvo_dlink);
	}

	/*
	 * Clear SP bit
	 *
	 * XXX See comment in moea64_sp_demote_aligned() for why it's
	 *     ok to always clear the SP bit on remove/demote.
	 */
	atomic_clear_32(&PHYS_TO_VM_PAGE(PVO_PADDR(sp))->md.mdpg_attrs,
	    MDPG_ATTR_SP);

	return (tpvo);
}

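/*
 * With the pmap locked, synchronize the PTE of every base page of the
 * superpage containing "pvo", accumulating REF/CHG bits and stopping early
 * once "ptebit" has been seen.  Any bits found are cached, together with
 * MDPG_ATTR_SP, in the first page's md attributes.
 */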
static int64_t
moea64_sp_query_locked(struct pvo_entry *pvo, uint64_t ptebit)
{
	int64_t refchg, ret;
	vm_offset_t eva;
	vm_page_t m;
	pmap_t pmap;
	struct pvo_entry *sp;

	pmap = pvo->pvo_pmap;
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/* Get first SP PVO */
	if ((PVO_VADDR(pvo) & HPT_SP_MASK) != 0) {
		sp = moea64_pvo_find_va(pmap, PVO_VADDR(pvo) & ~HPT_SP_MASK);
		KASSERT(sp != NULL, ("%s: missing PVO for va %#jx",
		    __func__, (uintmax_t)(PVO_VADDR(pvo) & ~HPT_SP_MASK)));
	} else
		sp = pvo;
	eva = PVO_VADDR(sp) + HPT_SP_SIZE;

	refchg = 0;
	for (pvo = sp; pvo != NULL && PVO_VADDR(pvo) < eva;
	    pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo)) {
		ret = moea64_pte_synch(pvo);
		if (ret > 0) {
			refchg |= ret & (LPTE_CHG | LPTE_REF);
			if ((refchg & ptebit) != 0)
				break;
		}
	}

	/* Save results */
	if (refchg != 0) {
		m = PHYS_TO_VM_PAGE(PVO_PADDR(sp));
		atomic_set_32(&m->md.mdpg_attrs, refchg | MDPG_ATTR_SP);
	}

	return (refchg);
}

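/*
 * Locking wrapper around moea64_sp_query_locked(): acquire the pmap lock,
 * return -1 if the superpage was demoted or removed in the meantime, and
 * otherwise return the accumulated REF/CHG bits.
 */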
static int64_t
moea64_sp_query(struct pvo_entry *pvo, uint64_t ptebit)
{
	int64_t refchg;
	pmap_t pmap;

	pmap = pvo->pvo_pmap;
	PMAP_LOCK(pmap);

	/*
	 * Check if SP was demoted/removed before pmap lock was acquired.
	 */
	if (!PVO_IS_SP(pvo) || (pvo->pvo_vaddr & PVO_DEAD) != 0) {
		CTR2(KTR_PMAP, "%s: demoted/removed: pa=%#jx",
		    __func__, (uintmax_t)PVO_PADDR(pvo));
		PMAP_UNLOCK(pmap);
		return (-1);
	}

	refchg = moea64_sp_query_locked(pvo, ptebit);
	PMAP_UNLOCK(pmap);

	CTR4(KTR_PMAP, "%s: va=%#jx, pa=%#jx: refchg=%#jx",
	    __func__, (uintmax_t)PVO_VADDR(pvo),
	    (uintmax_t)PVO_PADDR(pvo), (uintmax_t)refchg);

	return (refchg);
}

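/*
 * Clear the given REF/CHG bit ("ptebit") in the PTE of every base page of
 * the superpage containing "pvo" and in the cached md attributes of its
 * first page.  Returns the REF/CHG bits that were found set, or -1 if the
 * superpage was demoted or removed before the pmap lock was acquired.
 */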
static int64_t
moea64_sp_pvo_clear(struct pvo_entry *pvo, uint64_t ptebit)
{
	int64_t refchg, ret;
	pmap_t pmap;
	struct pvo_entry *sp;
	vm_offset_t eva;
	vm_page_t m;

	pmap = pvo->pvo_pmap;
	PMAP_LOCK(pmap);

	/*
	 * Check if SP was demoted/removed before pmap lock was acquired.
	 */
	if (!PVO_IS_SP(pvo) || (pvo->pvo_vaddr & PVO_DEAD) != 0) {
		CTR2(KTR_PMAP, "%s: demoted/removed: pa=%#jx",
		    __func__, (uintmax_t)PVO_PADDR(pvo));
		PMAP_UNLOCK(pmap);
		return (-1);
	}

	/* Get first SP PVO */
	if ((PVO_VADDR(pvo) & HPT_SP_MASK) != 0) {
		sp = moea64_pvo_find_va(pmap, PVO_VADDR(pvo) & ~HPT_SP_MASK);
		KASSERT(sp != NULL, ("%s: missing PVO for va %#jx",
		    __func__, (uintmax_t)(PVO_VADDR(pvo) & ~HPT_SP_MASK)));
	} else
		sp = pvo;
	eva = PVO_VADDR(sp) + HPT_SP_SIZE;

	refchg = 0;
	for (pvo = sp; pvo != NULL && PVO_VADDR(pvo) < eva;
	    pvo = RB_NEXT(pvo_tree, &pmap->pmap_pvo, pvo)) {
		ret = moea64_pte_clear(pvo, ptebit);
		if (ret > 0)
			refchg |= ret & (LPTE_CHG | LPTE_REF);
	}

	m = PHYS_TO_VM_PAGE(PVO_PADDR(sp));
	atomic_clear_32(&m->md.mdpg_attrs, ptebit);
	PMAP_UNLOCK(pmap);

	CTR4(KTR_PMAP, "%s: va=%#jx, pa=%#jx: refchg=%#jx",
	    __func__, (uintmax_t)PVO_VADDR(sp),
	    (uintmax_t)PVO_PADDR(sp), (uintmax_t)refchg);

	return (refchg);
}

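/*
 * Clear "ptebit" for a page that is part of a superpage mapping.  LPTE_REF
 * is cleared only when the call is for the base page selected by the hash
 * below; for LPTE_CHG the superpage is first demoted, so that only the
 * single 4KB mapping needs to be cleared and write protected.  Returns 1 if
 * the bit was found (or, for REF, assumed) set, 0 if it was clear, and -1
 * if the superpage was demoted or removed concurrently.
 */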
static int64_t
moea64_sp_clear(struct pvo_entry *pvo, vm_page_t m, uint64_t ptebit)
{
	int64_t count, ret;
	pmap_t pmap;

	count = 0;
	pmap = pvo->pvo_pmap;

	/*
	 * Since this reference bit is shared by 4096 4KB pages, it
	 * should not be cleared every time it is tested. Apply a
	 * simple "hash" function on the physical page number, the
	 * virtual superpage number, and the pmap address to select
	 * one 4KB page out of the 4096 on which testing the
	 * reference bit will result in clearing that reference bit.
	 * This function is designed to avoid the selection of the
	 * same 4KB page for every 16MB page mapping.
	 *
	 * Always leave the reference bit of a wired mapping set, as
	 * the current state of its reference bit won't affect page
	 * replacement.
	 */
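	/*
	 * For example (assuming PAGE_SHIFT == 12, HPT_SP_SHIFT == 24 and
	 * HPT_SP_PAGES == 4096, with hypothetical addresses): a pmap at
	 * 0xc0001000 mapping a superpage at va 0x30000000 selects the one
	 * base page whose physical page number pfn satisfies
	 * (pfn ^ 0x30 ^ 0xc0001000) & 0xfff == 0, e.g. pfn 0x1030; only a
	 * query on that page actually clears the shared reference bit,
	 * while queries on the other 4095 pages just report it as set.
	 */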
	if (ptebit == LPTE_REF && (((VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) ^
	    (PVO_VADDR(pvo) >> HPT_SP_SHIFT) ^ (uintptr_t)pmap) &
	    (HPT_SP_PAGES - 1)) == 0 && (pvo->pvo_vaddr & PVO_WIRED) == 0) {
		if ((ret = moea64_sp_pvo_clear(pvo, ptebit)) == -1)
			return (-1);

		if ((ret & ptebit) != 0)
			count++;

	/*
	 * If this page was not selected by the hash function, then assume
	 * its REF bit was set.
	 */
	} else if (ptebit == LPTE_REF) {
		count++;

	/*
	 * To clear the CHG bit of a single SP page, the superpage must first
	 * be demoted.  But if no CHG bit is set, there is nothing to clear
	 * and thus no demotion is needed.
	 */
	} else {
		CTR4(KTR_PMAP, "%s: ptebit=%#jx, va=%#jx, pa=%#jx",
		    __func__, (uintmax_t)ptebit, (uintmax_t)PVO_VADDR(pvo),
		    (uintmax_t)PVO_PADDR(pvo));

		PMAP_LOCK(pmap);

		/*
		 * Make sure SP wasn't demoted/removed before pmap lock
		 * was acquired.
		 */
		if (!PVO_IS_SP(pvo) || (pvo->pvo_vaddr & PVO_DEAD) != 0) {
			CTR2(KTR_PMAP, "%s: demoted/removed: pa=%#jx",
			    __func__, (uintmax_t)PVO_PADDR(pvo));
			PMAP_UNLOCK(pmap);
			return (-1);
		}

		ret = moea64_sp_query_locked(pvo, ptebit);
		if ((ret & ptebit) != 0)
			count++;
		else {
			PMAP_UNLOCK(pmap);
			return (0);
		}

		moea64_sp_demote(pvo);
		moea64_pte_clear(pvo, ptebit);

		/*
		 * Write protect the mapping to a single page so that a
		 * subsequent write access may repromote.
		 */
		if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
			moea64_pvo_protect(pmap, pvo,
			    pvo->pvo_pte.prot & ~VM_PROT_WRITE);

		PMAP_UNLOCK(pmap);
	}

	return (count);
}
