xref: /f-stack/freebsd/mips/mips/pmap.c (revision 22ce4aff)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1991 Regents of the University of California.
5  * All rights reserved.
6  * Copyright (c) 1994 John S. Dyson
7  * All rights reserved.
8  * Copyright (c) 1994 David Greenman
9  * All rights reserved.
10  *
11  * This code is derived from software contributed to Berkeley by
12  * the Systems Programming Group of the University of Utah Computer
13  * Science Department and William Jolitz of UUNET Technologies Inc.
14  *
15  * Redistribution and use in source and binary forms, with or without
16  * modification, are permitted provided that the following conditions
17  * are met:
18  * 1. Redistributions of source code must retain the above copyright
19  *    notice, this list of conditions and the following disclaimer.
20  * 2. Redistributions in binary form must reproduce the above copyright
21  *    notice, this list of conditions and the following disclaimer in the
22  *    documentation and/or other materials provided with the distribution.
23  * 3. Neither the name of the University nor the names of its contributors
24  *    may be used to endorse or promote products derived from this software
25  *    without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
28  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37  * SUCH DAMAGE.
38  *
39  *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
40  *	from: src/sys/i386/i386/pmap.c,v 1.250.2.8 2000/11/21 00:09:14 ps
41  *	JNPR: pmap.c,v 1.11.2.1 2007/08/16 11:51:06 girish
42  */
43 
44 /*
45  *	Manages physical address maps.
46  *
47  *	Since the information managed by this module is
48  *	also stored by the logical address mapping module,
49  *	this module may throw away valid virtual-to-physical
50  *	mappings at almost any time.  However, invalidations
51  *	of virtual-to-physical mappings must be done as
52  *	requested.
53  *
54  *	In order to cope with hardware architectures which
55  *	make virtual-to-physical map invalidates expensive,
56  *	this module may delay invalidate or reduced protection
57  *	operations until such time as they are actually
58  *	necessary.  This module is given full information as
59  *	to which processors are currently using which maps,
60  *	and to when physical maps must be made correct.
61  */
62 
63 #include <sys/cdefs.h>
64 __FBSDID("$FreeBSD$");
65 
66 #include "opt_ddb.h"
67 #include "opt_pmap.h"
68 
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/kernel.h>
72 #include <sys/lock.h>
73 #include <sys/mman.h>
74 #include <sys/msgbuf.h>
75 #include <sys/mutex.h>
76 #include <sys/pcpu.h>
77 #include <sys/proc.h>
78 #include <sys/rwlock.h>
79 #include <sys/sched.h>
80 #include <sys/smp.h>
81 #include <sys/sysctl.h>
82 #include <sys/vmmeter.h>
83 
84 #ifdef DDB
85 #include <ddb/ddb.h>
86 #endif
87 
88 #include <vm/vm.h>
89 #include <vm/vm_param.h>
90 #include <vm/vm_kern.h>
91 #include <vm/vm_page.h>
92 #include <vm/vm_phys.h>
93 #include <vm/vm_map.h>
94 #include <vm/vm_object.h>
95 #include <vm/vm_extern.h>
96 #include <vm/vm_pageout.h>
97 #include <vm/vm_pager.h>
98 #include <vm/vm_dumpset.h>
99 #include <vm/uma.h>
100 
101 #include <machine/cache.h>
102 #include <machine/md_var.h>
103 #include <machine/tlb.h>
104 
105 #undef PMAP_DEBUG
106 
107 #if !defined(DIAGNOSTIC)
108 #define	PMAP_INLINE __inline
109 #else
110 #define	PMAP_INLINE
111 #endif
112 
113 #ifdef PV_STATS
114 #define PV_STAT(x)	do { x ; } while (0)
115 #else
116 #define PV_STAT(x)	do { } while (0)
117 #endif
118 
119 /*
120  * Get PDEs and PTEs for user/kernel address space
121  */
122 #define	pmap_seg_index(v)	(((v) >> SEGSHIFT) & (NPDEPG - 1))
123 #define	pmap_pde_index(v)	(((v) >> PDRSHIFT) & (NPDEPG - 1))
124 #define	pmap_pte_index(v)	(((v) >> PAGE_SHIFT) & (NPTEPG - 1))
125 #define	pmap_pde_pindex(v)	((v) >> PDRSHIFT)
126 
127 #ifdef __mips_n64
128 #define	NUPDE			(NPDEPG * NPDEPG)
129 #define	NUSERPGTBLS		(NUPDE + NPDEPG)
130 #else
131 #define	NUPDE			(NPDEPG)
132 #define	NUSERPGTBLS		(NUPDE)
133 #endif
134 
135 #define	is_kernel_pmap(x)	((x) == kernel_pmap)
136 
137 struct pmap kernel_pmap_store;
138 pd_entry_t *kernel_segmap;
139 
140 vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
141 vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
142 
143 static int need_local_mappings;
144 
145 static int nkpt;
146 unsigned pmap_max_asid;		/* max ASID supported by the system */
147 
148 #define	PMAP_ASID_RESERVED	0
149 
150 vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
151 
152 static void pmap_asid_alloc(pmap_t pmap);
153 
154 static struct rwlock_padalign pvh_global_lock;
155 
156 /*
157  * Data for the pv entry allocation mechanism
158  */
159 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
160 static int pv_entry_count;
161 
162 static void free_pv_chunk(struct pv_chunk *pc);
163 static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
164 static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try);
165 static vm_page_t pmap_pv_reclaim(pmap_t locked_pmap);
166 static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
167 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
168     vm_offset_t va);
169 static vm_page_t pmap_alloc_direct_page(unsigned int index, int req);
170 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
171     vm_page_t m, vm_prot_t prot, vm_page_t mpte);
172 static void pmap_grow_direct_page(int req);
173 static int pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va,
174     pd_entry_t pde);
175 static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
176 static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va);
177 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte,
178     vm_offset_t va, vm_page_t m);
179 static void pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte);
180 static void pmap_invalidate_all(pmap_t pmap);
181 static void pmap_invalidate_page(pmap_t pmap, vm_offset_t va);
182 static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m);
183 
184 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags);
185 static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, u_int flags);
186 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t);
187 static pt_entry_t init_pte_prot(vm_page_t m, vm_prot_t access, vm_prot_t prot);
188 
189 static void pmap_invalidate_page_action(void *arg);
190 static void pmap_invalidate_range_action(void *arg);
191 static void pmap_update_page_action(void *arg);
192 
193 #ifndef __mips_n64
194 
195 static vm_offset_t crashdumpva;
196 
197 /*
198  * These functions are for high memory (memory above 512Meg in 32 bit) support.
199  * The highmem area does not have a KSEG0 mapping, and we need a mechanism to
200  * do temporary per-CPU mappings for pmap_zero_page, pmap_copy_page etc.
201  *
202  * At bootup, we reserve 2 virtual pages per CPU for mapping highmem pages. To
203  * access a highmem physical address on a CPU, we map the physical address to
204  * the reserved virtual address for the CPU in the kernel pagetable.
205  */
206 
207 static void
208 pmap_init_reserved_pages(void)
209 {
210 	struct pcpu *pc;
211 	vm_offset_t pages;
212  	int i;
213 
214 	if (need_local_mappings == 0)
215 		return;
216 
217 	CPU_FOREACH(i) {
218 		pc = pcpu_find(i);
219 		/*
220 		 * Skip if the mapping has already been initialized,
221 		 * i.e. this is the BSP.
222 		 */
223 		if (pc->pc_cmap1_addr != 0)
224 			continue;
225 		pages =  kva_alloc(PAGE_SIZE * 3);
226 		if (pages == 0)
227 			panic("%s: unable to allocate KVA", __func__);
228 		pc->pc_cmap1_ptep = pmap_pte(kernel_pmap, pages);
229 		pc->pc_cmap2_ptep = pmap_pte(kernel_pmap, pages + PAGE_SIZE);
230 		pc->pc_qmap_ptep =
231 		    pmap_pte(kernel_pmap, pages + (PAGE_SIZE * 2));
232 		pc->pc_cmap1_addr = pages;
233 		pc->pc_cmap2_addr = pages + PAGE_SIZE;
234 		pc->pc_qmap_addr = pages + (PAGE_SIZE * 2);
235  	}
236 }
237 SYSINIT(rpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_reserved_pages, NULL);
238 
239 static __inline void
240 pmap_alloc_lmem_map(void)
241 {
242 	PCPU_SET(cmap1_addr, virtual_avail);
243 	PCPU_SET(cmap2_addr, virtual_avail + PAGE_SIZE);
244 	PCPU_SET(cmap1_ptep, pmap_pte(kernel_pmap, virtual_avail));
245 	PCPU_SET(cmap2_ptep, pmap_pte(kernel_pmap, virtual_avail + PAGE_SIZE));
246 	PCPU_SET(qmap_addr, virtual_avail + (2 * PAGE_SIZE));
247 	PCPU_SET(qmap_ptep, pmap_pte(kernel_pmap, virtual_avail + (2 * PAGE_SIZE)));
248 	crashdumpva = virtual_avail + (3 * PAGE_SIZE);
249 	virtual_avail += PAGE_SIZE * 4;
250 }
251 
252 static __inline vm_offset_t
253 pmap_lmem_map1(vm_paddr_t phys)
254 {
255 	critical_enter();
256 	*PCPU_GET(cmap1_ptep) =
257 	    TLBLO_PA_TO_PFN(phys) | PTE_C_CACHE | PTE_D | PTE_V | PTE_G;
258 	return (PCPU_GET(cmap1_addr));
259 }
260 
261 static __inline vm_offset_t
262 pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2)
263 {
264 	critical_enter();
265 	*PCPU_GET(cmap1_ptep) =
266 	    TLBLO_PA_TO_PFN(phys1) | PTE_C_CACHE | PTE_D | PTE_V | PTE_G;
267 	*PCPU_GET(cmap2_ptep) =
268 	    TLBLO_PA_TO_PFN(phys2) | PTE_C_CACHE | PTE_D | PTE_V | PTE_G;
269 	return (PCPU_GET(cmap1_addr));
270 }
271 
272 static __inline void
273 pmap_lmem_unmap(void)
274 {
275 	 *PCPU_GET(cmap1_ptep) = PTE_G;
276 	tlb_invalidate_address(kernel_pmap, PCPU_GET(cmap1_addr));
277 	if (*PCPU_GET(cmap2_ptep) != PTE_G) {
278 		*PCPU_GET(cmap2_ptep) = PTE_G;
279 		tlb_invalidate_address(kernel_pmap, PCPU_GET(cmap2_addr));
280  	}
281 	critical_exit();
282 }
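/*
 * Illustrative sketch only (not part of this file): a hypothetical caller
 * on !__mips_n64 would bracket access to a non-direct-mappable page with
 * pmap_lmem_map1() and pmap_lmem_unmap(); the helpers enter and exit a
 * critical section around the temporary per-CPU mapping.
 */
#if 0
static void
example_zero_highmem_page(vm_paddr_t pa)
{
	vm_offset_t va;

	va = pmap_lmem_map1(pa);	/* enters a critical section */
	bzero((void *)va, PAGE_SIZE);	/* access via the per-CPU window */
	pmap_lmem_unmap();		/* invalidate TLB, exit critical section */
}
#endif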
283 
284 #else  /* __mips_n64 */
285 
286 static __inline void
287 pmap_alloc_lmem_map(void)
288 {
289 }
290 
291 static __inline vm_offset_t
292 pmap_lmem_map1(vm_paddr_t phys)
293 {
294 
295 	return (0);
296 }
297 
298 static __inline vm_offset_t
299 pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2)
300 {
301 
302 	return (0);
303 }
304 
305 static __inline vm_offset_t
306 pmap_lmem_unmap(void)
307 {
308 
309 	return (0);
310 }
311 #endif /* !__mips_n64 */
312 
313 static __inline int
314 pmap_pte_cache_bits(vm_paddr_t pa, vm_page_t m)
315 {
316 	vm_memattr_t ma;
317 
318 	ma = pmap_page_get_memattr(m);
319 	if (ma == VM_MEMATTR_WRITE_BACK && !is_cacheable_mem(pa))
320 		ma = VM_MEMATTR_UNCACHEABLE;
321 	return PTE_C(ma);
322 }
323 #define PMAP_PTE_SET_CACHE_BITS(pte, pa, m) {	\
324 	pte &= ~PTE_C_MASK;			\
325 	pte |= pmap_pte_cache_bits(pa, m);	\
326 }
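/*
 * Usage sketch (hypothetical, for illustration only): when a new pte is
 * built for a page, the cache coherency field is filled from the page's
 * memory attribute, downgrading to uncached when the physical address is
 * not cacheable memory.
 */
#if 0
	pt_entry_t newpte;

	newpte = TLBLO_PA_TO_PFN(pa) | PTE_V;
	PMAP_PTE_SET_CACHE_BITS(newpte, pa, m);
#endif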
327 
328 /*
329  * Page table entry lookup routines.
330  */
331 static __inline pd_entry_t *
332 pmap_segmap(pmap_t pmap, vm_offset_t va)
333 {
334 
335 	return (&pmap->pm_segtab[pmap_seg_index(va)]);
336 }
337 
338 #ifdef __mips_n64
339 static __inline pd_entry_t *
340 pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va)
341 {
342 	pd_entry_t *pde;
343 
344 	pde = (pd_entry_t *)*pdpe;
345 	return (&pde[pmap_pde_index(va)]);
346 }
347 
348 static __inline pd_entry_t *
349 pmap_pde(pmap_t pmap, vm_offset_t va)
350 {
351 	pd_entry_t *pdpe;
352 
353 	pdpe = pmap_segmap(pmap, va);
354 	if (*pdpe == NULL)
355 		return (NULL);
356 
357 	return (pmap_pdpe_to_pde(pdpe, va));
358 }
359 #else
360 static __inline pd_entry_t *
361 pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va)
362 {
363 
364 	return (pdpe);
365 }
366 
367 static __inline
368 pd_entry_t *pmap_pde(pmap_t pmap, vm_offset_t va)
369 {
370 
371 	return (pmap_segmap(pmap, va));
372 }
373 #endif
374 
375 static __inline pt_entry_t *
376 pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va)
377 {
378 	pt_entry_t *pte;
379 
380 	pte = (pt_entry_t *)*pde;
381 	return (&pte[pmap_pte_index(va)]);
382 }
383 
384 pt_entry_t *
385 pmap_pte(pmap_t pmap, vm_offset_t va)
386 {
387 	pd_entry_t *pde;
388 
389 	pde = pmap_pde(pmap, va);
390 	if (pde == NULL || *pde == NULL)
391 		return (NULL);
392 
393 	return (pmap_pde_to_pte(pde, va));
394 }
395 
396 vm_offset_t
397 pmap_steal_memory(vm_size_t size)
398 {
399 	vm_paddr_t bank_size, pa;
400 	vm_offset_t va;
401 
402 	size = round_page(size);
403 	bank_size = phys_avail[1] - phys_avail[0];
404 	while (size > bank_size) {
405 		int i;
406 
407 		for (i = 0; phys_avail[i + 2]; i += 2) {
408 			phys_avail[i] = phys_avail[i + 2];
409 			phys_avail[i + 1] = phys_avail[i + 3];
410 		}
411 		phys_avail[i] = 0;
412 		phys_avail[i + 1] = 0;
413 		if (!phys_avail[0])
414 			panic("pmap_steal_memory: out of memory");
415 		bank_size = phys_avail[1] - phys_avail[0];
416 	}
417 
418 	pa = phys_avail[0];
419 	phys_avail[0] += size;
420 	if (MIPS_DIRECT_MAPPABLE(pa) == 0)
421 		panic("Out of memory below 512Meg?");
422 	va = MIPS_PHYS_TO_DIRECT(pa);
423 	bzero((caddr_t)va, size);
424 	return (va);
425 }
426 
427 /*
428  * Bootstrap the system enough to run with virtual memory.  This
429  * assumes that the phys_avail array has been initialized.
430  */
431 static void
432 pmap_create_kernel_pagetable(void)
433 {
434 	int i, j;
435 	vm_offset_t ptaddr;
436 	pt_entry_t *pte;
437 #ifdef __mips_n64
438 	pd_entry_t *pde;
439 	vm_offset_t pdaddr;
440 	int npt, npde;
441 #endif
442 
443 	/*
444 	 * Allocate segment table for the kernel
445 	 */
446 	kernel_segmap = (pd_entry_t *)pmap_steal_memory(PAGE_SIZE);
447 
448 	/*
449 	 * Allocate second level page tables for the kernel
450 	 */
451 #ifdef __mips_n64
452 	npde = howmany(NKPT, NPDEPG);
453 	pdaddr = pmap_steal_memory(PAGE_SIZE * npde);
454 #endif
455 	nkpt = NKPT;
456 	ptaddr = pmap_steal_memory(PAGE_SIZE * nkpt);
457 
458 	/*
459 	 * The R[4-7]?00 stores only one copy of the Global bit in the
460 	 * translation lookaside buffer for each 2 page entry. Thus invalid
461 	 * entries must have the Global bit set so when Entry LO and Entry HI
462 	 * G bits are anded together they will produce a global bit to store
463 	 * in the tlb.
464 	 */
465 	for (i = 0, pte = (pt_entry_t *)ptaddr; i < (nkpt * NPTEPG); i++, pte++)
466 		*pte = PTE_G;
467 
468 #ifdef __mips_n64
469 	for (i = 0,  npt = nkpt; npt > 0; i++) {
470 		kernel_segmap[i] = (pd_entry_t)(pdaddr + i * PAGE_SIZE);
471 		pde = (pd_entry_t *)kernel_segmap[i];
472 
473 		for (j = 0; j < NPDEPG && npt > 0; j++, npt--)
474 			pde[j] = (pd_entry_t)(ptaddr + (i * NPDEPG + j) * PAGE_SIZE);
475 	}
476 #else
477 	for (i = 0, j = pmap_seg_index(VM_MIN_KERNEL_ADDRESS); i < nkpt; i++, j++)
478 		kernel_segmap[j] = (pd_entry_t)(ptaddr + (i * PAGE_SIZE));
479 #endif
480 
481 	PMAP_LOCK_INIT(kernel_pmap);
482 	kernel_pmap->pm_segtab = kernel_segmap;
483 	CPU_FILL(&kernel_pmap->pm_active);
484 	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
485 	kernel_pmap->pm_asid[0].asid = PMAP_ASID_RESERVED;
486 	kernel_pmap->pm_asid[0].gen = 0;
487 	kernel_vm_end += nkpt * NPTEPG * PAGE_SIZE;
488 }
489 
490 void
491 pmap_bootstrap(void)
492 {
493 	int i;
494 
495 	/* Sort. */
496 again:
497 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
498 		/*
499 		 * Keep the memory aligned on page boundary.
500 		 */
501 		phys_avail[i] = round_page(phys_avail[i]);
502 		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
503 
504 		if (i < 2)
505 			continue;
506 		if (phys_avail[i - 2] > phys_avail[i]) {
507 			vm_paddr_t ptemp[2];
508 
509 			ptemp[0] = phys_avail[i + 0];
510 			ptemp[1] = phys_avail[i + 1];
511 
512 			phys_avail[i + 0] = phys_avail[i - 2];
513 			phys_avail[i + 1] = phys_avail[i - 1];
514 
515 			phys_avail[i - 2] = ptemp[0];
516 			phys_avail[i - 1] = ptemp[1];
517 			goto again;
518 		}
519 	}
520 
521        	/*
522 	 * In 32 bit, we may have memory which cannot be mapped directly.
523 	 * This memory will need temporary mapping before it can be
524 	 * accessed.
525 	 */
526 	if (!MIPS_DIRECT_MAPPABLE(phys_avail[i - 1] - 1))
527 		need_local_mappings = 1;
528 
529 	/*
530 	 * Copy the phys_avail[] array before we start stealing memory from it.
531 	 */
532 	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
533 		physmem_desc[i] = phys_avail[i];
534 		physmem_desc[i + 1] = phys_avail[i + 1];
535 	}
536 
537 	Maxmem = atop(phys_avail[i - 1]);
538 
539 	if (bootverbose) {
540 		printf("Physical memory chunk(s):\n");
541 		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
542 			vm_paddr_t size;
543 
544 			size = phys_avail[i + 1] - phys_avail[i];
545 			printf("%#08jx - %#08jx, %ju bytes (%ju pages)\n",
546 			    (uintmax_t) phys_avail[i],
547 			    (uintmax_t) phys_avail[i + 1] - 1,
548 			    (uintmax_t) size, (uintmax_t) size / PAGE_SIZE);
549 		}
550 		printf("Maxmem is 0x%0jx\n", ptoa((uintmax_t)Maxmem));
551 	}
552 	/*
553 	 * Steal the message buffer from the beginning of memory.
554 	 */
555 	msgbufp = (struct msgbuf *)pmap_steal_memory(msgbufsize);
556 	msgbufinit(msgbufp, msgbufsize);
557 
558 	/*
559 	 * Steal thread0 kstack.
560 	 */
561 	kstack0 = pmap_steal_memory(KSTACK_PAGES << PAGE_SHIFT);
562 
563 	virtual_avail = VM_MIN_KERNEL_ADDRESS;
564 	virtual_end = VM_MAX_KERNEL_ADDRESS;
565 
566 #ifdef SMP
567 	/*
568 	 * Steal some virtual address space to map the pcpu area.
569 	 */
570 	virtual_avail = roundup2(virtual_avail, PAGE_SIZE * 2);
571 	pcpup = (struct pcpu *)virtual_avail;
572 	virtual_avail += PAGE_SIZE * 2;
573 
574 	/*
575 	 * Initialize the wired TLB entry mapping the pcpu region for
576 	 * the BSP at 'pcpup'. Up until this point we were operating
577 	 * with the 'pcpup' for the BSP pointing to a virtual address
578 	 * in KSEG0 so there was no need for a TLB mapping.
579 	 */
580 	mips_pcpu_tlb_init(PCPU_ADDR(0));
581 
582 	if (bootverbose)
583 		printf("pcpu is available at virtual address %p.\n", pcpup);
584 #endif
585 
586 	pmap_create_kernel_pagetable();
587 	if (need_local_mappings)
588 		pmap_alloc_lmem_map();
589 	pmap_max_asid = VMNUM_PIDS;
590 	mips_wr_entryhi(0);
591 	mips_wr_pagemask(0);
592 
593  	/*
594 	 * Initialize the global pv list lock.
595 	 */
596 	rw_init(&pvh_global_lock, "pmap pv global");
597 }
598 
599 /*
600  * Initialize a vm_page's machine-dependent fields.
601  */
602 void
603 pmap_page_init(vm_page_t m)
604 {
605 
606 	TAILQ_INIT(&m->md.pv_list);
607 	m->md.pv_flags = VM_MEMATTR_DEFAULT << PV_MEMATTR_SHIFT;
608 }
609 
610 /*
611  *	Initialize the pmap module.
612  *	Called by vm_init, to initialize any structures that the pmap
613  *	system needs to map virtual memory.
614  */
615 void
616 pmap_init(void)
617 {
618 }
619 
620 /***************************************************
621  * Low level helper routines.....
622  ***************************************************/
623 
624 #ifdef	SMP
625 static __inline void
626 pmap_call_on_active_cpus(pmap_t pmap, void (*fn)(void *), void *arg)
627 {
628 	int	cpuid, cpu, self;
629 	cpuset_t active_cpus;
630 
631 	sched_pin();
632 	if (is_kernel_pmap(pmap)) {
633 		smp_rendezvous(NULL, fn, NULL, arg);
634 		goto out;
635 	}
636 	/* Force ASID update on inactive CPUs */
637 	CPU_FOREACH(cpu) {
638 		if (!CPU_ISSET(cpu, &pmap->pm_active))
639 			pmap->pm_asid[cpu].gen = 0;
640 	}
641 	cpuid = PCPU_GET(cpuid);
642 	/*
643 	 * XXX: barrier/locking for active?
644 	 *
645 	 * Take a snapshot of active here, any further changes are ignored.
646 	 * tlb update/invalidate should be harmless on inactive CPUs
647 	 */
648 	active_cpus = pmap->pm_active;
649 	self = CPU_ISSET(cpuid, &active_cpus);
650 	CPU_CLR(cpuid, &active_cpus);
651 	/* Optimize for the case where this cpu is the only active one */
652 	if (CPU_EMPTY(&active_cpus)) {
653 		if (self)
654 			fn(arg);
655 	} else {
656 		if (self)
657 			CPU_SET(cpuid, &active_cpus);
658 		smp_rendezvous_cpus(active_cpus, NULL, fn, NULL, arg);
659 	}
660 out:
661 	sched_unpin();
662 }
663 #else /* !SMP */
664 static __inline void
665 pmap_call_on_active_cpus(pmap_t pmap, void (*fn)(void *), void *arg)
666 {
667 	int	cpuid;
668 
669 	if (is_kernel_pmap(pmap)) {
670 		fn(arg);
671 		return;
672 	}
673 	cpuid = PCPU_GET(cpuid);
674 	if (!CPU_ISSET(cpuid, &pmap->pm_active))
675 		pmap->pm_asid[cpuid].gen = 0;
676 	else
677 		fn(arg);
678 }
679 #endif /* SMP */
680 
681 static void
682 pmap_invalidate_all(pmap_t pmap)
683 {
684 
685 	pmap_call_on_active_cpus(pmap,
686 	    (void (*)(void *))tlb_invalidate_all_user, pmap);
687 }
688 
689 struct pmap_invalidate_page_arg {
690 	pmap_t pmap;
691 	vm_offset_t va;
692 };
693 
694 static void
695 pmap_invalidate_page_action(void *arg)
696 {
697 	struct pmap_invalidate_page_arg *p = arg;
698 
699 	tlb_invalidate_address(p->pmap, p->va);
700 }
701 
702 static void
703 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
704 {
705 	struct pmap_invalidate_page_arg arg;
706 
707 	arg.pmap = pmap;
708 	arg.va = va;
709 	pmap_call_on_active_cpus(pmap, pmap_invalidate_page_action, &arg);
710 }
711 
712 struct pmap_invalidate_range_arg {
713 	pmap_t pmap;
714 	vm_offset_t sva;
715 	vm_offset_t eva;
716 };
717 
718 static void
719 pmap_invalidate_range_action(void *arg)
720 {
721 	struct pmap_invalidate_range_arg *p = arg;
722 
723 	tlb_invalidate_range(p->pmap, p->sva, p->eva);
724 }
725 
726 static void
727 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
728 {
729 	struct pmap_invalidate_range_arg arg;
730 
731 	arg.pmap = pmap;
732 	arg.sva = sva;
733 	arg.eva = eva;
734 	pmap_call_on_active_cpus(pmap, pmap_invalidate_range_action, &arg);
735 }
736 
737 struct pmap_update_page_arg {
738 	pmap_t pmap;
739 	vm_offset_t va;
740 	pt_entry_t pte;
741 };
742 
743 static void
744 pmap_update_page_action(void *arg)
745 {
746 	struct pmap_update_page_arg *p = arg;
747 
748 	tlb_update(p->pmap, p->va, p->pte);
749 }
750 
751 static void
752 pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte)
753 {
754 	struct pmap_update_page_arg arg;
755 
756 	arg.pmap = pmap;
757 	arg.va = va;
758 	arg.pte = pte;
759 	pmap_call_on_active_cpus(pmap, pmap_update_page_action, &arg);
760 }
761 
762 /*
763  *	Routine:	pmap_extract
764  *	Function:
765  *		Extract the physical page address associated
766  *		with the given map/virtual_address pair.
767  */
768 vm_paddr_t
769 pmap_extract(pmap_t pmap, vm_offset_t va)
770 {
771 	pt_entry_t *pte;
772 	vm_offset_t retval = 0;
773 
774 	PMAP_LOCK(pmap);
775 	pte = pmap_pte(pmap, va);
776 	if (pte) {
777 		retval = TLBLO_PTE_TO_PA(*pte) | (va & PAGE_MASK);
778 	}
779 	PMAP_UNLOCK(pmap);
780 	return (retval);
781 }
782 
783 /*
784  *	Routine:	pmap_extract_and_hold
785  *	Function:
786  *		Atomically extract and hold the physical page
787  *		with the given pmap and virtual address pair
788  *		if that mapping permits the given protection.
789  */
790 vm_page_t
791 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
792 {
793 	pt_entry_t pte, *ptep;
794 	vm_paddr_t pa;
795 	vm_page_t m;
796 
797 	m = NULL;
798 	PMAP_LOCK(pmap);
799 	ptep = pmap_pte(pmap, va);
800 	if (ptep != NULL) {
801 		pte = *ptep;
802 		if (pte_test(&pte, PTE_V) && (!pte_test(&pte, PTE_RO) ||
803 		    (prot & VM_PROT_WRITE) == 0)) {
804 			pa = TLBLO_PTE_TO_PA(pte);
805 			m = PHYS_TO_VM_PAGE(pa);
806 			if (!vm_page_wire_mapped(m))
807 				m = NULL;
808 		}
809 	}
810 	PMAP_UNLOCK(pmap);
811 	return (m);
812 }
813 
814 /***************************************************
815  * Low level mapping routines.....
816  ***************************************************/
817 
818 /*
819  * add a wired page to the kva
820  */
821 void
822 pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
823 {
824 	pt_entry_t *pte;
825 	pt_entry_t opte, npte;
826 
827 #ifdef PMAP_DEBUG
828 	printf("pmap_kenter:  va: %p -> pa: %p\n", (void *)va, (void *)pa);
829 #endif
830 
831 	pte = pmap_pte(kernel_pmap, va);
832 	opte = *pte;
833 	npte = TLBLO_PA_TO_PFN(pa) | PTE_C(ma) | PTE_D | PTE_V | PTE_G;
834 	*pte = npte;
835 	if (pte_test(&opte, PTE_V) && opte != npte)
836 		pmap_update_page(kernel_pmap, va, npte);
837 }
838 
839 void
840 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
841 {
842 
843 	KASSERT(is_cacheable_mem(pa),
844 		("pmap_kenter: memory at 0x%lx is not cacheable", (u_long)pa));
845 
846 	pmap_kenter_attr(va, pa, VM_MEMATTR_DEFAULT);
847 }
848 
849 void
850 pmap_kenter_device(vm_offset_t va, vm_size_t size, vm_paddr_t pa)
851 {
852 
853 	KASSERT((size & PAGE_MASK) == 0,
854 	    ("%s: device mapping not page-sized", __func__));
855 
856 	for (; size > 0; size -= PAGE_SIZE) {
857 		/*
858 		 * XXXCEM: this is somewhat inefficient on SMP systems in that
859 		 * every single page is individually TLB-invalidated via
860 		 * rendezvous (pmap_update_page()), instead of invalidating the
861 		 * entire range via a single rendezvous.
862 		 */
863 		pmap_kenter_attr(va, pa, VM_MEMATTR_UNCACHEABLE);
864 		va += PAGE_SIZE;
865 		pa += PAGE_SIZE;
866 	}
867 }
868 
869 void
870 pmap_kremove_device(vm_offset_t va, vm_size_t size)
871 {
872 
873 	KASSERT((size & PAGE_MASK) == 0,
874 	    ("%s: device mapping not page-sized", __func__));
875 
876 	/*
877 	 * XXXCEM: Similar to pmap_kenter_device, this is inefficient on SMP,
878 	 * in that pages are invalidated individually instead of a single range
879 	 * rendezvous.
880 	 */
881 	for (; size > 0; size -= PAGE_SIZE) {
882 		pmap_kremove(va);
883 		va += PAGE_SIZE;
884 	}
885 }
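/*
 * Usage sketch (hypothetical caller): device mappings created with
 * pmap_kenter_device() are torn down with pmap_kremove_device() over the
 * same page-aligned range.
 */
#if 0
	vm_offset_t va;

	va = kva_alloc(size);			/* size is a multiple of PAGE_SIZE */
	pmap_kenter_device(va, size, start_pa);
	/* ... access the device registers through 'va' ... */
	pmap_kremove_device(va, size);
	kva_free(va, size);
#endif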
886 
887 /*
888  * remove a page from the kernel pagetables
889  */
890  /* PMAP_INLINE */ void
891 pmap_kremove(vm_offset_t va)
892 {
893 	pt_entry_t *pte;
894 
895 	/*
896 	 * Write back all caches from the page being destroyed
897 	 */
898 	mips_dcache_wbinv_range_index(va, PAGE_SIZE);
899 
900 	pte = pmap_pte(kernel_pmap, va);
901 	*pte = PTE_G;
902 	pmap_invalidate_page(kernel_pmap, va);
903 }
904 
905 /*
906  *	Used to map a range of physical addresses into kernel
907  *	virtual address space.
908  *
909  *	The value passed in '*virt' is a suggested virtual address for
910  *	the mapping. Architectures which can support a direct-mapped
911  *	physical to virtual region can return the appropriate address
912  *	within that region, leaving '*virt' unchanged. Other
913  *	architectures should map the pages starting at '*virt' and
914  *	update '*virt' with the first usable address after the mapped
915  *	region.
916  *
917  *	Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
918  */
919 vm_offset_t
920 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
921 {
922 	vm_offset_t va, sva;
923 
924 	if (MIPS_DIRECT_MAPPABLE(end - 1))
925 		return (MIPS_PHYS_TO_DIRECT(start));
926 
927 	va = sva = *virt;
928 	while (start < end) {
929 		pmap_kenter(va, start);
930 		va += PAGE_SIZE;
931 		start += PAGE_SIZE;
932 	}
933 	*virt = va;
934 	return (sva);
935 }
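/*
 * Usage sketch (hypothetical caller): when the range is direct-mappable the
 * cursor passed in '*virt' is left untouched; otherwise the pages are entered
 * starting at '*virt' and the cursor is advanced past the mapped region.
 */
#if 0
	vm_offset_t cursor, va;

	cursor = virtual_avail;
	va = pmap_map(&cursor, start_pa, end_pa, VM_PROT_READ | VM_PROT_WRITE);
	/* 'cursor' was advanced only if XKPHYS/KSEG0 could not be used. */
#endif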
936 
937 /*
938  * Add a list of wired pages to the kva
939  * this routine is only used for temporary
940  * kernel mappings that do not need to have
941  * page modification or references recorded.
942  * Note that old mappings are simply written
943  * over.  The page *must* be wired.
944  */
945 void
946 pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
947 {
948 	int i;
949 	vm_offset_t origva = va;
950 
951 	for (i = 0; i < count; i++) {
952 		pmap_flush_pvcache(m[i]);
953 		pmap_kenter(va, VM_PAGE_TO_PHYS(m[i]));
954 		va += PAGE_SIZE;
955 	}
956 
957 	mips_dcache_wbinv_range_index(origva, PAGE_SIZE*count);
958 }
959 
960 /*
961  * this routine jerks page mappings from the
962  * kernel -- it is meant only for temporary mappings.
963  */
964 void
965 pmap_qremove(vm_offset_t va, int count)
966 {
967 	pt_entry_t *pte;
968 	vm_offset_t origva;
969 
970 	if (count < 1)
971 		return;
972 	mips_dcache_wbinv_range_index(va, PAGE_SIZE * count);
973 	origva = va;
974 	do {
975 		pte = pmap_pte(kernel_pmap, va);
976 		*pte = PTE_G;
977 		va += PAGE_SIZE;
978 	} while (--count > 0);
979 	pmap_invalidate_range(kernel_pmap, origva, va);
980 }
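/*
 * Usage sketch (hypothetical caller): pmap_qenter()/pmap_qremove() pair up
 * for short-lived kernel mappings of an array of wired pages.
 */
#if 0
	vm_offset_t va;

	va = kva_alloc(npages * PAGE_SIZE);
	pmap_qenter(va, ma, npages);		/* ma[] holds wired vm_page_t's */
	/* ... use the temporary mapping ... */
	pmap_qremove(va, npages);
	kva_free(va, npages * PAGE_SIZE);
#endif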
981 
982 /***************************************************
983  * Page table page management routines.....
984  ***************************************************/
985 
986 /*
987  * Decrements a page table page's reference count, which is used to record the
988  * number of valid page table entries within the page.  If the reference count
989  * drops to zero, then the page table page is unmapped.  Returns TRUE if the
990  * page table page was unmapped and FALSE otherwise.
991  */
992 static PMAP_INLINE boolean_t
993 pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m)
994 {
995 
996 	--m->ref_count;
997 	if (m->ref_count == 0) {
998 		_pmap_unwire_ptp(pmap, va, m);
999 		return (TRUE);
1000 	} else
1001 		return (FALSE);
1002 }
1003 
1004 static void
1005 _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m)
1006 {
1007 	pd_entry_t *pde;
1008 	vm_offset_t sva, eva;
1009 
1010 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1011 	/*
1012 	 * unmap the page table page
1013 	 */
1014 #ifdef __mips_n64
1015 	if (m->pindex < NUPDE) {
1016 		pde = pmap_pde(pmap, va);
1017 		sva = va & ~PDRMASK;
1018 		eva = sva + NBPDR;
1019 	} else {
1020 		pde = pmap_segmap(pmap, va);
1021 		sva = va & ~SEGMASK;
1022 		eva = sva + NBSEG;
1023 	}
1024 #else
1025 	pde = pmap_pde(pmap, va);
1026 	sva = va & ~SEGMASK;
1027 	eva = sva + NBSEG;
1028 #endif
1029 	*pde = 0;
1030 	pmap->pm_stats.resident_count--;
1031 
1032 #ifdef __mips_n64
1033 	if (m->pindex < NUPDE) {
1034 		pd_entry_t *pdp;
1035 		vm_page_t pdpg;
1036 
1037 		/*
1038 		 * Recursively decrement next level pagetable refcount.
1039 		 * Either that shoots down a larger range from TLBs (below)
1040 		 * or we're to shoot down just the page in question.
1041 		 */
1042 		pdp = (pd_entry_t *)*pmap_segmap(pmap, va);
1043 		pdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pdp));
1044 		if (!pmap_unwire_ptp(pmap, va, pdpg)) {
1045 			pmap_invalidate_range(pmap, sva, eva);
1046 		}
1047 	} else {
1048 		/* Segmap entry shootdown */
1049 		pmap_invalidate_range(pmap, sva, eva);
1050 	}
1051 #else
1052 	/* Segmap entry shootdown */
1053 	pmap_invalidate_range(pmap, sva, eva);
1054 #endif
1055 
1056 	/*
1057 	 * If the page is finally unwired, simply free it.
1058 	 */
1059 	vm_page_free_zero(m);
1060 	vm_wire_sub(1);
1061 }
1062 
1063 /*
1064  * After removing a page table entry, this routine is used to
1065  * conditionally free the page, and manage the reference count.
1066  */
1067 static int
1068 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t pde)
1069 {
1070 	vm_page_t mpte;
1071 
1072 	if (va >= VM_MAXUSER_ADDRESS)
1073 		return (0);
1074 	KASSERT(pde != 0, ("pmap_unuse_pt: pde != 0"));
1075 	mpte = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pde));
1076 	return (pmap_unwire_ptp(pmap, va, mpte));
1077 }
1078 
1079 void
1080 pmap_pinit0(pmap_t pmap)
1081 {
1082 	int i;
1083 
1084 	PMAP_LOCK_INIT(pmap);
1085 	pmap->pm_segtab = kernel_segmap;
1086 	CPU_ZERO(&pmap->pm_active);
1087 	for (i = 0; i < MAXCPU; i++) {
1088 		pmap->pm_asid[i].asid = PMAP_ASID_RESERVED;
1089 		pmap->pm_asid[i].gen = 0;
1090 	}
1091 	PCPU_SET(curpmap, pmap);
1092 	TAILQ_INIT(&pmap->pm_pvchunk);
1093 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1094 }
1095 
1096 static void
1097 pmap_grow_direct_page(int req)
1098 {
1099 
1100 #ifdef __mips_n64
1101 	vm_wait(NULL);
1102 #else
1103 	if (!vm_page_reclaim_contig(req, 1, 0, MIPS_KSEG0_LARGEST_PHYS,
1104 	    PAGE_SIZE, 0))
1105 		vm_wait(NULL);
1106 #endif
1107 }
1108 
1109 static vm_page_t
1110 pmap_alloc_direct_page(unsigned int index, int req)
1111 {
1112 	vm_page_t m;
1113 
1114 	m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, req | VM_ALLOC_WIRED |
1115 	    VM_ALLOC_ZERO);
1116 	if (m == NULL)
1117 		return (NULL);
1118 
1119 	if ((m->flags & PG_ZERO) == 0)
1120 		pmap_zero_page(m);
1121 
1122 	m->pindex = index;
1123 	return (m);
1124 }
1125 
1126 /*
1127  * Initialize a preallocated and zeroed pmap structure,
1128  * such as one in a vmspace structure.
1129  */
1130 int
1131 pmap_pinit(pmap_t pmap)
1132 {
1133 	vm_offset_t ptdva;
1134 	vm_page_t ptdpg;
1135 	int i, req_class;
1136 
1137 	/*
1138 	 * allocate the page directory page
1139 	 */
1140 	req_class = VM_ALLOC_NORMAL;
1141 	while ((ptdpg = pmap_alloc_direct_page(NUSERPGTBLS, req_class)) ==
1142 	    NULL)
1143 		pmap_grow_direct_page(req_class);
1144 
1145 	ptdva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(ptdpg));
1146 	pmap->pm_segtab = (pd_entry_t *)ptdva;
1147 	CPU_ZERO(&pmap->pm_active);
1148 	for (i = 0; i < MAXCPU; i++) {
1149 		pmap->pm_asid[i].asid = PMAP_ASID_RESERVED;
1150 		pmap->pm_asid[i].gen = 0;
1151 	}
1152 	TAILQ_INIT(&pmap->pm_pvchunk);
1153 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1154 
1155 	return (1);
1156 }
1157 
1158 /*
1159  * this routine is called if the page table page is not
1160  * mapped correctly.
1161  */
1162 static vm_page_t
1163 _pmap_allocpte(pmap_t pmap, unsigned ptepindex, u_int flags)
1164 {
1165 	vm_offset_t pageva;
1166 	vm_page_t m;
1167 	int req_class;
1168 
1169 	/*
1170 	 * Find or fabricate a new pagetable page
1171 	 */
1172 	req_class = VM_ALLOC_NORMAL;
1173 	if ((m = pmap_alloc_direct_page(ptepindex, req_class)) == NULL) {
1174 		if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
1175 			PMAP_UNLOCK(pmap);
1176 			rw_wunlock(&pvh_global_lock);
1177 			pmap_grow_direct_page(req_class);
1178 			rw_wlock(&pvh_global_lock);
1179 			PMAP_LOCK(pmap);
1180 		}
1181 
1182 		/*
1183 		 * Indicate the need to retry.	While waiting, the page
1184 		 * table page may have been allocated.
1185 		 */
1186 		return (NULL);
1187 	}
1188 
1189 	/*
1190 	 * Map the pagetable page into the process address space, if it
1191 	 * isn't already there.
1192 	 */
1193 	pageva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));
1194 
1195 #ifdef __mips_n64
1196 	if (ptepindex >= NUPDE) {
1197 		pmap->pm_segtab[ptepindex - NUPDE] = (pd_entry_t)pageva;
1198 	} else {
1199 		pd_entry_t *pdep, *pde;
1200 		int segindex = ptepindex >> (SEGSHIFT - PDRSHIFT);
1201 		int pdeindex = ptepindex & (NPDEPG - 1);
1202 		vm_page_t pg;
1203 
1204 		pdep = &pmap->pm_segtab[segindex];
1205 		if (*pdep == NULL) {
1206 			/* recurse for allocating page dir */
1207 			if (_pmap_allocpte(pmap, NUPDE + segindex,
1208 			    flags) == NULL) {
1209 				/* alloc failed, release current */
1210 				vm_page_unwire_noq(m);
1211 				vm_page_free_zero(m);
1212 				return (NULL);
1213 			}
1214 		} else {
1215 			pg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pdep));
1216 			pg->ref_count++;
1217 		}
1218 		/* Next level entry */
1219 		pde = (pd_entry_t *)*pdep;
1220 		pde[pdeindex] = (pd_entry_t)pageva;
1221 	}
1222 #else
1223 	pmap->pm_segtab[ptepindex] = (pd_entry_t)pageva;
1224 #endif
1225 	pmap->pm_stats.resident_count++;
1226 	return (m);
1227 }
1228 
1229 static vm_page_t
1230 pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags)
1231 {
1232 	unsigned ptepindex;
1233 	pd_entry_t *pde;
1234 	vm_page_t m;
1235 
1236 	/*
1237 	 * Calculate pagetable page index
1238 	 */
1239 	ptepindex = pmap_pde_pindex(va);
1240 retry:
1241 	/*
1242 	 * Get the page directory entry
1243 	 */
1244 	pde = pmap_pde(pmap, va);
1245 
1246 	/*
1247 	 * If the page table page is mapped, we just increment the hold
1248 	 * count, and activate it.
1249 	 */
1250 	if (pde != NULL && *pde != NULL) {
1251 		m = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pde));
1252 		m->ref_count++;
1253 	} else {
1254 		/*
1255 		 * Here if the pte page isn't mapped, or if it has been
1256 		 * deallocated.
1257 		 */
1258 		m = _pmap_allocpte(pmap, ptepindex, flags);
1259 		if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0)
1260 			goto retry;
1261 	}
1262 	return (m);
1263 }
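/*
 * Usage sketch (hypothetical caller): with PMAP_ENTER_NOSLEEP the allocation
 * may fail and the caller must cope with a NULL return; without the flag,
 * pmap_allocpte() may drop and re-take the pmap and pv locks while waiting
 * for memory, then retry.
 */
#if 0
	vm_page_t mpte;

	mpte = pmap_allocpte(pmap, va, PMAP_ENTER_NOSLEEP);
	if (mpte == NULL)
		return (KERN_RESOURCE_SHORTAGE);
#endif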
1264 
1265 /***************************************************
1266  * Pmap allocation/deallocation routines.
1267  ***************************************************/
1268 
1269 /*
1270  * Release any resources held by the given physical map.
1271  * Called when a pmap initialized by pmap_pinit is being released.
1272  * Should only be called if the map contains no valid mappings.
1273  */
1274 void
1275 pmap_release(pmap_t pmap)
1276 {
1277 	vm_offset_t ptdva;
1278 	vm_page_t ptdpg;
1279 
1280 	KASSERT(pmap->pm_stats.resident_count == 0,
1281 	    ("pmap_release: pmap resident count %ld != 0",
1282 	    pmap->pm_stats.resident_count));
1283 
1284 	ptdva = (vm_offset_t)pmap->pm_segtab;
1285 	ptdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(ptdva));
1286 
1287 	vm_page_unwire_noq(ptdpg);
1288 	vm_page_free_zero(ptdpg);
1289 }
1290 
1291 /*
1292  * grow the number of kernel page table entries, if needed
1293  */
1294 void
1295 pmap_growkernel(vm_offset_t addr)
1296 {
1297 	vm_page_t nkpg;
1298 	pd_entry_t *pde, *pdpe;
1299 	pt_entry_t *pte;
1300 	int i, req_class;
1301 
1302 	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
1303 	req_class = VM_ALLOC_INTERRUPT;
1304 	addr = roundup2(addr, NBSEG);
1305 	if (addr - 1 >= vm_map_max(kernel_map))
1306 		addr = vm_map_max(kernel_map);
1307 	while (kernel_vm_end < addr) {
1308 		pdpe = pmap_segmap(kernel_pmap, kernel_vm_end);
1309 #ifdef __mips_n64
1310 		if (*pdpe == 0) {
1311 			/* new intermediate page table entry */
1312 			nkpg = pmap_alloc_direct_page(nkpt, req_class);
1313 			if (nkpg == NULL)
1314 				panic("pmap_growkernel: no memory to grow kernel");
1315 			*pdpe = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg));
1316 			continue; /* try again */
1317 		}
1318 #endif
1319 		pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end);
1320 		if (*pde != 0) {
1321 			kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
1322 			if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
1323 				kernel_vm_end = vm_map_max(kernel_map);
1324 				break;
1325 			}
1326 			continue;
1327 		}
1328 
1329 		/*
1330 		 * This index is bogus, but out of the way
1331 		 */
1332 		nkpg = pmap_alloc_direct_page(nkpt, req_class);
1333 #ifndef __mips_n64
1334 		if (nkpg == NULL && vm_page_reclaim_contig(req_class, 1,
1335 		    0, MIPS_KSEG0_LARGEST_PHYS, PAGE_SIZE, 0))
1336 			nkpg = pmap_alloc_direct_page(nkpt, req_class);
1337 #endif
1338 		if (nkpg == NULL)
1339 			panic("pmap_growkernel: no memory to grow kernel");
1340 		nkpt++;
1341 		*pde = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg));
1342 
1343 		/*
1344 		 * The R[4-7]?00 stores only one copy of the Global bit in
1345 		 * the translation lookaside buffer for each 2 page entry.
1346 		 * Thus invalid entries must have the Global bit set so when
1347 		 * Entry LO and Entry HI G bits are anded together they will
1348 		 * produce a global bit to store in the tlb.
1349 		 */
1350 		pte = (pt_entry_t *)*pde;
1351 		for (i = 0; i < NPTEPG; i++)
1352 			pte[i] = PTE_G;
1353 
1354 		kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
1355 		if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
1356 			kernel_vm_end = vm_map_max(kernel_map);
1357 			break;
1358 		}
1359 	}
1360 }
1361 
1362 /***************************************************
1363  * page management routines.
1364  ***************************************************/
1365 
1366 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
1367 #ifdef __mips_n64
1368 CTASSERT(_NPCM == 3);
1369 CTASSERT(_NPCPV == 168);
1370 #else
1371 CTASSERT(_NPCM == 11);
1372 CTASSERT(_NPCPV == 336);
1373 #endif
1374 
1375 static __inline struct pv_chunk *
1376 pv_to_chunk(pv_entry_t pv)
1377 {
1378 
1379 	return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
1380 }
1381 
1382 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
1383 
1384 #ifdef __mips_n64
1385 #define	PC_FREE0_1	0xfffffffffffffffful
1386 #define	PC_FREE2	0x000000fffffffffful
1387 #else
1388 #define	PC_FREE0_9	0xfffffffful	/* Free values for index 0 through 9 */
1389 #define	PC_FREE10	0x0000fffful	/* Free values for index 10 */
1390 #endif
1391 
1392 static const u_long pc_freemask[_NPCM] = {
1393 #ifdef __mips_n64
1394 	PC_FREE0_1, PC_FREE0_1, PC_FREE2
1395 #else
1396 	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
1397 	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
1398 	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
1399 	PC_FREE0_9, PC_FREE10
1400 #endif
1401 };
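/*
 * Informational note (derived from the constants above): a pv_chunk packs a
 * header plus _NPCPV pv_entry slots into a single page, and the free bitmaps
 * account for every slot: on n64, 64 + 64 + 40 bits = 168 = _NPCPV; on o32,
 * 10 * 32 + 16 bits = 336 = _NPCPV.
 */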
1402 
1403 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
1404     "VM/pmap parameters");
1405 
1406 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
1407     "Current number of pv entries");
1408 
1409 #ifdef PV_STATS
1410 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
1411 
1412 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
1413     "Current number of pv entry chunks");
1414 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
1415     "Current number of pv entry chunks allocated");
1416 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
1417     "Current number of pv entry chunks frees");
1418 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
1419     "Number of times tried to get a chunk page but failed.");
1420 
1421 static long pv_entry_frees, pv_entry_allocs;
1422 static int pv_entry_spare;
1423 
1424 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
1425     "Current number of pv entry frees");
1426 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
1427     "Current number of pv entry allocs");
1428 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
1429     "Current number of spare pv entries");
1430 #endif
1431 
1432 /*
1433  * We are in a serious low memory condition.  Resort to
1434  * drastic measures to free some pages so we can allocate
1435  * another pv entry chunk.
1436  */
1437 static vm_page_t
1438 pmap_pv_reclaim(pmap_t locked_pmap)
1439 {
1440 	struct pch newtail;
1441 	struct pv_chunk *pc;
1442 	pd_entry_t *pde;
1443 	pmap_t pmap;
1444 	pt_entry_t *pte, oldpte;
1445 	pv_entry_t pv;
1446 	vm_offset_t va;
1447 	vm_page_t m, m_pc;
1448 	u_long inuse;
1449 	int bit, field, freed, idx;
1450 
1451 	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
1452 	pmap = NULL;
1453 	m_pc = NULL;
1454 	TAILQ_INIT(&newtail);
1455 	while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL) {
1456 		TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1457 		if (pmap != pc->pc_pmap) {
1458 			if (pmap != NULL) {
1459 				pmap_invalidate_all(pmap);
1460 				if (pmap != locked_pmap)
1461 					PMAP_UNLOCK(pmap);
1462 			}
1463 			pmap = pc->pc_pmap;
1464 			/* Avoid deadlock and lock recursion. */
1465 			if (pmap > locked_pmap)
1466 				PMAP_LOCK(pmap);
1467 			else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) {
1468 				pmap = NULL;
1469 				TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
1470 				continue;
1471 			}
1472 		}
1473 
1474 		/*
1475 		 * Destroy every non-wired, 4 KB page mapping in the chunk.
1476 		 */
1477 		freed = 0;
1478 		for (field = 0; field < _NPCM; field++) {
1479 			for (inuse = ~pc->pc_map[field] & pc_freemask[field];
1480 			    inuse != 0; inuse &= ~(1UL << bit)) {
1481 				bit = ffsl(inuse) - 1;
1482 				idx = field * sizeof(inuse) * NBBY + bit;
1483 				pv = &pc->pc_pventry[idx];
1484 				va = pv->pv_va;
1485 				pde = pmap_pde(pmap, va);
1486 				KASSERT(pde != NULL && *pde != 0,
1487 				    ("pmap_pv_reclaim: pde"));
1488 				pte = pmap_pde_to_pte(pde, va);
1489 				oldpte = *pte;
1490 				if (pte_test(&oldpte, PTE_W))
1491 					continue;
1492 				if (is_kernel_pmap(pmap))
1493 					*pte = PTE_G;
1494 				else
1495 					*pte = 0;
1496 				m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(oldpte));
1497 				if (pte_test(&oldpte, PTE_D))
1498 					vm_page_dirty(m);
1499 				if (m->md.pv_flags & PV_TABLE_REF)
1500 					vm_page_aflag_set(m, PGA_REFERENCED);
1501 				m->md.pv_flags &= ~PV_TABLE_REF;
1502 				TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1503 				if (TAILQ_EMPTY(&m->md.pv_list))
1504 					vm_page_aflag_clear(m, PGA_WRITEABLE);
1505 				pc->pc_map[field] |= 1UL << bit;
1506 
1507 				/*
1508 				 * For simplicity, we will unconditionally shoot
1509 				 * down TLBs either at the end of this function
1510 				 * or at the top of the loop above if we switch
1511 				 * to a different pmap.
1512 				 */
1513 				(void)pmap_unuse_pt(pmap, va, *pde);
1514 
1515 				freed++;
1516 			}
1517 		}
1518 		if (freed == 0) {
1519 			TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
1520 			continue;
1521 		}
1522 		/* Every freed mapping is for a 4 KB page. */
1523 		pmap->pm_stats.resident_count -= freed;
1524 		PV_STAT(pv_entry_frees += freed);
1525 		PV_STAT(pv_entry_spare += freed);
1526 		pv_entry_count -= freed;
1527 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1528 		for (field = 0; field < _NPCM; field++)
1529 			if (pc->pc_map[field] != pc_freemask[field]) {
1530 				TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
1531 				    pc_list);
1532 				TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
1533 
1534 				/*
1535 				 * One freed pv entry in locked_pmap is
1536 				 * sufficient.
1537 				 */
1538 				if (pmap == locked_pmap)
1539 					goto out;
1540 				break;
1541 			}
1542 		if (field == _NPCM) {
1543 			PV_STAT(pv_entry_spare -= _NPCPV);
1544 			PV_STAT(pc_chunk_count--);
1545 			PV_STAT(pc_chunk_frees++);
1546 			/* Entire chunk is free; return it. */
1547 			m_pc = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(
1548 			    (vm_offset_t)pc));
1549 			dump_drop_page(m_pc->phys_addr);
1550 			break;
1551 		}
1552 	}
1553 out:
1554 	TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru);
1555 	if (pmap != NULL) {
1556 		pmap_invalidate_all(pmap);
1557 		if (pmap != locked_pmap)
1558 			PMAP_UNLOCK(pmap);
1559 	}
1560 	return (m_pc);
1561 }
1562 
1563 /*
1564  * free the pv_entry back to the free list
1565  */
1566 static void
1567 free_pv_entry(pmap_t pmap, pv_entry_t pv)
1568 {
1569 	struct pv_chunk *pc;
1570 	int bit, field, idx;
1571 
1572 	rw_assert(&pvh_global_lock, RA_WLOCKED);
1573 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1574 	PV_STAT(pv_entry_frees++);
1575 	PV_STAT(pv_entry_spare++);
1576 	pv_entry_count--;
1577 	pc = pv_to_chunk(pv);
1578 	idx = pv - &pc->pc_pventry[0];
1579 	field = idx / (sizeof(u_long) * NBBY);
1580 	bit = idx % (sizeof(u_long) * NBBY);
1581 	pc->pc_map[field] |= 1ul << bit;
1582 	for (idx = 0; idx < _NPCM; idx++)
1583 		if (pc->pc_map[idx] != pc_freemask[idx]) {
1584 			/*
1585 			 * 98% of the time, pc is already at the head of the
1586 			 * list.  If it isn't already, move it to the head.
1587 			 */
1588 			if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) !=
1589 			    pc)) {
1590 				TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1591 				TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
1592 				    pc_list);
1593 			}
1594 			return;
1595 		}
1596 	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1597 	free_pv_chunk(pc);
1598 }
1599 
1600 static void
1601 free_pv_chunk(struct pv_chunk *pc)
1602 {
1603 	vm_page_t m;
1604 
1605  	TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1606 	PV_STAT(pv_entry_spare -= _NPCPV);
1607 	PV_STAT(pc_chunk_count--);
1608 	PV_STAT(pc_chunk_frees++);
1609 	/* entire chunk is free, return it */
1610 	m = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS((vm_offset_t)pc));
1611 	dump_drop_page(m->phys_addr);
1612 	vm_page_unwire_noq(m);
1613 	vm_page_free(m);
1614 }
1615 
1616 /*
1617  * get a new pv_entry, allocating a block from the system
1618  * when needed.
1619  */
1620 static pv_entry_t
1621 get_pv_entry(pmap_t pmap, boolean_t try)
1622 {
1623 	struct pv_chunk *pc;
1624 	pv_entry_t pv;
1625 	vm_page_t m;
1626 	int bit, field, idx;
1627 
1628 	rw_assert(&pvh_global_lock, RA_WLOCKED);
1629 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1630 	PV_STAT(pv_entry_allocs++);
1631 	pv_entry_count++;
1632 retry:
1633 	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
1634 	if (pc != NULL) {
1635 		for (field = 0; field < _NPCM; field++) {
1636 			if (pc->pc_map[field]) {
1637 				bit = ffsl(pc->pc_map[field]) - 1;
1638 				break;
1639 			}
1640 		}
1641 		if (field < _NPCM) {
1642 			idx = field * sizeof(pc->pc_map[field]) * NBBY + bit;
1643 			pv = &pc->pc_pventry[idx];
1644 			pc->pc_map[field] &= ~(1ul << bit);
1645 			/* If this was the last item, move it to tail */
1646 			for (field = 0; field < _NPCM; field++)
1647 				if (pc->pc_map[field] != 0) {
1648 					PV_STAT(pv_entry_spare--);
1649 					return (pv);	/* not full, return */
1650 				}
1651 			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1652 			TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
1653 			PV_STAT(pv_entry_spare--);
1654 			return (pv);
1655 		}
1656 	}
1657 	/* No free items, allocate another chunk */
1658 	m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, VM_ALLOC_NORMAL |
1659 	    VM_ALLOC_WIRED);
1660 	if (m == NULL) {
1661 		if (try) {
1662 			pv_entry_count--;
1663 			PV_STAT(pc_chunk_tryfail++);
1664 			return (NULL);
1665 		}
1666 		m = pmap_pv_reclaim(pmap);
1667 		if (m == NULL)
1668 			goto retry;
1669 	}
1670 	PV_STAT(pc_chunk_count++);
1671 	PV_STAT(pc_chunk_allocs++);
1672 	dump_add_page(m->phys_addr);
1673 	pc = (struct pv_chunk *)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));
1674 	pc->pc_pmap = pmap;
1675 	pc->pc_map[0] = pc_freemask[0] & ~1ul;	/* preallocated bit 0 */
1676 	for (field = 1; field < _NPCM; field++)
1677 		pc->pc_map[field] = pc_freemask[field];
1678 	TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
1679 	pv = &pc->pc_pventry[0];
1680 	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1681 	PV_STAT(pv_entry_spare += _NPCPV - 1);
1682 	return (pv);
1683 }
1684 
1685 static pv_entry_t
1686 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1687 {
1688 	pv_entry_t pv;
1689 
1690 	rw_assert(&pvh_global_lock, RA_WLOCKED);
1691 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
1692 		if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
1693 			TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
1694 			break;
1695 		}
1696 	}
1697 	return (pv);
1698 }
1699 
1700 static void
1701 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1702 {
1703 	pv_entry_t pv;
1704 
1705 	pv = pmap_pvh_remove(pvh, pmap, va);
1706 	KASSERT(pv != NULL, ("pmap_pvh_free: pv not found, pa %lx va %lx",
1707 	     (u_long)VM_PAGE_TO_PHYS(__containerof(pvh, struct vm_page, md)),
1708 	     (u_long)va));
1709 	free_pv_entry(pmap, pv);
1710 }
1711 
1712 static void
1713 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
1714 {
1715 
1716 	rw_assert(&pvh_global_lock, RA_WLOCKED);
1717 	pmap_pvh_free(&m->md, pmap, va);
1718 	if (TAILQ_EMPTY(&m->md.pv_list))
1719 		vm_page_aflag_clear(m, PGA_WRITEABLE);
1720 }
1721 
1722 /*
1723  * Conditionally create a pv entry.
1724  */
1725 static boolean_t
1726 pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte, vm_offset_t va,
1727     vm_page_t m)
1728 {
1729 	pv_entry_t pv;
1730 
1731 	rw_assert(&pvh_global_lock, RA_WLOCKED);
1732 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1733 	if ((pv = get_pv_entry(pmap, TRUE)) != NULL) {
1734 		pv->pv_va = va;
1735 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
1736 		return (TRUE);
1737 	} else
1738 		return (FALSE);
1739 }
1740 
1741 /*
1742  * pmap_remove_pte: do the things to unmap a page in a process
1743  *
1744  * Returns true if this was the last PTE in the PT (and possibly the last PT in
1745  * the PD, and possibly the last PD in the segmap), in which case...
1746  *
1747  *   1) the TLB has been invalidated for the whole PT's span (at least),
1748  *   already, to ensure that MipsDoTLBMiss does not attempt to follow a
1749  *   dangling pointer into a freed page.  No additional TLB shootdown is
1750  *   required.
1751  *
1752  *   2) if this removal was part of a sweep to remove PTEs, it is safe to jump
1753  *   to the PT span boundary and continue.
1754  *
1755  *   3) The given pde may now point onto a freed page and must not be
1756  *   dereferenced
1757  *
1758  * If the return value is false, the TLB has not been shot down (and the segmap
1759  * entry, PD, and PT all remain in place).
1760  */
1761 static int
1762 pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va,
1763     pd_entry_t pde)
1764 {
1765 	pt_entry_t oldpte;
1766 	vm_page_t m;
1767 	vm_paddr_t pa;
1768 
1769 	rw_assert(&pvh_global_lock, RA_WLOCKED);
1770 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1771 
1772 	/*
1773 	 * Write back all cache lines from the page being unmapped.
1774 	 */
1775 	mips_dcache_wbinv_range_index(va, PAGE_SIZE);
1776 
1777 	oldpte = *ptq;
1778 	if (is_kernel_pmap(pmap))
1779 		*ptq = PTE_G;
1780 	else
1781 		*ptq = 0;
1782 
1783 	if (pte_test(&oldpte, PTE_W))
1784 		pmap->pm_stats.wired_count -= 1;
1785 
1786 	pmap->pm_stats.resident_count -= 1;
1787 
1788 	if (pte_test(&oldpte, PTE_MANAGED)) {
1789 		pa = TLBLO_PTE_TO_PA(oldpte);
1790 		m = PHYS_TO_VM_PAGE(pa);
1791 		if (pte_test(&oldpte, PTE_D)) {
1792 			KASSERT(!pte_test(&oldpte, PTE_RO),
1793 			    ("%s: modified page not writable: va: %p, pte: %#jx",
1794 			    __func__, (void *)va, (uintmax_t)oldpte));
1795 			vm_page_dirty(m);
1796 		}
1797 		if (m->md.pv_flags & PV_TABLE_REF)
1798 			vm_page_aflag_set(m, PGA_REFERENCED);
1799 		m->md.pv_flags &= ~PV_TABLE_REF;
1800 
1801 		pmap_remove_entry(pmap, m, va);
1802 	}
1803 	return (pmap_unuse_pt(pmap, va, pde));
1804 }
1805 
1806 /*
1807  * Remove a single page from a process address space
1808  */
1809 static void
1810 pmap_remove_page(struct pmap *pmap, vm_offset_t va)
1811 {
1812 	pd_entry_t *pde;
1813 	pt_entry_t *ptq;
1814 
1815 	rw_assert(&pvh_global_lock, RA_WLOCKED);
1816 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1817 	pde = pmap_pde(pmap, va);
1818 	if (pde == NULL || *pde == 0)
1819 		return;
1820 	ptq = pmap_pde_to_pte(pde, va);
1821 
1822 	/*
1823 	 * If there is no pte for this address, just skip it!
1824 	 */
1825 	if (!pte_test(ptq, PTE_V))
1826 		return;
1827 
1828 	/*
1829 	 * Remove this PTE from the PT.  If this is the last one, then
1830 	 * the TLB has already been shot down, so don't bother again
1831 	 */
1832 	if (!pmap_remove_pte(pmap, ptq, va, *pde))
1833 		pmap_invalidate_page(pmap, va);
1834 }
1835 
1836 /*
1837  *	Remove the given range of addresses from the specified map.
1838  *
1839  *	It is assumed that the start and end are properly
1840  *	rounded to the page size.
1841  */
1842 void
1843 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1844 {
1845 	pd_entry_t *pde, *pdpe;
1846 	pt_entry_t *pte;
1847 	vm_offset_t va_next;
1848 	vm_offset_t va_init, va_fini;
1849 	bool need_tlb_shootdown;
1850 
1851 	/*
1852 	 * Perform an unsynchronized read.  This is, however, safe.
1853 	 */
1854 	if (pmap->pm_stats.resident_count == 0)
1855 		return;
1856 
1857 	rw_wlock(&pvh_global_lock);
1858 	PMAP_LOCK(pmap);
1859 
1860 	/*
1861 	 * Special handling for removing a single page: it is a very common
1862 	 * operation and lets us short-circuit some code.
1863 	 */
1864 	if ((sva + PAGE_SIZE) == eva) {
1865 		pmap_remove_page(pmap, sva);
1866 		goto out;
1867 	}
1868 	for (; sva < eva; sva = va_next) {
1869 		pdpe = pmap_segmap(pmap, sva);
1870 #ifdef __mips_n64
1871 		if (*pdpe == 0) {
1872 			va_next = (sva + NBSEG) & ~SEGMASK;
1873 			if (va_next < sva)
1874 				va_next = eva;
1875 			continue;
1876 		}
1877 #endif
1878 
1879 		/* Scan up to the end of the page table pointed to by pde */
1880 		va_next = (sva + NBPDR) & ~PDRMASK;
1881 		if (va_next < sva)
1882 			va_next = eva;
1883 
1884 		pde = pmap_pdpe_to_pde(pdpe, sva);
1885 		if (*pde == NULL)
1886 			continue;
1887 
1888 		/*
1889 		 * Limit our scan to either the end of the va represented
1890 		 * by the current page table page, or to the end of the
1891 		 * range being removed.
1892 		 */
1893 		if (va_next > eva)
1894 			va_next = eva;
1895 
1896 		need_tlb_shootdown = false;
1897 		va_init = sva;
1898 		va_fini = va_next;
1899 		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
1900 		    sva += PAGE_SIZE) {
1901 			/* Skip over invalid entries; no need to shootdown */
1902 			if (!pte_test(pte, PTE_V)) {
1903 				/*
1904 				 * If we have not yet found a valid entry, then
1905 				 * we can move the lower edge of the region to
1906 				 * invalidate to the next PTE.
1907 				 */
1908 				if (!need_tlb_shootdown)
1909 					va_init = sva + PAGE_SIZE;
1910 				continue;
1911 			}
1912 
1913 			/*
1914 			 * A valid entry; the range we are shooting down must
1915 			 * include this page.  va_fini is used instead of sva
1916 			 * so that if the range ends with a run of !PTE_V PTEs,
1917 			 * but doesn't clear out so much that pmap_remove_pte
1918 			 * removes the entire PT, we won't include these !PTE_V
1919 			 * entries in the region to be shot down.
1920 			 */
1921 			va_fini = sva + PAGE_SIZE;
1922 
1923 			if (pmap_remove_pte(pmap, pte, sva, *pde)) {
1924 				/* Entire PT removed and TLBs shot down. */
1925 				need_tlb_shootdown = false;
1926 				break;
1927 			} else {
1928 				need_tlb_shootdown = true;
1929 			}
1930 		}
1931 		if (need_tlb_shootdown)
1932 			pmap_invalidate_range(pmap, va_init, va_fini);
1933 	}
1934 out:
1935 	rw_wunlock(&pvh_global_lock);
1936 	PMAP_UNLOCK(pmap);
1937 }
1938 
1939 /*
1940  *	Routine:	pmap_remove_all
1941  *	Function:
1942  *		Removes this physical page from
1943  *		all physical maps in which it resides.
1944  *		Reflects back modify bits to the pager.
1945  *
1946  *	Notes:
1947  *		Original versions of this routine were very
1948  *		inefficient because they iteratively called
1949  *		pmap_remove (slow...)
1950  */
1951 
1952 void
1953 pmap_remove_all(vm_page_t m)
1954 {
1955 	pv_entry_t pv;
1956 	pmap_t pmap;
1957 	pd_entry_t *pde;
1958 	pt_entry_t *pte, tpte;
1959 
1960 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1961 	    ("pmap_remove_all: page %p is not managed", m));
1962 	rw_wlock(&pvh_global_lock);
1963 
1964 	if (m->md.pv_flags & PV_TABLE_REF)
1965 		vm_page_aflag_set(m, PGA_REFERENCED);
1966 
1967 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
1968 		pmap = PV_PMAP(pv);
1969 		PMAP_LOCK(pmap);
1970 
1971 		/*
1972 		 * If this is the last mapping, write back all cache lines
1973 		 * for the page being destroyed.
1974 		 */
1975 		if (TAILQ_NEXT(pv, pv_list) == NULL)
1976 			mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
1977 
1978 		pmap->pm_stats.resident_count--;
1979 
1980 		pde = pmap_pde(pmap, pv->pv_va);
1981 		KASSERT(pde != NULL && *pde != 0, ("pmap_remove_all: pde"));
1982 		pte = pmap_pde_to_pte(pde, pv->pv_va);
1983 
1984 		tpte = *pte;
1985 		if (is_kernel_pmap(pmap))
1986 			*pte = PTE_G;
1987 		else
1988 			*pte = 0;
1989 
1990 		if (pte_test(&tpte, PTE_W))
1991 			pmap->pm_stats.wired_count--;
1992 
1993 		/*
1994 		 * Update the vm_page_t clean and reference bits.
1995 		 */
1996 		if (pte_test(&tpte, PTE_D)) {
1997 			KASSERT(!pte_test(&tpte, PTE_RO),
1998 			    ("%s: modified page not writable: va: %p, pte: %#jx",
1999 			    __func__, (void *)pv->pv_va, (uintmax_t)tpte));
2000 			vm_page_dirty(m);
2001 		}
2002 
2003 		if (!pmap_unuse_pt(pmap, pv->pv_va, *pde))
2004 			pmap_invalidate_page(pmap, pv->pv_va);
2005 
2006 		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2007 		free_pv_entry(pmap, pv);
2008 		PMAP_UNLOCK(pmap);
2009 	}
2010 
2011 	vm_page_aflag_clear(m, PGA_WRITEABLE);
2012 	m->md.pv_flags &= ~PV_TABLE_REF;
2013 	rw_wunlock(&pvh_global_lock);
2014 }
2015 
2016 /*
2017  *	Set the physical protection on the
2018  *	specified range of this map as requested.
2019  */
2020 void
2021 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
2022 {
2023 	pt_entry_t pbits, *pte;
2024 	pd_entry_t *pde, *pdpe;
2025 	vm_offset_t va, va_next;
2026 	vm_paddr_t pa;
2027 	vm_page_t m;
2028 
2029 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2030 		pmap_remove(pmap, sva, eva);
2031 		return;
2032 	}
2033 	if (prot & VM_PROT_WRITE)
2034 		return;
2035 
2036 	PMAP_LOCK(pmap);
2037 	for (; sva < eva; sva = va_next) {
2038 		pdpe = pmap_segmap(pmap, sva);
2039 #ifdef __mips_n64
2040 		if (*pdpe == 0) {
2041 			va_next = (sva + NBSEG) & ~SEGMASK;
2042 			if (va_next < sva)
2043 				va_next = eva;
2044 			continue;
2045 		}
2046 #endif
2047 		va_next = (sva + NBPDR) & ~PDRMASK;
2048 		if (va_next < sva)
2049 			va_next = eva;
2050 
2051 		pde = pmap_pdpe_to_pde(pdpe, sva);
2052 		if (*pde == NULL)
2053 			continue;
2054 
2055 		/*
2056 		 * Limit our scan to either the end of the va represented
2057 		 * by the current page table page, or to the end of the
2058 		 * range being write protected.
2059 		 */
2060 		if (va_next > eva)
2061 			va_next = eva;
2062 
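		/*
		 * "va" tracks the start of a pending run of modified
		 * mappings that still needs a TLB invalidation;
		 * va == va_next means no run is pending.
		 */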
2063 		va = va_next;
2064 		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
2065 		    sva += PAGE_SIZE) {
2066 			pbits = *pte;
2067 			if (!pte_test(&pbits, PTE_V) || pte_test(&pbits,
2068 			    PTE_RO)) {
2069 				if (va != va_next) {
2070 					pmap_invalidate_range(pmap, va, sva);
2071 					va = va_next;
2072 				}
2073 				continue;
2074 			}
2075 			pte_set(&pbits, PTE_RO);
2076 			if (pte_test(&pbits, PTE_D)) {
2077 				pte_clear(&pbits, PTE_D);
2078 				if (pte_test(&pbits, PTE_MANAGED)) {
2079 					pa = TLBLO_PTE_TO_PA(pbits);
2080 					m = PHYS_TO_VM_PAGE(pa);
2081 					vm_page_dirty(m);
2082 				}
2083 				if (va == va_next)
2084 					va = sva;
2085 			} else {
2086 				/*
2087 				 * Unless PTE_D is set, any TLB entries
2088 				 * mapping "sva" don't allow write access, so
2089 				 * they needn't be invalidated.
2090 				 */
2091 				if (va != va_next) {
2092 					pmap_invalidate_range(pmap, va, sva);
2093 					va = va_next;
2094 				}
2095 			}
2096 			*pte = pbits;
2097 		}
2098 		if (va != va_next)
2099 			pmap_invalidate_range(pmap, va, sva);
2100 	}
2101 	PMAP_UNLOCK(pmap);
2102 }
2103 
2104 /*
2105  *	Insert the given physical page (p) at
2106  *	the specified virtual address (v) in the
2107  *	target physical map with the protection requested.
2108  *
2109  *	If specified, the page will be wired down, meaning
2110  *	that the related pte can not be reclaimed.
2111  *
2112  *	NB:  This is the only routine which MAY NOT lazy-evaluate
2113  *	or lose information.  That is, this routine must actually
2114  *	insert this page into the given map NOW.
2115  */
2116 int
2117 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
2118     u_int flags, int8_t psind __unused)
2119 {
2120 	vm_paddr_t pa, opa;
2121 	pt_entry_t *pte;
2122 	pt_entry_t origpte, newpte;
2123 	pv_entry_t pv;
2124 	vm_page_t mpte, om;
2125 
2126 	va &= ~PAGE_MASK;
2127 	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
2128 	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
2129 	    va >= kmi.clean_eva,
2130 	    ("pmap_enter: managed mapping within the clean submap"));
2131 	if ((m->oflags & VPO_UNMANAGED) == 0)
2132 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
2133 	pa = VM_PAGE_TO_PHYS(m);
2134 	newpte = TLBLO_PA_TO_PFN(pa) | init_pte_prot(m, flags, prot);
2135 	if ((flags & PMAP_ENTER_WIRED) != 0)
2136 		newpte |= PTE_W;
2137 	if (is_kernel_pmap(pmap))
2138 		newpte |= PTE_G;
2139 	PMAP_PTE_SET_CACHE_BITS(newpte, pa, m);
2140 	if ((m->oflags & VPO_UNMANAGED) == 0)
2141 		newpte |= PTE_MANAGED;
2142 
2143 	mpte = NULL;
2144 
2145 	rw_wlock(&pvh_global_lock);
2146 	PMAP_LOCK(pmap);
2147 
2148 	/*
2149 	 * In the case that a page table page is not resident, we are
2150 	 * creating it here.
2151 	 */
2152 	if (va < VM_MAXUSER_ADDRESS) {
2153 		mpte = pmap_allocpte(pmap, va, flags);
2154 		if (mpte == NULL) {
2155 			KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0,
2156 			    ("pmap_allocpte failed with sleep allowed"));
2157 			rw_wunlock(&pvh_global_lock);
2158 			PMAP_UNLOCK(pmap);
2159 			return (KERN_RESOURCE_SHORTAGE);
2160 		}
2161 	}
2162 	pte = pmap_pte(pmap, va);
2163 
2164 	/*
2165 	 * Page Directory table entry not valid, we need a new PT page
2166 	 */
2167 	if (pte == NULL) {
2168 		panic("pmap_enter: invalid page directory, pdir=%p, va=%p",
2169 		    (void *)pmap->pm_segtab, (void *)va);
2170 	}
2171 
2172 	origpte = *pte;
2173 	KASSERT(!pte_test(&origpte, PTE_D | PTE_RO | PTE_V),
2174 	    ("pmap_enter: modified page not writable: va: %p, pte: %#jx",
2175 	    (void *)va, (uintmax_t)origpte));
2176 	opa = TLBLO_PTE_TO_PA(origpte);
2177 
2178 	/*
2179 	 * Mapping has not changed, must be protection or wiring change.
2180 	 */
2181 	if (pte_test(&origpte, PTE_V) && opa == pa) {
2182 		/*
2183 		 * Wiring change, just update stats. We don't worry about
2184 		 * wiring PT pages as they remain resident as long as there
2185 		 * are valid mappings in them. Hence, if a user page is
2186 		 * wired, the PT page will be also.
2187 		 */
2188 		if (pte_test(&newpte, PTE_W) && !pte_test(&origpte, PTE_W))
2189 			pmap->pm_stats.wired_count++;
2190 		else if (!pte_test(&newpte, PTE_W) && pte_test(&origpte,
2191 		    PTE_W))
2192 			pmap->pm_stats.wired_count--;
2193 
2194 		/*
2195 		 * Remove extra pte reference
2196 		 */
2197 		if (mpte)
2198 			mpte->ref_count--;
2199 
2200 		if (pte_test(&origpte, PTE_MANAGED)) {
2201 			m->md.pv_flags |= PV_TABLE_REF;
2202 			if (!pte_test(&newpte, PTE_RO))
2203 				vm_page_aflag_set(m, PGA_WRITEABLE);
2204 		}
2205 		goto validate;
2206 	}
2207 
2208 	pv = NULL;
2209 
2210 	/*
2211 	 * Mapping has changed, invalidate old range and fall through to
2212 	 * handle validating new mapping.
2213 	 */
2214 	if (opa) {
2215 		if (is_kernel_pmap(pmap))
2216 			*pte = PTE_G;
2217 		else
2218 			*pte = 0;
2219 		if (pte_test(&origpte, PTE_W))
2220 			pmap->pm_stats.wired_count--;
2221 		if (pte_test(&origpte, PTE_MANAGED)) {
2222 			om = PHYS_TO_VM_PAGE(opa);
2223 			if (pte_test(&origpte, PTE_D))
2224 				vm_page_dirty(om);
2225 			if ((om->md.pv_flags & PV_TABLE_REF) != 0) {
2226 				om->md.pv_flags &= ~PV_TABLE_REF;
2227 				vm_page_aflag_set(om, PGA_REFERENCED);
2228 			}
2229 			pv = pmap_pvh_remove(&om->md, pmap, va);
2230 			if (!pte_test(&newpte, PTE_MANAGED))
2231 				free_pv_entry(pmap, pv);
2232 			if ((om->a.flags & PGA_WRITEABLE) != 0 &&
2233 			    TAILQ_EMPTY(&om->md.pv_list))
2234 				vm_page_aflag_clear(om, PGA_WRITEABLE);
2235 		}
2236 		pmap_invalidate_page(pmap, va);
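		/*
		 * Forget the old PTE contents so the comparison below treats
		 * this as a brand-new mapping.
		 */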
2237 		origpte = 0;
2238 		if (mpte != NULL) {
2239 			mpte->ref_count--;
2240 			KASSERT(mpte->ref_count > 0,
2241 			    ("pmap_enter: missing reference to page table page,"
2242 			    " va: %p", (void *)va));
2243 		}
2244 	} else
2245 		pmap->pm_stats.resident_count++;
2246 
2247 	/*
2248 	 * Enter on the PV list if part of our managed memory.
2249 	 */
2250 	if (pte_test(&newpte, PTE_MANAGED)) {
2251 		m->md.pv_flags |= PV_TABLE_REF;
2252 		if (pv == NULL) {
2253 			pv = get_pv_entry(pmap, FALSE);
2254 			pv->pv_va = va;
2255 		}
2256 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
2257 		if (!pte_test(&newpte, PTE_RO))
2258 			vm_page_aflag_set(m, PGA_WRITEABLE);
2259 	}
2260 
2261 	/*
2262 	 * Increment counters
2263 	 */
2264 	if (pte_test(&newpte, PTE_W))
2265 		pmap->pm_stats.wired_count++;
2266 
2267 validate:
2268 
2269 #ifdef PMAP_DEBUG
2270 	printf("pmap_enter:  va: %p -> pa: %p\n", (void *)va, (void *)pa);
2271 #endif
2272 
2273 	/*
2274 	 * if the mapping or permission bits are different, we need to
2275 	 * update the pte.
2276 	 */
2277 	if (origpte != newpte) {
2278 		*pte = newpte;
2279 		if (pte_test(&origpte, PTE_V)) {
2280 			KASSERT(opa == pa, ("pmap_enter: invalid update"));
2281 			if (pte_test(&origpte, PTE_D)) {
2282 				if (pte_test(&origpte, PTE_MANAGED))
2283 					vm_page_dirty(m);
2284 			}
2285 			pmap_update_page(pmap, va, newpte);
2286 		}
2287 	}
2288 
2289 	/*
2290 	 * Sync I & D caches for executable pages.  Do this only if the
2291 	 * target pmap belongs to the current process.  Otherwise, an
2292 	 * unresolvable TLB miss may occur.
2293 	 */
2294 	if (!is_kernel_pmap(pmap) && (pmap == &curproc->p_vmspace->vm_pmap) &&
2295 	    (prot & VM_PROT_EXECUTE)) {
2296 		mips_icache_sync_range(va, PAGE_SIZE);
2297 		mips_dcache_wbinv_range(va, PAGE_SIZE);
2298 	}
2299 	rw_wunlock(&pvh_global_lock);
2300 	PMAP_UNLOCK(pmap);
2301 	return (KERN_SUCCESS);
2302 }
2303 
2304 /*
2305  * This code makes some *MAJOR* assumptions:
2306  * 1. The current pmap and the given pmap exist.
2307  * 2. Not wired.
2308  * 3. Read access.
2309  * 4. No page table pages.
2310  * but it is *MUCH* faster than pmap_enter...
2311  */
2312 
2313 void
2314 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
2315 {
2316 
2317 	rw_wlock(&pvh_global_lock);
2318 	PMAP_LOCK(pmap);
2319 	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
2320 	rw_wunlock(&pvh_global_lock);
2321 	PMAP_UNLOCK(pmap);
2322 }
2323 
2324 static vm_page_t
2325 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
2326     vm_prot_t prot, vm_page_t mpte)
2327 {
2328 	pt_entry_t *pte, npte;
2329 	vm_paddr_t pa;
2330 
2331 	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
2332 	    (m->oflags & VPO_UNMANAGED) != 0,
2333 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
2334 	rw_assert(&pvh_global_lock, RA_WLOCKED);
2335 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2336 
2337 	/*
2338 	 * In the case that a page table page is not resident, we are
2339 	 * creating it here.
2340 	 */
2341 	if (va < VM_MAXUSER_ADDRESS) {
2342 		pd_entry_t *pde;
2343 		unsigned ptepindex;
2344 
2345 		/*
2346 		 * Calculate pagetable page index
2347 		 */
2348 		ptepindex = pmap_pde_pindex(va);
2349 		if (mpte && (mpte->pindex == ptepindex)) {
2350 			mpte->ref_count++;
2351 		} else {
2352 			/*
2353 			 * Get the page directory entry
2354 			 */
2355 			pde = pmap_pde(pmap, va);
2356 
2357 			/*
2358 			 * If the page table page is mapped, we just
2359 			 * increment the hold count, and activate it.
2360 			 */
2361 			if (pde && *pde != 0) {
2362 				mpte = PHYS_TO_VM_PAGE(
2363 				    MIPS_DIRECT_TO_PHYS(*pde));
2364 				mpte->ref_count++;
2365 			} else {
2366 				mpte = _pmap_allocpte(pmap, ptepindex,
2367 				    PMAP_ENTER_NOSLEEP);
2368 				if (mpte == NULL)
2369 					return (mpte);
2370 			}
2371 		}
2372 	} else {
2373 		mpte = NULL;
2374 	}
2375 
2376 	pte = pmap_pte(pmap, va);
2377 	if (pte_test(pte, PTE_V)) {
2378 		if (mpte != NULL) {
2379 			mpte->ref_count--;
2380 			mpte = NULL;
2381 		}
2382 		return (mpte);
2383 	}
2384 
2385 	/*
2386 	 * Enter on the PV list if part of our managed memory.
2387 	 */
2388 	if ((m->oflags & VPO_UNMANAGED) == 0 &&
2389 	    !pmap_try_insert_pv_entry(pmap, mpte, va, m)) {
2390 		if (mpte != NULL) {
2391 			pmap_unwire_ptp(pmap, va, mpte);
2392 			mpte = NULL;
2393 		}
2394 		return (mpte);
2395 	}
2396 
2397 	/*
2398 	 * Increment counters
2399 	 */
2400 	pmap->pm_stats.resident_count++;
2401 
2402 	pa = VM_PAGE_TO_PHYS(m);
2403 
2404 	/*
2405 	 * Now validate mapping with RO protection
2406 	 */
2407 	npte = PTE_RO | TLBLO_PA_TO_PFN(pa) | PTE_V;
2408 	if ((m->oflags & VPO_UNMANAGED) == 0)
2409 		npte |= PTE_MANAGED;
2410 
2411 	PMAP_PTE_SET_CACHE_BITS(npte, pa, m);
2412 
2413 	if (is_kernel_pmap(pmap))
2414 		*pte = npte | PTE_G;
2415 	else {
2416 		*pte = npte;
2417 		/*
2418 		 * Sync I & D caches.  Do this only if the target pmap
2419 		 * belongs to the current process.  Otherwise, an
2420 		 * unresolvable TLB miss may occur. */
2421 		if (pmap == &curproc->p_vmspace->vm_pmap) {
2422 			va &= ~PAGE_MASK;
2423 			mips_icache_sync_range(va, PAGE_SIZE);
2424 			mips_dcache_wbinv_range(va, PAGE_SIZE);
2425 		}
2426 	}
2427 	return (mpte);
2428 }
2429 
2430 /*
2431  * Make a temporary mapping for a physical address.  This is only intended
2432  * to be used for panic dumps.
2433  *
2434  * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
2435  */
2436 void *
2437 pmap_kenter_temporary(vm_paddr_t pa, int i)
2438 {
2439 	vm_offset_t va;
2440 
2441 	if (i != 0)
2442 		printf("%s: ERROR!!! More than one page of virtual address mapping not supported\n",
2443 		    __func__);
2444 
2445 	if (MIPS_DIRECT_MAPPABLE(pa)) {
2446 		va = MIPS_PHYS_TO_DIRECT(pa);
2447 	} else {
2448 #ifndef __mips_n64    /* XXX : to be converted to new style */
2449 		pt_entry_t *pte, npte;
2450 
2451 		pte = pmap_pte(kernel_pmap, crashdumpva);
2452 
2453 		/* Since this is for the debugger, no locks or any other fun */
2454 		npte = TLBLO_PA_TO_PFN(pa) | PTE_C_CACHE | PTE_D | PTE_V |
2455 		    PTE_G;
2456 		*pte = npte;
2457 		pmap_update_page(kernel_pmap, crashdumpva, npte);
2458 		va = crashdumpva;
2459 #endif
2460 	}
2461 	return ((void *)va);
2462 }
2463 
2464 void
2465 pmap_kenter_temporary_free(vm_paddr_t pa)
2466 {
2467 #ifndef __mips_n64    /* XXX : to be converted to new style */
2468 	pt_entry_t *pte;
2469 #endif
2470 	if (MIPS_DIRECT_MAPPABLE(pa)) {
2471 		/* nothing to do for this case */
2472 		return;
2473 	}
2474 #ifndef __mips_n64    /* XXX : to be converted to new style */
2475 	pte = pmap_pte(kernel_pmap, crashdumpva);
2476 	*pte = PTE_G;
2477 	pmap_invalidate_page(kernel_pmap, crashdumpva);
2478 #endif
2479 }
2480 
2481 /*
2482  * Maps a sequence of resident pages belonging to the same object.
2483  * The sequence begins with the given page m_start.  This page is
2484  * mapped at the given virtual address start.  Each subsequent page is
2485  * mapped at a virtual address that is offset from start by the same
2486  * amount as the page is offset from m_start within the object.  The
2487  * last page in the sequence is the page with the largest offset from
2488  * m_start that can be mapped at a virtual address less than the given
2489  * virtual address end.  Not every virtual page between start and end
2490  * is mapped; only those for which a resident page exists with the
2491  * corresponding offset from m_start are mapped.
2492  */
2493 void
2494 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
2495     vm_page_t m_start, vm_prot_t prot)
2496 {
2497 	vm_page_t m, mpte;
2498 	vm_pindex_t diff, psize;
2499 
2500 	VM_OBJECT_ASSERT_LOCKED(m_start->object);
2501 
2502 	psize = atop(end - start);
2503 	mpte = NULL;
2504 	m = m_start;
2505 	rw_wlock(&pvh_global_lock);
2506 	PMAP_LOCK(pmap);
2507 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
2508 		mpte = pmap_enter_quick_locked(pmap, start + ptoa(diff), m,
2509 		    prot, mpte);
2510 		m = TAILQ_NEXT(m, listq);
2511 	}
2512 	rw_wunlock(&pvh_global_lock);
2513 	PMAP_UNLOCK(pmap);
2514 }
2515 
2516 /*
2517  * pmap_object_init_pt preloads the ptes for a given object
2518  * into the specified pmap.  This eliminates the blast of soft
2519  * faults on process startup and immediately after an mmap.
2520  */
2521 void
2522 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
2523     vm_object_t object, vm_pindex_t pindex, vm_size_t size)
2524 {
2525 	VM_OBJECT_ASSERT_WLOCKED(object);
2526 	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
2527 	    ("pmap_object_init_pt: non-device object"));
2528 }
2529 
2530 /*
2531  *	Clear the wired attribute from the mappings for the specified range of
2532  *	addresses in the given pmap.  Every valid mapping within that range
2533  *	must have the wired attribute set.  In contrast, invalid mappings
2534  *	cannot have the wired attribute set, so they are ignored.
2535  *
2536  *	The wired attribute of the page table entry is not a hardware feature,
2537  *	so there is no need to invalidate any TLB entries.
2538  */
2539 void
2540 pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2541 {
2542 	pd_entry_t *pde, *pdpe;
2543 	pt_entry_t *pte;
2544 	vm_offset_t va_next;
2545 
2546 	PMAP_LOCK(pmap);
2547 	for (; sva < eva; sva = va_next) {
2548 		pdpe = pmap_segmap(pmap, sva);
2549 #ifdef __mips_n64
2550 		if (*pdpe == NULL) {
2551 			va_next = (sva + NBSEG) & ~SEGMASK;
2552 			if (va_next < sva)
2553 				va_next = eva;
2554 			continue;
2555 		}
2556 #endif
2557 		va_next = (sva + NBPDR) & ~PDRMASK;
2558 		if (va_next < sva)
2559 			va_next = eva;
2560 		pde = pmap_pdpe_to_pde(pdpe, sva);
2561 		if (*pde == NULL)
2562 			continue;
2563 		if (va_next > eva)
2564 			va_next = eva;
2565 		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
2566 		    sva += PAGE_SIZE) {
2567 			if (!pte_test(pte, PTE_V))
2568 				continue;
2569 			if (!pte_test(pte, PTE_W))
2570 				panic("pmap_unwire: pte %#jx is missing PG_W",
2571 				    (uintmax_t)*pte);
2572 			pte_clear(pte, PTE_W);
2573 			pmap->pm_stats.wired_count--;
2574 		}
2575 	}
2576 	PMAP_UNLOCK(pmap);
2577 }
2578 
2579 /*
2580  *	Copy the range specified by src_addr/len
2581  *	from the source map to the range dst_addr/len
2582  *	in the destination map.
2583  *
2584  *	This routine is only advisory and need not do anything.
2585  */
2586 
2587 void
2588 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
2589     vm_size_t len, vm_offset_t src_addr)
2590 {
2591 }
2592 
2593 /*
2594  *	pmap_zero_page zeros the specified hardware page by mapping
2595  *	the page into KVM and using bzero to clear its contents.
2596  *
2597  * 	Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
2598  */
2599 void
2600 pmap_zero_page(vm_page_t m)
2601 {
2602 	vm_offset_t va;
2603 	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
2604 
2605 	if (MIPS_DIRECT_MAPPABLE(phys)) {
2606 		va = MIPS_PHYS_TO_DIRECT(phys);
2607 		bzero((caddr_t)va, PAGE_SIZE);
2608 		mips_dcache_wbinv_range(va, PAGE_SIZE);
2609 	} else {
2610 		va = pmap_lmem_map1(phys);
2611 		bzero((caddr_t)va, PAGE_SIZE);
2612 		mips_dcache_wbinv_range(va, PAGE_SIZE);
2613 		pmap_lmem_unmap();
2614 	}
2615 }
2616 
2617 /*
2618  *	pmap_zero_page_area zeros the specified hardware page by mapping
2619  *	the page into KVM and using bzero to clear its contents.
2620  *
2621  *	off and size may not cover an area beyond a single hardware page.
2622  */
2623 void
2624 pmap_zero_page_area(vm_page_t m, int off, int size)
2625 {
2626 	vm_offset_t va;
2627 	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
2628 
2629 	if (MIPS_DIRECT_MAPPABLE(phys)) {
2630 		va = MIPS_PHYS_TO_DIRECT(phys);
2631 		bzero((char *)(caddr_t)va + off, size);
2632 		mips_dcache_wbinv_range(va + off, size);
2633 	} else {
2634 		va = pmap_lmem_map1(phys);
2635 		bzero((char *)va + off, size);
2636 		mips_dcache_wbinv_range(va + off, size);
2637 		pmap_lmem_unmap();
2638 	}
2639 }
2640 
2641 /*
2642  *	pmap_copy_page copies the specified (machine independent)
2643  *	page by mapping the page into virtual memory and using
2644  *	bcopy to copy the page, one machine dependent page at a
2645  *	time.
2646  *
2647  * 	Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
2648  */
2649 void
2650 pmap_copy_page(vm_page_t src, vm_page_t dst)
2651 {
2652 	vm_offset_t va_src, va_dst;
2653 	vm_paddr_t phys_src = VM_PAGE_TO_PHYS(src);
2654 	vm_paddr_t phys_dst = VM_PAGE_TO_PHYS(dst);
2655 
2656 	if (MIPS_DIRECT_MAPPABLE(phys_src) && MIPS_DIRECT_MAPPABLE(phys_dst)) {
2657 		/* easy case, all can be accessed via KSEG0 */
2658 		/*
2659 		 * Flush all caches for VA that are mapped to this page
2660 		 * to make sure that data in SDRAM is up to date
2661 		 */
2662 		pmap_flush_pvcache(src);
2663 		mips_dcache_wbinv_range_index(
2664 		    MIPS_PHYS_TO_DIRECT(phys_dst), PAGE_SIZE);
2665 		va_src = MIPS_PHYS_TO_DIRECT(phys_src);
2666 		va_dst = MIPS_PHYS_TO_DIRECT(phys_dst);
2667 		bcopy((caddr_t)va_src, (caddr_t)va_dst, PAGE_SIZE);
2668 		mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
2669 	} else {
2670 		va_src = pmap_lmem_map2(phys_src, phys_dst);
2671 		va_dst = va_src + PAGE_SIZE;
2672 		bcopy((void *)va_src, (void *)va_dst, PAGE_SIZE);
2673 		mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
2674 		pmap_lmem_unmap();
2675 	}
2676 }
2677 
2678 int unmapped_buf_allowed;
2679 
2680 void
2681 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
2682     vm_offset_t b_offset, int xfersize)
2683 {
2684 	char *a_cp, *b_cp;
2685 	vm_page_t a_m, b_m;
2686 	vm_offset_t a_pg_offset, b_pg_offset;
2687 	vm_paddr_t a_phys, b_phys;
2688 	int cnt;
2689 
2690 	while (xfersize > 0) {
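	/*
	 * Copy in chunks that never cross a page boundary in either the
	 * source or the destination, using the direct map when both pages
	 * are direct-mappable and the per-CPU local-memory window otherwise.
	 */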
2691 		a_pg_offset = a_offset & PAGE_MASK;
2692 		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
2693 		a_m = ma[a_offset >> PAGE_SHIFT];
2694 		a_phys = VM_PAGE_TO_PHYS(a_m);
2695 		b_pg_offset = b_offset & PAGE_MASK;
2696 		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
2697 		b_m = mb[b_offset >> PAGE_SHIFT];
2698 		b_phys = VM_PAGE_TO_PHYS(b_m);
2699 		if (MIPS_DIRECT_MAPPABLE(a_phys) &&
2700 		    MIPS_DIRECT_MAPPABLE(b_phys)) {
2701 			pmap_flush_pvcache(a_m);
2702 			mips_dcache_wbinv_range_index(
2703 			    MIPS_PHYS_TO_DIRECT(b_phys), PAGE_SIZE);
2704 			a_cp = (char *)MIPS_PHYS_TO_DIRECT(a_phys) +
2705 			    a_pg_offset;
2706 			b_cp = (char *)MIPS_PHYS_TO_DIRECT(b_phys) +
2707 			    b_pg_offset;
2708 			bcopy(a_cp, b_cp, cnt);
2709 			mips_dcache_wbinv_range((vm_offset_t)b_cp, cnt);
2710 		} else {
2711 			a_cp = (char *)pmap_lmem_map2(a_phys, b_phys);
2712 			b_cp = (char *)a_cp + PAGE_SIZE;
2713 			a_cp += a_pg_offset;
2714 			b_cp += b_pg_offset;
2715 			bcopy(a_cp, b_cp, cnt);
2716 			mips_dcache_wbinv_range((vm_offset_t)b_cp, cnt);
2717 			pmap_lmem_unmap();
2718 		}
2719 		a_offset += cnt;
2720 		b_offset += cnt;
2721 		xfersize -= cnt;
2722 	}
2723 }
2724 
2725 vm_offset_t
2726 pmap_quick_enter_page(vm_page_t m)
2727 {
2728 #if defined(__mips_n64)
2729 	return MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));
2730 #else
2731 	vm_offset_t qaddr;
2732 	vm_paddr_t pa;
2733 	pt_entry_t *pte, npte;
2734 
2735 	pa = VM_PAGE_TO_PHYS(m);
2736 
2737 	if (MIPS_DIRECT_MAPPABLE(pa)) {
2738 		if (pmap_page_get_memattr(m) != VM_MEMATTR_WRITE_BACK)
2739 			return (MIPS_PHYS_TO_DIRECT_UNCACHED(pa));
2740 		else
2741 			return (MIPS_PHYS_TO_DIRECT(pa));
2742 	}
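	/*
	 * critical_enter() keeps us on this CPU; the per-CPU qmap VA/PTE
	 * pair holds at most one temporary mapping at a time.
	 */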
2743 	critical_enter();
2744 	qaddr = PCPU_GET(qmap_addr);
2745 	pte = PCPU_GET(qmap_ptep);
2746 
2747 	KASSERT(*pte == PTE_G, ("pmap_quick_enter_page: PTE busy"));
2748 
2749 	npte = TLBLO_PA_TO_PFN(pa) | PTE_D | PTE_V | PTE_G;
2750 	PMAP_PTE_SET_CACHE_BITS(npte, pa, m);
2751 	*pte = npte;
2752 
2753 	return (qaddr);
2754 #endif
2755 }
2756 
2757 void
2758 pmap_quick_remove_page(vm_offset_t addr)
2759 {
2760 	mips_dcache_wbinv_range(addr, PAGE_SIZE);
2761 
2762 #if !defined(__mips_n64)
2763 	pt_entry_t *pte;
2764 
2765 	if (addr >= MIPS_KSEG0_START && addr < MIPS_KSEG0_END)
2766 		return;
2767 
2768 	pte = PCPU_GET(qmap_ptep);
2769 
2770 	KASSERT(*pte != PTE_G,
2771 	    ("pmap_quick_remove_page: PTE not in use"));
2772 	KASSERT(PCPU_GET(qmap_addr) == addr,
2773 	    ("pmap_quick_remove_page: invalid address"));
2774 
2775 	*pte = PTE_G;
2776 	tlb_invalidate_address(kernel_pmap, addr);
2777 	critical_exit();
2778 #endif
2779 }
2780 
2781 /*
2782  * Returns true if the pmap's pv is one of the first
2783  * 16 pvs linked to from this page.  This count may
2784  * be changed upwards or downwards in the future; it
2785  * is only necessary that true be returned for a small
2786  * subset of pmaps for proper page aging.
2787  */
2788 boolean_t
2789 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
2790 {
2791 	pv_entry_t pv;
2792 	int loops = 0;
2793 	boolean_t rv;
2794 
2795 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2796 	    ("pmap_page_exists_quick: page %p is not managed", m));
2797 	rv = FALSE;
2798 	rw_wlock(&pvh_global_lock);
2799 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2800 		if (PV_PMAP(pv) == pmap) {
2801 			rv = TRUE;
2802 			break;
2803 		}
2804 		loops++;
2805 		if (loops >= 16)
2806 			break;
2807 	}
2808 	rw_wunlock(&pvh_global_lock);
2809 	return (rv);
2810 }
2811 
2812 /*
2813  * Remove all pages from specified address space
2814  * this aids process exit speeds.  Also, this code
2815  * is special cased for current process only, but
2816  * can have the more generic (and slightly slower)
2817  * mode enabled.  This is much faster than pmap_remove
2818  * in the case of running down an entire address space.
2819  */
2820 void
2821 pmap_remove_pages(pmap_t pmap)
2822 {
2823 	pd_entry_t *pde;
2824 	pt_entry_t *pte, tpte;
2825 	pv_entry_t pv;
2826 	vm_page_t m;
2827 	struct pv_chunk *pc, *npc;
2828 	u_long inuse, bitmask;
2829 	int allfree, bit, field, idx;
2830 
2831 	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
2832 		printf("warning: pmap_remove_pages called with non-current pmap\n");
2833 		return;
2834 	}
2835 	rw_wlock(&pvh_global_lock);
2836 	PMAP_LOCK(pmap);
2837 	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
2838 		allfree = 1;
2839 		for (field = 0; field < _NPCM; field++) {
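			/*
			 * A clear bit in pc_map marks an allocated pv entry;
			 * walk the allocated entries with ffsl().
			 */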
2840 			inuse = ~pc->pc_map[field] & pc_freemask[field];
2841 			while (inuse != 0) {
2842 				bit = ffsl(inuse) - 1;
2843 				bitmask = 1UL << bit;
2844 				idx = field * sizeof(inuse) * NBBY + bit;
2845 				pv = &pc->pc_pventry[idx];
2846 				inuse &= ~bitmask;
2847 
2848 				pde = pmap_pde(pmap, pv->pv_va);
2849 				KASSERT(pde != NULL && *pde != 0,
2850 				    ("pmap_remove_pages: pde"));
2851 				pte = pmap_pde_to_pte(pde, pv->pv_va);
2852 				if (!pte_test(pte, PTE_V))
2853 					panic("pmap_remove_pages: bad pte");
2854 				tpte = *pte;
2855 
2856 /*
2857  * We cannot remove wired pages from a process' mapping at this time
2858  */
2859 				if (pte_test(&tpte, PTE_W)) {
2860 					allfree = 0;
2861 					continue;
2862 				}
2863 				*pte = is_kernel_pmap(pmap) ? PTE_G : 0;
2864 
2865 				m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(tpte));
2866 				KASSERT(m != NULL,
2867 				    ("pmap_remove_pages: bad tpte %#jx",
2868 				    (uintmax_t)tpte));
2869 
2870 				/*
2871 				 * Update the vm_page_t clean and reference bits.
2872 				 */
2873 				if (pte_test(&tpte, PTE_D))
2874 					vm_page_dirty(m);
2875 
2876 				/* Mark free */
2877 				PV_STAT(pv_entry_frees++);
2878 				PV_STAT(pv_entry_spare++);
2879 				pv_entry_count--;
2880 				pc->pc_map[field] |= bitmask;
2881 				pmap->pm_stats.resident_count--;
2882 				TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2883 				if (TAILQ_EMPTY(&m->md.pv_list))
2884 					vm_page_aflag_clear(m, PGA_WRITEABLE);
2885 
2886 				/*
2887 				 * For simplicity, unconditionally call
2888 				 * pmap_invalidate_all(), below.
2889 				 */
2890 				(void)pmap_unuse_pt(pmap, pv->pv_va, *pde);
2891 			}
2892 		}
2893 		if (allfree) {
2894 			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2895 			free_pv_chunk(pc);
2896 		}
2897 	}
2898 	pmap_invalidate_all(pmap);
2899 	PMAP_UNLOCK(pmap);
2900 	rw_wunlock(&pvh_global_lock);
2901 }
2902 
2903 /*
2904  * pmap_testbit tests bits in pte's
2905  */
2906 static boolean_t
2907 pmap_testbit(vm_page_t m, int bit)
2908 {
2909 	pv_entry_t pv;
2910 	pmap_t pmap;
2911 	pt_entry_t *pte;
2912 	boolean_t rv = FALSE;
2913 
2914 	if (m->oflags & VPO_UNMANAGED)
2915 		return (rv);
2916 
2917 	rw_assert(&pvh_global_lock, RA_WLOCKED);
2918 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2919 		pmap = PV_PMAP(pv);
2920 		PMAP_LOCK(pmap);
2921 		pte = pmap_pte(pmap, pv->pv_va);
2922 		rv = pte_test(pte, bit);
2923 		PMAP_UNLOCK(pmap);
2924 		if (rv)
2925 			break;
2926 	}
2927 	return (rv);
2928 }
2929 
2930 /*
2931  *	pmap_page_wired_mappings:
2932  *
2933  *	Return the number of managed mappings to the given physical page
2934  *	that are wired.
2935  */
2936 int
2937 pmap_page_wired_mappings(vm_page_t m)
2938 {
2939 	pv_entry_t pv;
2940 	pmap_t pmap;
2941 	pt_entry_t *pte;
2942 	int count;
2943 
2944 	count = 0;
2945 	if ((m->oflags & VPO_UNMANAGED) != 0)
2946 		return (count);
2947 	rw_wlock(&pvh_global_lock);
2948 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2949 		pmap = PV_PMAP(pv);
2950 		PMAP_LOCK(pmap);
2951 		pte = pmap_pte(pmap, pv->pv_va);
2952 		if (pte_test(pte, PTE_W))
2953 			count++;
2954 		PMAP_UNLOCK(pmap);
2955 	}
2956 	rw_wunlock(&pvh_global_lock);
2957 	return (count);
2958 }
2959 
2960 /*
2961  * Clear the write and modified bits in each of the given page's mappings.
2962  */
2963 void
2964 pmap_remove_write(vm_page_t m)
2965 {
2966 	pmap_t pmap;
2967 	pt_entry_t pbits, *pte;
2968 	pv_entry_t pv;
2969 
2970 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2971 	    ("pmap_remove_write: page %p is not managed", m));
2972 	vm_page_assert_busied(m);
2973 
2974 	if (!pmap_page_is_write_mapped(m))
2975 		return;
2976 	rw_wlock(&pvh_global_lock);
2977 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2978 		pmap = PV_PMAP(pv);
2979 		PMAP_LOCK(pmap);
2980 		pte = pmap_pte(pmap, pv->pv_va);
2981 		KASSERT(pte != NULL && pte_test(pte, PTE_V),
2982 		    ("page on pv_list has no pte"));
2983 		pbits = *pte;
2984 		if (pte_test(&pbits, PTE_D)) {
2985 			pte_clear(&pbits, PTE_D);
2986 			vm_page_dirty(m);
2987 		}
2988 		pte_set(&pbits, PTE_RO);
2989 		if (pbits != *pte) {
2990 			*pte = pbits;
2991 			pmap_update_page(pmap, pv->pv_va, pbits);
2992 		}
2993 		PMAP_UNLOCK(pmap);
2994 	}
2995 	vm_page_aflag_clear(m, PGA_WRITEABLE);
2996 	rw_wunlock(&pvh_global_lock);
2997 }
2998 
2999 /*
3000  *	pmap_ts_referenced:
3001  *
3002  *	Return the count of reference bits for a page, clearing all of them.
3003  */
3004 int
3005 pmap_ts_referenced(vm_page_t m)
3006 {
3007 
3008 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3009 	    ("pmap_ts_referenced: page %p is not managed", m));
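	/*
	 * MIPS has no hardware-maintained referenced bit; PV_TABLE_REF is a
	 * single software flag per page, so the returned count is 0 or 1.
	 */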
3010 	if (m->md.pv_flags & PV_TABLE_REF) {
3011 		rw_wlock(&pvh_global_lock);
3012 		m->md.pv_flags &= ~PV_TABLE_REF;
3013 		rw_wunlock(&pvh_global_lock);
3014 		return (1);
3015 	}
3016 	return (0);
3017 }
3018 
3019 /*
3020  *	pmap_is_modified:
3021  *
3022  *	Return whether or not the specified physical page was modified
3023  *	in any physical maps.
3024  */
3025 boolean_t
3026 pmap_is_modified(vm_page_t m)
3027 {
3028 	boolean_t rv;
3029 
3030 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3031 	    ("pmap_is_modified: page %p is not managed", m));
3032 
3033 	/*
3034 	 * If the page is not busied then this check is racy.
3035 	 */
3036 	if (!pmap_page_is_write_mapped(m))
3037 		return (FALSE);
3038 
3039 	rw_wlock(&pvh_global_lock);
3040 	rv = pmap_testbit(m, PTE_D);
3041 	rw_wunlock(&pvh_global_lock);
3042 	return (rv);
3043 }
3044 
3045 /* N/C */
3046 
3047 /*
3048  *	pmap_is_prefaultable:
3049  *
3050  *	Return whether or not the specified virtual address is eligible
3051  *	for prefault.
3052  */
3053 boolean_t
3054 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
3055 {
3056 	pd_entry_t *pde;
3057 	pt_entry_t *pte;
3058 	boolean_t rv;
3059 
3060 	rv = FALSE;
3061 	PMAP_LOCK(pmap);
3062 	pde = pmap_pde(pmap, addr);
3063 	if (pde != NULL && *pde != 0) {
3064 		pte = pmap_pde_to_pte(pde, addr);
3065 		rv = (*pte == 0);
3066 	}
3067 	PMAP_UNLOCK(pmap);
3068 	return (rv);
3069 }
3070 
3071 /*
3072  *	Apply the given advice to the specified range of addresses within the
3073  *	given pmap.  Depending on the advice, clear the referenced and/or
3074  *	modified flags in each mapping and set the mapped page's dirty field.
3075  */
3076 void
3077 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
3078 {
3079 	pd_entry_t *pde, *pdpe;
3080 	pt_entry_t *pte;
3081 	vm_offset_t va, va_next;
3082 	vm_paddr_t pa;
3083 	vm_page_t m;
3084 
3085 	if (advice != MADV_DONTNEED && advice != MADV_FREE)
3086 		return;
3087 	rw_wlock(&pvh_global_lock);
3088 	PMAP_LOCK(pmap);
3089 	for (; sva < eva; sva = va_next) {
3090 		pdpe = pmap_segmap(pmap, sva);
3091 #ifdef __mips_n64
3092 		if (*pdpe == 0) {
3093 			va_next = (sva + NBSEG) & ~SEGMASK;
3094 			if (va_next < sva)
3095 				va_next = eva;
3096 			continue;
3097 		}
3098 #endif
3099 		va_next = (sva + NBPDR) & ~PDRMASK;
3100 		if (va_next < sva)
3101 			va_next = eva;
3102 
3103 		pde = pmap_pdpe_to_pde(pdpe, sva);
3104 		if (*pde == NULL)
3105 			continue;
3106 
3107 		/*
3108 		 * Limit our scan to either the end of the va represented
3109 		 * by the current page table page, or to the end of the
3110 		 * range to which the advice applies.
3111 		 */
3112 		if (va_next > eva)
3113 			va_next = eva;
3114 
3115 		va = va_next;
3116 		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
3117 		    sva += PAGE_SIZE) {
3118 			if (!pte_test(pte, PTE_MANAGED | PTE_V)) {
3119 				if (va != va_next) {
3120 					pmap_invalidate_range(pmap, va, sva);
3121 					va = va_next;
3122 				}
3123 				continue;
3124 			}
3125 			pa = TLBLO_PTE_TO_PA(*pte);
3126 			m = PHYS_TO_VM_PAGE(pa);
3127 			m->md.pv_flags &= ~PV_TABLE_REF;
3128 			if (pte_test(pte, PTE_D)) {
3129 				if (advice == MADV_DONTNEED) {
3130 					/*
3131 					 * Future calls to pmap_is_modified()
3132 					 * can be avoided by making the page
3133 					 * dirty now.
3134 					 */
3135 					vm_page_dirty(m);
3136 				} else {
3137 					pte_clear(pte, PTE_D);
3138 					if (va == va_next)
3139 						va = sva;
3140 				}
3141 			} else {
3142 				/*
3143 				 * Unless PTE_D is set, any TLB entries
3144 				 * mapping "sva" don't allow write access, so
3145 				 * they needn't be invalidated.
3146 				 */
3147 				if (va != va_next) {
3148 					pmap_invalidate_range(pmap, va, sva);
3149 					va = va_next;
3150 				}
3151 			}
3152 		}
3153 		if (va != va_next)
3154 			pmap_invalidate_range(pmap, va, sva);
3155 	}
3156 	rw_wunlock(&pvh_global_lock);
3157 	PMAP_UNLOCK(pmap);
3158 }
3159 
3160 /*
3161  *	Clear the modify bits on the specified physical page.
3162  */
3163 void
3164 pmap_clear_modify(vm_page_t m)
3165 {
3166 	pmap_t pmap;
3167 	pt_entry_t *pte;
3168 	pv_entry_t pv;
3169 
3170 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3171 	    ("pmap_clear_modify: page %p is not managed", m));
3172 	vm_page_assert_busied(m);
3173 
3174 	if (!pmap_page_is_write_mapped(m))
3175 		return;
3176 	rw_wlock(&pvh_global_lock);
3177 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
3178 		pmap = PV_PMAP(pv);
3179 		PMAP_LOCK(pmap);
3180 		pte = pmap_pte(pmap, pv->pv_va);
3181 		if (pte_test(pte, PTE_D)) {
3182 			pte_clear(pte, PTE_D);
3183 			pmap_update_page(pmap, pv->pv_va, *pte);
3184 		}
3185 		PMAP_UNLOCK(pmap);
3186 	}
3187 	rw_wunlock(&pvh_global_lock);
3188 }
3189 
3190 /*
3191  *	pmap_is_referenced:
3192  *
3193  *	Return whether or not the specified physical page was referenced
3194  *	in any physical maps.
3195  */
3196 boolean_t
3197 pmap_is_referenced(vm_page_t m)
3198 {
3199 
3200 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3201 	    ("pmap_is_referenced: page %p is not managed", m));
3202 	return ((m->md.pv_flags & PV_TABLE_REF) != 0);
3203 }
3204 
3205 /*
3206  * Miscellaneous support routines follow
3207  */
3208 
3209 /*
3210  * Map a set of physical memory pages into the kernel virtual
3211  * address space. Return a pointer to where it is mapped. This
3212  * routine is intended to be used for mapping device memory,
3213  * NOT real memory.
3214  *
3215  * Use XKPHYS uncached for 64 bit, and KSEG1 where possible for 32 bit.
3216  */
3217 void *
3218 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
3219 {
3220 	vm_offset_t va, tmpva, offset;
3221 
3222 	/*
3223 	 * KSEG1 maps only the first 512M of the physical address space.  For
3224 	 * pa > 0x20000000 we should make a proper mapping using pmap_kenter.
3225 	 */
3226 	if (MIPS_DIRECT_MAPPABLE(pa + size - 1) && ma == VM_MEMATTR_UNCACHEABLE)
3227 		return ((void *)MIPS_PHYS_TO_DIRECT_UNCACHED(pa));
3228 	else {
3229 		offset = pa & PAGE_MASK;
3230 		size = roundup(size + offset, PAGE_SIZE);
3231 
3232 		va = kva_alloc(size);
3233 		if (!va)
3234 			panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
3235 		pa = trunc_page(pa);
3236 		for (tmpva = va; size > 0;) {
3237 			pmap_kenter_attr(tmpva, pa, ma);
3238 			size -= PAGE_SIZE;
3239 			tmpva += PAGE_SIZE;
3240 			pa += PAGE_SIZE;
3241 		}
3242 	}
3243 
3244 	return ((void *)(va + offset));
3245 }
3246 
3247 void *
3248 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
3249 {
3250 	return pmap_mapdev_attr(pa, size, VM_MEMATTR_UNCACHEABLE);
3251 }
3252 
3253 void
3254 pmap_unmapdev(vm_offset_t va, vm_size_t size)
3255 {
3256 #ifndef __mips_n64
3257 	vm_offset_t base, offset;
3258 
3259 	/* If the address is within KSEG1 then there is nothing to do */
3260 	if (va >= MIPS_KSEG1_START && va <= MIPS_KSEG1_END)
3261 		return;
3262 
3263 	base = trunc_page(va);
3264 	offset = va & PAGE_MASK;
3265 	size = roundup(size + offset, PAGE_SIZE);
3266 	pmap_qremove(base, atop(size));
3267 	kva_free(base, size);
3268 #endif
3269 }
3270 
3271 /*
3272  * Perform the pmap work for mincore(2).  If the page is not both referenced and
3273  * modified by this pmap, returns its physical address so that the caller can
3274  * find other mappings.
3275  */
3276 int
3277 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
3278 {
3279 	pt_entry_t *ptep, pte;
3280 	vm_paddr_t pa;
3281 	vm_page_t m;
3282 	int val;
3283 
3284 	PMAP_LOCK(pmap);
3285 	ptep = pmap_pte(pmap, addr);
3286 	pte = (ptep != NULL) ? *ptep : 0;
3287 	if (!pte_test(&pte, PTE_V)) {
3288 		PMAP_UNLOCK(pmap);
3289 		return (0);
3290 	}
3291 	val = MINCORE_INCORE;
3292 	if (pte_test(&pte, PTE_D))
3293 		val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
3294 	pa = TLBLO_PTE_TO_PA(pte);
3295 	if (pte_test(&pte, PTE_MANAGED)) {
3296 		/*
3297 		 * This may falsely report the given address as
3298 		 * MINCORE_REFERENCED.  Unfortunately, due to the lack of
3299 		 * per-PTE reference information, it is impossible to
3300 		 * determine if the address is MINCORE_REFERENCED.
3301 		 */
3302 		m = PHYS_TO_VM_PAGE(pa);
3303 		if ((m->a.flags & PGA_REFERENCED) != 0)
3304 			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
3305 	}
3306 	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
3307 	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
3308 	    pte_test(&pte, PTE_MANAGED)) {
3309 		*pap = pa;
3310 	}
3311 	PMAP_UNLOCK(pmap);
3312 	return (val);
3313 }
3314 
3315 void
3316 pmap_activate(struct thread *td)
3317 {
3318 	pmap_t pmap, oldpmap;
3319 	struct proc *p = td->td_proc;
3320 	u_int cpuid;
3321 
3322 	critical_enter();
3323 
3324 	pmap = vmspace_pmap(p->p_vmspace);
3325 	oldpmap = PCPU_GET(curpmap);
3326 	cpuid = PCPU_GET(cpuid);
3327 
3328 	if (oldpmap)
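	/* Move this CPU from the old pmap's active set to the new one's. */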
3329 		CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
3330 	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
3331 	pmap_asid_alloc(pmap);
3332 	if (td == curthread) {
3333 		PCPU_SET(segbase, pmap->pm_segtab);
3334 		mips_wr_entryhi(pmap->pm_asid[cpuid].asid);
3335 	}
3336 
3337 	PCPU_SET(curpmap, pmap);
3338 	critical_exit();
3339 }
3340 
3341 static void
3342 pmap_sync_icache_one(void *arg __unused)
3343 {
3344 
3345 	mips_icache_sync_all();
3346 	mips_dcache_wbinv_all();
3347 }
3348 
3349 void
3350 pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
3351 {
3352 
3353 	smp_rendezvous(NULL, pmap_sync_icache_one, NULL, NULL);
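	/*
	 * There is no cheap way to flush just the requested range on every
	 * CPU, so synchronize the entire I-cache (and write back the
	 * D-cache) everywhere via an smp_rendezvous.
	 */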
3354 }
3355 
3356 /*
3357  *	Increase the starting virtual address of the given mapping if a
3358  *	different alignment might result in more superpage mappings.
3359  */
3360 void
3361 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
3362     vm_offset_t *addr, vm_size_t size)
3363 {
3364 	vm_offset_t superpage_offset;
3365 
3366 	if (size < PDRSIZE)
3367 		return;
3368 	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
3369 		offset += ptoa(object->pg_color);
3370 	superpage_offset = offset & PDRMASK;
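	/*
	 * If the mapping could not contain even one full superpage after
	 * realignment, or the address is already aligned consistently with
	 * the offset within the superpage, leave *addr unchanged.
	 */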
3371 	if (size - ((PDRSIZE - superpage_offset) & PDRMASK) < PDRSIZE ||
3372 	    (*addr & PDRMASK) == superpage_offset)
3373 		return;
3374 	if ((*addr & PDRMASK) < superpage_offset)
3375 		*addr = (*addr & ~PDRMASK) + superpage_offset;
3376 	else
3377 		*addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
3378 }
3379 
3380 #ifdef DDB
3381 DB_SHOW_COMMAND(ptable, ddb_pid_dump)
3382 {
3383 	pmap_t pmap;
3384 	struct thread *td = NULL;
3385 	struct proc *p;
3386 	int i, j, k;
3387 	vm_paddr_t pa;
3388 	vm_offset_t va;
3389 
3390 	if (have_addr) {
3391 		td = db_lookup_thread(addr, true);
3392 		if (td == NULL) {
3393 			db_printf("Invalid pid or tid");
3394 			return;
3395 		}
3396 		p = td->td_proc;
3397 		if (p->p_vmspace == NULL) {
3398 			db_printf("No vmspace for process");
3399 			return;
3400 		}
3401 		pmap = vmspace_pmap(p->p_vmspace);
3402 	} else
3403 		pmap = kernel_pmap;
3404 
3405 	db_printf("pmap:%p segtab:%p asid:%x generation:%x\n",
3406 	    pmap, pmap->pm_segtab, pmap->pm_asid[0].asid,
3407 	    pmap->pm_asid[0].gen);
3408 	for (i = 0; i < NPDEPG; i++) {
3409 		pd_entry_t *pdpe;
3410 		pt_entry_t *pde;
3411 		pt_entry_t pte;
3412 
3413 		pdpe = (pd_entry_t *)pmap->pm_segtab[i];
3414 		if (pdpe == NULL)
3415 			continue;
3416 		db_printf("[%4d] %p\n", i, pdpe);
3417 #ifdef __mips_n64
3418 		for (j = 0; j < NPDEPG; j++) {
3419 			pde = (pt_entry_t *)pdpe[j];
3420 			if (pde == NULL)
3421 				continue;
3422 			db_printf("\t[%4d] %p\n", j, pde);
3423 #else
3424 		{
3425 			j = 0;
3426 			pde =  (pt_entry_t *)pdpe;
3427 #endif
3428 			for (k = 0; k < NPTEPG; k++) {
3429 				pte = pde[k];
3430 				if (pte == 0 || !pte_test(&pte, PTE_V))
3431 					continue;
3432 				pa = TLBLO_PTE_TO_PA(pte);
3433 				va = ((u_long)i << SEGSHIFT) | (j << PDRSHIFT) | (k << PAGE_SHIFT);
3434 				db_printf("\t\t[%04d] va: %p pte: %8jx pa:%jx\n",
3435 				       k, (void *)va, (uintmax_t)pte, (uintmax_t)pa);
3436 			}
3437 		}
3438 	}
3439 }
3440 #endif
3441 
3442 /*
3443  * Allocate TLB address space tag (called ASID or TLBPID) and return it.
3444  * It takes almost as much or more time to search the TLB for a
3445  * specific ASID and flush those entries as it does to flush the entire TLB.
3446  * Therefore, when we allocate a new ASID, we just take the next number. When
3447  * we run out of numbers, we flush the TLB, increment the generation count
3448  * and start over. ASID zero is reserved for kernel use.
3449  */
3450 static void
3451 pmap_asid_alloc(pmap_t pmap)
3453 {
3454 	if (pmap->pm_asid[PCPU_GET(cpuid)].asid == PMAP_ASID_RESERVED ||
3455 	    pmap->pm_asid[PCPU_GET(cpuid)].gen !=
3456 	    PCPU_GET(asid_generation)) {
3457 		if (PCPU_GET(next_asid) == pmap_max_asid) {
3458 			tlb_invalidate_all_user(NULL);
3459 			PCPU_SET(asid_generation,
3460 			    (PCPU_GET(asid_generation) + 1) & ASIDGEN_MASK);
3461 			if (PCPU_GET(asid_generation) == 0) {
3462 				PCPU_SET(asid_generation, 1);
3463 			}
3464 			PCPU_SET(next_asid, 1);	/* 0 means invalid */
3465 		}
3466 		pmap->pm_asid[PCPU_GET(cpuid)].asid = PCPU_GET(next_asid);
3467 		pmap->pm_asid[PCPU_GET(cpuid)].gen = PCPU_GET(asid_generation);
3468 		PCPU_SET(next_asid, PCPU_GET(next_asid) + 1);
3469 	}
3470 }
3471 
3472 static pt_entry_t
3473 init_pte_prot(vm_page_t m, vm_prot_t access, vm_prot_t prot)
3474 {
3475 	pt_entry_t rw;
3476 
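	/*
	 * Only pre-set PTE_D for a write access (or an unmanaged page);
	 * otherwise leave it clear so that the first write traps into
	 * pmap_emulate_modified(), which sets PTE_D then.
	 */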
3477 	if (!(prot & VM_PROT_WRITE))
3478 		rw = PTE_V | PTE_RO;
3479 	else if ((m->oflags & VPO_UNMANAGED) == 0) {
3480 		if ((access & VM_PROT_WRITE) != 0)
3481 			rw = PTE_V | PTE_D;
3482 		else
3483 			rw = PTE_V;
3484 	} else
3485 		/* Needn't emulate a modified bit for unmanaged pages. */
3486 		rw = PTE_V | PTE_D;
3487 	return (rw);
3488 }
3489 
3490 /*
3491  * pmap_emulate_modified : do dirty bit emulation
3492  *
3493  * On SMP, update just the local TLB; other CPUs will update their
3494  * TLBs from the PTE lazily if they take the exception.
3495  * Returns 0 on success, or 1 if the page is read-only and we
3496  * need to fault.
3497  */
3498 int
3499 pmap_emulate_modified(pmap_t pmap, vm_offset_t va)
3500 {
3501 	pt_entry_t *pte;
3502 
3503 	PMAP_LOCK(pmap);
3504 	pte = pmap_pte(pmap, va);
3505 
3506 	/*
3507 	 * It is possible that some other CPU or thread changed the pmap while
3508 	 * we weren't looking; in the SMP case, this is readily apparent, but
3509 	 * it can even happen in the UP case, because we may have been blocked
3510 	 * on PMAP_LOCK(pmap) above while someone changed this out from
3511 	 * underneath us.
3512 	 */
3513 
3514 	if (pte == NULL) {
3515 		/*
3516 		 * This PTE's PTP (or one of its ancestors) has been reclaimed;
3517 		 * trigger a full fault to reconstruct it via pmap_enter.
3518 		 */
3519 		PMAP_UNLOCK(pmap);
3520 		return (1);
3521 	}
3522 
3523 	if (!pte_test(pte, PTE_V)) {
3524 		/*
3525 		 * This PTE is no longer valid; the other thread or other
3526 		 * processor must have arranged for our TLB to no longer
3527 		 * have this entry, possibly by IPI, so no tlb_update is
3528 		 * required.  Fall out of the fast path and go take a
3529 		 * general fault before retrying the instruction (or taking
3530 		 * a signal).
3531 		 */
3532 		PMAP_UNLOCK(pmap);
3533 		return (1);
3534 	}
3535 
3536 	if (pte_test(pte, PTE_D)) {
3537 		/*
3538 		 * This PTE is valid and has the PTE_D bit asserted; since
3539 		 * this is an increase in permission, we may have been expected
3540 		 * to update the TLB lazily.  Do so here and return, on the
3541 		 * fast path, to retry the instruction.
3542 		 */
3543 		tlb_update(pmap, va, *pte);
3544 		PMAP_UNLOCK(pmap);
3545 		return (0);
3546 	}
3547 
3548 	if (pte_test(pte, PTE_RO)) {
3549 		/*
3550 		 * This PTE is valid, not dirty, and read-only.  Go take a
3551 		 * full fault (most likely to upgrade this part of the address
3552 		 * space to writeable).
3553 		 */
3554 		PMAP_UNLOCK(pmap);
3555 		return (1);
3556 	}
3557 
3558 	if (!pte_test(pte, PTE_MANAGED))
3559 		panic("pmap_emulate_modified: unmanaged page");
3560 
3561 	/*
3562 	 * PTE is valid, managed, not dirty, and not read-only.  Set PTE_D
3563 	 * and eagerly update the local TLB, returning on the fast path.
3564 	 */
3565 
3566 	pte_set(pte, PTE_D);
3567 	tlb_update(pmap, va, *pte);
3568 	PMAP_UNLOCK(pmap);
3569 
3570 	return (0);
3571 }
3572 
3573 /*
3574  *	Routine:	pmap_kextract
3575  *	Function:
3576  *		Extract the physical page address associated with the
3577  *		given virtual address.
3578  */
3579 vm_paddr_t
3580 pmap_kextract(vm_offset_t va)
3581 {
3582 	int mapped;
3583 
3584 	/*
3585 	 * First, the direct-mapped regions.
3586 	 */
3587 #if defined(__mips_n64)
3588 	if (va >= MIPS_XKPHYS_START && va < MIPS_XKPHYS_END)
3589 		return (MIPS_XKPHYS_TO_PHYS(va));
3590 #endif
3591 	if (va >= MIPS_KSEG0_START && va < MIPS_KSEG0_END)
3592 		return (MIPS_KSEG0_TO_PHYS(va));
3593 
3594 	if (va >= MIPS_KSEG1_START && va < MIPS_KSEG1_END)
3595 		return (MIPS_KSEG1_TO_PHYS(va));
3596 
3597 	/*
3598 	 * User virtual addresses.
3599 	 */
3600 	if (va < VM_MAXUSER_ADDRESS) {
3601 		pt_entry_t *ptep;
3602 
3603 		if (curproc && curproc->p_vmspace) {
3604 			ptep = pmap_pte(&curproc->p_vmspace->vm_pmap, va);
3605 			if (ptep) {
3606 				return (TLBLO_PTE_TO_PA(*ptep) |
3607 				    (va & PAGE_MASK));
3608 			}
3609 			return (0);
3610 		}
3611 	}
3612 
3613 	/*
3614 	 * Should be kernel virtual here, otherwise fail
3615 	 */
3616 	mapped = (va >= MIPS_KSEG2_START || va < MIPS_KSEG2_END);
3617 #if defined(__mips_n64)
3618 	mapped = mapped || (va >= MIPS_XKSEG_START || va < MIPS_XKSEG_END);
3619 #endif
3620 	/*
3621 	 * Kernel virtual.
3622 	 */
3623 
3624 	if (mapped) {
3625 		pt_entry_t *ptep;
3626 
3627 		/* Is the kernel pmap initialized? */
3628 		if (!CPU_EMPTY(&kernel_pmap->pm_active)) {
3629 			/* It's inside the virtual address range */
3630 			ptep = pmap_pte(kernel_pmap, va);
3631 			if (ptep) {
3632 				return (TLBLO_PTE_TO_PA(*ptep) |
3633 				    (va & PAGE_MASK));
3634 			}
3635 		}
3636 		return (0);
3637 	}
3638 
3639 	panic("%s for unknown address space %p.", __func__, (void *)va);
3640 }
3641 
3642 void
3643 pmap_flush_pvcache(vm_page_t m)
3644 {
3645 	pv_entry_t pv;
3646 
3647 	if (m != NULL) {
3648 		for (pv = TAILQ_FIRST(&m->md.pv_list); pv;
3649 		    pv = TAILQ_NEXT(pv, pv_list)) {
3650 			mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
3651 		}
3652 	}
3653 }
3654 
3655 void
3656 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
3657 {
3658 
3659 	/*
3660 	 * It appears that this function can only be called before any mappings
3661 	 * for the page are established.  If this ever changes, this code will
3662 	 * need to walk the pv_list and make each of the existing mappings
3663 	 * uncacheable, being careful to sync caches and PTEs (and maybe
3664 	 * invalidate TLB?) for any current mapping it modifies.
3665 	 */
3666 	if (TAILQ_FIRST(&m->md.pv_list) != NULL)
3667 		panic("Can't change memattr on page with existing mappings");
3668 
3669 	/* Clean memattr portion of pv_flags */
3670 	m->md.pv_flags &= ~PV_MEMATTR_MASK;
3671 	m->md.pv_flags |= (ma << PV_MEMATTR_SHIFT) & PV_MEMATTR_MASK;
3672 }
3673 
3674 static __inline void
3675 pmap_pte_attr(pt_entry_t *pte, vm_memattr_t ma)
3676 {
3677 	u_int npte;
3678 
3679 	npte = *(u_int *)pte;
3680 	npte &= ~PTE_C_MASK;
3681 	npte |= PTE_C(ma);
3682 	*pte = npte;
3683 }
3684 
3685 int
3686 pmap_change_attr(vm_offset_t sva, vm_size_t size, vm_memattr_t ma)
3687 {
3688 	pd_entry_t *pde, *pdpe;
3689 	pt_entry_t *pte;
3690 	vm_offset_t ova, eva, va, va_next;
3691 	pmap_t pmap;
3692 
3693 	ova = sva;
3694 	eva = sva + size;
3695 	if (eva < sva)
3696 		return (EINVAL);
3697 
3698 	pmap = kernel_pmap;
3699 	PMAP_LOCK(pmap);
3700 
3701 	for (; sva < eva; sva = va_next) {
3702 		pdpe = pmap_segmap(pmap, sva);
3703 #ifdef __mips_n64
3704 		if (*pdpe == 0) {
3705 			va_next = (sva + NBSEG) & ~SEGMASK;
3706 			if (va_next < sva)
3707 				va_next = eva;
3708 			continue;
3709 		}
3710 #endif
3711 		va_next = (sva + NBPDR) & ~PDRMASK;
3712 		if (va_next < sva)
3713 			va_next = eva;
3714 
3715 		pde = pmap_pdpe_to_pde(pdpe, sva);
3716 		if (*pde == NULL)
3717 			continue;
3718 
3719 		/*
3720 		 * Limit our scan to either the end of the va represented
3721 		 * by the current page table page, or to the end of the
3722 		 * range being removed.
3723 		 * range whose attributes are being changed.
3724 		if (va_next > eva)
3725 			va_next = eva;
3726 
3727 		va = va_next;
3728 		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
3729 		    sva += PAGE_SIZE) {
3730 			if (!pte_test(pte, PTE_V) || pte_cache_bits(pte) == ma) {
3731 				if (va != va_next) {
3732 					pmap_invalidate_range(pmap, va, sva);
3733 					va = va_next;
3734 				}
3735 				continue;
3736 			}
3737 			if (va == va_next)
3738 				va = sva;
3739 
3740 			pmap_pte_attr(pte, ma);
3741 		}
3742 		if (va != va_next)
3743 			pmap_invalidate_range(pmap, va, sva);
3744 	}
3745 	PMAP_UNLOCK(pmap);
3746 
3747 	/* Flush caches to be in the safe side */
3748 	mips_dcache_wbinv_range(ova, size);
3749 	return (0);
3750 }
3751 
3752 boolean_t
3753 pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
3754 {
3755 
3756 	switch (mode) {
3757 	case VM_MEMATTR_UNCACHEABLE:
3758 	case VM_MEMATTR_WRITE_BACK:
3759 #ifdef MIPS_CCA_WC
3760 	case VM_MEMATTR_WRITE_COMBINING:
3761 #endif
3762 		return (TRUE);
3763 	default:
3764 		return (FALSE);
3765 	}
3766 }
3767