/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 2003 Peter Wemm
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vmparam.h	5.9 (Berkeley) 5/12/91
 * $FreeBSD$
 */

#ifndef _MACHINE_VMPARAM_H_
#define	_MACHINE_VMPARAM_H_ 1

/*
 * Machine dependent constants for AMD64.
 */

/*
 * Virtual memory related constants, all in bytes
 */
#define	MAXTSIZ		(32768UL*1024*1024)	/* max text size */
#ifndef DFLDSIZ
#define	DFLDSIZ		(32768UL*1024*1024)	/* initial data size limit */
#endif
#ifndef MAXDSIZ
#define	MAXDSIZ		(32768UL*1024*1024)	/* max data size */
#endif
#ifndef DFLSSIZ
#define	DFLSSIZ		(8UL*1024*1024)		/* initial stack size limit */
#endif
#ifndef MAXSSIZ
#define	MAXSSIZ		(512UL*1024*1024)	/* max stack size */
#endif
#ifndef SGROWSIZ
#define	SGROWSIZ	(128UL*1024)		/* amount to grow stack */
#endif

#ifndef FSTACK
/*
 * We provide a machine specific single page allocator through the use
 * of the direct mapped segment.  This uses 2MB pages for reduced
 * TLB pressure.
 */
#define	UMA_MD_SMALL_ALLOC
#endif

/*
 * The physical address space is densely populated.
 */
#define	VM_PHYSSEG_DENSE

/*
 * The number of PHYSSEG entries must be one greater than the number
 * of phys_avail entries because the phys_avail entry that spans the
 * largest physical address that is accessible by ISA DMA is split
 * into two PHYSSEG entries.
 */
#define	VM_PHYSSEG_MAX		63

/*
 * Create two free page pools: VM_FREEPOOL_DEFAULT is the default pool
 * from which physical pages are allocated and VM_FREEPOOL_DIRECT is
 * the pool from which physical pages for page tables and small UMA
 * objects are allocated.
 */
#define	VM_NFREEPOOL		2
#define	VM_FREEPOOL_DEFAULT	0
#define	VM_FREEPOOL_DIRECT	1

/*
 * Create up to three free page lists: VM_FREELIST_DMA32 is for physical
 * pages that have physical addresses below 4G but are not accessible by
 * ISA DMA, and VM_FREELIST_LOWMEM is for physical pages that are
 * accessible by ISA DMA (i.e., below VM_LOWMEM_BOUNDARY).
 */
#define	VM_NFREELIST		3
#define	VM_FREELIST_DEFAULT	0
#define	VM_FREELIST_DMA32	1
#define	VM_FREELIST_LOWMEM	2

#define	VM_LOWMEM_BOUNDARY	(16 << 20)	/* 16MB ISA DMA limit */

/*
 * Create the DMA32 free list only if the number of physical pages above
 * physical address 4G is at least 16M, which amounts to 64GB of physical
 * memory.
 */
#define	VM_DMA32_NPAGES_THRESHOLD	16777216
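
/*
 * Editor's illustrative sketch, not part of the upstream header: the
 * threshold above is expressed in 4KB base pages, so 16777216 pages above
 * the 4G boundary is 16777216 * 4096 bytes = 64GB, matching the comment.
 * A guarded compile-time restatement of that arithmetic, assuming a
 * 4096-byte base page:
 */
#if 0	/* illustrative only */
_Static_assert(VM_DMA32_NPAGES_THRESHOLD * 4096ULL ==
    64ULL * 1024 * 1024 * 1024,
    "DMA32 threshold corresponds to 64GB of 4KB pages");
#endif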

/*
 * An allocation size of 16MB is supported in order to optimize the
 * use of the direct map by UMA.  Specifically, a cache line contains
 * at most 8 PDEs, collectively mapping 16MB of physical memory.  By
 * reducing the number of distinct 16MB "pages" that are used by UMA,
 * the physical memory allocator reduces the likelihood of both 2MB
 * page TLB misses and cache misses caused by 2MB page TLB misses.
 */
#define	VM_NFREEORDER		13

/*
 * Enable superpage reservations: 1 level.
 */
#ifndef	VM_NRESERVLEVEL
#define	VM_NRESERVLEVEL		1
#endif

/*
 * Level 0 reservations consist of 512 pages.
 */
#ifndef	VM_LEVEL_0_ORDER
#define	VM_LEVEL_0_ORDER	9
#endif
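
/*
 * Worked example (editor's addition, not from the upstream header),
 * assuming the usual 4096-byte base page: a level 0 reservation of
 * 2^VM_LEVEL_0_ORDER = 2^9 = 512 pages covers 512 * 4KB = 2MB, i.e. one
 * superpage, and the largest buddy queue, of order VM_NFREEORDER - 1 = 12,
 * holds contiguous runs of 2^12 pages = 16MB, i.e. the eight PDE-mapped
 * 2MB regions that share one cache line of PDEs as described above.
 */
#if 0	/* illustrative only; 4096-byte base pages assumed */
_Static_assert((1UL << VM_LEVEL_0_ORDER) * 4096 == 2 * 1024 * 1024,
    "a level 0 reservation spans one 2MB superpage");
_Static_assert((1UL << (VM_NFREEORDER - 1)) * 4096 == 16 * 1024 * 1024,
    "the largest free page run spans 16MB");
#endif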

#ifdef	SMP
#define	PA_LOCK_COUNT	256
#endif

/*
 * Kernel physical load address.  Needs to be aligned at 2MB superpage
 * boundary.
 */
#ifndef KERNLOAD
#define	KERNLOAD	0x200000
#endif

/*
 * Virtual addresses of things.  Derived from the page directory and
 * page table indexes from pmap.h for precision.
 *
 * 0x0000000000000000 - 0x00007fffffffffff   user map
 * 0x0000800000000000 - 0xffff7fffffffffff   does not exist (hole)
 * 0xffff800000000000 - 0xffff804020100fff   recursive page table (512GB slot)
 * 0xffff804020100fff - 0xffff807fffffffff   unused
 * 0xffff808000000000 - 0xffff847fffffffff   large map (can be tuned up)
 * 0xffff848000000000 - 0xfffff7ffffffffff   unused (large map extends there)
 * 0xfffff80000000000 - 0xfffffbffffffffff   4TB direct map
 * 0xfffffc0000000000 - 0xfffffdffffffffff   unused
 * 0xfffffe0000000000 - 0xffffffffffffffff   2TB kernel map
 *
 * Within the kernel map:
 *
 * 0xfffffe0000000000                        vm_page_array
 * 0xffffffff80000000                        KERNBASE
 */

#define	VM_MIN_KERNEL_ADDRESS	KV4ADDR(KPML4BASE, 0, 0, 0)
#define	VM_MAX_KERNEL_ADDRESS	KV4ADDR(KPML4BASE + NKPML4E - 1, \
					NPDPEPG-1, NPDEPG-1, NPTEPG-1)

#define	DMAP_MIN_ADDRESS	KV4ADDR(DMPML4I, 0, 0, 0)
#define	DMAP_MAX_ADDRESS	KV4ADDR(DMPML4I + NDMPML4E, 0, 0, 0)

#define	LARGEMAP_MIN_ADDRESS	KV4ADDR(LMSPML4I, 0, 0, 0)
#define	LARGEMAP_MAX_ADDRESS	KV4ADDR(LMEPML4I + 1, 0, 0, 0)

#define	KERNBASE		KV4ADDR(KPML4I, KPDPI, 0, 0)

#define	UPT_MAX_ADDRESS		KV4ADDR(PML4PML4I, PML4PML4I, PML4PML4I, PML4PML4I)
#define	UPT_MIN_ADDRESS		KV4ADDR(PML4PML4I, 0, 0, 0)

#define	VM_MAXUSER_ADDRESS_LA57	UVADDR(NUPML5E, 0, 0, 0, 0)
#define	VM_MAXUSER_ADDRESS_LA48	UVADDR(0, NUP4ML4E, 0, 0, 0)
#define	VM_MAXUSER_ADDRESS	VM_MAXUSER_ADDRESS_LA57

#define	SHAREDPAGE_LA57		(VM_MAXUSER_ADDRESS_LA57 - PAGE_SIZE)
#define	SHAREDPAGE_LA48		(VM_MAXUSER_ADDRESS_LA48 - PAGE_SIZE)
#define	USRSTACK_LA57		SHAREDPAGE_LA57
#define	USRSTACK_LA48		SHAREDPAGE_LA48
#define	USRSTACK		USRSTACK_LA48
#define	PS_STRINGS_LA57		(USRSTACK_LA57 - sizeof(struct ps_strings))
#define	PS_STRINGS_LA48		(USRSTACK_LA48 - sizeof(struct ps_strings))

#define	VM_MAX_ADDRESS		UPT_MAX_ADDRESS
#define	VM_MIN_ADDRESS		(0)
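
/*
 * Illustrative note (editor's addition, not from the upstream header):
 * with 4-level paging (LA48) the user map ends at 0x0000800000000000, as
 * shown in the layout above, so SHAREDPAGE_LA48 (and hence USRSTACK_LA48)
 * is the base address of the last LA48 user page.  A guarded restatement
 * of that arithmetic, assuming a 4096-byte PAGE_SIZE:
 */
#if 0	/* illustrative only */
_Static_assert(0x0000800000000000UL - 4096 == 0x00007ffffffff000UL,
    "the LA48 shared page sits on the last user page");
#endif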

/*
 * XXX Allowing dmaplimit == 0 is a temporary workaround for vt(4) efifb's
 * early use of PHYS_TO_DMAP before the mapping is actually set up.  This
 * works because the result is not actually accessed until later, but the
 * early vt fb startup needs to be reworked.
 */
#define	PMAP_HAS_DMAP	1
#define	PHYS_TO_DMAP(x)	({						\
	KASSERT(dmaplimit == 0 || (x) < dmaplimit,			\
	    ("physical address %#jx not covered by the DMAP",		\
	    (uintmax_t)x));						\
	(x) | DMAP_MIN_ADDRESS; })

#define	DMAP_TO_PHYS(x)	({						\
	KASSERT((x) < (DMAP_MIN_ADDRESS + dmaplimit) &&			\
	    (x) >= DMAP_MIN_ADDRESS,					\
	    ("virtual address %#jx not covered by the DMAP",		\
	    (uintmax_t)x));						\
	(x) & ~DMAP_MIN_ADDRESS; })

/*
 * amd64 maps the page array into KVA so that it can be more easily
 * allocated on the correct memory domains.
 */
#define	PMAP_HAS_PAGE_ARRAY	1

/*
 * How many physical pages per kmem arena virtual page.
 */
#ifndef VM_KMEM_SIZE_SCALE
#define	VM_KMEM_SIZE_SCALE	(1)
#endif

/*
 * Optional ceiling (in bytes) on the size of the kmem arena: 60% of the
 * kernel map.
 */
#ifndef VM_KMEM_SIZE_MAX
#define	VM_KMEM_SIZE_MAX	((VM_MAX_KERNEL_ADDRESS - \
    VM_MIN_KERNEL_ADDRESS + 1) * 3 / 5)
#endif

/* initial pagein size of beginning of executable file */
#ifndef	VM_INITIAL_PAGEIN
#define	VM_INITIAL_PAGEIN	16
#endif

#define	ZERO_REGION_SIZE	(2 * 1024 * 1024)	/* 2MB */

/*
 * Use a fairly large batch size since we expect amd64 systems to have lots of
 * memory.
 */
#define	VM_BATCHQUEUE_SIZE	31

/*
 * The pmap can create non-transparent large page mappings.
 */
#define	PMAP_HAS_LARGEPAGES	1

/*
 * Need a page dump array for minidump.
 */
#define	MINIDUMP_PAGE_TRACKING	1

#endif /* _MACHINE_VMPARAM_H_ */
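
/*
 * Usage sketch (editor's addition, not part of the upstream header): kernel
 * code holding a physical address that falls inside the direct map can
 * obtain a usable kernel virtual address without creating a new mapping,
 * e.g.
 *
 *	vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
 *	void *va = (void *)PHYS_TO_DMAP(pa);
 *	...
 *	KASSERT(DMAP_TO_PHYS((vm_offset_t)va) == pa, ("DMAP round trip"));
 *
 * Because the DMAP occupies a fixed PML4-aligned window, the conversion is
 * a simple OR/AND with DMAP_MIN_ADDRESS, as the macros above show.
 */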