#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/auxvec.h>
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/page-debug-flags.h>
#include <linux/uprobes.h>
#include <asm/page.h>
#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))

struct address_space;

#define USE_SPLIT_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * The objects in struct page are organized in double word blocks in
 * order to allow us to use atomic double word operations on portions
 * of struct page. That is currently only used by slub but the arrangement
 * allows the use of atomic double word operations on the flags/mapping
 * and lru list pointers also.
 */
struct page {
	/* First double word block */
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	struct address_space *mapping;	/* If low bit clear, points to
					 * inode address_space, or NULL.
					 * If page mapped as anonymous
					 * memory, low bit is set, and
					 * it points to anon_vma object:
					 * see PAGE_MAPPING_ANON below.
					 */
	/* Second double word */
	struct {
		union {
			pgoff_t index;		/* Our offset within mapping. */
			void *freelist;		/* slub/slob first free object */
			bool pfmemalloc;	/* If set by the page allocator,
						 * ALLOC_NO_WATERMARKS was set
						 * and the low watermark was not
						 * met, implying that the system
						 * is under some pressure. The
						 * caller should try to ensure
						 * this page is only used to
						 * free other pages.
						 */
		};

		union {
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
	defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
			/* Used for cmpxchg_double in slub */
			unsigned long counters;
#else
			/*
			 * Keep _count separate from slub cmpxchg_double data,
			 * as the rest of the double word is protected by
			 * slab_lock but _count is not.
			 */
			unsigned counters;
#endif

			struct {

				union {
					/*
					 * Count of ptes mapped in
					 * mms, to show when page is
					 * mapped & limit reverse map
					 * searches.
					 *
					 * Used also for tail pages
					 * refcounting instead of
					 * _count. Tail pages cannot
					 * be mapped and keeping the
					 * tail page _count zero at
					 * all times guarantees
					 * get_page_unless_zero() will
					 * never succeed on tail
					 * pages.
					 */
					atomic_t _mapcount;

					struct { /* SLUB */
						unsigned inuse:16;
						unsigned objects:15;
						unsigned frozen:1;
					};
					int units;	/* SLOB */
				};
				atomic_t _count;	/* Usage count, see below. */
			};
		};
	};

	/* Third double word block */
	union {
		struct list_head lru;	/* Pageout list, eg. active_list
					 * protected by zone->lru_lock !
					 */
		struct {		/* slub per cpu partial pages */
			struct page *next;	/* Next partial slab */
#ifdef CONFIG_64BIT
			int pages;	/* Nr of partial slabs left */
			int pobjects;	/* Approximate # of objects */
#else
			short int pages;
			short int pobjects;
#endif
		};

		struct list_head list;	/* slobs list of pages */
		struct {		/* slab fields */
			struct kmem_cache *slab_cache;
			struct slab *slab_page;
		};
	};

	/* Remainder is not double word aligned */
	union {
		unsigned long private;		/* Mapping-private opaque data:
						 * usually used for buffer_heads
						 * if PagePrivate set; used for
						 * swp_entry_t if PageSwapCache;
						 * indicates order in the buddy
						 * system if PG_buddy is set.
						 */
#if USE_SPLIT_PTLOCKS
		spinlock_t ptl;
#endif
		struct kmem_cache *slab;	/* SLUB: Pointer to slab */
		struct page *first_page;	/* Compound tail pages */
	};

	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
	unsigned long debug_flags;	/* Use atomic bitops on this */
#endif

#ifdef CONFIG_KMEMCHECK
	/*
	 * kmemcheck wants to track the status of each byte in a page; this
	 * is a pointer to such a status block. NULL if not tracked.
	 */
	void *shadow;
#endif

#ifdef CONFIG_NUMA_BALANCING
	int _last_nid;
#endif
}
/*
 * The struct page can be forced to be double word aligned so that atomic ops
 * on double words work. The SLUB allocator can make use of such a feature.
 */
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
	__aligned(2 * sizeof(unsigned long))
#endif
;
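/*
 * Example (an illustrative sketch, not part of this header): because
 * tail pages keep _count at zero, a speculative reference can only
 * succeed on a head or order-0 page.  get_page_unless_zero() and
 * put_page() are the real interfaces from <linux/mm.h>; the helper
 * name below is invented purely for this example:
 *
 *	static inline struct page *try_grab_page(struct page *page)
 *	{
 *		if (!get_page_unless_zero(page))
 *			return NULL;	// free page or tail page
 *		return page;		// caller must put_page() later
 *	}
 */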
struct page_frag {
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 offset;
	__u32 size;
#else
	__u16 offset;
	__u16 size;
#endif
};

typedef unsigned long __nocast vm_flags_t;

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions. These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
	struct rb_node	vm_rb;		/* link in global region tree */
	vm_flags_t	vm_flags;	/* VMA vm_flags */
	unsigned long	vm_start;	/* start address of region */
	unsigned long	vm_end;		/* region initialised to here */
	unsigned long	vm_top;		/* region allocated to here */
	unsigned long	vm_pgoff;	/* the offset in vm_file corresponding
					 * to vm_start */
	struct file	*vm_file;	/* the backing file or NULL */

	int		vm_usage;	/* region usage count (access under
					 * nommu_region_sem) */
	bool		vm_icache_flushed : 1; /* true if the icache has been
						* flushed for this region */
};
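/*
 * Example (an illustrative sketch, not part of this header): a
 * page_frag tracks a sub-allocation within a page.  The 16-bit fields
 * above suffice only when offsets can never exceed 65535, hence the
 * wider types on 64-bit kernels or with 64KB+ pages.  A hypothetical
 * consumer might initialise one like this (frag_init() is an invented
 * name for the sketch):
 *
 *	static void frag_init(struct page_frag *pf, struct page *page)
 *	{
 *		pf->page   = page;
 *		pf->offset = 0;
 *		pf->size   = PAGE_SIZE;
 *	}
 */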
/*
 * This struct defines a virtual memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next, *vm_prev;

	struct rb_node vm_rb;

	/*
	 * Largest free memory gap in bytes to the left of this VMA.
	 * Either between this VMA and vma->vm_prev, or between one of the
	 * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
	 * get_unmapped_area find a free area of the right size.
	 */
	unsigned long rb_subtree_gap;

	/* Second cache line starts here. */

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
	unsigned long vm_flags;		/* Flags, see mm.h. */

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree, or
	 * linkage of vma in the address_space->i_mmap_nonlinear list.
	 */
	union {
		struct {
			struct rb_node rb;
			unsigned long rb_subtree_last;
		} linear;
		struct list_head nonlinear;
	} shared;

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.  A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_sem &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units, *not* PAGE_CACHE_SIZE */
	struct file *vm_file;		/* File we map to (can be NULL). */
	void *vm_private_data;		/* was vm_pte (shared mem) */

#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
};
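/*
 * Example (an illustrative sketch, not part of this header): VMAs are
 * looked up through the mm's rbtree, while vm_next/vm_prev give
 * in-order walks.  A typical lookup takes mmap_sem for reading;
 * find_vma(), down_read() and up_read() are the real interfaces, the
 * helper below is invented for the sketch:
 *
 *	static bool addr_is_mapped(struct mm_struct *mm, unsigned long addr)
 *	{
 *		struct vm_area_struct *vma;
 *		bool mapped = false;
 *
 *		down_read(&mm->mmap_sem);
 *		vma = find_vma(mm, addr);   // first vma with vm_end > addr
 *		if (vma && vma->vm_start <= addr)
 *			mapped = true;
 *		up_read(&mm->mmap_sem);
 *		return mapped;
 *	}
 */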
struct core_thread {
	struct task_struct *task;
	struct core_thread *next;
};

struct core_state {
	atomic_t nr_threads;
	struct core_thread dumper;
	struct completion startup;
};

enum {
	MM_FILEPAGES,
	MM_ANONPAGES,
	MM_SWAPENTS,
	NR_MM_COUNTERS
};

#if USE_SPLIT_PTLOCKS && defined(CONFIG_MMU)
#define SPLIT_RSS_COUNTING
/* per-thread cached information */
struct task_rss_stat {
	int events;	/* for synchronization threshold */
	int count[NR_MM_COUNTERS];
};
#endif /* USE_SPLIT_PTLOCKS && CONFIG_MMU */

struct mm_rss_stat {
	atomic_long_t count[NR_MM_COUNTERS];
};

struct mm_struct {
	struct vm_area_struct *mmap;		/* list of VMAs */
	struct rb_root mm_rb;
	struct vm_area_struct *mmap_cache;	/* last find_vma result */
#ifdef CONFIG_MMU
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
	void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
#endif
	unsigned long mmap_base;	/* base of mmap area */
	unsigned long task_size;	/* size of task vm space */
	unsigned long cached_hole_size;	/* if non-zero, the largest hole below
					 * free_area_cache */
	unsigned long free_area_cache;	/* first hole of size cached_hole_size
					 * or larger */
	unsigned long highest_vm_end;	/* highest vma end address */
	pgd_t *pgd;
	atomic_t mm_users;		/* How many users with user space? */
	atomic_t mm_count;		/* How many references to "struct
					 * mm_struct" (users count as 1) */
	int map_count;			/* number of VMAs */

	spinlock_t page_table_lock;	/* Protects page tables and some counters */
	struct rw_semaphore mmap_sem;

	struct list_head mmlist;	/* List of maybe swapped mm's.	These are
					 * globally strung together off
					 * init_mm.mmlist, and are protected
					 * by mmlist_lock
					 */

	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
	unsigned long hiwater_vm;	/* High-water virtual memory usage */

	unsigned long total_vm;		/* Total pages mapped */
	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
	unsigned long pinned_vm;	/* Refcount permanently increased */
	unsigned long shared_vm;	/* Shared pages (files) */
	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE */
	unsigned long stack_vm;		/* VM_GROWSUP/DOWN */
	unsigned long def_flags;
	unsigned long nr_ptes;		/* Page table pages */
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;

	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

	/*
	 * Special counters, in some configurations protected by the
	 * page_table_lock, in other configurations by being atomic.
	 */
	struct mm_rss_stat rss_stat;

	struct linux_binfmt *binfmt;

	cpumask_var_t cpu_vm_mask_var;

	/* Architecture-specific MM context */
	mm_context_t context;

	unsigned long flags; /* Must use atomic bitops to access the bits */

	struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_AIO
	spinlock_t ioctx_lock;
	struct hlist_head ioctx_list;
#endif
#ifdef CONFIG_MM_OWNER
	/*
	 * "owner" points to a task that is regarded as the canonical
	 * user/owner of this mm. All of the following must be true in
	 * order for it to be changed:
	 *
	 * current == mm->owner
	 * current->mm != mm
	 * new_owner->mm == mm
	 * new_owner->alloc_lock is held
	 */
	struct task_struct __rcu *owner;
#endif

	/* store ref to file /proc/<pid>/exe symlink points to */
	struct file *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
	struct cpumask cpumask_allocation;
#endif
#ifdef CONFIG_NUMA_BALANCING
	/*
	 * numa_next_scan is the next time when the PTEs will be marked
	 * pte_numa to gather statistics and migrate pages to new nodes
	 * if necessary.
	 */
	unsigned long numa_next_scan;

	/* numa_next_reset is when the PTE scanner period will be reset */
	unsigned long numa_next_reset;

	/* Restart point for scanning and setting pte_numa */
	unsigned long numa_scan_offset;

	/* numa_scan_seq prevents two threads setting pte_numa */
	int numa_scan_seq;

	/*
	 * The first node a task was scheduled on. If a task later runs
	 * on a different node, the PTE scanner is kicked so that scanning
	 * starts immediately.
	 */
	int first_nid;
#endif
	struct uprobes_state uprobes_state;
};

/* first nid will either be a valid NID or one of these values */
#define NUMA_PTE_SCAN_INIT	-1
#define NUMA_PTE_SCAN_ACTIVE	-2
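/*
 * Example (an illustrative sketch, not part of this header): mm_users
 * counts users of the address space itself, while mm_count pins only
 * the struct mm_struct (all mm_users together hold a single mm_count
 * reference).  get_task_mm(), mmput() and mmdrop() are the real
 * interfaces; the sketch shows the usual pairing:
 *
 *	struct mm_struct *mm = get_task_mm(task);   // bumps mm_users
 *	if (mm) {
 *		// ... walk mm->mmap under down_read(&mm->mmap_sem) ...
 *		mmput(mm);	// drops mm_users; may free the mm
 *	}
 *
 * Code that only needs the structure, not the page tables, takes
 * atomic_inc(&mm->mm_count) and later calls mmdrop(mm) instead.
 */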
static inline void mm_init_cpumask(struct mm_struct *mm)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	mm->cpu_vm_mask_var = &mm->cpumask_allocation;
#endif
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
	return mm->cpu_vm_mask_var;
}

#endif /* _LINUX_MM_TYPES_H */
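/*
 * Example (an illustrative sketch, not part of this header):
 * mm_cpumask() hides whether the mask lives inline in the mm or, with
 * CONFIG_CPUMASK_OFFSTACK, in cpumask_allocation.  TLB flush code uses
 * it roughly like this (cpumask_set_cpu(), cpumask_test_cpu() and
 * smp_processor_id() are the real interfaces; flush_tlb_on() is a
 * hypothetical flush call for the sketch):
 *
 *	// mark this CPU as having run the mm
 *	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
 *
 *	// later, target only CPUs that ever ran this mm
 *	if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
 *		flush_tlb_on(cpu);
 */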