/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/mm_types_task.h>

#include <linux/auxvec.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>

#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))

struct address_space;
struct mem_cgroup;
struct hmm;

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it. If you allocate the page using alloc_pages(), you
 * can use some of the space in struct page for your own purposes.
 *
 * Pages that were once in the page cache may be found under the RCU lock
 * even after they have been recycled to a different purpose.  The page
 * cache reads and writes some of the fields in struct page to pin the
 * page before checking that it's still in the page cache.  It is vital
 * that all users of struct page:
 * 1. Use the first word as PageFlags.
 * 2. Clear or preserve bit 0 of page->compound_head.  It is used as
 *    PageTail for compound pages, and the page cache must not see false
 *    positives.  Some users put a pointer here (guaranteed to be at least
 *    4-byte aligned), other users avoid using the field altogether.
 * 3. page->_refcount must either not be used, or must be used in such a
 *    way that other CPUs temporarily incrementing and then decrementing the
 *    refcount does not cause problems.  On receiving the page from
 *    alloc_pages(), the refcount will be positive.
 * 4. Either preserve page->_mapcount or restore it to -1 before freeing it.
 *
 * If you allocate pages of order > 0, you can use the fields in the struct
 * page associated with each page, but bear in mind that the pages may have
 * been inserted individually into the page cache, so you must use the above
 * four fields in a compatible way for each struct page.
 *
 * SLUB uses cmpxchg_double() to atomically update its freelist and
 * counters.  That requires that freelist & counters be adjacent and
 * double-word aligned.  We align all struct pages to double-word
 * boundaries, and ensure that 'freelist' is aligned within the
 * struct.
 */
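
/*
 * Illustrative only, not part of the API: a minimal sketch of the
 * "speculative reference" pattern that rule 3 above protects.  A lockless
 * lookup can race with the page being freed and reused, so a reference is
 * taken only if the refcount is already non-zero, and the result is then
 * re-validated:
 *
 *	struct page *page = lookup_without_locks();
 *
 *	if (!get_page_unless_zero(page))
 *		goto retry;
 *	if (!still_the_page_we_wanted(page)) {
 *		put_page(page);
 *		goto retry;
 *	}
 *	... use page ...
 *	put_page(page);
 *
 * get_page_unless_zero() and put_page() are the real helpers; the lookup
 * and re-check steps stand in for whatever structure (e.g. the page cache)
 * the caller is walking.
 */
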
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
#define _struct_page_alignment	__aligned(2 * sizeof(unsigned long))
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE)
#define _slub_counter_t		unsigned long
#else
#define _slub_counter_t		unsigned int
#endif
#else /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */
#define _struct_page_alignment
#define _slub_counter_t		unsigned int
#endif /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */

struct page {
	/* First double word block */
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	union {
		/* See page-flags.h for the definition of PAGE_MAPPING_FLAGS */
		struct address_space *mapping;

		void *s_mem;			/* slab first object */
		atomic_t compound_mapcount;	/* first tail page */
		/* page_deferred_list().next	 -- second tail page */
	};

	/* Second double word */
	union {
		pgoff_t index;		/* Our offset within mapping. */
		void *freelist;		/* sl[aou]b first free object */
		/* page_deferred_list().prev	-- second tail page */
	};

	union {
		_slub_counter_t counters;
		unsigned int active;		/* SLAB */
		struct {			/* SLUB */
			unsigned inuse:16;
			unsigned objects:15;
			unsigned frozen:1;
		};
		int units;			/* SLOB */

		struct {			/* Page cache */
			/*
			 * Count of ptes mapped in mms, to show when
			 * page is mapped & limit reverse map searches.
			 *
			 * Extra information about page type may be
			 * stored here for pages that are never mapped,
			 * in which case the value MUST BE <= -2.
			 * See page-flags.h for more details.
			 */
			atomic_t _mapcount;

			/*
			 * Usage count.  Do not update it directly; use
			 * the wrapper functions in page_ref.h for all
			 * manual accounting.
			 */
			atomic_t _refcount;
		};
	};

	/*
	 * WARNING: bit 0 of the first word encodes PageTail().  That means
	 * the other users of this storage space MUST NOT set bit 0, to
	 * avoid collisions and false-positive PageTail().
	 */
	union {
		struct list_head lru;	/* Pageout list, eg. active_list
					 * protected by zone_lru_lock !
					 * Can be used as a generic list
					 * by the page owner.
					 */
		struct dev_pagemap *pgmap; /* ZONE_DEVICE pages are never on an
					    * lru or handled by a slab
					    * allocator, this points to the
					    * hosting device page map.
					    */
		struct {		/* slub per cpu partial pages */
			struct page *next;	/* Next partial slab */
#ifdef CONFIG_64BIT
			int pages;	/* Nr of partial slabs left */
			int pobjects;	/* Approximate # of objects */
#else
			short int pages;
			short int pobjects;
#endif
		};

		struct rcu_head rcu_head;	/* Used by SLAB
						 * when destroying via RCU
						 */
		/* Tail pages of compound page */
		struct {
			unsigned long compound_head; /* If bit zero is set */

			/* First tail page only */
			unsigned char compound_dtor;
			unsigned char compound_order;
			/* two/six bytes available here */
		};
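
		/*
		 * Illustrative only: how the encoding above is consumed.
		 * compound_head() in page-flags.h is essentially:
		 *
		 *	unsigned long head = READ_ONCE(page->compound_head);
		 *
		 *	if (head & 1)
		 *		return (struct page *)(head - 1);
		 *	return page;
		 *
		 * A tail page stores a pointer to its head page with bit 0
		 * set, which is why rule 2 in the comment at the top of this
		 * file insists that no other user of this word sets bit 0.
		 */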

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
		struct {
			unsigned long __pad;	/* do not overlay pmd_huge_pte
						 * with compound_head to avoid
						 * possible bit 0 collision.
						 */
			pgtable_t pmd_huge_pte; /* protected by page->ptl */
		};
#endif
	};

	union {
		/*
		 * Mapping-private opaque data:
		 * Usually used for buffer_heads if PagePrivate
		 * Used for swp_entry_t if PageSwapCache
		 * Indicates order in the buddy system if PageBuddy
		 */
		unsigned long private;
#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
		spinlock_t *ptl;
#else
		spinlock_t ptl;
#endif
#endif
		struct kmem_cache *slab_cache;	/* SL[AU]B: Pointer to slab */
	};

#ifdef CONFIG_MEMCG
	struct mem_cgroup *mem_cgroup;
#endif

	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	int _last_cpupid;
#endif
} _struct_page_alignment;

#define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)

struct page_frag_cache {
	void * va;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
	__u16 offset;
	__u16 size;
#else
	__u32 offset;
#endif
	/* we maintain a pagecount bias, so that we don't dirty the cache
	 * line containing page->_refcount every time we allocate a fragment.
	 */
	unsigned int		pagecnt_bias;
	bool pfmemalloc;
};
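
/*
 * Illustrative only: the usual consumer pattern for the cache above,
 * assuming the struct page_frag_cache 'nc' is per-CPU or otherwise
 * serialized by the caller.  page_frag_alloc()/page_frag_free() are the
 * real helpers, declared in gfp.h:
 *
 *	void *buf = page_frag_alloc(&nc, size, GFP_ATOMIC);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	... fill and hand off buf ...
 *	page_frag_free(buf);
 *
 * The pagecnt_bias above lets most allocations touch only this struct
 * rather than page->_refcount of the backing page.
 */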

typedef unsigned long vm_flags_t;

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions.  These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
	struct rb_node	vm_rb;		/* link in global region tree */
	vm_flags_t	vm_flags;	/* VMA vm_flags */
	unsigned long	vm_start;	/* start address of region */
	unsigned long	vm_end;		/* region initialised to here */
	unsigned long	vm_top;		/* region allocated to here */
	unsigned long	vm_pgoff;	/* the offset in vm_file corresponding to vm_start */
	struct file	*vm_file;	/* the backing file or NULL */

	int		vm_usage;	/* region usage count (access under nommu_region_sem) */
	bool		vm_icache_flushed : 1; /* true if the icache has been flushed for
						* this region */
};

#ifdef CONFIG_USERFAULTFD
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
struct vm_userfaultfd_ctx {
	struct userfaultfd_ctx *ctx;
};
#else /* CONFIG_USERFAULTFD */
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */

/*
 * This struct describes a virtual memory area. There is one of these
 * per VM-area/task.  A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next, *vm_prev;

	struct rb_node vm_rb;

	/*
	 * Largest free memory gap in bytes to the left of this VMA.
	 * Either between this VMA and vma->vm_prev, or between one of the
	 * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
	 * get_unmapped_area find a free area of the right size.
	 */
	unsigned long rb_subtree_gap;
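
	/*
	 * Illustrative only: ignoring the stack guard gap handling, the
	 * per-VMA gap feeding the value above is roughly
	 *
	 *	gap = vma->vm_start - (vma->vm_prev ? vma->vm_prev->vm_end : 0);
	 *
	 * and rb_subtree_gap is the maximum of that quantity over this VMA
	 * and its entire rbtree subtree, maintained by the augmented rbtree
	 * callbacks in mm/mmap.c.
	 */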

	/* Second cache line starts here. */

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
	unsigned long vm_flags;		/* Flags, see mm.h. */

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.  A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_sem &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file * vm_file;		/* File we map to (can be NULL). */
	void * vm_private_data;		/* was vm_pte (shared mem) */

	atomic_long_t swap_readahead_info;
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;
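
/*
 * Illustrative only: the canonical way a reader finds the VMA covering an
 * address, holding mmap_sem for read around both the lookup and any use of
 * the returned pointer (find_vma() is declared in linux/mm.h):
 *
 *	struct vm_area_struct *vma;
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, addr);
 *	if (vma && vma->vm_start <= addr)
 *		... addr really lies inside this VMA ...
 *	up_read(&mm->mmap_sem);
 *
 * find_vma() returns the lowest VMA ending above addr, which may start
 * above addr, hence the vm_start check.
 */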

struct core_thread {
	struct task_struct *task;
	struct core_thread *next;
};

struct core_state {
	atomic_t nr_threads;
	struct core_thread dumper;
	struct completion startup;
};

struct kioctx_table;
struct mm_struct {
	struct vm_area_struct *mmap;		/* list of VMAs */
	struct rb_root mm_rb;
	u32 vmacache_seqnum;			/* per-thread vmacache */
#ifdef CONFIG_MMU
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
#endif
	unsigned long mmap_base;		/* base of mmap area */
	unsigned long mmap_legacy_base;		/* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
	/* Base addresses for compatible mmap() */
	unsigned long mmap_compat_base;
	unsigned long mmap_compat_legacy_base;
#endif
	unsigned long task_size;		/* size of task vm space */
	unsigned long highest_vm_end;		/* highest vma end address */
	pgd_t * pgd;

	/**
	 * @mm_users: The number of users including userspace.
	 *
	 * Use mmget()/mmget_not_zero()/mmput() to modify. When this drops
	 * to 0 (i.e. when the task exits and there are no other temporary
	 * reference holders), we also release a reference on @mm_count
	 * (which may then free the &struct mm_struct if @mm_count also
	 * drops to 0).
	 */
	atomic_t mm_users;

	/**
	 * @mm_count: The number of references to &struct mm_struct
	 * (@mm_users count as 1).
	 *
	 * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
	 * &struct mm_struct is freed.
	 */
	atomic_t mm_count;
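
	/*
	 * Illustrative only: a minimal sketch of how the two counters above
	 * are meant to be combined by code that keeps an mm_struct pointer
	 * across a sleep or across task exit:
	 *
	 *	mmgrab(mm);			pin the mm_struct itself
	 *	...
	 *	if (mmget_not_zero(mm)) {	pin the address space too
	 *		... use page tables / VMAs ...
	 *		mmput(mm);
	 *	}
	 *	...
	 *	mmdrop(mm);			may free the mm_struct
	 *
	 * mmgrab()/mmdrop() operate on @mm_count, mmget_not_zero()/mmput()
	 * on @mm_users; all are declared in linux/sched/mm.h.
	 */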

#ifdef CONFIG_MMU
	atomic_long_t pgtables_bytes;		/* PTE page table pages */
#endif
	int map_count;				/* number of VMAs */

	spinlock_t page_table_lock;		/* Protects page tables and some counters */
	struct rw_semaphore mmap_sem;

	struct list_head mmlist;		/* List of maybe swapped mm's.	These are globally strung
						 * together off init_mm.mmlist, and are protected
						 * by mmlist_lock
						 */

	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
	unsigned long hiwater_vm;	/* High-water virtual memory usage */

	unsigned long total_vm;		/* Total pages mapped */
	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
	unsigned long pinned_vm;	/* Refcount permanently increased */
	unsigned long data_vm;		/* VM_WRITE & ~VM_SHARED & ~VM_STACK */
	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE & ~VM_STACK */
	unsigned long stack_vm;		/* VM_STACK */
	unsigned long def_flags;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;

	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

	/*
	 * Special counters, in some configurations protected by the
	 * page_table_lock, in other configurations by being atomic.
	 */
	struct mm_rss_stat rss_stat;

	struct linux_binfmt *binfmt;

	cpumask_var_t cpu_vm_mask_var;

	/* Architecture-specific MM context */
	mm_context_t context;

	unsigned long flags; /* Must use atomic bitops to access the bits */

	struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_MEMBARRIER
	atomic_t membarrier_state;
#endif
#ifdef CONFIG_AIO
	spinlock_t			ioctx_lock;
	struct kioctx_table __rcu	*ioctx_table;
#endif
#ifdef CONFIG_MEMCG
	/*
	 * "owner" points to a task that is regarded as the canonical
	 * user/owner of this mm. All of the following must be true in
	 * order for it to be changed:
	 *
	 * current == mm->owner
	 * current->mm != mm
	 * new_owner->mm == mm
	 * new_owner->alloc_lock is held
	 */
	struct task_struct __rcu *owner;
#endif
	struct user_namespace *user_ns;

	/* store ref to file /proc/<pid>/exe symlink points to */
	struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
	struct cpumask cpumask_allocation;
#endif
#ifdef CONFIG_NUMA_BALANCING
	/*
	 * numa_next_scan is the next time that the PTEs will be marked
	 * pte_numa. NUMA hinting faults will gather statistics and migrate
	 * pages to new nodes if necessary.
	 */
	unsigned long numa_next_scan;

	/* Restart point for scanning and setting pte_numa */
	unsigned long numa_scan_offset;

	/* numa_scan_seq prevents two threads setting pte_numa */
	int numa_scan_seq;
#endif
	/*
	 * An operation with batched TLB flushing is going on. Anything that
	 * can move process memory needs to flush the TLB when moving a
	 * PROT_NONE or PROT_NUMA mapped page.
	 */
	atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
	/* See flush_tlb_batched_pending() */
	bool tlb_flush_batched;
#endif
	struct uprobes_state uprobes_state;
#ifdef CONFIG_HUGETLB_PAGE
	atomic_long_t hugetlb_usage;
#endif
	struct work_struct async_put_work;

#if IS_ENABLED(CONFIG_HMM)
	/* HMM needs to track a few things per mm */
	struct hmm *hmm;
#endif
} __randomize_layout;

extern struct mm_struct init_mm;

static inline void mm_init_cpumask(struct mm_struct *mm)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	mm->cpu_vm_mask_var = &mm->cpumask_allocation;
#endif
	cpumask_clear(mm->cpu_vm_mask_var);
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
	return mm->cpu_vm_mask_var;
}
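
/*
 * Illustrative only: architectures typically manipulate mm_cpumask() from
 * their switch_mm() and TLB shootdown paths, roughly along these lines
 * (the exact protocol is architecture-specific):
 *
 *	cpumask_set_cpu(cpu, mm_cpumask(next));		CPU starts using mm
 *	...
 *	send shootdown IPIs to the CPUs in mm_cpumask(mm)
 *	...
 *	cpumask_clear_cpu(cpu, mm_cpumask(mm));		CPU is done with mm
 *
 * cpumask_set_cpu()/cpumask_clear_cpu() are the generic helpers from
 * linux/cpumask.h.
 */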

struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
				unsigned long start, unsigned long end);
extern void tlb_finish_mmu(struct mmu_gather *tlb,
				unsigned long start, unsigned long end);
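
/*
 * Illustrative only: the pair above brackets a batched unmap, a sketch of
 * what callers such as unmap_region() do:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	... unmap_vmas() / free_pgtables() feed pages into the gather ...
 *	tlb_finish_mmu(&tlb, start, end);
 *
 * tlb_finish_mmu() performs the deferred TLB flush and frees the pages
 * accumulated along the way.
 */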

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);
	/*
	 * The only time this value is relevant is when there are indeed pages
	 * to flush. And we'll only flush pages after changing them, which
	 * requires the PTL.
	 *
	 * So the ordering here is:
	 *
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	...
	 *	set_pte_at();
	 *	spin_unlock(&ptl);
	 *
	 *				spin_lock(&ptl)
	 *				mm_tlb_flush_pending();
	 *				....
	 *				spin_unlock(&ptl);
	 *
	 *	flush_tlb_range();
	 *	atomic_dec(&mm->tlb_flush_pending);
	 *
	 * Where the increment is constrained by the PTL unlock, it thus
	 * ensures that the increment is visible if the PTE modification is
	 * visible. After all, if there is no PTE modification, nobody cares
	 * about TLB flushes either.
	 *
	 * This very much relies on users (mm_tlb_flush_pending() and
	 * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
	 * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
	 * locks (PPC) the unlock of one doesn't order against the lock of
	 * another PTL.
	 *
	 * The decrement is ordered by the flush_tlb_range(), such that
	 * mm_tlb_flush_pending() will not return false unless all flushes have
	 * completed.
	 */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * See inc_tlb_flush_pending().
	 *
	 * This cannot be smp_mb__before_atomic() because smp_mb() simply does
	 * not order against TLB invalidate completion, which is what we need.
	 *
	 * Therefore we must rely on tlb_flush_*() to guarantee order.
	 */
	atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * Must be called after having acquired the PTL; orders against that
	 * PTL's release and therefore ensures that if we observe the modified
	 * PTE we must also observe the increment from inc_tlb_flush_pending().
	 *
	 * That is, it only guarantees to return true if there is a flush
	 * pending for _this_ PTL.
	 */
	return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
	/*
	 * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
	 * for which there is a TLB flush pending in order to guarantee
	 * we've seen both that PTE modification and the increment.
	 *
	 * (no requirement on actually still holding the PTL, that is irrelevant)
	 */
	return atomic_read(&mm->tlb_flush_pending) > 1;
}
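
/*
 * Illustrative only: a sketch of how a fault-style path is expected to
 * consult mm_tlb_flush_pending() under the PTE lock, so that it does not
 * keep relying on a TLB entry that a concurrent mprotect()/munmap() is
 * about to flush (the recovery action is caller-specific):
 *
 *	spin_lock(ptl);
 *	entry = *pte;
 *	if (mm_tlb_flush_pending(mm))
 *		flush_tlb_page(vma, addr);
 *	...
 *	spin_unlock(ptl);
 *
 * The key point is the lock-then-check ordering documented above.
 */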

struct vm_fault;

struct vm_special_mapping {
	const char *name;	/* The name, e.g. "[vdso]". */

	/*
	 * If .fault is not provided, this points to a
	 * NULL-terminated array of pages that back the special mapping.
	 *
	 * This must not be NULL unless .fault is provided.
	 */
	struct page **pages;

	/*
	 * If non-NULL, then this is called to resolve page faults
	 * on the special mapping.  If used, .pages is not checked.
	 */
	int (*fault)(const struct vm_special_mapping *sm,
		     struct vm_area_struct *vma,
		     struct vm_fault *vmf);

	int (*mremap)(const struct vm_special_mapping *sm,
		     struct vm_area_struct *new_vma);
};
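
/*
 * Illustrative only: how an architecture-style user might wire this up for
 * a vdso-like mapping.  The my_vdso_* names are made up; the real helper is
 * _install_special_mapping(), declared in linux/mm.h:
 *
 *	static struct page *my_vdso_pages[2];	NULL-terminated, [0] filled in
 *
 *	static const struct vm_special_mapping my_vdso_mapping = {
 *		.name	= "[vdso]",
 *		.pages	= my_vdso_pages,
 *	};
 *
 *	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
 *				       VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
 *				       &my_vdso_mapping);
 *
 * A .fault handler can be supplied instead of .pages when the backing
 * pages cannot be enumerated up front.
 */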

enum tlb_flush_reason {
	TLB_FLUSH_ON_TASK_SWITCH,
	TLB_REMOTE_SHOOTDOWN,
	TLB_LOCAL_SHOOTDOWN,
	TLB_LOCAL_MM_SHOOTDOWN,
	TLB_REMOTE_SEND_IPI,
	NR_TLB_FLUSH_REASONS,
};

/*
 * A swap entry has to fit into an "unsigned long", as the entry is hidden
 * in the "index" field of the swapper address space.
 */
typedef struct {
	unsigned long val;
} swp_entry_t;
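
/*
 * Illustrative only: the value is built and taken apart with the helpers
 * in linux/swapops.h rather than open-coded:
 *
 *	swp_entry_t entry = swp_entry(type, offset);
 *
 *	type   = swp_type(entry);
 *	offset = swp_offset(entry);
 *
 * and converted to and from the page-cache (radix tree) representation
 * with swp_to_radix_entry()/radix_to_swp_entry() where needed.
 */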

#endif /* _LINUX_MM_TYPES_H */