/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/mm_types_task.h>

#include <linux/auxvec.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>

#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
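/*
 * Illustrative note (not from the original source): each ELF auxiliary
 * vector entry is an (id, value) pair, i.e. two unsigned longs, and the
 * vector is terminated by one extra AT_NULL entry.  Hence the size in
 * words is 2 * (arch entries + base entries + 1 terminator); for example,
 * on a configuration where AT_VECTOR_SIZE_BASE is 20 and
 * AT_VECTOR_SIZE_ARCH is 0 this gives a 42-word saved_auxv[] array.
 */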

typedef int vm_fault_t;

struct address_space;
struct mem_cgroup;
struct hmm;

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it. If you allocate the page using alloc_pages(), you
 * can use some of the space in struct page for your own purposes.
 *
 * Pages that were once in the page cache may be found under the RCU lock
 * even after they have been recycled to a different purpose.  The page
 * cache reads and writes some of the fields in struct page to pin the
 * page before checking that it's still in the page cache.  It is vital
 * that all users of struct page:
 * 1. Use the first word as PageFlags.
 * 2. Clear or preserve bit 0 of page->compound_head.  It is used as
 *    PageTail for compound pages, and the page cache must not see false
 *    positives.  Some users put a pointer here (guaranteed to be at least
 *    4-byte aligned), other users avoid using the field altogether.
 * 3. page->_refcount must either not be used, or must be used in such a
 *    way that other CPUs temporarily incrementing and then decrementing the
 *    refcount does not cause problems.  On receiving the page from
 *    alloc_pages(), the refcount will be positive.
 * 4. Either preserve page->_mapcount or restore it to -1 before freeing it.
 *
 * If you allocate pages of order > 0, you can use the fields in the struct
 * page associated with each page, but bear in mind that the pages may have
 * been inserted individually into the page cache, so you must use the above
 * four fields in a compatible way for each struct page.
 *
 * SLUB uses cmpxchg_double() to atomically update its freelist and
 * counters.  That requires that freelist & counters be adjacent and
 * double-word aligned.  We align all struct pages to double-word
 * boundaries, and ensure that 'freelist' is aligned within the
 * struct.
 */
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
#define _struct_page_alignment	__aligned(2 * sizeof(unsigned long))
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE)
#define _slub_counter_t		unsigned long
#else
#define _slub_counter_t		unsigned int
#endif
#else /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */
#define _struct_page_alignment
#define _slub_counter_t		unsigned int
#endif /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */
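
/*
 * Illustrative sketch (not part of the original header): rule 3 above is
 * what makes the page cache's speculative lookup pattern safe.  Roughly,
 * a lookup such as find_get_page() does the equivalent of:
 *
 *	rcu_read_lock();
 *	page = ...radix tree lookup in the mapping...;
 *	if (page && !page_cache_get_speculative(page))
 *		page = NULL;		-- raced with a free; caller retries
 *	if (page && page->mapping != mapping) {
 *		put_page(page);		-- page was recycled; drop our ref
 *		page = NULL;
 *	}
 *	rcu_read_unlock();
 *
 * The transient get/put is why any other user of struct page must tolerate
 * its _refcount being briefly raised and lowered by other CPUs.
 */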

struct page {
	/* First double word block */
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	union {
		/* See page-flags.h for the definition of PAGE_MAPPING_FLAGS */
		struct address_space *mapping;

		void *s_mem;			/* slab first object */
		atomic_t compound_mapcount;	/* first tail page */
		/* page_deferred_list().next	 -- second tail page */
	};

	/* Second double word */
	union {
		pgoff_t index;		/* Our offset within mapping. */
		void *freelist;		/* sl[aou]b first free object */
		/* page_deferred_list().prev	-- second tail page */
	};

	union {
		_slub_counter_t counters;
		unsigned int active;		/* SLAB */
		struct {			/* SLUB */
			unsigned inuse:16;
			unsigned objects:15;
			unsigned frozen:1;
		};
		int units;			/* SLOB */

		struct {			/* Page cache */
			/*
			 * Count of ptes mapped in mms, to show when
			 * page is mapped & limit reverse map searches.
			 *
			 * Extra information about page type may be
			 * stored here for pages that are never mapped,
			 * in which case the value MUST BE <= -2.
			 * See page-flags.h for more details.
			 */
			atomic_t _mapcount;

			/*
			 * Usage count. *USE WRAPPER FUNCTIONS* for any manual
			 * accounting; see page_ref.h.
			 */
			atomic_t _refcount;
		};
	};

	/*
	 * WARNING: bit 0 of the first word encodes PageTail(). That means
	 * the other users of this storage space MUST NOT use the bit, to
	 * avoid collisions and false-positive PageTail() results.
	 */
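	/*
	 * Illustrative sketch (not part of the original header): this is
	 * roughly how compound_head() in page-flags.h consumes the bit 0
	 * encoding mentioned above:
	 *
	 *	unsigned long head = READ_ONCE(page->compound_head);
	 *
	 *	if (head & 1)
	 *		return (struct page *)(head - 1);	-- tail: follow the head pointer
	 *	return page;					-- not a tail page
	 *
	 * Because the stored head pointer is at least 4-byte aligned, bit 0
	 * is free to serve as the PageTail() marker.
	 */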
	union {
		struct list_head lru;	/* Pageout list, eg. active_list
					 * protected by zone_lru_lock !
					 * Can be used as a generic list
					 * by the page owner.
					 */
		struct dev_pagemap *pgmap; /* ZONE_DEVICE pages are never on an
					    * lru or handled by a slab
					    * allocator, this points to the
					    * hosting device page map.
					    */
		struct {		/* slub per cpu partial pages */
			struct page *next;	/* Next partial slab */
#ifdef CONFIG_64BIT
			int pages;	/* Nr of partial slabs left */
			int pobjects;	/* Approximate # of objects */
#else
			short int pages;
			short int pobjects;
#endif
		};

		struct rcu_head rcu_head;	/* Used by SLAB
						 * when destroying via RCU
						 */
		/* Tail pages of compound page */
		struct {
			unsigned long compound_head; /* If bit zero is set */

			/* First tail page only */
			unsigned char compound_dtor;
			unsigned char compound_order;
			/* two/six bytes available here */
		};

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
		struct {
			unsigned long __pad;	/* do not overlay pmd_huge_pte
						 * with compound_head to avoid
						 * possible bit 0 collision.
						 */
			pgtable_t pmd_huge_pte; /* protected by page->ptl */
		};
#endif
	};

	union {
		/*
		 * Mapping-private opaque data:
		 * Usually used for buffer_heads if PagePrivate
		 * Used for swp_entry_t if PageSwapCache
		 * Indicates order in the buddy system if PageBuddy
		 */
		unsigned long private;
#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
		spinlock_t *ptl;
#else
		spinlock_t ptl;
#endif
#endif
		struct kmem_cache *slab_cache;	/* SL[AU]B: Pointer to slab */
	};

#ifdef CONFIG_MEMCG
	struct mem_cgroup *mem_cgroup;
#endif

	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	int _last_cpupid;
#endif
} _struct_page_alignment;

#define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)

struct page_frag_cache {
	void *va;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
	__u16 offset;
	__u16 size;
#else
	__u32 offset;
#endif
	/* we maintain a pagecount bias, so that we don't dirty the cache
	 * line containing page->_refcount every time we allocate a fragment.
	 */
	unsigned int		pagecnt_bias;
	bool pfmemalloc;
};
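
/*
 * Illustrative sketch (not part of the original header) of the pagecnt_bias
 * trick described above, loosely following what page_frag_alloc() in
 * mm/page_alloc.c does.  The allocator takes one large page reference per
 * refill and then only touches its private bias counter per fragment, so
 * the cache line holding page->_refcount stays clean on the fast path:
 *
 *	-- refill: one atomic bump covers many future fragments
 *	page_ref_add(page, size - 1);
 *	nc->pagecnt_bias = size;
 *
 *	-- per-fragment fast path: no atomics on struct page at all
 *	nc->offset -= fragsz;
 *	nc->pagecnt_bias--;
 *
 *	-- only when the page is exhausted is _refcount touched again,
 *	-- dropping whatever bias is left over.
 *
 * (The exact constants and the page-reuse logic differ in the real code.)
 */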

typedef unsigned long vm_flags_t;

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions.  These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
	struct rb_node	vm_rb;		/* link in global region tree */
	vm_flags_t	vm_flags;	/* VMA vm_flags */
	unsigned long	vm_start;	/* start address of region */
	unsigned long	vm_end;		/* region initialised to here */
	unsigned long	vm_top;		/* region allocated to here */
	unsigned long	vm_pgoff;	/* the offset in vm_file corresponding to vm_start */
	struct file	*vm_file;	/* the backing file or NULL */

	int		vm_usage;	/* region usage count (access under nommu_region_sem) */
	bool		vm_icache_flushed : 1; /* true if the icache has been flushed for
						* this region */
};

#ifdef CONFIG_USERFAULTFD
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
struct vm_userfaultfd_ctx {
	struct userfaultfd_ctx *ctx;
};
#else /* CONFIG_USERFAULTFD */
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */

/*
 * This struct defines a virtual memory area (VMA). There is one of these
 * per VM-area/task.  A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (i.e. a shared
 * library, the executable area, etc.).
 */
struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next, *vm_prev;

	struct rb_node vm_rb;

	/*
	 * Largest free memory gap in bytes to the left of this VMA.
	 * Either between this VMA and vma->vm_prev, or between one of the
	 * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
	 * get_unmapped_area find a free area of the right size.
	 */
	unsigned long rb_subtree_gap;

	/* Second cache line starts here. */

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
	unsigned long vm_flags;		/* Flags, see mm.h. */

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.	A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_sem &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file * vm_file;		/* File we map to (can be NULL). */
	void * vm_private_data;		/* was vm_pte (shared mem) */

	atomic_long_t swap_readahead_info;
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;
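
/*
 * Illustrative sketch (not part of the original header): in this kernel
 * generation the per-mm VMAs are reachable both as a sorted linked list
 * (mm->mmap, vma->vm_next/vm_prev) and as an rbtree (mm->mm_rb), and both
 * are protected by mm->mmap_sem.  A typical read-side walk looks like:
 *
 *	struct vm_area_struct *vma;
 *
 *	down_read(&mm->mmap_sem);
 *	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 *		-- inspect [vma->vm_start, vma->vm_end), vma->vm_flags, ...
 *	}
 *	up_read(&mm->mmap_sem);
 *
 * Lookups by address normally go through find_vma(), which searches mm_rb
 * rather than walking the list.
 */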

struct core_thread {
	struct task_struct *task;
	struct core_thread *next;
};

struct core_state {
	atomic_t nr_threads;
	struct core_thread dumper;
	struct completion startup;
};

struct kioctx_table;
struct mm_struct {
	struct vm_area_struct *mmap;		/* list of VMAs */
	struct rb_root mm_rb;
	u32 vmacache_seqnum;                   /* per-thread vmacache */
#ifdef CONFIG_MMU
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
#endif
	unsigned long mmap_base;		/* base of mmap area */
	unsigned long mmap_legacy_base;         /* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
	/* Base addresses for compatible mmap() */
	unsigned long mmap_compat_base;
	unsigned long mmap_compat_legacy_base;
#endif
	unsigned long task_size;		/* size of task vm space */
	unsigned long highest_vm_end;		/* highest vma end address */
	pgd_t * pgd;

	/**
	 * @mm_users: The number of users including userspace.
	 *
	 * Use mmget()/mmget_not_zero()/mmput() to modify. When this drops
	 * to 0 (i.e. when the task exits and there are no other temporary
	 * reference holders), we also release a reference on @mm_count
	 * (which may then free the &struct mm_struct if @mm_count also
	 * drops to 0).
	 */
	atomic_t mm_users;

	/**
	 * @mm_count: The number of references to &struct mm_struct
	 * (@mm_users counts as 1).
	 *
	 * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
	 * &struct mm_struct is freed.
	 */
	atomic_t mm_count;
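	/*
	 * Illustrative sketch (not part of the original header), assuming the
	 * helpers declared in linux/sched/mm.h:
	 *
	 *	-- pin the address space itself (page tables, VMAs):
	 *	if (mmget_not_zero(mm)) {
	 *		-- safe to operate on the user mappings here
	 *		mmput(mm);
	 *	}
	 *
	 *	-- only keep the struct mm_struct memory alive (no guarantee
	 *	-- the page tables or mappings still exist):
	 *	mmgrab(mm);
	 *	...
	 *	mmdrop(mm);
	 */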

#ifdef CONFIG_MMU
	atomic_long_t pgtables_bytes;		/* PTE page table pages */
#endif
	int map_count;				/* number of VMAs */

	spinlock_t page_table_lock;		/* Protects page tables and some counters */
	struct rw_semaphore mmap_sem;

	struct list_head mmlist;		/* List of maybe swapped mm's.	These are globally strung
						 * together off init_mm.mmlist, and are protected
						 * by mmlist_lock
						 */


	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
	unsigned long hiwater_vm;	/* High-water virtual memory usage */

	unsigned long total_vm;		/* Total pages mapped */
	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
	unsigned long pinned_vm;	/* Refcount permanently increased */
	unsigned long data_vm;		/* VM_WRITE & ~VM_SHARED & ~VM_STACK */
	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE & ~VM_STACK */
	unsigned long stack_vm;		/* VM_STACK */
	unsigned long def_flags;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;

	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

	/*
	 * Special counters, in some configurations protected by the
	 * page_table_lock, in other configurations by being atomic.
	 */
	struct mm_rss_stat rss_stat;

	struct linux_binfmt *binfmt;

	cpumask_var_t cpu_vm_mask_var;

	/* Architecture-specific MM context */
	mm_context_t context;

	unsigned long flags; /* Must use atomic bitops to access the bits */

	struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_MEMBARRIER
	atomic_t membarrier_state;
#endif
#ifdef CONFIG_AIO
	spinlock_t			ioctx_lock;
	struct kioctx_table __rcu	*ioctx_table;
#endif
#ifdef CONFIG_MEMCG
	/*
	 * "owner" points to a task that is regarded as the canonical
	 * user/owner of this mm. All of the following must be true in
	 * order for it to be changed:
	 *
	 * current == mm->owner
	 * current->mm != mm
	 * new_owner->mm == mm
	 * new_owner->alloc_lock is held
	 */
	struct task_struct __rcu *owner;
#endif
	struct user_namespace *user_ns;

	/* store ref to file /proc/<pid>/exe symlink points to */
	struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
	struct cpumask cpumask_allocation;
#endif
#ifdef CONFIG_NUMA_BALANCING
	/*
	 * numa_next_scan is the next time that the PTEs will be marked
	 * pte_numa. NUMA hinting faults will gather statistics and migrate
	 * pages to new nodes if necessary.
	 */
	unsigned long numa_next_scan;

	/* Restart point for scanning and setting pte_numa */
	unsigned long numa_scan_offset;

	/* numa_scan_seq prevents two threads setting pte_numa */
	int numa_scan_seq;
#endif
	/*
	 * An operation with batched TLB flushing is going on. Anything that
	 * can move process memory needs to flush the TLB when moving a
	 * PROT_NONE or PROT_NUMA mapped page.
	 */
	atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
	/* See flush_tlb_batched_pending() */
	bool tlb_flush_batched;
#endif
	struct uprobes_state uprobes_state;
#ifdef CONFIG_HUGETLB_PAGE
	atomic_long_t hugetlb_usage;
#endif
	struct work_struct async_put_work;

#if IS_ENABLED(CONFIG_HMM)
	/* HMM needs to track a few things per mm */
	struct hmm *hmm;
#endif
} __randomize_layout;

extern struct mm_struct init_mm;

static inline void mm_init_cpumask(struct mm_struct *mm)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	mm->cpu_vm_mask_var = &mm->cpumask_allocation;
#endif
	cpumask_clear(mm->cpu_vm_mask_var);
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
	return mm->cpu_vm_mask_var;
}
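
/*
 * Illustrative sketch (not part of the original header): mm_cpumask() tracks
 * which CPUs may hold state (e.g. TLB entries) for this mm.  Architecture
 * context-switch and TLB-shootdown code typically uses it along the lines of:
 *
 *	cpumask_set_cpu(cpu, mm_cpumask(next_mm));	-- on switch-in
 *	...
 *	flush_tlb_others(mm_cpumask(mm), ...);		-- limit shootdown IPIs
 *	...
 *	cpumask_clear_cpu(cpu, mm_cpumask(mm));		-- when done with it
 *
 * The exact calls and when bits are cleared are architecture-specific.
 */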

struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
				unsigned long start, unsigned long end);
extern void tlb_finish_mmu(struct mmu_gather *tlb,
				unsigned long start, unsigned long end);
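
/*
 * Illustrative sketch (not part of the original header): callers that tear
 * down a range of user mappings bracket the work with these two helpers,
 * roughly what zap_page_range() in mm/memory.c does:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	-- unmap the PTEs in [start, end), accumulating pages in the gather
 *	unmap_vmas(&tlb, vma, start, end);
 *	tlb_finish_mmu(&tlb, start, end);	-- flush TLBs and free the pages
 */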

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);
	/*
	 * The only time this value is relevant is when there are indeed pages
	 * to flush. And we'll only flush pages after changing them, which
	 * requires the PTL.
	 *
	 * So the ordering here is:
	 *
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	...
	 *	set_pte_at();
	 *	spin_unlock(&ptl);
	 *
	 *				spin_lock(&ptl)
	 *				mm_tlb_flush_pending();
	 *				....
	 *				spin_unlock(&ptl);
	 *
	 *	flush_tlb_range();
	 *	atomic_dec(&mm->tlb_flush_pending);
	 *
	 * Since the increment is constrained by the PTL unlock, it is
	 * guaranteed to be visible if the PTE modification is visible.
	 * After all, if there is no PTE modification, nobody cares
	 * about TLB flushes either.
	 *
	 * This very much relies on users (mm_tlb_flush_pending() and
	 * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
	 * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
	 * locks (PPC) the unlock of one doesn't order against the lock of
	 * another PTL.
	 *
	 * The decrement is ordered by the flush_tlb_range(), such that
	 * mm_tlb_flush_pending() will not return false unless all flushes have
	 * completed.
	 */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * See inc_tlb_flush_pending().
	 *
	 * This cannot be smp_mb__before_atomic() because smp_mb() simply does
	 * not order against TLB invalidate completion, which is what we need.
	 *
	 * Therefore we must rely on tlb_flush_*() to guarantee order.
	 */
	atomic_dec(&mm->tlb_flush_pending);
}
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * Must be called after having acquired the PTL; orders against that
	 * PTL's release and therefore ensures that if we observe the modified
	 * PTE we must also observe the increment from inc_tlb_flush_pending().
	 *
	 * That is, it only guarantees to return true if there is a flush
	 * pending for _this_ PTL.
	 */
	return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
	/*
	 * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
	 * for which there is a TLB flush pending in order to guarantee
	 * we've seen both that PTE modification and the increment.
	 *
	 * (no requirement on actually still holding the PTL, that is irrelevant)
	 */
	return atomic_read(&mm->tlb_flush_pending) > 1;
}

struct vm_fault;

struct vm_special_mapping {
	const char *name;	/* The name, e.g. "[vdso]". */

	/*
	 * If .fault is not provided, this points to a
	 * NULL-terminated array of pages that back the special mapping.
	 *
	 * This must not be NULL unless .fault is provided.
	 */
	struct page **pages;

	/*
	 * If non-NULL, then this is called to resolve page faults
	 * on the special mapping.  If used, .pages is not checked.
	 */
	int (*fault)(const struct vm_special_mapping *sm,
		     struct vm_area_struct *vma,
		     struct vm_fault *vmf);

	int (*mremap)(const struct vm_special_mapping *sm,
		     struct vm_area_struct *new_vma);
};
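
/*
 * Illustrative sketch (not part of the original header): architectures pair
 * this with _install_special_mapping() from mm/mmap.c to map things like the
 * vDSO.  Roughly, using hypothetical placeholder names:
 *
 *	static struct page *vdso_pages[2];		-- placeholder page array
 *
 *	static const struct vm_special_mapping vdso_mapping = {
 *		.name	= "[vdso]",
 *		.pages	= vdso_pages,			-- NULL-terminated in real users
 *	};
 *
 *	vma = _install_special_mapping(mm, addr, len,
 *				       VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
 *				       &vdso_mapping);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 * See the per-architecture vDSO setup code for real examples.
 */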

enum tlb_flush_reason {
	TLB_FLUSH_ON_TASK_SWITCH,
	TLB_REMOTE_SHOOTDOWN,
	TLB_LOCAL_SHOOTDOWN,
	TLB_LOCAL_MM_SHOOTDOWN,
	TLB_REMOTE_SEND_IPI,
	NR_TLB_FLUSH_REASONS,
};

 /*
  * A swap entry has to fit into an "unsigned long", as the entry is hidden
  * in the "index" field of the swapper address space.
  */
typedef struct {
	unsigned long val;
} swp_entry_t;
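
/*
 * Illustrative sketch (not part of the original header): the helpers in
 * linux/swapops.h pack a swap type and an offset into this single word:
 *
 *	swp_entry_t entry = swp_entry(type, offset);	-- build an entry
 *	unsigned int type = swp_type(entry);		-- which swap device
 *	pgoff_t offset = swp_offset(entry);		-- slot within it
 *
 * so the whole entry fits in page->private (for swap-cache pages) or in a
 * non-present PTE.
 */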

#endif /* _LINUX_MM_TYPES_H */