xref: /linux-6.15/include/linux/mm_types.h (revision bb7e5ce7)
1 #ifndef _LINUX_MM_TYPES_H
2 #define _LINUX_MM_TYPES_H
3 
4 #include <linux/mm_types_task.h>
5 
6 #include <linux/auxvec.h>
7 #include <linux/list.h>
8 #include <linux/spinlock.h>
9 #include <linux/rbtree.h>
10 #include <linux/rwsem.h>
11 #include <linux/completion.h>
12 #include <linux/cpumask.h>
13 #include <linux/uprobes.h>
14 #include <linux/page-flags-layout.h>
15 #include <linux/workqueue.h>
16 
17 #include <asm/mmu.h>
18 
19 #ifndef AT_VECTOR_SIZE_ARCH
20 #define AT_VECTOR_SIZE_ARCH 0
21 #endif
22 #define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
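
/*
 * As a sketch of the arithmetic: each auxv entry is an AT_* key plus its
 * value, i.e. two unsigned longs, and one extra entry is reserved for the
 * AT_NULL terminator.  Assuming AT_VECTOR_SIZE_BASE is 20, as in the
 * <linux/auxvec.h> of this era, and no arch extras, this comes to
 * 2 * (0 + 20 + 1) = 42 unsigned longs.
 */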
23 
24 struct address_space;
25 struct mem_cgroup;
26 struct hmm;
27 
28 /*
29  * Each physical page in the system has a struct page associated with
30  * it to keep track of whatever it is we are using the page for at the
31  * moment. Note that we have no way to track which tasks are using
32  * a page, though if it is a pagecache page, rmap structures can tell us
33  * who is mapping it.
34  *
35  * The objects in struct page are organized in double word blocks in
36  * order to allow us to use atomic double word operations on portions
37  * of struct page. That is currently only used by slub, but the arrangement
38  * allows the use of atomic double word operations on the flags/mapping
39  * and lru list pointers also.
40  */
41 struct page {
42 	/* First double word block */
43 	unsigned long flags;		/* Atomic flags, some possibly
44 					 * updated asynchronously */
45 	union {
46 		struct address_space *mapping;	/* If low bit clear, points to
47 						 * inode address_space, or NULL.
48 						 * If page mapped as anonymous
49 						 * memory, low bit is set, and
50 						 * it points to anon_vma object:
51 						 * see PAGE_MAPPING_ANON below.
52 						 */
53 		void *s_mem;			/* slab first object */
54 		atomic_t compound_mapcount;	/* first tail page */
55 		/* page_deferred_list().next	 -- second tail page */
56 	};
57 
58 	/* Second double word */
59 	union {
60 		pgoff_t index;		/* Our offset within mapping. */
61 		void *freelist;		/* sl[aou]b first free object */
62 		/* page_deferred_list().prev	-- second tail page */
63 	};
64 
65 	union {
66 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
67 	defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
68 		/* Used for cmpxchg_double in slub */
69 		unsigned long counters;
70 #else
71 		/*
72 		 * Keep _refcount separate from slub cmpxchg_double data.
73 		 * The rest of the double word is protected by slab_lock,
74 		 * but _refcount is not.
75 		 */
76 		unsigned counters;
77 #endif
78 		struct {
79 
80 			union {
81 				/*
82 				 * Count of ptes mapped in mms, to show when
83 				 * page is mapped & limit reverse map searches.
84 				 *
85 				 * Extra information about page type may be
86 				 * stored here for pages that are never mapped,
87 				 * in which case the value MUST BE <= -2.
88 				 * See page-flags.h for more details.
89 				 */
90 				atomic_t _mapcount;
91 
92 				unsigned int active;		/* SLAB */
93 				struct {			/* SLUB */
94 					unsigned inuse:16;
95 					unsigned objects:15;
96 					unsigned frozen:1;
97 				};
98 				int units;			/* SLOB */
99 			};
100 			/*
101 			 * Usage count. *USE THE WRAPPER FUNCTIONS* in
102 			 * page_ref.h when doing manual accounting.
103 			 */
104 			atomic_t _refcount;
105 		};
106 	};
107 
108 	/*
109 	 * Third double word block
110 	 *
111 	 * WARNING: bit 0 of the first word encodes PageTail(). That means
112 	 * the other users of this storage space MUST NOT use that bit, to
113 	 * avoid collisions and false-positive PageTail() results.
114 	 */
115 	union {
116 		struct list_head lru;	/* Pageout list, eg. active_list
117 					 * protected by zone_lru_lock !
118 					 * Can be used as a generic list
119 					 * by the page owner.
120 					 */
121 		struct dev_pagemap *pgmap; /* ZONE_DEVICE pages are never on an
122 					    * lru or handled by a slab
123 					    * allocator, this points to the
124 					    * hosting device page map.
125 					    */
126 		struct {		/* slub per cpu partial pages */
127 			struct page *next;	/* Next partial slab */
128 #ifdef CONFIG_64BIT
129 			int pages;	/* Nr of partial slabs left */
130 			int pobjects;	/* Approximate # of objects */
131 #else
132 			short int pages;
133 			short int pobjects;
134 #endif
135 		};
136 
137 		struct rcu_head rcu_head;	/* Used by SLAB
138 						 * when destroying via RCU
139 						 */
140 		/* Tail pages of compound page */
141 		struct {
142 			unsigned long compound_head; /* If bit zero is set */
143 
144 			/* First tail page only */
145 #ifdef CONFIG_64BIT
146 			/*
147 			 * On 64 bit systems we have enough space in struct page
148 			 * to encode compound_dtor and compound_order with an
149 			 * unsigned int. That can help the compiler generate
150 			 * better or smaller code on some architectures.
151 			 */
152 			unsigned int compound_dtor;
153 			unsigned int compound_order;
154 #else
155 			unsigned short int compound_dtor;
156 			unsigned short int compound_order;
157 #endif
158 		};
159 
160 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
161 		struct {
162 			unsigned long __pad;	/* do not overlay pmd_huge_pte
163 						 * with compound_head to avoid
164 						 * possible bit 0 collision.
165 						 */
166 			pgtable_t pmd_huge_pte; /* protected by page->ptl */
167 		};
168 #endif
169 	};
170 
171 	/* Remainder is not double word aligned */
172 	union {
173 		unsigned long private;		/* Mapping-private opaque data:
174 					 	 * usually used for buffer_heads
175 						 * if PagePrivate set; used for
176 						 * swp_entry_t if PageSwapCache;
177 						 * indicates order in the buddy
178 						 * system if PG_buddy is set.
179 						 */
180 #if USE_SPLIT_PTE_PTLOCKS
181 #if ALLOC_SPLIT_PTLOCKS
182 		spinlock_t *ptl;
183 #else
184 		spinlock_t ptl;
185 #endif
186 #endif
187 		struct kmem_cache *slab_cache;	/* SL[AU]B: Pointer to slab */
188 	};
189 
190 #ifdef CONFIG_MEMCG
191 	struct mem_cgroup *mem_cgroup;
192 #endif
193 
194 	/*
195 	 * On machines where all RAM is mapped into kernel address space,
196 	 * we can simply calculate the virtual address. On machines with
197 	 * highmem some memory is mapped into kernel virtual memory
198 	 * dynamically, so we need a place to store that address.
199 	 * Note that this field could be 16 bits on x86 ... ;)
200 	 *
201 	 * Architectures with slow multiplication can define
202 	 * WANT_PAGE_VIRTUAL in asm/page.h
203 	 */
204 #if defined(WANT_PAGE_VIRTUAL)
205 	void *virtual;			/* Kernel virtual address (NULL if
206 					   not kmapped, ie. highmem) */
207 #endif /* WANT_PAGE_VIRTUAL */
208 
209 #ifdef CONFIG_KMEMCHECK
210 	/*
211 	 * kmemcheck wants to track the status of each byte in a page; this
212 	 * is a pointer to such a status block. NULL if not tracked.
213 	 */
214 	void *shadow;
215 #endif
216 
217 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
218 	int _last_cpupid;
219 #endif
220 }
221 /*
222  * The struct page can be forced to be double word aligned so that atomic ops
223  * on double words work. The SLUB allocator can make use of such a feature.
224  */
225 #ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
226 	__aligned(2 * sizeof(unsigned long))
227 #endif
228 ;
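
/*
 * A minimal sketch of how the compound_head encoding above is consumed,
 * mirroring compound_head() in <linux/page-flags.h>: if bit 0 is set, the
 * remaining bits are a pointer to the head page.  The function name here
 * is hypothetical.
 */
static inline struct page *sketch_compound_head(struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (head & 1)			/* tail page: decode the head pointer */
		return (struct page *)(head - 1);
	return page;			/* not a tail page */
}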
229 
230 #define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
231 #define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)
232 
233 struct page_frag_cache {
234 	void * va;
235 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
236 	__u16 offset;
237 	__u16 size;
238 #else
239 	__u32 offset;
240 #endif
241 	/* we maintain a pagecount bias, so that we don't dirty the cache
242 	 * line containing page->_refcount every time we allocate a fragment.
243 	 */
244 	unsigned int		pagecnt_bias;
245 	bool pfmemalloc;
246 };
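
/*
 * A sketch (not the real allocator) of the fast path that the pagecnt_bias
 * comment above describes: fragments are carved downwards out of the cached
 * page and only the local bias is decremented, leaving page->_refcount
 * untouched until refill time.  The name and the bail-out are hypothetical;
 * see page_frag_alloc() for the real thing.
 */
static inline void *sketch_frag_alloc(struct page_frag_cache *nc,
				      unsigned int fragsz)
{
	if (nc->offset < fragsz)
		return NULL;		/* caller must refill the cache */

	nc->offset -= fragsz;
	nc->pagecnt_bias--;		/* local count, no atomic _refcount op */
	return nc->va + nc->offset;
}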
247 
248 typedef unsigned long vm_flags_t;
249 
250 /*
251  * A region containing a mapping of a non-memory backed file under NOMMU
252  * conditions.  These are held in a global tree and are pinned by the VMAs that
253  * map parts of them.
254  */
255 struct vm_region {
256 	struct rb_node	vm_rb;		/* link in global region tree */
257 	vm_flags_t	vm_flags;	/* VMA vm_flags */
258 	unsigned long	vm_start;	/* start address of region */
259 	unsigned long	vm_end;		/* region initialised to here */
260 	unsigned long	vm_top;		/* region allocated to here */
261 	unsigned long	vm_pgoff;	/* the offset in vm_file corresponding to vm_start */
262 	struct file	*vm_file;	/* the backing file or NULL */
263 
264 	int		vm_usage;	/* region usage count (access under nommu_region_sem) */
265 	bool		vm_icache_flushed : 1; /* true if the icache has been flushed for
266 						* this region */
267 };
268 
269 #ifdef CONFIG_USERFAULTFD
270 #define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
271 struct vm_userfaultfd_ctx {
272 	struct userfaultfd_ctx *ctx;
273 };
274 #else /* CONFIG_USERFAULTFD */
275 #define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
276 struct vm_userfaultfd_ctx {};
277 #endif /* CONFIG_USERFAULTFD */
278 
279 /*
280  * This struct defines a virtual memory area. There is one of these
281  * per VM-area/task.  A VM area is any part of the process virtual memory
282  * space that has a special rule for the page-fault handlers (i.e. a shared
283  * library, the executable area etc).
284  */
285 struct vm_area_struct {
286 	/* The first cache line has the info for VMA tree walking. */
287 
288 	unsigned long vm_start;		/* Our start address within vm_mm. */
289 	unsigned long vm_end;		/* The first byte after our end address
290 					   within vm_mm. */
291 
292 	/* linked list of VM areas per task, sorted by address */
293 	struct vm_area_struct *vm_next, *vm_prev;
294 
295 	struct rb_node vm_rb;
296 
297 	/*
298 	 * Largest free memory gap in bytes to the left of this VMA.
299 	 * Either between this VMA and vma->vm_prev, or between one of the
300 	 * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
301 	 * get_unmapped_area find a free area of the right size.
302 	 */
303 	unsigned long rb_subtree_gap;
304 
305 	/* Second cache line starts here. */
306 
307 	struct mm_struct *vm_mm;	/* The address space we belong to. */
308 	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
309 	unsigned long vm_flags;		/* Flags, see mm.h. */
310 
311 	/*
312 	 * For areas with an address space and backing store,
313 	 * linkage into the address_space->i_mmap interval tree.
314 	 */
315 	struct {
316 		struct rb_node rb;
317 		unsigned long rb_subtree_last;
318 	} shared;
319 
320 	/*
321 	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
322 	 * list, after a COW of one of the file pages.	A MAP_SHARED vma
323 	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
324 	 * or brk vma (with NULL file) can only be in an anon_vma list.
325 	 */
326 	struct list_head anon_vma_chain; /* Serialized by mmap_sem &
327 					  * page_table_lock */
328 	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */
329 
330 	/* Function pointers to deal with this struct. */
331 	const struct vm_operations_struct *vm_ops;
332 
333 	/* Information about our backing store: */
334 	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
335 					   units */
336 	struct file * vm_file;		/* File we map to (can be NULL). */
337 	void * vm_private_data;		/* was vm_pte (shared mem) */
338 
339 	atomic_long_t swap_readahead_info;
340 #ifndef CONFIG_MMU
341 	struct vm_region *vm_region;	/* NOMMU mapping region */
342 #endif
343 #ifdef CONFIG_NUMA
344 	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
345 #endif
346 	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
347 } __randomize_layout;
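
/*
 * A sketch of the per-VMA gap that feeds rb_subtree_gap above, along the
 * lines of vma_compute_subtree_gap() in mm/mmap.c: the gap below a VMA is
 * the distance from the previous VMA's end (or from address zero).  The
 * rbtree code then propagates the maximum of these values up each subtree.
 * The function name is hypothetical.
 */
static inline unsigned long sketch_vma_gap(struct vm_area_struct *vma)
{
	unsigned long gap = vma->vm_start;

	if (vma->vm_prev)
		gap -= vma->vm_prev->vm_end;	/* free bytes below this VMA */
	return gap;
}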
348 
349 struct core_thread {
350 	struct task_struct *task;
351 	struct core_thread *next;
352 };
353 
354 struct core_state {
355 	atomic_t nr_threads;
356 	struct core_thread dumper;
357 	struct completion startup;
358 };
359 
360 struct kioctx_table;
361 struct mm_struct {
362 	struct vm_area_struct *mmap;		/* list of VMAs */
363 	struct rb_root mm_rb;
364 	u32 vmacache_seqnum;                   /* seqnum to invalidate per-thread vmacaches */
365 #ifdef CONFIG_MMU
366 	unsigned long (*get_unmapped_area) (struct file *filp,
367 				unsigned long addr, unsigned long len,
368 				unsigned long pgoff, unsigned long flags);
369 #endif
370 	unsigned long mmap_base;		/* base of mmap area */
371 	unsigned long mmap_legacy_base;         /* base of mmap area in bottom-up allocations */
372 #ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
373 	/* Base addresses for compatible mmap() */
374 	unsigned long mmap_compat_base;
375 	unsigned long mmap_compat_legacy_base;
376 #endif
377 	unsigned long task_size;		/* size of task vm space */
378 	unsigned long highest_vm_end;		/* highest vma end address */
379 	pgd_t * pgd;
380 
381 	/**
382 	 * @mm_users: The number of users including userspace.
383 	 *
384 	 * Use mmget()/mmget_not_zero()/mmput() to modify. When this drops
385 	 * to 0 (i.e. when the task exits and there are no other temporary
386 	 * reference holders), we also release a reference on @mm_count
387 	 * (which may then free the &struct mm_struct if @mm_count also
388 	 * drops to 0).
389 	 */
390 	atomic_t mm_users;
391 
392 	/**
393 	 * @mm_count: The number of references to &struct mm_struct
394 	 * (@mm_users counts as 1).
395 	 *
396 	 * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
397 	 * &struct mm_struct is freed.
398 	 */
399 	atomic_t mm_count;
400 
401 	atomic_long_t nr_ptes;			/* PTE page table pages */
402 #if CONFIG_PGTABLE_LEVELS > 2
403 	atomic_long_t nr_pmds;			/* PMD page table pages */
404 #endif
405 	int map_count;				/* number of VMAs */
406 
407 	spinlock_t page_table_lock;		/* Protects page tables and some counters */
408 	struct rw_semaphore mmap_sem;
409 
410 	struct list_head mmlist;		/* List of maybe swapped mm's.	These are globally strung
411 						 * together off init_mm.mmlist, and are protected
412 						 * by mmlist_lock
413 						 */
414 
415 
416 	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
417 	unsigned long hiwater_vm;	/* High-water virtual memory usage */
418 
419 	unsigned long total_vm;		/* Total pages mapped */
420 	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
421 	unsigned long pinned_vm;	/* Refcount permanently increased */
422 	unsigned long data_vm;		/* VM_WRITE & ~VM_SHARED & ~VM_STACK */
423 	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE & ~VM_STACK */
424 	unsigned long stack_vm;		/* VM_STACK */
425 	unsigned long def_flags;
426 	unsigned long start_code, end_code, start_data, end_data;
427 	unsigned long start_brk, brk, start_stack;
428 	unsigned long arg_start, arg_end, env_start, env_end;
429 
430 	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
431 
432 	/*
433 	 * Special counters, in some configurations protected by the
434 	 * page_table_lock, in other configurations by being atomic.
435 	 */
436 	struct mm_rss_stat rss_stat;
437 
438 	struct linux_binfmt *binfmt;
439 
440 	cpumask_var_t cpu_vm_mask_var;
441 
442 	/* Architecture-specific MM context */
443 	mm_context_t context;
444 
445 	unsigned long flags; /* Must use atomic bitops to access the bits */
446 
447 	struct core_state *core_state; /* coredumping support */
448 #ifdef CONFIG_AIO
449 	spinlock_t			ioctx_lock;
450 	struct kioctx_table __rcu	*ioctx_table;
451 #endif
452 #ifdef CONFIG_MEMCG
453 	/*
454 	 * "owner" points to a task that is regarded as the canonical
455 	 * user/owner of this mm. All of the following must be true in
456 	 * order for it to be changed:
457 	 *
458 	 * current == mm->owner
459 	 * current->mm != mm
460 	 * new_owner->mm == mm
461 	 * new_owner->alloc_lock is held
462 	 */
463 	struct task_struct __rcu *owner;
464 #endif
465 	struct user_namespace *user_ns;
466 
467 	/* store ref to file /proc/<pid>/exe symlink points to */
468 	struct file __rcu *exe_file;
469 #ifdef CONFIG_MMU_NOTIFIER
470 	struct mmu_notifier_mm *mmu_notifier_mm;
471 #endif
472 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
473 	pgtable_t pmd_huge_pte; /* protected by page_table_lock */
474 #endif
475 #ifdef CONFIG_CPUMASK_OFFSTACK
476 	struct cpumask cpumask_allocation;
477 #endif
478 #ifdef CONFIG_NUMA_BALANCING
479 	/*
480 	 * numa_next_scan is the next time that the PTEs will be marked
481 	 * pte_numa. NUMA hinting faults will gather statistics and migrate
482 	 * pages to new nodes if necessary.
483 	 */
484 	unsigned long numa_next_scan;
485 
486 	/* Restart point for scanning and setting pte_numa */
487 	unsigned long numa_scan_offset;
488 
489 	/* numa_scan_seq prevents two threads setting pte_numa */
490 	int numa_scan_seq;
491 #endif
492 	/*
493 	 * An operation with batched TLB flushing is going on. Anything that
494 	 * can move process memory needs to flush the TLB when moving a
495 	 * PROT_NONE or PROT_NUMA mapped page.
496 	 */
497 	atomic_t tlb_flush_pending;
498 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
499 	/* See flush_tlb_batched_pending() */
500 	bool tlb_flush_batched;
501 #endif
502 	struct uprobes_state uprobes_state;
503 #ifdef CONFIG_HUGETLB_PAGE
504 	atomic_long_t hugetlb_usage;
505 #endif
506 	struct work_struct async_put_work;
507 
508 #if IS_ENABLED(CONFIG_HMM)
509 	/* HMM needs to track a few things per mm */
510 	struct hmm *hmm;
511 #endif
512 } __randomize_layout;
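
/*
 * A sketch of the mm_users/mm_count discipline documented above: take an
 * mm_users reference so the address space stays alive while we look at it,
 * and hold mmap_sem so the VMA list stays stable.  The function name is
 * hypothetical; get_task_mm() and mmput() are the real helpers.
 */
static inline void sketch_inspect_mm(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);	/* mm_users++, or NULL */

	if (!mm)
		return;
	down_read(&mm->mmap_sem);
	/* ... walk mm->mmap or mm->mm_rb here ... */
	up_read(&mm->mmap_sem);
	mmput(mm);					/* mm_users-- */
}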
513 
514 extern struct mm_struct init_mm;
515 
516 static inline void mm_init_cpumask(struct mm_struct *mm)
517 {
518 #ifdef CONFIG_CPUMASK_OFFSTACK
519 	mm->cpu_vm_mask_var = &mm->cpumask_allocation;
520 #endif
521 	cpumask_clear(mm->cpu_vm_mask_var);
522 }
523 
524 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
525 static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
526 {
527 	return mm->cpu_vm_mask_var;
528 }
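
/*
 * A sketch of a typical mm_cpumask() consumer: TLB shootdown code tests
 * the mask to decide which CPUs may still cache translations for this mm.
 * The function name is hypothetical; see the arch flush_tlb_* code for
 * real users.
 */
static inline bool sketch_cpu_may_cache_mm(struct mm_struct *mm, int cpu)
{
	return cpumask_test_cpu(cpu, mm_cpumask(mm));
}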
529 
530 struct mmu_gather;
531 extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
532 				unsigned long start, unsigned long end);
533 extern void tlb_finish_mmu(struct mmu_gather *tlb,
534 				unsigned long start, unsigned long end);
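
/*
 * A sketch of the mmu_gather lifecycle around the two declarations above,
 * as used by the unmap paths in mm/memory.c.  Note this is illustrative
 * only: struct mmu_gather is merely forward-declared here, so the full
 * type (from <asm/tlb.h>) is needed to actually compile this.
 */
static inline void sketch_unmap_range(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);
	/* ... clear PTEs, queueing their pages on the gather ... */
	tlb_finish_mmu(&tlb, start, end);	/* flush TLBs, then free pages */
}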
535 
536 static inline void init_tlb_flush_pending(struct mm_struct *mm)
537 {
538 	atomic_set(&mm->tlb_flush_pending, 0);
539 }
540 
541 static inline void inc_tlb_flush_pending(struct mm_struct *mm)
542 {
543 	atomic_inc(&mm->tlb_flush_pending);
544 	/*
545 	 * The only time this value is relevant is when there are indeed pages
546 	 * to flush. And we'll only flush pages after changing them, which
547 	 * requires the PTL.
548 	 *
549 	 * So the ordering here is:
550 	 *
551 	 *	atomic_inc(&mm->tlb_flush_pending);
552 	 *	spin_lock(&ptl);
553 	 *	...
554 	 *	set_pte_at();
555 	 *	spin_unlock(&ptl);
556 	 *
557 	 *				spin_lock(&ptl)
558 	 *				mm_tlb_flush_pending();
559 	 *				....
560 	 *				spin_unlock(&ptl);
561 	 *
562 	 *	flush_tlb_range();
563 	 *	atomic_dec(&mm->tlb_flush_pending);
564 	 *
565 	 * Since the increment is constrained by the PTL unlock, this
566 	 * ensures that the increment is visible if the PTE modification is
567 	 * visible. After all, if there is no PTE modification, nobody cares
568 	 * about TLB flushes either.
569 	 *
570 	 * This very much relies on users (mm_tlb_flush_pending() and
571 	 * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
572 	 * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
573 	 * locks (PPC) the unlock of one doesn't order against the lock of
574 	 * another PTL.
575 	 *
576 	 * The decrement is ordered by the flush_tlb_range(), such that
577 	 * mm_tlb_flush_pending() will not return false unless all flushes have
578 	 * completed.
579 	 */
580 }
581 
582 static inline void dec_tlb_flush_pending(struct mm_struct *mm)
583 {
584 	/*
585 	 * See inc_tlb_flush_pending().
586 	 *
587 	 * This cannot be smp_mb__before_atomic() because smp_mb() simply does
588 	 * not order against TLB invalidate completion, which is what we need.
589 	 *
590 	 * Therefore we must rely on tlb_flush_*() to guarantee order.
591 	 */
592 	atomic_dec(&mm->tlb_flush_pending);
593 }
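
/*
 * A sketch of the ordering pattern that the comment in
 * inc_tlb_flush_pending() spells out: publish the pending count, change
 * PTEs under the PTL, flush, then retire the count.  The function name
 * and the choice of flush_tlb_mm() are illustrative only.
 */
static inline void sketch_batched_pte_change(struct mm_struct *mm,
					     spinlock_t *ptl)
{
	inc_tlb_flush_pending(mm);	/* made visible by the PTL unlock */
	spin_lock(ptl);
	/* ... modify PTEs here ... */
	spin_unlock(ptl);
	flush_tlb_mm(mm);		/* flush before dropping the count */
	dec_tlb_flush_pending(mm);	/* ordered after the flush */
}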
594 
595 static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
596 {
597 	/*
598 	 * Must be called after having acquired the PTL; orders against that
599 	 * PTL's release and therefore ensures that if we observe the modified
600 	 * PTE we must also observe the increment from inc_tlb_flush_pending().
601 	 *
602 	 * That is, it only guarantees to return true if there is a flush
603 	 * pending for _this_ PTL.
604 	 */
605 	return atomic_read(&mm->tlb_flush_pending);
606 }
607 
608 static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
609 {
610 	/*
611 	 * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
612 	 * for which there is a TLB flush pending in order to guarantee
613 	 * we've seen both that PTE modification and the increment.
614 	 *
615 	 * (no requirement on actually still holding the PTL, that is irrelevant)
616 	 */
617 	return atomic_read(&mm->tlb_flush_pending) > 1;
618 }
619 
620 struct vm_fault;
621 
622 struct vm_special_mapping {
623 	const char *name;	/* The name, e.g. "[vdso]". */
624 
625 	/*
626 	 * If .fault is not provided, this points to a
627 	 * NULL-terminated array of pages that back the special mapping.
628 	 *
629 	 * This must not be NULL unless .fault is provided.
630 	 */
631 	struct page **pages;
632 
633 	/*
634 	 * If non-NULL, then this is called to resolve page faults
635 	 * on the special mapping.  If used, .pages is not checked.
636 	 */
637 	int (*fault)(const struct vm_special_mapping *sm,
638 		     struct vm_area_struct *vma,
639 		     struct vm_fault *vmf);
640 
641 	int (*mremap)(const struct vm_special_mapping *sm,
642 		     struct vm_area_struct *new_vma);
643 };
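
/*
 * A sketch of a static special mapping in the style of the x86 vdso setup:
 * one backing page plus the NULL terminator, to be installed with
 * _install_special_mapping(mm, addr, PAGE_SIZE, VM_READ, &sketch_mapping).
 * All of the "sketch_" names are hypothetical.
 */
static struct page *sketch_pages[2];	/* [0] filled at init, [1] stays NULL */

static const struct vm_special_mapping sketch_mapping = {
	.name	= "[sketch]",
	.pages	= sketch_pages,		/* no .fault needed: pages[] is used */
};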
644 
645 enum tlb_flush_reason {
646 	TLB_FLUSH_ON_TASK_SWITCH,
647 	TLB_REMOTE_SHOOTDOWN,
648 	TLB_LOCAL_SHOOTDOWN,
649 	TLB_LOCAL_MM_SHOOTDOWN,
650 	TLB_REMOTE_SEND_IPI,
651 	NR_TLB_FLUSH_REASONS,
652 };
653 
654  /*
655   * A swap entry has to fit into an "unsigned long", as the entry is hidden
656   * in the "index" field of the swapper address space.
657   */
658 typedef struct {
659 	unsigned long val;
660 } swp_entry_t;
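
/*
 * A sketch of how such an entry is packed; the real swp_entry(),
 * swp_type() and swp_offset() helpers live in <linux/swapops.h> and also
 * account for page-flag and radix-tree bits, so the shift below is purely
 * illustrative.
 */
#define SKETCH_SWP_TYPE_SHIFT	(BITS_PER_LONG - MAX_SWAPFILES_SHIFT)

static inline swp_entry_t sketch_swp_entry(unsigned long type,
					   unsigned long offset)
{
	swp_entry_t entry;

	entry.val = (type << SKETCH_SWP_TYPE_SHIFT) | offset;
	return entry;
}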
661 
662 #endif /* _LINUX_MM_TYPES_H */
663