/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/mm_types_task.h>

#include <linux/auxvec.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
#include <linux/rcupdate.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>
#include <linux/seqlock.h>

#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))

#define INIT_PASID	0

struct address_space;
struct mem_cgroup;

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * If you allocate the page using alloc_pages(), you can use some of the
 * space in struct page for your own purposes.  The five words in the main
 * union are available, except for bit 0 of the first word which must be
 * kept clear.  Many users use this word to store a pointer to an object
 * which is guaranteed to be aligned.  If you use the same storage as
 * page->mapping, you must restore it to NULL before freeing the page.
 *
 * If your page will not be mapped to userspace, you can also use the four
 * bytes in the mapcount union, but you must call page_mapcount_reset()
 * before freeing it.
 *
 * If you want to use the refcount field, it must be used in such a way
 * that other CPUs temporarily incrementing and then decrementing the
 * refcount does not cause problems.  On receiving the page from
 * alloc_pages(), the refcount will be positive.
 *
 * If you allocate pages of order > 0, you can use some of the fields
 * in each subpage, but you may need to restore some of their values
 * afterwards.
 *
 * SLUB uses cmpxchg_double() to atomically update its freelist and counters.
 * That requires that freelist & counters in struct slab be adjacent and
 * double-word aligned. Because struct slab currently just reinterprets the
 * bits of struct page, we align all struct pages to double-word boundaries,
 * and ensure that 'freelist' is aligned within struct slab.
 */
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
#define _struct_page_alignment	__aligned(2 * sizeof(unsigned long))
#else
#define _struct_page_alignment
#endif
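
/*
 * Illustrative sketch of the rules above, assuming a caller in .c code
 * with <linux/mm.h> and <linux/gfp.h> available (struct my_obj and the
 * variable obj are hypothetical, purely for illustration):
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 0);
 *
 *	// reuse one of the five union words:
 *	set_page_private(page, (unsigned long)obj);
 *	...
 *	// restore reused storage before freeing the page:
 *	set_page_private(page, 0);
 *	__free_pages(page, 0);
 */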

struct page {
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	/*
	 * Five words (20/40 bytes) are available in this union.
	 * WARNING: bit 0 of the first word is used for PageTail(). That
	 * means the other users of this union MUST NOT use the bit to
	 * avoid collision and false-positive PageTail().
	 */
	union {
		struct {	/* Page cache and anonymous pages */
			/**
			 * @lru: Pageout list, eg. active_list protected by
			 * lruvec->lru_lock.  Sometimes used as a generic list
			 * by the page owner.
			 */
			struct list_head lru;
			/* See page-flags.h for PAGE_MAPPING_FLAGS */
			struct address_space *mapping;
			pgoff_t index;		/* Our offset within mapping. */
			/**
			 * @private: Mapping-private opaque data.
			 * Usually used for buffer_heads if PagePrivate.
			 * Used for swp_entry_t if PageSwapCache.
			 * Indicates order in the buddy system if PageBuddy.
			 */
			unsigned long private;
		};
		struct {	/* page_pool used by netstack */
			/**
			 * @pp_magic: magic value to avoid recycling non
			 * page_pool allocated pages.
			 */
			unsigned long pp_magic;
			struct page_pool *pp;
			unsigned long _pp_mapping_pad;
			unsigned long dma_addr;
			union {
				/**
				 * dma_addr_upper: might require a 64-bit
				 * value on 32-bit architectures.
				 */
				unsigned long dma_addr_upper;
				/**
				 * For frag page support, not supported in
				 * 32-bit architectures with 64-bit DMA.
				 */
				atomic_long_t pp_frag_count;
			};
		};
		struct {	/* Tail pages of compound page */
			unsigned long compound_head;	/* Bit zero is set */

			/* First tail page only */
			unsigned char compound_dtor;
			unsigned char compound_order;
			atomic_t compound_mapcount;
			unsigned int compound_nr; /* 1 << compound_order */
		};
		struct {	/* Second tail page of compound page */
			unsigned long _compound_pad_1;	/* compound_head */
			atomic_t hpage_pinned_refcount;
			/* For both global and memcg */
			struct list_head deferred_list;
		};
		struct {	/* Page table pages */
			unsigned long _pt_pad_1;	/* compound_head */
			pgtable_t pmd_huge_pte; /* protected by page->ptl */
			unsigned long _pt_pad_2;	/* mapping */
			union {
				struct mm_struct *pt_mm; /* x86 pgds only */
				atomic_t pt_frag_refcount; /* powerpc */
			};
#if ALLOC_SPLIT_PTLOCKS
			spinlock_t *ptl;
#else
			spinlock_t ptl;
#endif
		};
		struct {	/* ZONE_DEVICE pages */
			/** @pgmap: Points to the hosting device page map. */
			struct dev_pagemap *pgmap;
			void *zone_device_data;
			/*
			 * ZONE_DEVICE private pages are counted as being
			 * mapped so the next 3 words hold the mapping, index,
			 * and private fields from the source anonymous or
			 * page cache page while the page is migrated to device
			 * private memory.
			 * ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also
			 * use the mapping, index, and private fields when
			 * pmem backed DAX files are mapped.
			 */
		};

		/** @rcu_head: You can use this to free a page by RCU. */
		struct rcu_head rcu_head;
	};

	union {		/* This union is 4 bytes in size. */
		/*
		 * If the page can be mapped to userspace, encodes the number
		 * of times this page is referenced by a page table.
		 */
		atomic_t _mapcount;

		/*
		 * If the page is neither PageSlab nor mappable to userspace,
		 * the value stored here may help determine what this page
		 * is used for.  See page-flags.h for a list of page types
		 * which are currently stored here.
		 */
		unsigned int page_type;
	};

	/* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
	atomic_t _refcount;

#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
#endif

	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	int _last_cpupid;
#endif
} _struct_page_alignment;

/**
 * struct folio - Represents a contiguous set of bytes.
 * @flags: Identical to the page flags.
 * @lru: Least Recently Used list; tracks how recently this folio was used.
 * @mapping: The file this page belongs to, or refers to the anon_vma for
 *    anonymous memory.
 * @index: Offset within the file, in units of pages.  For anonymous memory,
 *    this is the index from the beginning of the mmap.
 * @private: Filesystem per-folio data (see folio_attach_private()).
 *    Used for swp_entry_t if folio_test_swapcache().
 * @_mapcount: Do not access this member directly.  Use folio_mapcount() to
 *    find out how many times this folio is mapped by userspace.
 * @_refcount: Do not access this member directly.  Use folio_ref_count()
 *    to find how many references there are to this folio.
 * @memcg_data: Memory Control Group data.
 *
 * A folio is a physically, virtually and logically contiguous set
 * of bytes.  It is a power-of-two in size, and it is aligned to that
 * same power-of-two.  It is at least as large as %PAGE_SIZE.  If it is
 * in the page cache, it is at a file offset which is a multiple of that
 * power-of-two.  It may be mapped into userspace at an address which is
 * at an arbitrary page offset, but its kernel virtual address is aligned
 * to its size.
 */
struct folio {
	/* private: don't document the anon union */
	union {
		struct {
	/* public: */
			unsigned long flags;
			struct list_head lru;
			struct address_space *mapping;
			pgoff_t index;
			void *private;
			atomic_t _mapcount;
			atomic_t _refcount;
#ifdef CONFIG_MEMCG
			unsigned long memcg_data;
#endif
	/* private: the union with struct page is transitional */
		};
		struct page page;
	};
};

static_assert(sizeof(struct page) == sizeof(struct folio));
#define FOLIO_MATCH(pg, fl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct folio, fl))
FOLIO_MATCH(flags, flags);
FOLIO_MATCH(lru, lru);
FOLIO_MATCH(mapping, mapping);
FOLIO_MATCH(compound_head, lru);
FOLIO_MATCH(index, index);
FOLIO_MATCH(private, private);
FOLIO_MATCH(_mapcount, _mapcount);
FOLIO_MATCH(_refcount, _refcount);
#ifdef CONFIG_MEMCG
FOLIO_MATCH(memcg_data, memcg_data);
#endif
#undef FOLIO_MATCH
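
/*
 * Illustrative sketch of moving between the two views, assuming the
 * helpers declared by other mm headers (page_folio(), folio_ref_count(),
 * folio_mapcount()):
 *
 *	struct folio *folio = page_folio(page);
 *
 *	pgoff_t index = folio->index;		// same storage as page->index
 *	int refs = folio_ref_count(folio);	// never read ->_refcount directly
 *	struct page *head = &folio->page;	// back to the head page
 */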

static inline atomic_t *folio_mapcount_ptr(struct folio *folio)
{
	struct page *tail = &folio->page + 1;
	return &tail->compound_mapcount;
}

static inline atomic_t *compound_mapcount_ptr(struct page *page)
{
	return &page[1].compound_mapcount;
}

static inline atomic_t *compound_pincount_ptr(struct page *page)
{
	return &page[2].hpage_pinned_refcount;
}

/*
 * Used for sizing the vmemmap region on some architectures
 */
#define STRUCT_PAGE_MAX_SHIFT	(order_base_2(sizeof(struct page)))

#define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)

/*
 * page_private can be used on tail pages.  However, PagePrivate is only
 * checked by the VM on the head page.  So page_private on the tail pages
 * should be used for data that's ancillary to the head page (eg attaching
 * buffer heads to tail pages after attaching buffer heads to the head page)
 */
#define page_private(page)		((page)->private)

static inline void set_page_private(struct page *page, unsigned long private)
{
	page->private = private;
}

static inline void *folio_get_private(struct folio *folio)
{
	return folio->private;
}
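
/*
 * Illustrative sketch for filesystem per-folio data, assuming the
 * folio_attach_private()/folio_detach_private() helpers from
 * <linux/pagemap.h> (info and its type are hypothetical):
 *
 *	folio_attach_private(folio, info);	// sets ->private and PG_private
 *	...
 *	info = folio_get_private(folio);	// peek without changing state
 *	...
 *	info = folio_detach_private(folio);	// clears ->private and PG_private
 */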

struct page_frag_cache {
	void * va;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
	__u16 offset;
	__u16 size;
#else
	__u32 offset;
#endif
	/* we maintain a pagecount bias, so that we don't dirty the cache line
	 * containing page->_refcount every time we allocate a fragment.
	 */
	unsigned int		pagecnt_bias;
	bool pfmemalloc;
};
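
/*
 * Illustrative sketch, assuming the page_frag_alloc()/page_frag_free()
 * helpers declared elsewhere (the cache is typically a per-CPU or
 * per-device object owned by the caller, zero-initialised before use):
 *
 *	static struct page_frag_cache frag_cache;
 *
 *	void *buf = page_frag_alloc(&frag_cache, 256, GFP_ATOMIC);
 *	...
 *	page_frag_free(buf);	// releases the fragment's reference
 */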

typedef unsigned long vm_flags_t;

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions.  These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
	struct rb_node	vm_rb;		/* link in global region tree */
	vm_flags_t	vm_flags;	/* VMA vm_flags */
	unsigned long	vm_start;	/* start address of region */
	unsigned long	vm_end;		/* region initialised to here */
	unsigned long	vm_top;		/* region allocated to here */
	unsigned long	vm_pgoff;	/* the offset in vm_file corresponding to vm_start */
	struct file	*vm_file;	/* the backing file or NULL */

	int		vm_usage;	/* region usage count (access under nommu_region_sem) */
	bool		vm_icache_flushed : 1; /* true if the icache has been flushed for
						* this region */
};

#ifdef CONFIG_USERFAULTFD
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
struct vm_userfaultfd_ctx {
	struct userfaultfd_ctx *ctx;
};
#else /* CONFIG_USERFAULTFD */
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */

struct anon_vma_name {
	struct kref kref;
	/* The name needs to be at the end because it is dynamically sized. */
	char name[];
};

/*
 * This struct describes a virtual memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next, *vm_prev;

	struct rb_node vm_rb;

	/*
	 * Largest free memory gap in bytes to the left of this VMA.
	 * Either between this VMA and vma->vm_prev, or between one of the
	 * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
	 * get_unmapped_area find a free area of the right size.
	 */
	unsigned long rb_subtree_gap;

	/* Second cache line starts here. */

	struct mm_struct *vm_mm;	/* The address space we belong to. */

	/*
	 * Access permissions of this VMA.
	 * See vmf_insert_mixed_prot() for discussion.
	 */
	pgprot_t vm_page_prot;
	unsigned long vm_flags;		/* Flags, see mm.h. */

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 *
	 * For private anonymous mappings, a pointer to a null terminated string
	 * containing the name given to the vma, or NULL if unnamed.
	 */

	union {
		struct {
			struct rb_node rb;
			unsigned long rb_subtree_last;
		} shared;
		/* Serialized by mmap_sem. */
		struct anon_vma_name *anon_name;
	};

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.	A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file * vm_file;		/* File we map to (can be NULL). */
	void * vm_private_data;		/* was vm_pte (shared mem) */

#ifdef CONFIG_SWAP
	atomic_long_t swap_readahead_info;
#endif
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;

struct kioctx_table;
struct mm_struct {
	struct {
		struct vm_area_struct *mmap;		/* list of VMAs */
		struct rb_root mm_rb;
		u64 vmacache_seqnum;                   /* per-thread vmacache */
#ifdef CONFIG_MMU
		unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
#endif
		unsigned long mmap_base;	/* base of mmap area */
		unsigned long mmap_legacy_base;	/* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
		/* Base addresses for compatible mmap() */
		unsigned long mmap_compat_base;
		unsigned long mmap_compat_legacy_base;
#endif
		unsigned long task_size;	/* size of task vm space */
		unsigned long highest_vm_end;	/* highest vma end address */
		pgd_t * pgd;

#ifdef CONFIG_MEMBARRIER
		/**
		 * @membarrier_state: Flags controlling membarrier behavior.
		 *
		 * This field is close to @pgd to hopefully fit in the same
		 * cache-line, which needs to be touched by switch_mm().
		 */
		atomic_t membarrier_state;
#endif

		/**
		 * @mm_users: The number of users including userspace.
		 *
		 * Use mmget()/mmget_not_zero()/mmput() to modify. When this
		 * drops to 0 (i.e. when the task exits and there are no other
		 * temporary reference holders), we also release a reference on
		 * @mm_count (which may then free the &struct mm_struct if
		 * @mm_count also drops to 0).
		 */
		atomic_t mm_users;

		/**
		 * @mm_count: The number of references to &struct mm_struct
		 * (@mm_users count as 1).
		 *
		 * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
		 * &struct mm_struct is freed.
		 *
		 * An illustrative usage sketch for @mm_users and @mm_count
		 * follows this structure definition.
		 */
		atomic_t mm_count;

#ifdef CONFIG_MMU
		atomic_long_t pgtables_bytes;	/* PTE page table pages */
#endif
		int map_count;			/* number of VMAs */

		spinlock_t page_table_lock; /* Protects page tables and some
					     * counters
					     */
		/*
		 * With some kernel configs, the current offset of mmap_lock
		 * inside 'mm_struct' is 0x120, which works well: its two hot
		 * fields, 'count' and 'owner', sit in two different
		 * cachelines, and when mmap_lock is highly contended both
		 * fields are accessed frequently, so the current layout
		 * helps to reduce cache bouncing.
		 *
		 * So please be careful when adding new fields before
		 * mmap_lock, which can easily push the two fields into one
		 * cacheline.
		 */
		struct rw_semaphore mmap_lock;

		struct list_head mmlist; /* List of maybe swapped mm's.	These
					  * are globally strung together off
					  * init_mm.mmlist, and are protected
					  * by mmlist_lock
					  */


		unsigned long hiwater_rss; /* High-watermark of RSS usage */
		unsigned long hiwater_vm;  /* High-water virtual memory usage */

		unsigned long total_vm;	   /* Total pages mapped */
		unsigned long locked_vm;   /* Pages that have PG_mlocked set */
		atomic64_t    pinned_vm;   /* Refcount permanently increased */
		unsigned long data_vm;	   /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
		unsigned long exec_vm;	   /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
		unsigned long stack_vm;	   /* VM_STACK */
		unsigned long def_flags;

		/**
		 * @write_protect_seq: Locked when any thread is write
		 * protecting pages mapped by this mm to enforce a later COW,
		 * for instance during page table copying for fork().
		 */
		seqcount_t write_protect_seq;

		spinlock_t arg_lock; /* protect the below fields */

		unsigned long start_code, end_code, start_data, end_data;
		unsigned long start_brk, brk, start_stack;
		unsigned long arg_start, arg_end, env_start, env_end;

		unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

		/*
		 * Special counters, in some configurations protected by the
		 * page_table_lock, in other configurations by being atomic.
		 */
		struct mm_rss_stat rss_stat;

		struct linux_binfmt *binfmt;

		/* Architecture-specific MM context */
		mm_context_t context;

		unsigned long flags; /* Must use atomic bitops to access */

#ifdef CONFIG_AIO
		spinlock_t			ioctx_lock;
		struct kioctx_table __rcu	*ioctx_table;
#endif
#ifdef CONFIG_MEMCG
		/*
		 * "owner" points to a task that is regarded as the canonical
		 * user/owner of this mm. All of the following must be true in
		 * order for it to be changed:
		 *
		 * current == mm->owner
		 * current->mm != mm
		 * new_owner->mm == mm
		 * new_owner->alloc_lock is held
		 */
		struct task_struct __rcu *owner;
#endif
		struct user_namespace *user_ns;

		/* store ref to file /proc/<pid>/exe symlink points to */
		struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
		struct mmu_notifier_subscriptions *notifier_subscriptions;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
		pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_NUMA_BALANCING
		/*
		 * numa_next_scan is the next time that the PTEs will be marked
		 * pte_numa. NUMA hinting faults will gather statistics and
		 * migrate pages to new nodes if necessary.
		 */
		unsigned long numa_next_scan;

		/* Restart point for scanning and setting pte_numa */
		unsigned long numa_scan_offset;

		/* numa_scan_seq prevents two threads setting pte_numa */
		int numa_scan_seq;
#endif
		/*
		 * An operation with batched TLB flushing is going on. Anything
		 * that can move process memory needs to flush the TLB when
		 * moving a PROT_NONE or PROT_NUMA mapped page.
		 */
		atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
		/* See flush_tlb_batched_pending() */
		atomic_t tlb_flush_batched;
#endif
		struct uprobes_state uprobes_state;
#ifdef CONFIG_PREEMPT_RT
		struct rcu_head delayed_drop;
#endif
#ifdef CONFIG_HUGETLB_PAGE
		atomic_long_t hugetlb_usage;
#endif
		struct work_struct async_put_work;

#ifdef CONFIG_IOMMU_SUPPORT
		u32 pasid;
#endif
	} __randomize_layout;

	/*
	 * The mm_cpumask needs to be at the end of mm_struct, because it
	 * is dynamically sized based on nr_cpu_ids.
	 */
	unsigned long cpu_bitmap[];
};
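
/*
 * Illustrative sketch of the two reference counts documented above,
 * assuming the mmget_not_zero()/mmput() and mmgrab()/mmdrop() helpers
 * from <linux/sched/mm.h>:
 *
 *	// Keep the whole address space (VMAs, page tables) alive:
 *	if (mmget_not_zero(mm)) {
 *		... use mm->mmap, walk page tables ...
 *		mmput(mm);
 *	}
 *
 *	// Only keep the struct mm_struct allocation itself alive:
 *	mmgrab(mm);
 *	... mm may have exited; its mappings may already be gone ...
 *	mmdrop(mm);
 */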

extern struct mm_struct init_mm;

/* Pointer magic because the dynamic array size confuses some compilers. */
static inline void mm_init_cpumask(struct mm_struct *mm)
{
	unsigned long cpu_bitmap = (unsigned long)mm;

	cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
	cpumask_clear((struct cpumask *)cpu_bitmap);
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
	return (struct cpumask *)&mm->cpu_bitmap;
}
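
/*
 * Illustrative sketch, in the spirit of what architecture switch_mm()
 * and TLB-shootdown code does with this mask (cpumask helpers from
 * <linux/cpumask.h>):
 *
 *	cpumask_set_cpu(cpu, mm_cpumask(next_mm));	// this CPU now uses next_mm
 *	if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
 *		... a TLB flush may be needed for @mm on @cpu ...
 */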

struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_finish_mmu(struct mmu_gather *tlb);
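
/*
 * Illustrative sketch of the usual unmap pattern built on these hooks
 * (a sketch only; the real callers live in the core mm unmap paths):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm);
 *	... clear PTEs and queue the freed pages into @tlb ...
 *	tlb_finish_mmu(&tlb);	// flush the TLB and free the batched pages
 */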

struct vm_fault;

/**
 * typedef vm_fault_t - Return type for page fault handlers.
 *
 * Page fault handlers return a bitmask of %VM_FAULT values.
 */
typedef __bitwise unsigned int vm_fault_t;

/**
 * enum vm_fault_reason - Page fault handlers return a bitmask of
 * these values to tell the core VM what happened when handling the
 * fault. Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 *
 * @VM_FAULT_OOM:		Out Of Memory
 * @VM_FAULT_SIGBUS:		Bad access
 * @VM_FAULT_MAJOR:		Page read from storage
 * @VM_FAULT_WRITE:		Special case for get_user_pages
 * @VM_FAULT_HWPOISON:		Hit poisoned small page
 * @VM_FAULT_HWPOISON_LARGE:	Hit poisoned large page. Index encoded
 *				in upper bits
 * @VM_FAULT_SIGSEGV:		segmentation fault
 * @VM_FAULT_NOPAGE:		->fault installed the pte, did not return a page
 * @VM_FAULT_LOCKED:		->fault locked the returned page
 * @VM_FAULT_RETRY:		->fault blocked, must retry
 * @VM_FAULT_FALLBACK:		huge page fault failed, fall back to small
 * @VM_FAULT_DONE_COW:		->fault has fully handled COW
 * @VM_FAULT_NEEDDSYNC:		->fault did not modify page tables and needs
 *				fsync() to complete (for synchronous page faults
 *				in DAX)
 * @VM_FAULT_HINDEX_MASK:	mask HINDEX value
 *
 */
enum vm_fault_reason {
	VM_FAULT_OOM            = (__force vm_fault_t)0x000001,
	VM_FAULT_SIGBUS         = (__force vm_fault_t)0x000002,
	VM_FAULT_MAJOR          = (__force vm_fault_t)0x000004,
	VM_FAULT_WRITE          = (__force vm_fault_t)0x000008,
	VM_FAULT_HWPOISON       = (__force vm_fault_t)0x000010,
	VM_FAULT_HWPOISON_LARGE = (__force vm_fault_t)0x000020,
	VM_FAULT_SIGSEGV        = (__force vm_fault_t)0x000040,
	VM_FAULT_NOPAGE         = (__force vm_fault_t)0x000100,
	VM_FAULT_LOCKED         = (__force vm_fault_t)0x000200,
	VM_FAULT_RETRY          = (__force vm_fault_t)0x000400,
	VM_FAULT_FALLBACK       = (__force vm_fault_t)0x000800,
	VM_FAULT_DONE_COW       = (__force vm_fault_t)0x001000,
	VM_FAULT_NEEDDSYNC      = (__force vm_fault_t)0x002000,
	VM_FAULT_HINDEX_MASK    = (__force vm_fault_t)0x0f0000,
};

/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16))
#define VM_FAULT_GET_HINDEX(x) (((__force unsigned int)(x) >> 16) & 0xf)
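
/*
 * Illustrative sketch of the hstate-index encoding, assuming a hugetlb
 * fault path that knows the index @hindex (< 16):
 *
 *	ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hindex);
 *	...
 *	if (ret & VM_FAULT_HWPOISON_LARGE)
 *		hindex = VM_FAULT_GET_HINDEX(ret);	// recovers the 4-bit value
 */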

#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS |	\
			VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON |	\
			VM_FAULT_HWPOISON_LARGE | VM_FAULT_FALLBACK)

#define VM_FAULT_RESULT_TRACE \
	{ VM_FAULT_OOM,                 "OOM" },	\
	{ VM_FAULT_SIGBUS,              "SIGBUS" },	\
	{ VM_FAULT_MAJOR,               "MAJOR" },	\
	{ VM_FAULT_WRITE,               "WRITE" },	\
	{ VM_FAULT_HWPOISON,            "HWPOISON" },	\
	{ VM_FAULT_HWPOISON_LARGE,      "HWPOISON_LARGE" },	\
	{ VM_FAULT_SIGSEGV,             "SIGSEGV" },	\
	{ VM_FAULT_NOPAGE,              "NOPAGE" },	\
	{ VM_FAULT_LOCKED,              "LOCKED" },	\
	{ VM_FAULT_RETRY,               "RETRY" },	\
	{ VM_FAULT_FALLBACK,            "FALLBACK" },	\
	{ VM_FAULT_DONE_COW,            "DONE_COW" },	\
	{ VM_FAULT_NEEDDSYNC,           "NEEDDSYNC" }

struct vm_special_mapping {
	const char *name;	/* The name, e.g. "[vdso]". */

	/*
	 * If .fault is not provided, this points to a
	 * NULL-terminated array of pages that back the special mapping.
	 *
	 * This must not be NULL unless .fault is provided.
	 */
	struct page **pages;

	/*
	 * If non-NULL, then this is called to resolve page faults
	 * on the special mapping.  If used, .pages is not checked.
	 */
	vm_fault_t (*fault)(const struct vm_special_mapping *sm,
				struct vm_area_struct *vma,
				struct vm_fault *vmf);

	int (*mremap)(const struct vm_special_mapping *sm,
		     struct vm_area_struct *new_vma);
};
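
/*
 * Illustrative sketch of a pages-backed special mapping, loosely
 * following the vdso-style users; my_pages and my_mapping are
 * hypothetical names, and _install_special_mapping() is declared
 * elsewhere in the mm headers:
 *
 *	static struct page *my_pages[2];	// [0] filled at init, [1] stays NULL
 *	static const struct vm_special_mapping my_mapping = {
 *		.name	= "[my_special]",
 *		.pages	= my_pages,
 *	};
 *
 *	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
 *				       VM_READ | VM_MAYREAD, &my_mapping);
 */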

enum tlb_flush_reason {
	TLB_FLUSH_ON_TASK_SWITCH,
	TLB_REMOTE_SHOOTDOWN,
	TLB_LOCAL_SHOOTDOWN,
	TLB_LOCAL_MM_SHOOTDOWN,
	TLB_REMOTE_SEND_IPI,
	NR_TLB_FLUSH_REASONS,
};

/*
 * A swap entry has to fit into an "unsigned long", as the entry is hidden
 * in the "index" field of the swapper address space.
 */
typedef struct {
	unsigned long val;
} swp_entry_t;

/**
 * enum fault_flag - Fault flag definitions.
 * @FAULT_FLAG_WRITE: Fault was a write fault.
 * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
 * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked.
 * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying.
 * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region.
 * @FAULT_FLAG_TRIED: The fault has been tried once.
 * @FAULT_FLAG_USER: The fault originated in userspace.
 * @FAULT_FLAG_REMOTE: The fault is not for current task/mm.
 * @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch.
 * @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals.
 *
 * About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can specify
 * whether we would allow page faults to retry by specifying these two
 * fault flags correctly.  Currently there can be three legal combinations:
 *
 * (a) ALLOW_RETRY and !TRIED:  this means the page fault allows retry, and
 *                              this is the first try
 *
 * (b) ALLOW_RETRY and TRIED:   this means the page fault allows retry, and
 *                              we've already tried at least once
 *
 * (c) !ALLOW_RETRY and !TRIED: this means the page fault does not allow retry
 *
 * The unlisted combination (!ALLOW_RETRY && TRIED) is illegal and should never
 * be used.  Note that page faults can be allowed to retry multiple times,
 * in which case we'll have an initial fault with flags (a) then later on
 * continuous faults with flags (b).  We should always try to detect pending
 * signals before a retry to make sure the continuous page faults can still be
 * interrupted if necessary.
 */
enum fault_flag {
	FAULT_FLAG_WRITE =		1 << 0,
	FAULT_FLAG_MKWRITE =		1 << 1,
	FAULT_FLAG_ALLOW_RETRY =	1 << 2,
	FAULT_FLAG_RETRY_NOWAIT =	1 << 3,
	FAULT_FLAG_KILLABLE =		1 << 4,
	FAULT_FLAG_TRIED =		1 << 5,
	FAULT_FLAG_USER =		1 << 6,
	FAULT_FLAG_REMOTE =		1 << 7,
	FAULT_FLAG_INSTRUCTION =	1 << 8,
	FAULT_FLAG_INTERRUPTIBLE =	1 << 9,
};
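
/*
 * Illustrative sketch of the retry protocol described above, loosely
 * following what architecture page-fault handlers do around
 * handle_mm_fault() (declared elsewhere):
 *
 *	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 *
 * retry:
 *	fault = handle_mm_fault(vma, address, flags, regs);
 *	if (fault & VM_FAULT_RETRY) {		// case (a): mmap_lock was dropped
 *		flags |= FAULT_FLAG_TRIED;	// subsequent attempts are case (b)
 *		... check for pending signals before retrying ...
 *		goto retry;
 *	}
 */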

#endif /* _LINUX_MM_TYPES_H */