#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/atomic.h>	/* atomic_t, atomic_inc(), atomic_dec_and_test() */
#include <linux/mutex.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
	struct anon_vma *root;	/* Root of this anon_vma tree */
	struct mutex mutex;	/* Serialize access to vma list */
	/*
	 * The refcount is taken on an anon_vma when there is no
	 * guarantee that the vma or its page tables will exist for
	 * the duration of the operation. A caller that takes
	 * the reference is responsible for freeing the anon_vma
	 * if it is the last user on release.
	 */
	atomic_t refcount;

	/*
	 * NOTE: the LSB of head.next is set by mm_take_all_locks()
	 * _after_ taking the above mutex. So the head must only be
	 * read/written after taking the above mutex, to be sure to
	 * see a valid next pointer. The LSB itself is serialized by
	 * a system-wide lock only visible to mm_take_all_locks()
	 * (mm_all_locks_mutex).
	 */
	struct list_head head;	/* Chain of private "related" vmas */
};
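
/*
 * Illustrative sketch (not part of this header): rmap scans walk the
 * "head" list under the root mutex, using the anon_vma_chain links
 * declared below:
 *
 *	struct anon_vma_chain *avc;
 *
 *	anon_vma_lock(anon_vma);
 *	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
 *		// avc->vma may map the page; compute the page's
 *		// address in that vma and inspect its ptes
 *	}
 *	anon_vma_unlock(anon_vma);
 */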

/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes. Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "same_anon_vma" list contains the anon_vma_chains
 * which link all the VMAs associated with this anon_vma.
 */
struct anon_vma_chain {
	struct vm_area_struct *vma;
	struct anon_vma *anon_vma;
	struct list_head same_vma;   /* locked by mmap_sem & page_table_lock */
	struct list_head same_anon_vma;	/* locked by anon_vma->mutex */
};
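
/*
 * Illustrative sketch: the reverse direction, walking every anon_vma a
 * VMA is linked into (assuming the vm_area_struct list field named
 * anon_vma_chain), as unlink_anon_vmas() below must do; protected by
 * mmap_sem:
 *
 *	struct anon_vma_chain *avc;
 *
 *	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) {
 *		struct anon_vma *anon_vma = avc->anon_vma;
 *		...
 *	}
 */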

#ifdef CONFIG_MMU
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
	atomic_inc(&anon_vma->refcount);
}

void __put_anon_vma(struct anon_vma *anon_vma);

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
	if (atomic_dec_and_test(&anon_vma->refcount))
		__put_anon_vma(anon_vma);
}
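
/*
 * Typical use (illustrative sketch): pin an anon_vma so it cannot be
 * freed while the caller may sleep, then drop the pin; the final put
 * frees it via __put_anon_vma().  page_get_anon_vma(), declared below,
 * returns with the refcount already elevated:
 *
 *	struct anon_vma *anon_vma = page_get_anon_vma(page);
 *
 *	if (anon_vma) {
 *		... inspect the anon_vma ...
 *		put_anon_vma(anon_vma);
 *	}
 */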

static inline struct anon_vma *page_anon_vma(struct page *page)
{
	/*
	 * Anonymous pages keep their anon_vma in page->mapping, tagged
	 * with PAGE_MAPPING_ANON; reject file pages and KSM pages
	 * (which set an additional mapping flag bit).
	 */
	if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
					    PAGE_MAPPING_ANON)
		return NULL;
	return page_rmapping(page);
}

static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		mutex_lock(&anon_vma->root->mutex);
}

static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		mutex_unlock(&anon_vma->root->mutex);
}

static inline void anon_vma_lock(struct anon_vma *anon_vma)
{
	mutex_lock(&anon_vma->root->mutex);
}

static inline void anon_vma_unlock(struct anon_vma *anon_vma)
{
	mutex_unlock(&anon_vma->root->mutex);
}
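
/*
 * Locking pattern (sketch): every anon_vma in a fork tree shares its
 * root's mutex, which is why the helpers above always lock through
 * anon_vma->root; taking it serializes against all related vmas:
 *
 *	anon_vma_lock(anon_vma);
 *	// additions/removals on anon_vma->head are safe here
 *	anon_vma_unlock(anon_vma);
 */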

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int  anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
void anon_vma_moveto_tail(struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);

static inline void anon_vma_merge(struct vm_area_struct *vma,
				  struct vm_area_struct *next)
{
	VM_BUG_ON(vma->anon_vma != next->anon_vma);
	unlink_anon_vmas(next);
}

struct anon_vma *page_get_anon_vma(struct page *page);

/*
 * rmap interfaces called when adding or removing the pte of a page
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
			   unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);

void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
			    unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
				unsigned long);

static inline void page_dup_rmap(struct page *page)
{
	/* Duplicate an existing pte mapping, e.g. when copying ptes at fork() */
	atomic_inc(&page->_mapcount);
}

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
			struct mem_cgroup *memcg, unsigned long *vm_flags);
int page_referenced_one(struct page *, struct vm_area_struct *,
	unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);

enum ttu_flags {
	TTU_UNMAP = 0,			/* unmap mode */
	TTU_MIGRATION = 1,		/* migration mode */
	TTU_MUNLOCK = 2,		/* munlock mode */
	TTU_ACTION_MASK = 0xff,

	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
};
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
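
/*
 * Example (illustrative sketch): an action in the low byte may be
 * combined with modifier bits, and TTU_ACTION() recovers the action:
 *
 *	enum ttu_flags flags = TTU_MIGRATION | TTU_IGNORE_MLOCK |
 *			       TTU_IGNORE_ACCESS;
 *
 *	if (TTU_ACTION(flags) == TTU_MIGRATION)
 *		// install migration ptes instead of plain unmapping
 */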

int try_to_unmap(struct page *, enum ttu_flags flags);
int try_to_unmap_one(struct page *, struct vm_area_struct *,
			unsigned long address, enum ttu_flags flags);

/*
 * Called from mm/filemap_xip.c to unmap empty zero page
 */
pte_t *__page_check_address(struct page *, struct mm_struct *,
				unsigned long, spinlock_t **, int);

static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
					unsigned long address,
					spinlock_t **ptlp, int sync)
{
	pte_t *ptep;

	__cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
						       ptlp, sync));
	return ptep;
}
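
/*
 * Sketch of the expected calling pattern (assumes the caller has
 * already resolved the mm and the virtual address, e.g. with
 * page_address_in_vma() below):
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = page_check_address(page, mm, address, &ptl, 0);
 *
 *	if (pte) {
 *		// the pte is mapped and ptl is held here
 *		pte_unmap_unlock(pte, ptl);
 *	}
 */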

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings
 * (and since clean PTEs should also be readonly, write protects them too).
 *
 * Returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);

/*
 * Called in the munlock()/munmap() path to check for other vmas
 * holding the page mlocked.
 */
int try_to_munlock(struct page *);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma(struct page *page);
void page_unlock_anon_vma(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

/*
 * Called by migrate.c to remove migration ptes, but might be used more later.
 */
int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg);

#else	/* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
				  struct mem_cgroup *memcg,
				  unsigned long *vm_flags)
{
	*vm_flags = 0;
	return 0;
}

#define try_to_unmap(page, flags) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
	return 0;
}

#endif	/* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS	0
#define SWAP_AGAIN	1
#define SWAP_FAIL	2
#define SWAP_MLOCK	3
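
/*
 * Example (illustrative sketch, not kernel code): a reclaim path such
 * as mm/vmscan.c's shrink_page_list() dispatches on these values
 * roughly like this:
 *
 *	switch (try_to_unmap(page, TTU_UNMAP)) {
 *	case SWAP_SUCCESS:	// all ptes removed, reclaim may proceed
 *		break;
 *	case SWAP_AGAIN:	// transient failure, keep the page for now
 *		goto keep_locked;
 *	case SWAP_FAIL:		// unmapping failed permanently
 *		goto activate_locked;
 *	case SWAP_MLOCK:	// page is mlocked, cull to unevictable list
 *		goto cull_mlocked;
 *	}
 */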

#endif	/* _LINUX_RMAP_H */