xref: /linux-6.15/include/linux/mmu_notifier.h (revision fe2a1bb1)
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/srcu.h>

struct mmu_notifier;
struct mmu_notifier_ops;

#ifdef CONFIG_MMU_NOTIFIER

/*
 * The mmu notifier_mm structure is allocated and installed in
 * mm->mmu_notifier_mm inside the mm_take_all_locks() protected
 * critical section and it's released only when mm_count reaches zero
 * in mmdrop().
 */
struct mmu_notifier_mm {
	/* all mmu notifiers registered in this mm are queued in this list */
	struct hlist_head list;
	/* to serialize the list modifications and hlist_unhashed */
	spinlock_t lock;
};

struct mmu_notifier_ops {
	/*
	 * Called either by mmu_notifier_unregister or when the mm is
	 * being destroyed by exit_mmap, always before all pages are
	 * freed. This can run concurrently with other mmu notifier
	 * methods (the ones invoked outside the mm context) and it
	 * should tear down all secondary mmu mappings and freeze the
	 * secondary mmu. If this method isn't implemented you have to
	 * be sure that nothing could possibly write to the pages
	 * through the secondary mmu by the time the last thread with
	 * tsk->mm == mm exits.
	 *
	 * As a side note: the pages freed after ->release returns could
	 * be immediately reallocated by the gart at an alias physical
	 * address with a different cache model, so if ->release isn't
	 * implemented because all _software_ driven memory accesses
	 * through the secondary mmu are terminated by the time the
	 * last thread of this mm quits, you also have to be sure that
	 * speculative _hardware_ operations can't allocate dirty
	 * cachelines in the cpu that could not be snooped and made
	 * coherent with the other read and write operations happening
	 * through the gart alias address, which would lead to memory
	 * corruption.
	 */
	void (*release)(struct mmu_notifier *mn,
			struct mm_struct *mm);

	/*
	 * clear_flush_young is called after the VM test-and-clears
	 * the young/accessed bitflag in the pte. This way the VM will
	 * provide proper aging for accesses to the page through the
	 * secondary MMUs and not only for the ones through the Linux
	 * pte.
	 */
	int (*clear_flush_young)(struct mmu_notifier *mn,
				 struct mm_struct *mm,
				 unsigned long address);

	/*
	 * test_young is called to check the young/accessed bitflag in
	 * the secondary pte. This is used to know if the page is
	 * frequently used without actually clearing the flag or tearing
	 * down the secondary mapping on the page.
	 */
	int (*test_young)(struct mmu_notifier *mn,
			  struct mm_struct *mm,
			  unsigned long address);

	/*
	 * change_pte is called when a pte's mapping to a page is
	 * changed: for example, when ksm remaps the pte to point to a
	 * new shared page.
	 */
	void (*change_pte)(struct mmu_notifier *mn,
			   struct mm_struct *mm,
			   unsigned long address,
			   pte_t pte);

	/*
	 * Before this is invoked any secondary MMU is still ok to
	 * read/write to the page previously pointed to by the Linux
	 * pte because the page hasn't been freed yet and it won't be
	 * freed until this returns. If required, set_page_dirty() has
	 * to be called internally by this method.
	 */
	void (*invalidate_page)(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long address);

	/*
	 * invalidate_range_start() and invalidate_range_end() must be
	 * paired and are called only when the mmap_sem and/or the
	 * locks protecting the reverse maps are held. The subsystem
	 * must guarantee that no additional references are taken to
	 * the pages in the range between the call to
	 * invalidate_range_start() and the matching call to
	 * invalidate_range_end().
	 *
	 * Invalidation of multiple concurrent ranges may be
	 * optionally permitted by the driver. Either way the
	 * establishment of sptes is forbidden in the range passed to
	 * invalidate_range_start/end for the whole duration of the
	 * invalidate_range_start/end critical section.
	 *
	 * invalidate_range_start() is called when all pages in the
	 * range are still mapped and have at least a refcount of one.
	 *
	 * invalidate_range_end() is called when all pages in the
	 * range have been unmapped and the pages have been freed by
	 * the VM.
	 *
	 * The VM will remove the page table entries and potentially
	 * the page between invalidate_range_start() and
	 * invalidate_range_end(). If the page must not be freed
	 * because of pending I/O or other circumstances then the
	 * invalidate_range_start() callback (or the initial mapping
	 * by the driver) must make sure that the refcount is kept
	 * elevated.
	 *
	 * If the driver increases the refcount when the pages are
	 * initially mapped into an address space then either
	 * invalidate_range_start() or invalidate_range_end() may
	 * decrease the refcount. If the refcount is decreased on
	 * invalidate_range_start() then the VM can free pages as page
	 * table entries are removed.  If the refcount is only
	 * dropped on invalidate_range_end() then the driver itself
	 * will drop the last refcount but it must take care to flush
	 * any secondary tlb before doing the final free on the
	 * page. Pages will no longer be referenced by the linux
	 * address space but may still be referenced by sptes until
	 * the last refcount is dropped.
	 */
	void (*invalidate_range_start)(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long start, unsigned long end);
	void (*invalidate_range_end)(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start, unsigned long end);
};
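
/*
 * Example: a minimal driver-side implementation of the ops above.
 * This is an illustrative sketch, not part of this header; struct
 * my_dev and the my_dev_*() helpers are hypothetical.  A driver
 * typically bumps a counter that forbids establishing new sptes in
 * the range, tears down its existing sptes and flushes its secondary
 * TLB in invalidate_range_start(), and drops the counter again in
 * invalidate_range_end():
 *
 *	static void my_invalidate_range_start(struct mmu_notifier *mn,
 *					      struct mm_struct *mm,
 *					      unsigned long start,
 *					      unsigned long end)
 *	{
 *		struct my_dev *dev = container_of(mn, struct my_dev, mn);
 *
 *		spin_lock(&dev->lock);
 *		dev->invalidate_count++;
 *		my_dev_unmap(dev, start, end);
 *		my_dev_flush_tlb(dev);
 *		spin_unlock(&dev->lock);
 *	}
 *
 *	static const struct mmu_notifier_ops my_mn_ops = {
 *		.release		= my_release,
 *		.invalidate_range_start	= my_invalidate_range_start,
 *		.invalidate_range_end	= my_invalidate_range_end,
 *	};
 */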

/*
 * The notifier chains are protected by mmap_sem and/or the reverse map
 * semaphores. Notifier chains are only changed when all reverse maps and
 * the mmap_sem locks are taken.
 *
 * Therefore notifier chains can only be traversed when either
 *
 * 1. mmap_sem is held.
 * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->rwsem).
 * 3. No other concurrent thread can access the list (release).
 */
struct mmu_notifier {
	struct hlist_node hlist;
	const struct mmu_notifier_ops *ops;
};
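
/*
 * Users are expected to embed this struct in their own driver state
 * and recover the container with container_of() in each callback.  A
 * sketch (struct my_dev and my_dev_spte_young() are hypothetical,
 * continuing the example above):
 *
 *	struct my_dev {
 *		struct mmu_notifier mn;
 *		spinlock_t lock;
 *		int invalidate_count;
 *	};
 *
 *	static int my_test_young(struct mmu_notifier *mn,
 *				 struct mm_struct *mm,
 *				 unsigned long address)
 *	{
 *		struct my_dev *dev = container_of(mn, struct my_dev, mn);
 *
 *		return my_dev_spte_young(dev, address);
 *	}
 */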

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return unlikely(mm->mmu_notifier_mm);
}

extern int mmu_notifier_register(struct mmu_notifier *mn,
				 struct mm_struct *mm);
extern int __mmu_notifier_register(struct mmu_notifier *mn,
				   struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *mn,
				    struct mm_struct *mm);
extern void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
					       struct mm_struct *mm);
extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					    unsigned long address);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
				     unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
				      unsigned long address, pte_t pte);
extern void __mmu_notifier_invalidate_page(struct mm_struct *mm,
					   unsigned long address);
extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end);
extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
						unsigned long start,
						unsigned long end);
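
/*
 * Typical registration sequence, as a sketch (dev and my_mn_ops are
 * hypothetical, carried over from the examples above).
 * mmu_notifier_register() takes mmap_sem for writing internally, so
 * callers that already hold mmap_sem must use
 * __mmu_notifier_register() instead:
 *
 *	dev->mn.ops = &my_mn_ops;
 *	ret = mmu_notifier_register(&dev->mn, current->mm);
 *	if (ret)
 *		return ret;
 *	...
 *	mmu_notifier_unregister(&dev->mn, mm);
 *
 * Note that ->release may run from mmu_notifier_unregister() if
 * exit_mmap() has not already invoked it.
 */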

static inline void mmu_notifier_release(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_release(mm);
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long address)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_flush_young(mm, address);
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_test_young(mm, address);
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_change_pte(mm, address, pte);
}

static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
						unsigned long address)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_page(mm, address);
}

static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_start(mm, start, end);
}

static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
						     unsigned long start,
						     unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_end(mm, start, end);
}

static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
	mm->mmu_notifier_mm = NULL;
}

static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_mm_destroy(mm);
}

#define ptep_clear_flush_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address);		\
	__young;							\
})

#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address);		\
	__young;							\
})
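
/*
 * Example use of the _notify variant from a page-aging walk (a
 * sketch; the surrounding rmap walk with vma, address, pte and
 * referenced is assumed and not shown).  ORing in the secondary-MMU
 * young state means a page referenced only through a secondary MMU
 * still counts as recently accessed:
 *
 *	if (ptep_clear_flush_young_notify(vma, address, pte))
 *		referenced++;
 */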

/*
 * set_pte_at_notify() sets the pte _after_ running the notifier.
 * It is safe to update the secondary MMUs first because the primary
 * MMU pte invalidate must already have happened, via
 * ptep_clear_flush(), before set_pte_at_notify() is invoked.
 * Updating the secondary MMUs first is required when we change both
 * the protection of the mapping from read-only to read-write and the
 * pfn (as during copy-on-write page faults): otherwise the old page
 * would remain mapped read-only in the secondary MMUs after the new
 * page is already writable by some CPU through the primary MMU.
 */
#define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
({									\
	struct mm_struct *___mm = __mm;					\
	unsigned long ___address = __address;				\
	pte_t ___pte = __pte;						\
									\
	mmu_notifier_change_pte(___mm, ___address, ___pte);		\
	set_pte_at(___mm, ___address, __ptep, ___pte);			\
})
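
/*
 * Example of the ordering described above, in the style of a
 * copy-on-write fault (a sketch; the surrounding fault handling,
 * new_page and page_table are assumed):
 *
 *	entry = mk_pte(new_page, vma->vm_page_prot);
 *	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 *	ptep_clear_flush(vma, address, page_table);
 *	set_pte_at_notify(mm, address, page_table, entry);
 *
 * Because the old pte is flushed from the primary MMU before
 * set_pte_at_notify() runs, no CPU can write to the new page while a
 * secondary MMU still maps the old read-only one.
 */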

extern void mmu_notifier_call_srcu(struct rcu_head *rcu,
				   void (*func)(struct rcu_head *rcu));
extern void mmu_notifier_synchronize(void);
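
/*
 * mmu_notifier_call_srcu() and mmu_notifier_synchronize() let a
 * driver defer freeing the object that embeds its notifier until all
 * concurrent notifier callbacks are guaranteed to have completed.  A
 * sketch (assuming struct my_dev gains a struct rcu_head rcu member;
 * my_free() is hypothetical):
 *
 *	static void my_free(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct my_dev, rcu));
 *	}
 *
 *	mmu_notifier_unregister_no_release(&dev->mn, mm);
 *	mmu_notifier_call_srcu(&dev->rcu, my_free);
 */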

#else /* CONFIG_MMU_NOTIFIER */

static inline void mmu_notifier_release(struct mm_struct *mm)
{
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long address)
{
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
}

static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
						unsigned long address)
{
}

static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
}

static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
						     unsigned long start,
						     unsigned long end)
{
}

static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
}

static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
}

#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define set_pte_at_notify set_pte_at

#endif /* CONFIG_MMU_NOTIFIER */

#endif /* _LINUX_MMU_NOTIFIER_H */