#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/srcu.h>

struct mmu_notifier;
struct mmu_notifier_ops;

#ifdef CONFIG_MMU_NOTIFIER

/*
 * The mmu_notifier_mm structure is allocated and installed in
 * mm->mmu_notifier_mm inside the mm_take_all_locks() protected
 * critical section and it's released only when mm_count reaches zero
 * in mmdrop().
 */
struct mmu_notifier_mm {
	/* all mmu notifiers registered in this mm are queued in this list */
	struct hlist_head list;
	/* to serialize the list modifications and hlist_unhashed */
	spinlock_t lock;
};

struct mmu_notifier_ops {
	/*
	 * Called either by mmu_notifier_unregister or when the mm is
	 * being destroyed by exit_mmap, always before all pages are
	 * freed. This can run concurrently with other mmu notifier
	 * methods (the ones invoked outside the mm context) and it
	 * should tear down all secondary mmu mappings and freeze the
	 * secondary mmu. If this method isn't implemented you have to
	 * be sure that nothing could possibly write to the pages
	 * through the secondary mmu by the time the last thread with
	 * tsk->mm == mm exits.
	 *
	 * As a side note: the pages freed after ->release returns could
	 * be immediately reallocated by the gart at an alias physical
	 * address with a different cache model, so if ->release isn't
	 * implemented because all _software_ driven memory accesses
	 * through the secondary mmu are terminated by the time the
	 * last thread of this mm quits, you also have to be sure that
	 * speculative _hardware_ operations can't allocate dirty
	 * cachelines in the cpu that could not be snooped and made
	 * coherent with the other read and write operations happening
	 * through the gart alias address, leading to memory
	 * corruption.
	 */
	void (*release)(struct mmu_notifier *mn,
			struct mm_struct *mm);

	/*
	 * clear_flush_young is called after the VM test-and-clears
	 * the young/accessed bitflag in the pte. This way the VM will
	 * provide proper aging for accesses to the page through the
	 * secondary MMUs and not only for the ones through the Linux
	 * pte.
	 * Start-end is necessary in case the secondary MMU is mapping the page
	 * at a smaller granularity than the primary MMU.
	 */
	int (*clear_flush_young)(struct mmu_notifier *mn,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/*
	 * clear_young is a lightweight version of clear_flush_young. Like the
	 * latter, it is supposed to test-and-clear the young/accessed bitflag
	 * in the secondary pte, but it may omit flushing the secondary tlb.
	 */
	int (*clear_young)(struct mmu_notifier *mn,
			   struct mm_struct *mm,
			   unsigned long start,
			   unsigned long end);

	/*
	 * test_young is called to check the young/accessed bitflag in
	 * the secondary pte. This is used to know if the page is
	 * frequently used without actually clearing the flag or tearing
	 * down the secondary mapping on the page.
	 */
	int (*test_young)(struct mmu_notifier *mn,
			  struct mm_struct *mm,
			  unsigned long address);

	/*
	 * change_pte is called when a pte that maps a page is changed:
	 * for example, when ksm remaps the pte to point to a new shared page.
	 */
	void (*change_pte)(struct mmu_notifier *mn,
			   struct mm_struct *mm,
			   unsigned long address,
			   pte_t pte);

	/*
	 * invalidate_range_start() and invalidate_range_end() must be
	 * paired and are called only when the mmap_sem and/or the
	 * locks protecting the reverse maps are held. If the subsystem
	 * can't guarantee that no additional references are taken to
	 * the pages in the range, it has to implement the
	 * invalidate_range() notifier to remove any references taken
	 * after invalidate_range_start().
	 *
	 * Invalidation of multiple concurrent ranges may be
	 * optionally permitted by the driver. Either way the
	 * establishment of sptes is forbidden in the range passed to
	 * invalidate_range_start/end for the whole duration of the
	 * invalidate_range_start/end critical section.
	 *
	 * invalidate_range_start() is called when all pages in the
	 * range are still mapped and have at least a refcount of one.
	 *
	 * invalidate_range_end() is called when all pages in the
	 * range have been unmapped and the pages have been freed by
	 * the VM.
	 *
	 * The VM will remove the page table entries and potentially
	 * the page between invalidate_range_start() and
	 * invalidate_range_end(). If the page must not be freed
	 * because of pending I/O or other circumstances then the
	 * invalidate_range_start() callback (or the initial mapping
	 * by the driver) must make sure that the refcount is kept
	 * elevated.
	 *
	 * If the driver increases the refcount when the pages are
	 * initially mapped into an address space then either
	 * invalidate_range_start() or invalidate_range_end() may
	 * decrease the refcount. If the refcount is decreased on
	 * invalidate_range_start() then the VM can free pages as page
	 * table entries are removed. If the refcount is only
	 * dropped on invalidate_range_end() then the driver itself
	 * will drop the last refcount but it must take care to flush
	 * any secondary tlb before doing the final free on the
	 * page. Pages will no longer be referenced by the linux
	 * address space but may still be referenced by sptes until
	 * the last refcount is dropped.
	 */
	void (*invalidate_range_start)(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long start, unsigned long end);
	void (*invalidate_range_end)(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start, unsigned long end);

	/*
	 * invalidate_range() is either called between
	 * invalidate_range_start() and invalidate_range_end() when the
	 * VM has to free pages that were unmapped, but before the
	 * pages are actually freed, or outside of _start()/_end() when
	 * a (remote) TLB flush is necessary.
	 *
	 * If invalidate_range() is used to manage a non-CPU TLB with
	 * shared page-tables, it is not necessary to implement the
	 * invalidate_range_start()/end() notifiers, as
	 * invalidate_range() already catches the points in time when an
	 * external TLB range needs to be flushed.
	 *
	 * The invalidate_range() function is called under the ptl
	 * spin-lock and is not allowed to sleep.
	 *
	 * Note that this function might be called with just a sub-range
	 * of what was passed to invalidate_range_start()/end(), if
	 * called between those functions.
	 */
	void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
				 unsigned long start, unsigned long end);
};

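/*
 * To illustrate how the callbacks above fit together, here is a minimal
 * sketch of an ops table a hypothetical driver could install.  The
 * my_dev_* type and helpers are made up for the example; a real driver
 * would substitute its own secondary-MMU invalidation primitives.
 *
 *	struct my_dev {
 *		struct mmu_notifier mn;	// embedded, registered per mm
 *	};
 *
 *	static void my_dev_release(struct mmu_notifier *mn,
 *				   struct mm_struct *mm)
 *	{
 *		struct my_dev *dev = container_of(mn, struct my_dev, mn);
 *
 *		// tear down all secondary mappings and stop the device
 *		// from issuing further accesses through them
 *		my_dev_teardown_all(dev);
 *	}
 *
 *	static void my_dev_invalidate_range_start(struct mmu_notifier *mn,
 *						  struct mm_struct *mm,
 *						  unsigned long start,
 *						  unsigned long end)
 *	{
 *		struct my_dev *dev = container_of(mn, struct my_dev, mn);
 *
 *		// unmap [start, end) from the secondary MMU and block new
 *		// sptes from being established until ..._end() runs
 *		my_dev_unmap_and_block(dev, start, end);
 *	}
 *
 *	static void my_dev_invalidate_range_end(struct mmu_notifier *mn,
 *						struct mm_struct *mm,
 *						unsigned long start,
 *						unsigned long end)
 *	{
 *		struct my_dev *dev = container_of(mn, struct my_dev, mn);
 *
 *		// allow new secondary mappings to be established again
 *		my_dev_unblock(dev, start, end);
 *	}
 *
 *	static const struct mmu_notifier_ops my_dev_mmu_notifier_ops = {
 *		.release		= my_dev_release,
 *		.invalidate_range_start	= my_dev_invalidate_range_start,
 *		.invalidate_range_end	= my_dev_invalidate_range_end,
 *	};
 */
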
/*
 * The notifier chains are protected by mmap_sem and/or the reverse map
 * semaphores. Notifier chains are only changed when all reverse maps and
 * the mmap_sem locks are taken.
 *
 * Therefore notifier chains can only be traversed when either
 *
 * 1. mmap_sem is held.
 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
 * 3. No other concurrent thread can access the list (release)
 */
struct mmu_notifier {
	struct hlist_node hlist;
	const struct mmu_notifier_ops *ops;
};

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return unlikely(mm->mmu_notifier_mm);
}

extern int mmu_notifier_register(struct mmu_notifier *mn,
				 struct mm_struct *mm);
extern int __mmu_notifier_register(struct mmu_notifier *mn,
				   struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *mn,
				    struct mm_struct *mm);
extern void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
					       struct mm_struct *mm);
extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end);
extern int __mmu_notifier_clear_young(struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
				     unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
				      unsigned long address, pte_t pte);
extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				  unsigned long start, unsigned long end);
extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end);

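/*
 * Registration ties a notifier to one mm.  A minimal usage sketch,
 * continuing the hypothetical my_dev example above (error handling and
 * device setup omitted):
 *
 *	static int my_dev_bind_mm(struct my_dev *dev)
 *	{
 *		dev->mn.ops = &my_dev_mmu_notifier_ops;
 *
 *		// mmu_notifier_register() takes mmap_sem itself; callers
 *		// already holding mmap_sem for write use
 *		// __mmu_notifier_register() instead
 *		return mmu_notifier_register(&dev->mn, current->mm);
 *	}
 *
 *	static void my_dev_unbind_mm(struct my_dev *dev, struct mm_struct *mm)
 *	{
 *		// runs ->release() here if it has not run yet; the
 *		// _no_release() variant skips that callback
 *		mmu_notifier_unregister(&dev->mn, mm);
 *	}
 */
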
static inline void mmu_notifier_release(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_release(mm);
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_flush_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_clear_young(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_test_young(mm, address);
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_change_pte(mm, address, pte);
}

static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_start(mm, start, end);
}

static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_end(mm, start, end);
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range(mm, start, end);
}

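/*
 * Callers in the core VM bracket page-table teardown with the wrappers
 * above.  A simplified sketch of the expected pattern (the unmap step is
 * stand-in pseudocode for whatever clears the ptes in [start, end)):
 *
 *	mmu_notifier_invalidate_range_start(mm, start, end);
 *	// ... clear the primary MMU ptes for [start, end), calling
 *	// mmu_notifier_invalidate_range() (or a *_notify flush helper)
 *	// once the ptes are gone but before the pages are freed ...
 *	mmu_notifier_invalidate_range_end(mm, start, end);
 */
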
static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
	mm->mmu_notifier_mm = NULL;
}

static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_mm_destroy(mm);
}

#define ptep_clear_flush_young_notify(__vma, __address, __ptep)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PMD_SIZE);	\
	__young;							\
})

#define ptep_clear_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PMD_SIZE);	\
	__young;							\
})

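/*
 * Reclaim-style code uses the *_young_notify() helpers so that an access
 * through a secondary MMU keeps a page "young" just like an access through
 * the Linux pte.  A rough sketch of the idea (the surrounding page-table
 * walk, locking and variables are omitted/hypothetical):
 *
 *	// ptep points at the pte mapping the page at addr in vma
 *	if (ptep_clear_flush_young_notify(vma, addr, ptep))
 *		referenced = true;	// recently used, keep the page
 */
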
#define ptep_clear_flush_notify(__vma, __address, __ptep)		\
({									\
	unsigned long ___addr = __address & PAGE_MASK;			\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pte_t ___pte;							\
									\
	___pte = ptep_clear_flush(__vma, __address, __ptep);		\
	mmu_notifier_invalidate_range(___mm, ___addr,			\
					___addr + PAGE_SIZE);		\
									\
	___pte;								\
})

#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pmd_t ___pmd;							\
									\
	___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PMD_SIZE);	\
									\
	___pmd;								\
})

#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PUD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pud_t ___pud;							\
									\
	___pud = pudp_huge_clear_flush(__vma, __haddr, __pud);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PUD_SIZE);	\
									\
	___pud;								\
})

/*
 * set_pte_at_notify() sets the pte _after_ running the notifier.
 * It is safe to update the secondary MMUs first because the primary MMU
 * pte invalidate must have already happened with a ptep_clear_flush() before
 * set_pte_at_notify() has been invoked.  Updating the secondary MMUs first is
 * required when we change both the protection of the mapping from read-only to
 * read-write and the pfn (like during copy on write page faults). Otherwise the
 * old page would remain mapped readonly in the secondary MMUs after the new
 * page is already writable by some CPU through the primary MMU.
 */
#define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
({									\
	struct mm_struct *___mm = __mm;					\
	unsigned long ___address = __address;				\
	pte_t ___pte = __pte;						\
									\
	mmu_notifier_change_pte(___mm, ___address, ___pte);		\
	set_pte_at(___mm, ___address, __ptep, ___pte);			\
})

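/*
 * The ordering described above matters when a page is replaced in place,
 * e.g. on a copy-on-write fault.  A simplified sketch of the sequence the
 * comment has in mind (locking, accounting and rmap updates omitted):
 *
 *	// 1. invalidate the old, read-only pte in the primary MMU
 *	orig_pte = ptep_clear_flush_notify(vma, addr, ptep);
 *	// 2. build the new, writable pte for the replacement page
 *	new_pte = mk_pte(new_page, vma->vm_page_prot);
 *	new_pte = pte_mkwrite(pte_mkdirty(new_pte));
 *	// 3. update the secondary MMUs via ->change_pte(), then the
 *	//    primary pte, so no CPU can write the new page while a
 *	//    secondary MMU still maps the old one
 *	set_pte_at_notify(mm, addr, ptep, new_pte);
 */
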
extern void mmu_notifier_call_srcu(struct rcu_head *rcu,
				   void (*func)(struct rcu_head *rcu));
extern void mmu_notifier_synchronize(void);

#else /* CONFIG_MMU_NOTIFIER */

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return 0;
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
{
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
}

static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
}

static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
}

#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at

#endif /* CONFIG_MMU_NOTIFIER */

#endif /* _LINUX_MMU_NOTIFIER_H */