xref: /linux-6.15/include/linux/mmu_notifier.h (revision 02aeb2f2)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_MMU_NOTIFIER_H
3 #define _LINUX_MMU_NOTIFIER_H
4 
5 #include <linux/list.h>
6 #include <linux/spinlock.h>
7 #include <linux/mm_types.h>
8 #include <linux/srcu.h>
9 #include <linux/interval_tree.h>
10 
11 struct mmu_notifier_mm;
12 struct mmu_notifier;
13 struct mmu_notifier_range;
14 struct mmu_interval_notifier;
15 
16 /**
17  * enum mmu_notifier_event - reason for the mmu notifier callback
18  * @MMU_NOTIFY_UNMAP: either munmap() that unmap the range or a mremap() that
19  * move the range
20  *
21  * @MMU_NOTIFY_CLEAR: clear page table entry (many reasons for this like
22  * madvise() or replacing a page by another one, ...).
23  *
24  * @MMU_NOTIFY_PROTECTION_VMA: update is due to protection change for the range
25  * ie using the vma access permission (vm_page_prot) to update the whole range
26  * is enough no need to inspect changes to the CPU page table (mprotect()
27  * syscall)
28  *
29  * @MMU_NOTIFY_PROTECTION_PAGE: update is due to change in read/write flag for
30  * pages in the range so to mirror those changes the user must inspect the CPU
31  * page table (from the end callback).
32  *
33  * @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still same page and same
34  * access flags). User should soft dirty the page in the end callback to make
35  * sure that anyone relying on soft dirtyness catch pages that might be written
36  * through non CPU mappings.
37  *
38  * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal
39  * that the mm refcount is zero and the range is no longer accessible.
40  */
41 enum mmu_notifier_event {
42 	MMU_NOTIFY_UNMAP = 0,
43 	MMU_NOTIFY_CLEAR,
44 	MMU_NOTIFY_PROTECTION_VMA,
45 	MMU_NOTIFY_PROTECTION_PAGE,
46 	MMU_NOTIFY_SOFT_DIRTY,
47 	MMU_NOTIFY_RELEASE,
48 };
49 
50 #define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
51 
52 struct mmu_notifier_ops {
53 	/*
54 	 * Called either by mmu_notifier_unregister or when the mm is
55 	 * being destroyed by exit_mmap, always before all pages are
56 	 * freed. This can run concurrently with other mmu notifier
57 	 * methods (the ones invoked outside the mm context) and it
58 	 * should tear down all secondary mmu mappings and freeze the
59 	 * secondary mmu. If this method isn't implemented you've to
60 	 * be sure that nothing could possibly write to the pages
61 	 * through the secondary mmu by the time the last thread with
62 	 * tsk->mm == mm exits.
63 	 *
64 	 * As side note: the pages freed after ->release returns could
65 	 * be immediately reallocated by the gart at an alias physical
66 	 * address with a different cache model, so if ->release isn't
67 	 * implemented because all _software_ driven memory accesses
68 	 * through the secondary mmu are terminated by the time the
69 	 * last thread of this mm quits, you've also to be sure that
70 	 * speculative _hardware_ operations can't allocate dirty
71 	 * cachelines in the cpu that could not be snooped and made
72 	 * coherent with the other read and write operations happening
73 	 * through the gart alias address, so leading to memory
74 	 * corruption.
75 	 */
76 	void (*release)(struct mmu_notifier *mn,
77 			struct mm_struct *mm);
78 
79 	/*
80 	 * clear_flush_young is called after the VM is
81 	 * test-and-clearing the young/accessed bitflag in the
82 	 * pte. This way the VM will provide proper aging to the
83 	 * accesses to the page through the secondary MMUs and not
84 	 * only to the ones through the Linux pte.
85 	 * Start-end is necessary in case the secondary MMU is mapping the page
86 	 * at a smaller granularity than the primary MMU.
87 	 */
88 	int (*clear_flush_young)(struct mmu_notifier *mn,
89 				 struct mm_struct *mm,
90 				 unsigned long start,
91 				 unsigned long end);
92 
93 	/*
94 	 * clear_young is a lightweight version of clear_flush_young. Like the
95 	 * latter, it is supposed to test-and-clear the young/accessed bitflag
96 	 * in the secondary pte, but it may omit flushing the secondary tlb.
97 	 */
98 	int (*clear_young)(struct mmu_notifier *mn,
99 			   struct mm_struct *mm,
100 			   unsigned long start,
101 			   unsigned long end);
102 
103 	/*
104 	 * test_young is called to check the young/accessed bitflag in
105 	 * the secondary pte. This is used to know if the page is
106 	 * frequently used without actually clearing the flag or tearing
107 	 * down the secondary mapping on the page.
108 	 */
109 	int (*test_young)(struct mmu_notifier *mn,
110 			  struct mm_struct *mm,
111 			  unsigned long address);
112 
113 	/*
114 	 * change_pte is called in cases that pte mapping to page is changed:
115 	 * for example, when ksm remaps pte to point to a new shared page.
116 	 */
117 	void (*change_pte)(struct mmu_notifier *mn,
118 			   struct mm_struct *mm,
119 			   unsigned long address,
120 			   pte_t pte);
121 
122 	/*
123 	 * invalidate_range_start() and invalidate_range_end() must be
124 	 * paired and are called only when the mmap_sem and/or the
125 	 * locks protecting the reverse maps are held. If the subsystem
126 	 * can't guarantee that no additional references are taken to
127 	 * the pages in the range, it has to implement the
128 	 * invalidate_range() notifier to remove any references taken
129 	 * after invalidate_range_start().
130 	 *
131 	 * Invalidation of multiple concurrent ranges may be
132 	 * optionally permitted by the driver. Either way the
133 	 * establishment of sptes is forbidden in the range passed to
134 	 * invalidate_range_begin/end for the whole duration of the
135 	 * invalidate_range_begin/end critical section.
136 	 *
137 	 * invalidate_range_start() is called when all pages in the
138 	 * range are still mapped and have at least a refcount of one.
139 	 *
140 	 * invalidate_range_end() is called when all pages in the
141 	 * range have been unmapped and the pages have been freed by
142 	 * the VM.
143 	 *
144 	 * The VM will remove the page table entries and potentially
145 	 * the page between invalidate_range_start() and
146 	 * invalidate_range_end(). If the page must not be freed
147 	 * because of pending I/O or other circumstances then the
148 	 * invalidate_range_start() callback (or the initial mapping
149 	 * by the driver) must make sure that the refcount is kept
150 	 * elevated.
151 	 *
152 	 * If the driver increases the refcount when the pages are
153 	 * initially mapped into an address space then either
154 	 * invalidate_range_start() or invalidate_range_end() may
155 	 * decrease the refcount. If the refcount is decreased on
156 	 * invalidate_range_start() then the VM can free pages as page
157 	 * table entries are removed.  If the refcount is only
158 	 * droppped on invalidate_range_end() then the driver itself
159 	 * will drop the last refcount but it must take care to flush
160 	 * any secondary tlb before doing the final free on the
161 	 * page. Pages will no longer be referenced by the linux
162 	 * address space but may still be referenced by sptes until
163 	 * the last refcount is dropped.
164 	 *
165 	 * If blockable argument is set to false then the callback cannot
166 	 * sleep and has to return with -EAGAIN. 0 should be returned
167 	 * otherwise. Please note that if invalidate_range_start approves
168 	 * a non-blocking behavior then the same applies to
169 	 * invalidate_range_end.
170 	 *
171 	 */
172 	int (*invalidate_range_start)(struct mmu_notifier *mn,
173 				      const struct mmu_notifier_range *range);
174 	void (*invalidate_range_end)(struct mmu_notifier *mn,
175 				     const struct mmu_notifier_range *range);
176 
177 	/*
178 	 * invalidate_range() is either called between
179 	 * invalidate_range_start() and invalidate_range_end() when the
180 	 * VM has to free pages that where unmapped, but before the
181 	 * pages are actually freed, or outside of _start()/_end() when
182 	 * a (remote) TLB is necessary.
183 	 *
184 	 * If invalidate_range() is used to manage a non-CPU TLB with
185 	 * shared page-tables, it not necessary to implement the
186 	 * invalidate_range_start()/end() notifiers, as
187 	 * invalidate_range() alread catches the points in time when an
188 	 * external TLB range needs to be flushed. For more in depth
189 	 * discussion on this see Documentation/vm/mmu_notifier.rst
190 	 *
191 	 * Note that this function might be called with just a sub-range
192 	 * of what was passed to invalidate_range_start()/end(), if
193 	 * called between those functions.
194 	 */
195 	void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
196 				 unsigned long start, unsigned long end);
197 
198 	/*
199 	 * These callbacks are used with the get/put interface to manage the
200 	 * lifetime of the mmu_notifier memory. alloc_notifier() returns a new
201 	 * notifier for use with the mm.
202 	 *
203 	 * free_notifier() is only called after the mmu_notifier has been
204 	 * fully put, calls to any ops callback are prevented and no ops
205 	 * callbacks are currently running. It is called from a SRCU callback
206 	 * and cannot sleep.
207 	 */
208 	struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
209 	void (*free_notifier)(struct mmu_notifier *mn);
210 };
211 
212 /*
213  * The notifier chains are protected by mmap_sem and/or the reverse map
214  * semaphores. Notifier chains are only changed when all reverse maps and
215  * the mmap_sem locks are taken.
216  *
217  * Therefore notifier chains can only be traversed when either
218  *
219  * 1. mmap_sem is held.
220  * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
221  * 3. No other concurrent thread can access the list (release)
222  */
223 struct mmu_notifier {
224 	struct hlist_node hlist;
225 	const struct mmu_notifier_ops *ops;
226 	struct mm_struct *mm;
227 	struct rcu_head rcu;
228 	unsigned int users;
229 };
230 
231 /**
232  * struct mmu_interval_notifier_ops
233  * @invalidate: Upon return the caller must stop using any SPTEs within this
234  *              range. This function can sleep. Return false only if sleeping
235  *              was required but mmu_notifier_range_blockable(range) is false.
236  */
237 struct mmu_interval_notifier_ops {
238 	bool (*invalidate)(struct mmu_interval_notifier *mni,
239 			   const struct mmu_notifier_range *range,
240 			   unsigned long cur_seq);
241 };
242 
243 struct mmu_interval_notifier {
244 	struct interval_tree_node interval_tree;
245 	const struct mmu_interval_notifier_ops *ops;
246 	struct mm_struct *mm;
247 	struct hlist_node deferred_item;
248 	unsigned long invalidate_seq;
249 };
250 
251 #ifdef CONFIG_MMU_NOTIFIER
252 
253 #ifdef CONFIG_LOCKDEP
254 extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
255 #endif
256 
257 struct mmu_notifier_range {
258 	struct vm_area_struct *vma;
259 	struct mm_struct *mm;
260 	unsigned long start;
261 	unsigned long end;
262 	unsigned flags;
263 	enum mmu_notifier_event event;
264 };
265 
266 static inline int mm_has_notifiers(struct mm_struct *mm)
267 {
268 	return unlikely(mm->mmu_notifier_mm);
269 }
270 
271 struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
272 					     struct mm_struct *mm);
273 static inline struct mmu_notifier *
274 mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
275 {
276 	struct mmu_notifier *ret;
277 
278 	down_write(&mm->mmap_sem);
279 	ret = mmu_notifier_get_locked(ops, mm);
280 	up_write(&mm->mmap_sem);
281 	return ret;
282 }
283 void mmu_notifier_put(struct mmu_notifier *mn);
284 void mmu_notifier_synchronize(void);
285 
286 extern int mmu_notifier_register(struct mmu_notifier *mn,
287 				 struct mm_struct *mm);
288 extern int __mmu_notifier_register(struct mmu_notifier *mn,
289 				   struct mm_struct *mm);
290 extern void mmu_notifier_unregister(struct mmu_notifier *mn,
291 				    struct mm_struct *mm);
292 
293 unsigned long mmu_interval_read_begin(struct mmu_interval_notifier *mni);
294 int mmu_interval_notifier_insert(struct mmu_interval_notifier *mni,
295 				 struct mm_struct *mm, unsigned long start,
296 				 unsigned long length,
297 				 const struct mmu_interval_notifier_ops *ops);
298 int mmu_interval_notifier_insert_locked(
299 	struct mmu_interval_notifier *mni, struct mm_struct *mm,
300 	unsigned long start, unsigned long length,
301 	const struct mmu_interval_notifier_ops *ops);
302 void mmu_interval_notifier_remove(struct mmu_interval_notifier *mni);
303 
304 /**
305  * mmu_interval_set_seq - Save the invalidation sequence
306  * @mni - The mni passed to invalidate
307  * @cur_seq - The cur_seq passed to the invalidate() callback
308  *
309  * This must be called unconditionally from the invalidate callback of a
310  * struct mmu_interval_notifier_ops under the same lock that is used to call
311  * mmu_interval_read_retry(). It updates the sequence number for later use by
312  * mmu_interval_read_retry(). The provided cur_seq will always be odd.
313  *
314  * If the caller does not call mmu_interval_read_begin() or
315  * mmu_interval_read_retry() then this call is not required.
316  */
317 static inline void mmu_interval_set_seq(struct mmu_interval_notifier *mni,
318 					unsigned long cur_seq)
319 {
320 	WRITE_ONCE(mni->invalidate_seq, cur_seq);
321 }
322 
323 /**
324  * mmu_interval_read_retry - End a read side critical section against a VA range
325  * mni: The range
326  * seq: The return of the paired mmu_interval_read_begin()
327  *
328  * This MUST be called under a user provided lock that is also held
329  * unconditionally by op->invalidate() when it calls mmu_interval_set_seq().
330  *
331  * Each call should be paired with a single mmu_interval_read_begin() and
332  * should be used to conclude the read side.
333  *
334  * Returns true if an invalidation collided with this critical section, and
335  * the caller should retry.
336  */
337 static inline bool mmu_interval_read_retry(struct mmu_interval_notifier *mni,
338 					   unsigned long seq)
339 {
340 	return mni->invalidate_seq != seq;
341 }
342 
343 /**
344  * mmu_interval_check_retry - Test if a collision has occurred
345  * mni: The range
346  * seq: The return of the matching mmu_interval_read_begin()
347  *
348  * This can be used in the critical section between mmu_interval_read_begin()
349  * and mmu_interval_read_retry().  A return of true indicates an invalidation
350  * has collided with this critical region and a future
351  * mmu_interval_read_retry() will return true.
352  *
353  * False is not reliable and only suggests a collision may not have
354  * occured. It can be called many times and does not have to hold the user
355  * provided lock.
356  *
357  * This call can be used as part of loops and other expensive operations to
358  * expedite a retry.
359  */
360 static inline bool mmu_interval_check_retry(struct mmu_interval_notifier *mni,
361 					    unsigned long seq)
362 {
363 	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
364 	return READ_ONCE(mni->invalidate_seq) != seq;
365 }
366 
367 extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
368 extern void __mmu_notifier_release(struct mm_struct *mm);
369 extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
370 					  unsigned long start,
371 					  unsigned long end);
372 extern int __mmu_notifier_clear_young(struct mm_struct *mm,
373 				      unsigned long start,
374 				      unsigned long end);
375 extern int __mmu_notifier_test_young(struct mm_struct *mm,
376 				     unsigned long address);
377 extern void __mmu_notifier_change_pte(struct mm_struct *mm,
378 				      unsigned long address, pte_t pte);
379 extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
380 extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
381 				  bool only_end);
382 extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
383 				  unsigned long start, unsigned long end);
384 extern bool
385 mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);
386 
387 static inline bool
388 mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
389 {
390 	return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
391 }
392 
393 static inline void mmu_notifier_release(struct mm_struct *mm)
394 {
395 	if (mm_has_notifiers(mm))
396 		__mmu_notifier_release(mm);
397 }
398 
399 static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
400 					  unsigned long start,
401 					  unsigned long end)
402 {
403 	if (mm_has_notifiers(mm))
404 		return __mmu_notifier_clear_flush_young(mm, start, end);
405 	return 0;
406 }
407 
408 static inline int mmu_notifier_clear_young(struct mm_struct *mm,
409 					   unsigned long start,
410 					   unsigned long end)
411 {
412 	if (mm_has_notifiers(mm))
413 		return __mmu_notifier_clear_young(mm, start, end);
414 	return 0;
415 }
416 
417 static inline int mmu_notifier_test_young(struct mm_struct *mm,
418 					  unsigned long address)
419 {
420 	if (mm_has_notifiers(mm))
421 		return __mmu_notifier_test_young(mm, address);
422 	return 0;
423 }
424 
425 static inline void mmu_notifier_change_pte(struct mm_struct *mm,
426 					   unsigned long address, pte_t pte)
427 {
428 	if (mm_has_notifiers(mm))
429 		__mmu_notifier_change_pte(mm, address, pte);
430 }
431 
432 static inline void
433 mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
434 {
435 	might_sleep();
436 
437 	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
438 	if (mm_has_notifiers(range->mm)) {
439 		range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
440 		__mmu_notifier_invalidate_range_start(range);
441 	}
442 	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
443 }
444 
445 static inline int
446 mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
447 {
448 	int ret = 0;
449 
450 	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
451 	if (mm_has_notifiers(range->mm)) {
452 		range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
453 		ret = __mmu_notifier_invalidate_range_start(range);
454 	}
455 	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
456 	return ret;
457 }
458 
459 static inline void
460 mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
461 {
462 	if (mmu_notifier_range_blockable(range))
463 		might_sleep();
464 
465 	if (mm_has_notifiers(range->mm))
466 		__mmu_notifier_invalidate_range_end(range, false);
467 }
468 
469 static inline void
470 mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
471 {
472 	if (mm_has_notifiers(range->mm))
473 		__mmu_notifier_invalidate_range_end(range, true);
474 }
475 
476 static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
477 				  unsigned long start, unsigned long end)
478 {
479 	if (mm_has_notifiers(mm))
480 		__mmu_notifier_invalidate_range(mm, start, end);
481 }
482 
483 static inline void mmu_notifier_mm_init(struct mm_struct *mm)
484 {
485 	mm->mmu_notifier_mm = NULL;
486 }
487 
488 static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
489 {
490 	if (mm_has_notifiers(mm))
491 		__mmu_notifier_mm_destroy(mm);
492 }
493 
494 
495 static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
496 					   enum mmu_notifier_event event,
497 					   unsigned flags,
498 					   struct vm_area_struct *vma,
499 					   struct mm_struct *mm,
500 					   unsigned long start,
501 					   unsigned long end)
502 {
503 	range->vma = vma;
504 	range->event = event;
505 	range->mm = mm;
506 	range->start = start;
507 	range->end = end;
508 	range->flags = flags;
509 }
510 
511 #define ptep_clear_flush_young_notify(__vma, __address, __ptep)		\
512 ({									\
513 	int __young;							\
514 	struct vm_area_struct *___vma = __vma;				\
515 	unsigned long ___address = __address;				\
516 	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
517 	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
518 						  ___address,		\
519 						  ___address +		\
520 							PAGE_SIZE);	\
521 	__young;							\
522 })
523 
524 #define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)		\
525 ({									\
526 	int __young;							\
527 	struct vm_area_struct *___vma = __vma;				\
528 	unsigned long ___address = __address;				\
529 	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
530 	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
531 						  ___address,		\
532 						  ___address +		\
533 							PMD_SIZE);	\
534 	__young;							\
535 })
536 
537 #define ptep_clear_young_notify(__vma, __address, __ptep)		\
538 ({									\
539 	int __young;							\
540 	struct vm_area_struct *___vma = __vma;				\
541 	unsigned long ___address = __address;				\
542 	__young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
543 	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
544 					    ___address + PAGE_SIZE);	\
545 	__young;							\
546 })
547 
548 #define pmdp_clear_young_notify(__vma, __address, __pmdp)		\
549 ({									\
550 	int __young;							\
551 	struct vm_area_struct *___vma = __vma;				\
552 	unsigned long ___address = __address;				\
553 	__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
554 	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
555 					    ___address + PMD_SIZE);	\
556 	__young;							\
557 })
558 
559 #define	ptep_clear_flush_notify(__vma, __address, __ptep)		\
560 ({									\
561 	unsigned long ___addr = __address & PAGE_MASK;			\
562 	struct mm_struct *___mm = (__vma)->vm_mm;			\
563 	pte_t ___pte;							\
564 									\
565 	___pte = ptep_clear_flush(__vma, __address, __ptep);		\
566 	mmu_notifier_invalidate_range(___mm, ___addr,			\
567 					___addr + PAGE_SIZE);		\
568 									\
569 	___pte;								\
570 })
571 
572 #define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd)		\
573 ({									\
574 	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
575 	struct mm_struct *___mm = (__vma)->vm_mm;			\
576 	pmd_t ___pmd;							\
577 									\
578 	___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd);		\
579 	mmu_notifier_invalidate_range(___mm, ___haddr,			\
580 				      ___haddr + HPAGE_PMD_SIZE);	\
581 									\
582 	___pmd;								\
583 })
584 
585 #define pudp_huge_clear_flush_notify(__vma, __haddr, __pud)		\
586 ({									\
587 	unsigned long ___haddr = __haddr & HPAGE_PUD_MASK;		\
588 	struct mm_struct *___mm = (__vma)->vm_mm;			\
589 	pud_t ___pud;							\
590 									\
591 	___pud = pudp_huge_clear_flush(__vma, __haddr, __pud);		\
592 	mmu_notifier_invalidate_range(___mm, ___haddr,			\
593 				      ___haddr + HPAGE_PUD_SIZE);	\
594 									\
595 	___pud;								\
596 })
597 
598 /*
599  * set_pte_at_notify() sets the pte _after_ running the notifier.
600  * This is safe to start by updating the secondary MMUs, because the primary MMU
601  * pte invalidate must have already happened with a ptep_clear_flush() before
602  * set_pte_at_notify() has been invoked.  Updating the secondary MMUs first is
603  * required when we change both the protection of the mapping from read-only to
604  * read-write and the pfn (like during copy on write page faults). Otherwise the
605  * old page would remain mapped readonly in the secondary MMUs after the new
606  * page is already writable by some CPU through the primary MMU.
607  */
608 #define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
609 ({									\
610 	struct mm_struct *___mm = __mm;					\
611 	unsigned long ___address = __address;				\
612 	pte_t ___pte = __pte;						\
613 									\
614 	mmu_notifier_change_pte(___mm, ___address, ___pte);		\
615 	set_pte_at(___mm, ___address, __ptep, ___pte);			\
616 })
617 
618 #else /* CONFIG_MMU_NOTIFIER */
619 
620 struct mmu_notifier_range {
621 	unsigned long start;
622 	unsigned long end;
623 };
624 
625 static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
626 					    unsigned long start,
627 					    unsigned long end)
628 {
629 	range->start = start;
630 	range->end = end;
631 }
632 
633 #define mmu_notifier_range_init(range,event,flags,vma,mm,start,end)  \
634 	_mmu_notifier_range_init(range, start, end)
635 
636 static inline bool
637 mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
638 {
639 	return true;
640 }
641 
642 static inline int mm_has_notifiers(struct mm_struct *mm)
643 {
644 	return 0;
645 }
646 
647 static inline void mmu_notifier_release(struct mm_struct *mm)
648 {
649 }
650 
651 static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
652 					  unsigned long start,
653 					  unsigned long end)
654 {
655 	return 0;
656 }
657 
658 static inline int mmu_notifier_test_young(struct mm_struct *mm,
659 					  unsigned long address)
660 {
661 	return 0;
662 }
663 
664 static inline void mmu_notifier_change_pte(struct mm_struct *mm,
665 					   unsigned long address, pte_t pte)
666 {
667 }
668 
669 static inline void
670 mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
671 {
672 }
673 
674 static inline int
675 mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
676 {
677 	return 0;
678 }
679 
680 static inline
681 void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
682 {
683 }
684 
685 static inline void
686 mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
687 {
688 }
689 
690 static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
691 				  unsigned long start, unsigned long end)
692 {
693 }
694 
695 static inline void mmu_notifier_mm_init(struct mm_struct *mm)
696 {
697 }
698 
699 static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
700 {
701 }
702 
703 #define mmu_notifier_range_update_to_read_only(r) false
704 
705 #define ptep_clear_flush_young_notify ptep_clear_flush_young
706 #define pmdp_clear_flush_young_notify pmdp_clear_flush_young
707 #define ptep_clear_young_notify ptep_test_and_clear_young
708 #define pmdp_clear_young_notify pmdp_test_and_clear_young
709 #define	ptep_clear_flush_notify ptep_clear_flush
710 #define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
711 #define pudp_huge_clear_flush_notify pudp_huge_clear_flush
712 #define set_pte_at_notify set_pte_at
713 
714 static inline void mmu_notifier_synchronize(void)
715 {
716 }
717 
718 #endif /* CONFIG_MMU_NOTIFIER */
719 
720 #endif /* _LINUX_MMU_NOTIFIER_H */
721