/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
#include <linux/srcu.h>
#include <linux/interval_tree.h>

struct mmu_notifier_subscriptions;
struct mmu_notifier;
struct mmu_notifier_range;
struct mmu_interval_notifier;

/**
 * enum mmu_notifier_event - reason for the mmu notifier callback
 * @MMU_NOTIFY_UNMAP: either a munmap() that unmaps the range or a mremap()
 * that moves the range
 *
 * @MMU_NOTIFY_CLEAR: clear page table entry (many reasons for this, such as
 * madvise() or replacing a page by another one, ...).
 *
 * @MMU_NOTIFY_PROTECTION_VMA: update is due to a protection change for the
 * range, i.e. using the vma access permission (vm_page_prot) to update the
 * whole range is enough; there is no need to inspect changes to the CPU
 * page table (mprotect() syscall).
 *
 * @MMU_NOTIFY_PROTECTION_PAGE: update is due to a change in the read/write
 * flag for pages in the range, so to mirror those changes the user must
 * inspect the CPU page table (from the end callback).
 *
 * @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still the same page and the
 * same access flags). The user should soft dirty the page in the end callback
 * to make sure that anyone relying on soft dirtiness catches pages that might
 * be written through non-CPU mappings.
 *
 * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal
 * that the mm refcount is zero and the range is no longer accessible.
 */
enum mmu_notifier_event {
	MMU_NOTIFY_UNMAP = 0,
	MMU_NOTIFY_CLEAR,
	MMU_NOTIFY_PROTECTION_VMA,
	MMU_NOTIFY_PROTECTION_PAGE,
	MMU_NOTIFY_SOFT_DIRTY,
	MMU_NOTIFY_RELEASE,
};

#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)

struct mmu_notifier_ops {
	/*
	 * Called either by mmu_notifier_unregister or when the mm is
	 * being destroyed by exit_mmap, always before all pages are
	 * freed. This can run concurrently with other mmu notifier
	 * methods (the ones invoked outside the mm context) and it
	 * should tear down all secondary mmu mappings and freeze the
	 * secondary mmu. If this method isn't implemented you have to
	 * be sure that nothing could possibly write to the pages
	 * through the secondary mmu by the time the last thread with
	 * tsk->mm == mm exits.
	 *
	 * As a side note: the pages freed after ->release returns could
	 * be immediately reallocated by the gart at an alias physical
	 * address with a different cache model, so if ->release isn't
	 * implemented because all _software_ driven memory accesses
	 * through the secondary mmu are terminated by the time the
	 * last thread of this mm quits, you also have to be sure that
	 * speculative _hardware_ operations can't allocate dirty
	 * cachelines in the cpu that could not be snooped and made
	 * coherent with the other read and write operations happening
	 * through the gart alias address, which would lead to memory
	 * corruption.
	 */
	void (*release)(struct mmu_notifier *subscription,
			struct mm_struct *mm);

	/*
	 * clear_flush_young is called after the VM test-and-clears
	 * the young/accessed bitflag in the pte. This way the VM will
	 * provide proper aging for accesses to the page through the
	 * secondary MMUs and not only for the ones through the Linux
	 * pte. Start-end is necessary in case the secondary MMU is
	 * mapping the page at a smaller granularity than the primary MMU.
	 */
	int (*clear_flush_young)(struct mmu_notifier *subscription,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/*
	 * clear_young is a lightweight version of clear_flush_young. Like the
	 * latter, it is supposed to test-and-clear the young/accessed bitflag
	 * in the secondary pte, but it may omit flushing the secondary tlb.
	 */
	int (*clear_young)(struct mmu_notifier *subscription,
			   struct mm_struct *mm,
			   unsigned long start,
			   unsigned long end);

	/*
	 * test_young is called to check the young/accessed bitflag in
	 * the secondary pte. This is used to know whether the page is
	 * frequently used without actually clearing the flag or tearing
	 * down the secondary mapping of the page.
	 */
	int (*test_young)(struct mmu_notifier *subscription,
			  struct mm_struct *mm,
			  unsigned long address);

	/*
	 * change_pte is called when a pte that maps a page is changed:
	 * for example, when ksm remaps the pte to point to a new shared page.
	 */
	void (*change_pte)(struct mmu_notifier *subscription,
			   struct mm_struct *mm,
			   unsigned long address,
			   pte_t pte);

	/*
	 * invalidate_range_start() and invalidate_range_end() must be
	 * paired and are called only when the mmap_lock and/or the
	 * locks protecting the reverse maps are held. If the subsystem
	 * can't guarantee that no additional references are taken to
	 * the pages in the range, it has to implement the
	 * invalidate_range() notifier to remove any references taken
	 * after invalidate_range_start().
	 *
	 * Invalidation of multiple concurrent ranges may be
	 * optionally permitted by the driver. Either way the
	 * establishment of sptes is forbidden in the range passed to
	 * invalidate_range_start/end for the whole duration of the
	 * invalidate_range_start/end critical section.
	 *
	 * invalidate_range_start() is called when all pages in the
	 * range are still mapped and have at least a refcount of one.
	 *
	 * invalidate_range_end() is called when all pages in the
	 * range have been unmapped and the pages have been freed by
	 * the VM.
	 *
	 * The VM will remove the page table entries and potentially
	 * the page between invalidate_range_start() and
	 * invalidate_range_end(). If the page must not be freed
	 * because of pending I/O or other circumstances then the
	 * invalidate_range_start() callback (or the initial mapping
	 * by the driver) must make sure that the refcount is kept
	 * elevated.
	 *
	 * If the driver increases the refcount when the pages are
	 * initially mapped into an address space then either
	 * invalidate_range_start() or invalidate_range_end() may
	 * decrease the refcount. If the refcount is decreased on
	 * invalidate_range_start() then the VM can free pages as page
	 * table entries are removed.  If the refcount is only
	 * dropped on invalidate_range_end() then the driver itself
	 * will drop the last refcount, but it must take care to flush
	 * any secondary tlb before doing the final free on the
	 * page. Pages will no longer be referenced by the linux
	 * address space but may still be referenced by sptes until
	 * the last refcount is dropped.
	 *
	 * If mmu_notifier_range_blockable(range) is false then the
	 * callback cannot sleep and has to return -EAGAIN; 0 should be
	 * returned otherwise. Please note that if invalidate_range_start
	 * approves a non-blocking behavior then the same applies to
	 * invalidate_range_end.
	 */
	int (*invalidate_range_start)(struct mmu_notifier *subscription,
				      const struct mmu_notifier_range *range);
	void (*invalidate_range_end)(struct mmu_notifier *subscription,
				     const struct mmu_notifier_range *range);

	/*
	 * invalidate_range() is either called between
	 * invalidate_range_start() and invalidate_range_end() when the
	 * VM has to free pages that were unmapped, but before the
	 * pages are actually freed, or outside of _start()/_end() when
	 * a (remote) TLB flush is necessary.
	 *
	 * If invalidate_range() is used to manage a non-CPU TLB with
	 * shared page-tables, it is not necessary to implement the
	 * invalidate_range_start()/end() notifiers, as
	 * invalidate_range() already catches the points in time when an
	 * external TLB range needs to be flushed. For a more in-depth
	 * discussion on this see Documentation/vm/mmu_notifier.rst
	 *
	 * Note that this function might be called with just a sub-range
	 * of what was passed to invalidate_range_start()/end(), if
	 * called between those functions.
	 */
	void (*invalidate_range)(struct mmu_notifier *subscription,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/*
	 * These callbacks are used with the get/put interface to manage the
	 * lifetime of the mmu_notifier memory. alloc_notifier() returns a new
	 * notifier for use with the mm.
	 *
	 * free_notifier() is only called after the mmu_notifier has been
	 * fully put: calls to any ops callback are prevented and no ops
	 * callbacks are currently running. It is called from an SRCU callback
	 * and cannot sleep.
	 */
	struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
	void (*free_notifier)(struct mmu_notifier *subscription);
};
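
/*
 * Illustrative sketch (not part of this header's API): a minimal subscriber
 * implementing only invalidate_range_start(). The names example_mn,
 * example_ops, example_invalidate_start() and example_zap_range() are
 * hypothetical.
 *
 *	static int example_invalidate_start(struct mmu_notifier *subscription,
 *				const struct mmu_notifier_range *range)
 *	{
 *		if (!mmu_notifier_range_blockable(range))
 *			return -EAGAIN;	(tearing down would need to sleep)
 *		example_zap_range(range->start, range->end);
 *		return 0;
 *	}
 *
 *	static const struct mmu_notifier_ops example_ops = {
 *		.invalidate_range_start = example_invalidate_start,
 *	};
 *
 *	static struct mmu_notifier example_mn = { .ops = &example_ops };
 *
 * The subscriber would then be attached to an mm with
 * mmu_notifier_register(&example_mn, mm).
 */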

/*
 * The notifier chains are protected by the mmap_lock and/or the reverse map
 * semaphores. Notifier chains are only changed when all reverse map locks and
 * the mmap_lock are taken.
 *
 * Therefore notifier chains can only be traversed when either
 *
 * 1. mmap_lock is held.
 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
 * 3. No other concurrent thread can access the list (release).
 */
struct mmu_notifier {
	struct hlist_node hlist;
	const struct mmu_notifier_ops *ops;
	struct mm_struct *mm;
	struct rcu_head rcu;
	unsigned int users;
};

/**
 * struct mmu_interval_notifier_ops
 * @invalidate: Upon return the caller must stop using any SPTEs within this
 *              range. This function can sleep. Return false only if sleeping
 *              was required but mmu_notifier_range_blockable(range) is false.
 */
struct mmu_interval_notifier_ops {
	bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
			   const struct mmu_notifier_range *range,
			   unsigned long cur_seq);
};

struct mmu_interval_notifier {
	struct interval_tree_node interval_tree;
	const struct mmu_interval_notifier_ops *ops;
	struct mm_struct *mm;
	struct hlist_node deferred_item;
	unsigned long invalidate_seq;
};

#ifdef CONFIG_MMU_NOTIFIER

#ifdef CONFIG_LOCKDEP
extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
#endif

struct mmu_notifier_range {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;
	unsigned flags;
	enum mmu_notifier_event event;
};

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return unlikely(mm->notifier_subscriptions);
}

struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
					     struct mm_struct *mm);
static inline struct mmu_notifier *
mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
{
	struct mmu_notifier *ret;

	mmap_write_lock(mm);
	ret = mmu_notifier_get_locked(ops, mm);
	mmap_write_unlock(mm);
	return ret;
}
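
/*
 * Hedged usage sketch of the get/put lifetime model, assuming a driver
 * supplies an example_ops (hypothetical name) whose alloc_notifier() and
 * free_notifier() callbacks are implemented:
 *
 *	struct mmu_notifier *subscription;
 *
 *	subscription = mmu_notifier_get(&example_ops, current->mm);
 *	if (IS_ERR(subscription))
 *		return PTR_ERR(subscription);
 *	... use the secondary MMU ...
 *	mmu_notifier_put(subscription);
 *
 * free_notifier() runs later from an SRCU callback; a module using this
 * interface should call mmu_notifier_synchronize() before unloading.
 */
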
void mmu_notifier_put(struct mmu_notifier *subscription);
void mmu_notifier_synchronize(void);

extern int mmu_notifier_register(struct mmu_notifier *subscription,
				 struct mm_struct *mm);
extern int __mmu_notifier_register(struct mmu_notifier *subscription,
				   struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *subscription,
				    struct mm_struct *mm);

unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub);
int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long length,
				 const struct mmu_interval_notifier_ops *ops);
int mmu_interval_notifier_insert_locked(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	unsigned long start, unsigned long length,
	const struct mmu_interval_notifier_ops *ops);
void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub);

/**
 * mmu_interval_set_seq - Save the invalidation sequence
 * @interval_sub: The subscription passed to invalidate
 * @cur_seq: The cur_seq passed to the invalidate() callback
 *
 * This must be called unconditionally from the invalidate callback of a
 * struct mmu_interval_notifier_ops under the same lock that is used to call
 * mmu_interval_read_retry(). It updates the sequence number for later use by
 * mmu_interval_read_retry(). The provided cur_seq will always be odd.
 *
 * If the caller does not call mmu_interval_read_begin() or
 * mmu_interval_read_retry() then this call is not required.
 */
static inline void
mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
		     unsigned long cur_seq)
{
	WRITE_ONCE(interval_sub->invalidate_seq, cur_seq);
}
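
/*
 * Sketch of an invalidate() callback that follows the rule above, assuming
 * the driver serializes against its page table walker with a spinlock
 * (example_lock and example_zap_range() are hypothetical):
 *
 *	static bool example_invalidate(struct mmu_interval_notifier *interval_sub,
 *				const struct mmu_notifier_range *range,
 *				unsigned long cur_seq)
 *	{
 *		spin_lock(&example_lock);
 *		mmu_interval_set_seq(interval_sub, cur_seq);
 *		example_zap_range(range->start, range->end);
 *		spin_unlock(&example_lock);
 *		return true;
 *	}
 */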

/**
 * mmu_interval_read_retry - End a read side critical section against a VA range
 * @interval_sub: The subscription
 * @seq: The return value of the paired mmu_interval_read_begin()
 *
 * This MUST be called under a user provided lock that is also held
 * unconditionally by op->invalidate() when it calls mmu_interval_set_seq().
 *
 * Each call should be paired with a single mmu_interval_read_begin() and
 * should be used to conclude the read side.
 *
 * Returns true if an invalidation collided with this critical section, and
 * the caller should retry.
 */
static inline bool
mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
			unsigned long seq)
{
	return interval_sub->invalidate_seq != seq;
}
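
/*
 * Sketch of the paired read side, under the same hypothetical example_lock
 * as in the invalidate() sketch above; the populate step stands in for
 * building sptes from the CPU page tables:
 *
 *	again:
 *		seq = mmu_interval_read_begin(interval_sub);
 *		mmap_read_lock(mm);
 *		... populate a shadow copy of the range ...
 *		mmap_read_unlock(mm);
 *		spin_lock(&example_lock);
 *		if (mmu_interval_read_retry(interval_sub, seq)) {
 *			spin_unlock(&example_lock);
 *			goto again;
 *		}
 *		... install the sptes ...
 *		spin_unlock(&example_lock);
 */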

/**
 * mmu_interval_check_retry - Test if a collision has occurred
 * @interval_sub: The subscription
 * @seq: The return value of the matching mmu_interval_read_begin()
 *
 * This can be used in the critical section between mmu_interval_read_begin()
 * and mmu_interval_read_retry().  A return of true indicates an invalidation
 * has collided with this critical region and a future
 * mmu_interval_read_retry() will return true.
 *
 * False is not reliable and only suggests a collision may not have
 * occurred. It can be called many times and does not have to hold the user
 * provided lock.
 *
 * This call can be used as part of loops and other expensive operations to
 * expedite a retry.
 */
static inline bool
mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
			 unsigned long seq)
{
	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
	return READ_ONCE(interval_sub->invalidate_seq) != seq;
}
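
/*
 * For example (hypothetical sketch), a long population loop may abandon its
 * work early instead of waiting for mmu_interval_read_retry():
 *
 *	for (addr = start; addr < end; addr += PAGE_SIZE) {
 *		if (mmu_interval_check_retry(interval_sub, seq))
 *			goto again;	(restart from mmu_interval_read_begin())
 *		... fault in or copy one page ...
 *	}
 */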

extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end);
extern int __mmu_notifier_clear_young(struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
				     unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
				      unsigned long address, pte_t pte);
extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
				  bool only_end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end);
extern bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_release(mm);
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_flush_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_clear_young(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_test_young(mm, address);
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_change_pte(mm, address, pte);
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	might_sleep();

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
		__mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	int ret = 0;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
		ret = __mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	return ret;
}

static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
	if (mmu_notifier_range_blockable(range))
		might_sleep();

	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range, false);
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range, true);
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range(mm, start, end);
}

static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
	mm->notifier_subscriptions = NULL;
}

static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_subscriptions_destroy(mm);
}

static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
					   enum mmu_notifier_event event,
					   unsigned flags,
					   struct vm_area_struct *vma,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	range->vma = vma;
	range->event = event;
	range->mm = mm;
	range->start = start;
	range->end = end;
	range->flags = flags;
}
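
/*
 * Typical bracketing sketch around page table tear down (illustrative; the
 * unmap step stands in for the real page table work):
 *
 *	struct mmu_notifier_range range;
 *
 *	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
 *				start, end);
 *	mmu_notifier_invalidate_range_start(&range);
 *	... remove the CPU page table entries in [start, end) ...
 *	mmu_notifier_invalidate_range_end(&range);
 */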

#define ptep_clear_flush_young_notify(__vma, __address, __ptep)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PMD_SIZE);	\
	__young;							\
})

#define ptep_clear_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PMD_SIZE);	\
	__young;							\
})

#define ptep_clear_flush_notify(__vma, __address, __ptep)		\
({									\
	unsigned long ___addr = __address & PAGE_MASK;			\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pte_t ___pte;							\
									\
	___pte = ptep_clear_flush(__vma, __address, __ptep);		\
	mmu_notifier_invalidate_range(___mm, ___addr,			\
					___addr + PAGE_SIZE);		\
									\
	___pte;								\
})

#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pmd_t ___pmd;							\
									\
	___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PMD_SIZE);	\
									\
	___pmd;								\
})

#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PUD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pud_t ___pud;							\
									\
	___pud = pudp_huge_clear_flush(__vma, __haddr, __pud);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PUD_SIZE);	\
									\
	___pud;								\
})

/*
 * set_pte_at_notify() sets the pte _after_ running the notifier.
 * It is safe to update the secondary MMUs first because the primary MMU
 * pte invalidate must already have happened with a ptep_clear_flush()
 * before set_pte_at_notify() is invoked.  Updating the secondary MMUs
 * first is required when we change both the protection of the mapping
 * from read-only to read-write and the pfn (like during copy on write
 * page faults).  Otherwise the old page would remain mapped read-only
 * in the secondary MMUs after the new page is already writable by some
 * CPU through the primary MMU.
 */
#define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
({									\
	struct mm_struct *___mm = __mm;					\
	unsigned long ___address = __address;				\
	pte_t ___pte = __pte;						\
									\
	mmu_notifier_change_pte(___mm, ___address, ___pte);		\
	set_pte_at(___mm, ___address, __ptep, ___pte);			\
})

#else /* CONFIG_MMU_NOTIFIER */

struct mmu_notifier_range {
	unsigned long start;
	unsigned long end;
};

static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
					    unsigned long start,
					    unsigned long end)
{
	range->start = start;
	range->end = end;
}

#define mmu_notifier_range_init(range, event, flags, vma, mm, start, end) \
	_mmu_notifier_range_init(range, start, end)

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return true;
}

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return 0;
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
{
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	return 0;
}

static inline
void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
}

static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
}

#define mmu_notifier_range_update_to_read_only(r) false

#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at

static inline void mmu_notifier_synchronize(void)
{
}

#endif /* CONFIG_MMU_NOTIFIER */

#endif /* _LINUX_MMU_NOTIFIER_H */