/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  include/linux/userfaultfd_k.h
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 *
 */

#ifndef _LINUX_USERFAULTFD_K_H
#define _LINUX_USERFAULTFD_K_H

#ifdef CONFIG_USERFAULTFD

#include <linux/userfaultfd.h> /* linux/include/uapi/linux/userfaultfd.h */

#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm-generic/pgtable_uffd.h>
#include <linux/hugetlb_inline.h>

/* The set of all possible UFFD-related VM flags. */
#define __VM_UFFD_FLAGS (VM_UFFD_MISSING | VM_UFFD_WP | VM_UFFD_MINOR)

/*
 * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
 * new flags, since they might collide with O_* ones. We want
 * to re-use O_* flags that couldn't possibly have a meaning
 * from userfaultfd, in order to leave a free define-space for
 * shared O_* flags.
 */
#define UFFD_CLOEXEC O_CLOEXEC
#define UFFD_NONBLOCK O_NONBLOCK

#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define UFFD_FLAGS_SET (UFFD_SHARED_FCNTL_FLAGS)

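/*
 * Example (illustrative userspace sketch, not part of this header): the
 * O_* re-use above lets the fcntl-style flags pass straight through to
 * the syscall:
 *
 *	int ufd = syscall(__NR_userfaultfd, UFFD_CLOEXEC | UFFD_NONBLOCK);
 *	// same bits as O_CLOEXEC | O_NONBLOCK
 */
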
/*
 * Start with fault_pending_wqh and fault_wqh so they're more likely
 * to be in the same cacheline.
 *
 * Locking order:
 *	fd_wqh.lock
 *		fault_pending_wqh.lock
 *			fault_wqh.lock
 *		event_wqh.lock
 *
 * To avoid deadlocks, IRQs must be disabled when taking any of the above locks,
 * since fd_wqh.lock is taken by aio_poll() while it's holding a lock that's
 * also taken in IRQ context.
 */
struct userfaultfd_ctx {
	/* waitqueue head for the pending (i.e. not read) userfaults */
	wait_queue_head_t fault_pending_wqh;
	/* waitqueue head for the userfaults */
	wait_queue_head_t fault_wqh;
	/* waitqueue head for the pseudo fd to wakeup poll/read */
	wait_queue_head_t fd_wqh;
	/* waitqueue head for events */
	wait_queue_head_t event_wqh;
	/* a refile sequence protected by fault_pending_wqh lock */
	seqcount_spinlock_t refile_seq;
	/* pseudo fd refcounting */
	refcount_t refcount;
	/* userfaultfd syscall flags */
	unsigned int flags;
	/* features requested from the userspace */
	unsigned int features;
	/* released */
	bool released;
	/*
	 * Prevents userfaultfd operations (fill/move/wp) from happening while
	 * some non-cooperative event(s) is taking place. Increments are done
	 * in write mode, whereas userfaultfd operations, which include
	 * reading mmap_changing, are done in read mode.
	 */
	struct rw_semaphore map_changing_lock;
	/* memory mappings are changing because of non-cooperative event */
	atomic_t mmap_changing;
	/* mm with one or more vmas attached to this userfaultfd_ctx */
	struct mm_struct *mm;
};
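
/*
 * A minimal sketch of the documented lock nesting (assuming the caller
 * already holds a ctx reference); the _irq variants are used because,
 * as noted above, fd_wqh.lock can otherwise deadlock against IRQ
 * context:
 *
 *	spin_lock_irq(&ctx->fd_wqh.lock);
 *	spin_lock(&ctx->fault_pending_wqh.lock);
 *	...
 *	spin_unlock(&ctx->fault_pending_wqh.lock);
 *	spin_unlock_irq(&ctx->fd_wqh.lock);
 */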

extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);

/* A combined operation mode + behavior flags. */
typedef unsigned int __bitwise uffd_flags_t;

/* Mutually exclusive modes of operation. */
enum mfill_atomic_mode {
	MFILL_ATOMIC_COPY,
	MFILL_ATOMIC_ZEROPAGE,
	MFILL_ATOMIC_CONTINUE,
	MFILL_ATOMIC_POISON,
	NR_MFILL_ATOMIC_MODES,
};

#define MFILL_ATOMIC_MODE_BITS (const_ilog2(NR_MFILL_ATOMIC_MODES - 1) + 1)
#define MFILL_ATOMIC_BIT(nr) BIT(MFILL_ATOMIC_MODE_BITS + (nr))
#define MFILL_ATOMIC_FLAG(nr) ((__force uffd_flags_t) MFILL_ATOMIC_BIT(nr))
#define MFILL_ATOMIC_MODE_MASK ((__force uffd_flags_t) (MFILL_ATOMIC_BIT(0) - 1))

static inline bool uffd_flags_mode_is(uffd_flags_t flags, enum mfill_atomic_mode expected)
{
	return (flags & MFILL_ATOMIC_MODE_MASK) == ((__force uffd_flags_t) expected);
}

static inline uffd_flags_t uffd_flags_set_mode(uffd_flags_t flags, enum mfill_atomic_mode mode)
{
	flags &= ~MFILL_ATOMIC_MODE_MASK;
	return flags | ((__force uffd_flags_t) mode);
}

/* Flags controlling behavior. These behavior changes are mode-independent. */
#define MFILL_ATOMIC_WP MFILL_ATOMIC_FLAG(0)

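/*
 * Example (illustrative sketch): a copy request with write-protect
 * asserted packs the mode into the low bits and ORs in the behavior
 * flag, which uffd_flags_mode_is() can later test:
 *
 *	uffd_flags_t flags = uffd_flags_set_mode(MFILL_ATOMIC_WP,
 *						 MFILL_ATOMIC_COPY);
 *	// uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY) is true
 */
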
extern int mfill_atomic_install_pte(pmd_t *dst_pmd,
				    struct vm_area_struct *dst_vma,
				    unsigned long dst_addr, struct page *page,
				    bool newly_allocated, uffd_flags_t flags);

extern ssize_t mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start,
				 unsigned long src_start, unsigned long len,
				 uffd_flags_t flags);
extern ssize_t mfill_atomic_zeropage(struct userfaultfd_ctx *ctx,
				     unsigned long dst_start,
				     unsigned long len);
extern ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx, unsigned long dst_start,
				     unsigned long len, uffd_flags_t flags);
extern ssize_t mfill_atomic_poison(struct userfaultfd_ctx *ctx, unsigned long start,
				   unsigned long len, uffd_flags_t flags);
extern int mwriteprotect_range(struct userfaultfd_ctx *ctx, unsigned long start,
			       unsigned long len, bool enable_wp);
extern long uffd_wp_range(struct vm_area_struct *vma,
			  unsigned long start, unsigned long len, bool enable_wp);
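
/*
 * For context, an illustrative userspace sketch: a UFFDIO_COPY ioctl on
 * the userfaultfd is what ultimately reaches mfill_atomic_copy() above:
 *
 *	struct uffdio_copy copy = {
 *		.dst = dst_addr, .src = src_addr, .len = len,
 *		.mode = UFFDIO_COPY_MODE_WP,	// becomes MFILL_ATOMIC_WP
 *	};
 *	ioctl(ufd, UFFDIO_COPY, &copy);
 */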

/* move_pages */
void double_pt_lock(spinlock_t *ptl1, spinlock_t *ptl2);
void double_pt_unlock(spinlock_t *ptl1, spinlock_t *ptl2);
ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start,
		   unsigned long src_start, unsigned long len, __u64 flags);
int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval,
			struct vm_area_struct *dst_vma,
			struct vm_area_struct *src_vma,
			unsigned long dst_addr, unsigned long src_addr);
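
/*
 * Example (illustrative userspace sketch): move_pages() above backs the
 * UFFDIO_MOVE ioctl, which remaps pages instead of copying them:
 *
 *	struct uffdio_move move = {
 *		.dst = dst_addr, .src = src_addr, .len = len, .mode = 0,
 *	};
 *	ioctl(ufd, UFFDIO_MOVE, &move);
 */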

/* mm helpers */
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
					struct vm_userfaultfd_ctx vm_ctx)
{
	return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
}

/*
 * Never enable huge pmd sharing on certain uffd-registered vmas:
 *
 * - VM_UFFD_WP VMAs, because write protect information is per pgtable entry.
 *
 * - VM_UFFD_MINOR VMAs, because otherwise we would never get minor faults for
 *   VMAs which share huge pmds. (If you have two mappings to the same
 *   underlying pages, and fault in the non-UFFD-registered one with a write,
 *   with huge pmd sharing this would *also* set up the second UFFD-registered
 *   mapping, and we'd not get minor faults.)
 */
static inline bool uffd_disable_huge_pmd_share(struct vm_area_struct *vma)
{
	return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);
}

/*
 * Don't do fault around for either WP or MINOR registered uffd range.  For
 * a MINOR registered range, fault around would be a total disaster: ptes can
 * be installed without notifications.  For WP it should mostly be fine as
 * long as fault around checks for pte_none() before the installation, but
 * to be super safe we just forbid it.
 */
static inline bool uffd_disable_fault_around(struct vm_area_struct *vma)
{
	return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);
}

static inline bool userfaultfd_missing(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_UFFD_MISSING;
}

static inline bool userfaultfd_wp(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_UFFD_WP;
}

static inline bool userfaultfd_minor(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_UFFD_MINOR;
}

static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma,
				      pte_t pte)
{
	return userfaultfd_wp(vma) && pte_uffd_wp(pte);
}

static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma,
					   pmd_t pmd)
{
	return userfaultfd_wp(vma) && pmd_uffd_wp(pmd);
}

static inline bool userfaultfd_armed(struct vm_area_struct *vma)
{
	return vma->vm_flags & __VM_UFFD_FLAGS;
}

static inline bool vma_can_userfault(struct vm_area_struct *vma,
				     unsigned long vm_flags,
				     bool wp_async)
{
	vm_flags &= __VM_UFFD_FLAGS;

	if (vm_flags & VM_DROPPABLE)
		return false;

	if ((vm_flags & VM_UFFD_MINOR) &&
	    (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma)))
		return false;

	/*
	 * If wp async is enabled and WP is the only mode requested, allow
	 * any memory type.
	 */
	if (wp_async && (vm_flags == VM_UFFD_WP))
		return true;

#ifndef CONFIG_PTE_MARKER_UFFD_WP
	/*
	 * If the user requested uffd-wp but pte markers are not enabled
	 * for uffd-wp, then only anonymous memory is supported, not
	 * shmem or hugetlbfs.
	 */
	if ((vm_flags & VM_UFFD_WP) && !vma_is_anonymous(vma))
		return false;
#endif

	/* By default, allow any of anon|shmem|hugetlb */
	return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
	    vma_is_shmem(vma);
}
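
/*
 * Example (illustrative sketch of a caller): the UFFDIO_REGISTER path
 * would reject, say, a minor-fault registration on plain anonymous
 * memory with something like:
 *
 *	if (!vma_can_userfault(vma, vm_flags, wp_async))
 *		return -EINVAL;
 */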

static inline bool vma_has_uffd_without_event_remap(struct vm_area_struct *vma)
{
	struct userfaultfd_ctx *uffd_ctx = vma->vm_userfaultfd_ctx.ctx;

	return uffd_ctx && (uffd_ctx->features & UFFD_FEATURE_EVENT_REMAP) == 0;
}

extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *);
extern void dup_userfaultfd_complete(struct list_head *);
void dup_userfaultfd_fail(struct list_head *);

extern void mremap_userfaultfd_prep(struct vm_area_struct *,
				    struct vm_userfaultfd_ctx *);
extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *,
					unsigned long from, unsigned long to,
					unsigned long len);

extern bool userfaultfd_remove(struct vm_area_struct *vma,
			       unsigned long start,
			       unsigned long end);

extern int userfaultfd_unmap_prep(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, struct list_head *uf);
extern void userfaultfd_unmap_complete(struct mm_struct *mm,
				       struct list_head *uf);
extern bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma);
extern bool userfaultfd_wp_async(struct vm_area_struct *vma);

void userfaultfd_reset_ctx(struct vm_area_struct *vma);

struct vm_area_struct *userfaultfd_clear_vma(struct vma_iterator *vmi,
					     struct vm_area_struct *prev,
					     struct vm_area_struct *vma,
					     unsigned long start,
					     unsigned long end);

int userfaultfd_register_range(struct userfaultfd_ctx *ctx,
			       struct vm_area_struct *vma,
			       unsigned long vm_flags,
			       unsigned long start, unsigned long end,
			       bool wp_async);

void userfaultfd_release_new(struct userfaultfd_ctx *ctx);

void userfaultfd_release_all(struct mm_struct *mm,
			     struct userfaultfd_ctx *ctx);

#else /* CONFIG_USERFAULTFD */

/* mm helpers */
static inline vm_fault_t handle_userfault(struct vm_fault *vmf,
				unsigned long reason)
{
	return VM_FAULT_SIGBUS;
}

static inline long uffd_wp_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long len,
				 bool enable_wp)
{
	return 0;
}

static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
					struct vm_userfaultfd_ctx vm_ctx)
{
	return true;
}

static inline bool userfaultfd_missing(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_wp(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_minor(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma,
				      pte_t pte)
{
	return false;
}

static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma,
					   pmd_t pmd)
{
	return false;
}

static inline bool userfaultfd_armed(struct vm_area_struct *vma)
{
	return false;
}

static inline int dup_userfaultfd(struct vm_area_struct *vma,
				  struct list_head *l)
{
	return 0;
}

static inline void dup_userfaultfd_complete(struct list_head *l)
{
}

static inline void dup_userfaultfd_fail(struct list_head *l)
{
}

static inline void mremap_userfaultfd_prep(struct vm_area_struct *vma,
					   struct vm_userfaultfd_ctx *ctx)
{
}

static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx,
					       unsigned long from,
					       unsigned long to,
					       unsigned long len)
{
}

static inline bool userfaultfd_remove(struct vm_area_struct *vma,
				      unsigned long start,
				      unsigned long end)
{
	return true;
}

static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
					 unsigned long start, unsigned long end,
					 struct list_head *uf)
{
	return 0;
}

static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
					      struct list_head *uf)
{
}

static inline bool uffd_disable_fault_around(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_wp_async(struct vm_area_struct *vma)
{
	return false;
}

static inline bool vma_has_uffd_without_event_remap(struct vm_area_struct *vma)
{
	return false;
}

#endif /* CONFIG_USERFAULTFD */

static inline bool userfaultfd_wp_use_markers(struct vm_area_struct *vma)
{
	/* Only wr-protect mode uses pte markers */
	if (!userfaultfd_wp(vma))
		return false;

	/* File-based uffd-wp always needs markers */
	if (!vma_is_anonymous(vma))
		return true;

	/*
	 * Anonymous uffd-wp only needs the markers if WP_UNPOPULATED is
	 * enabled (to apply markers on zero pages).
	 */
	return userfaultfd_wp_unpopulated(vma);
}

static inline bool pte_marker_entry_uffd_wp(swp_entry_t entry)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	return is_pte_marker_entry(entry) &&
	    (pte_marker_get(entry) & PTE_MARKER_UFFD_WP);
#else
	return false;
#endif
}

static inline bool pte_marker_uffd_wp(pte_t pte)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	swp_entry_t entry;

	if (!is_swap_pte(pte))
		return false;

	entry = pte_to_swp_entry(pte);

	return pte_marker_entry_uffd_wp(entry);
#else
	return false;
#endif
}

/*
 * Returns true if this is a swap pte and was uffd-wp wr-protected in either
 * form (pte marker or a normal swap pte), false otherwise.
 */
static inline bool pte_swp_uffd_wp_any(pte_t pte)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	if (!is_swap_pte(pte))
		return false;

	if (pte_swp_uffd_wp(pte))
		return true;

	if (pte_marker_uffd_wp(pte))
		return true;
#endif
	return false;
}
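
/*
 * Example (illustrative sketch, not a specific call site): a fault
 * handler restoring a page from swap could use this to decide whether
 * to carry the wr-protect bit over to the new present pte:
 *
 *	if (pte_swp_uffd_wp_any(orig_pte))
 *		pte = pte_mkuffd_wp(pte);
 */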

#endif /* _LINUX_USERFAULTFD_K_H */