/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/userfaultfd_k.h
 *
 * Copyright (C) 2015 Red Hat, Inc.
 *
 */

#ifndef _LINUX_USERFAULTFD_K_H
#define _LINUX_USERFAULTFD_K_H

#ifdef CONFIG_USERFAULTFD

#include <linux/userfaultfd.h> /* linux/include/uapi/linux/userfaultfd.h */

#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm-generic/pgtable_uffd.h>

/* The set of all possible UFFD-related VM flags. */
#define __VM_UFFD_FLAGS (VM_UFFD_MISSING | VM_UFFD_WP | VM_UFFD_MINOR)

/*
 * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
 * new flags, since they might collide with O_* ones. We want
 * to re-use O_* flags that couldn't possibly have a meaning
 * from userfaultfd, in order to leave a free define-space for
 * shared O_* flags.
 */
#define UFFD_CLOEXEC O_CLOEXEC
#define UFFD_NONBLOCK O_NONBLOCK

#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)

extern int sysctl_unprivileged_userfaultfd;

extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);

/*
 * The mode of operation for __mcopy_atomic and its helpers.
 *
 * This is almost an implementation detail (mcopy_atomic below doesn't take
 * this as a parameter), but it's exposed here because memory-kind-specific
 * implementations (e.g. hugetlbfs) need to know the mode of operation.
 */
enum mcopy_atomic_mode {
	/* A normal copy_from_user into the destination range. */
	MCOPY_ATOMIC_NORMAL,
	/* Don't copy; map the destination range to the zero page. */
	MCOPY_ATOMIC_ZEROPAGE,
	/* Just install pte(s) with the existing page(s) in the page cache. */
	MCOPY_ATOMIC_CONTINUE,
};

extern int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
				    struct vm_area_struct *dst_vma,
				    unsigned long dst_addr, struct page *page,
				    bool newly_allocated, bool wp_copy);

extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
			    unsigned long src_start, unsigned long len,
			    atomic_t *mmap_changing, __u64 mode);
extern ssize_t mfill_zeropage(struct mm_struct *dst_mm,
			      unsigned long dst_start,
			      unsigned long len,
			      atomic_t *mmap_changing);
extern ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long dst_start,
			      unsigned long len, atomic_t *mmap_changing);
extern int mwriteprotect_range(struct mm_struct *dst_mm,
			       unsigned long start, unsigned long len,
			       bool enable_wp, atomic_t *mmap_changing);

/* mm helpers */
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
					struct vm_userfaultfd_ctx vm_ctx)
{
	return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
}

/*
 * Never enable huge pmd sharing on some uffd-registered vmas:
 *
 * - VM_UFFD_WP VMAs, because write protect information is per pgtable entry.
 *
 * - VM_UFFD_MINOR VMAs, because otherwise we would never get minor faults for
 *   VMAs which share huge pmds. (If you have two mappings to the same
 *   underlying pages, and fault in the non-UFFD-registered one with a write,
 *   with huge pmd sharing this would *also* setup the second UFFD-registered
 *   mapping, and we'd not get minor faults.)
 */
static inline bool uffd_disable_huge_pmd_share(struct vm_area_struct *vma)
{
	return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);
}
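
/*
 * Illustrative sketch, not part of this header: hugetlb's pmd-sharing
 * decision is the expected consumer of uffd_disable_huge_pmd_share().
 * The names below (want_pmd_share_sketch, vma_shareable_sketch) are
 * hypothetical; the real check lives in mm/hugetlb.c.
 *
 *	static bool want_pmd_share_sketch(struct vm_area_struct *vma,
 *					  unsigned long addr)
 *	{
 *		if (uffd_disable_huge_pmd_share(vma))
 *			return false;
 *		return vma_shareable_sketch(vma, addr);
 *	}
 */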

static inline bool userfaultfd_missing(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_UFFD_MISSING;
}

static inline bool userfaultfd_wp(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_UFFD_WP;
}

static inline bool userfaultfd_minor(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_UFFD_MINOR;
}

static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma,
				      pte_t pte)
{
	return userfaultfd_wp(vma) && pte_uffd_wp(pte);
}

static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma,
					   pmd_t pmd)
{
	return userfaultfd_wp(vma) && pmd_uffd_wp(pmd);
}

static inline bool userfaultfd_armed(struct vm_area_struct *vma)
{
	return vma->vm_flags & __VM_UFFD_FLAGS;
}

extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *);
extern void dup_userfaultfd_complete(struct list_head *);

extern void mremap_userfaultfd_prep(struct vm_area_struct *,
				    struct vm_userfaultfd_ctx *);
extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *,
					unsigned long from, unsigned long to,
					unsigned long len);

extern bool userfaultfd_remove(struct vm_area_struct *vma,
			       unsigned long start,
			       unsigned long end);

extern int userfaultfd_unmap_prep(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end,
				  struct list_head *uf);
extern void userfaultfd_unmap_complete(struct mm_struct *mm,
				       struct list_head *uf);

#else /* CONFIG_USERFAULTFD */

/* mm helpers */
static inline vm_fault_t handle_userfault(struct vm_fault *vmf,
				unsigned long reason)
{
	return VM_FAULT_SIGBUS;
}

static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
					struct vm_userfaultfd_ctx vm_ctx)
{
	return true;
}

static inline bool userfaultfd_missing(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_wp(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_minor(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma,
				      pte_t pte)
{
	return false;
}

static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma,
					   pmd_t pmd)
{
	return false;
}

static inline bool userfaultfd_armed(struct vm_area_struct *vma)
{
	return false;
}

static inline int dup_userfaultfd(struct vm_area_struct *vma,
				  struct list_head *l)
{
	return 0;
}

static inline void dup_userfaultfd_complete(struct list_head *l)
{
}

static inline void mremap_userfaultfd_prep(struct vm_area_struct *vma,
					   struct vm_userfaultfd_ctx *ctx)
{
}

static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx,
					       unsigned long from,
					       unsigned long to,
					       unsigned long len)
{
}

static inline bool userfaultfd_remove(struct vm_area_struct *vma,
				      unsigned long start,
				      unsigned long end)
{
	return true;
}

static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
					 unsigned long start, unsigned long end,
					 struct list_head *uf)
{
	return 0;
}

static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
					      struct list_head *uf)
{
}

#endif /* CONFIG_USERFAULTFD */
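
/*
 * Illustrative sketch, not part of this header: the anonymous fault path
 * is the typical caller of userfaultfd_missing()/handle_userfault(). A
 * condensed form of the kind of check done in mm/memory.c (illustration
 * only, not verbatim upstream code):
 *
 *	if (userfaultfd_missing(vma))
 *		return handle_userfault(vmf, VM_UFFD_MISSING);
 *
 * With !CONFIG_USERFAULTFD the stub above constant-folds
 * userfaultfd_missing() to false, so such branches compile away; a stray
 * call to handle_userfault() still resolves and returns VM_FAULT_SIGBUS.
 */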

static inline bool pte_marker_entry_uffd_wp(swp_entry_t entry)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	return is_pte_marker_entry(entry) &&
	    (pte_marker_get(entry) & PTE_MARKER_UFFD_WP);
#else
	return false;
#endif
}

static inline bool pte_marker_uffd_wp(pte_t pte)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	swp_entry_t entry;

	if (!is_swap_pte(pte))
		return false;

	entry = pte_to_swp_entry(pte);

	return pte_marker_entry_uffd_wp(entry);
#else
	return false;
#endif
}

/*
 * Returns true if this is a swap pte and was uffd-wp wr-protected in either
 * form (pte marker or a normal swap pte), false otherwise.
 */
static inline bool pte_swp_uffd_wp_any(pte_t pte)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	if (!is_swap_pte(pte))
		return false;

	if (pte_swp_uffd_wp(pte))
		return true;

	if (pte_marker_uffd_wp(pte))
		return true;
#endif
	return false;
}

#endif /* _LINUX_USERFAULTFD_K_H */
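
/*
 * Illustrative sketch, not part of this header: a page-table walker that
 * must preserve uffd-wp state for a swapped-out or pte-markered entry
 * would test it with pte_swp_uffd_wp_any() and keep the wr-protect bit
 * when re-installing the pte. Hypothetical shape of such a check:
 *
 *	if (!pte_present(pte) && pte_swp_uffd_wp_any(pte))
 *		writable = false;
 */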