/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/minmax.h>
#include <linux/sched.h>
#include <linux/thread_info.h>

#include <asm/uaccess.h>

#ifdef CONFIG_SET_FS
/*
 * Force the uaccess routines to be wired up for actual userspace access,
 * overriding any possible set_fs(KERNEL_DS) still lingering around.  Undone
 * using force_uaccess_end below.
 */
static inline mm_segment_t force_uaccess_begin(void)
{
        mm_segment_t fs = get_fs();

        set_fs(USER_DS);
        return fs;
}

static inline void force_uaccess_end(mm_segment_t oldfs)
{
        set_fs(oldfs);
}
#else /* CONFIG_SET_FS */
typedef struct {
        /* empty dummy */
} mm_segment_t;

#define uaccess_kernel()        (false)
#define user_addr_max()         (TASK_SIZE_MAX)

static inline mm_segment_t force_uaccess_begin(void)
{
        return (mm_segment_t) { };
}

static inline void force_uaccess_end(mm_segment_t oldfs)
{
}
#endif /* CONFIG_SET_FS */
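
/*
 * Example (illustrative sketch, not a definitive caller): the intended
 * pattern is to bracket a region of guaranteed-userspace access, saving
 * and restoring whatever segment was previously in effect:
 *
 *	mm_segment_t old_fs = force_uaccess_begin();
 *
 *	// uaccess routines here operate on real userspace, even if a
 *	// stale set_fs(KERNEL_DS) was in effect beforehand
 *
 *	force_uaccess_end(old_fs);
 */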

/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy.  They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to.  They must not fetch or store anything
 * outside of those areas.  Return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from.  All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0.  If some data cannot be
 * fetched, it is permitted to copy less than had been fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied.  In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 *
 * For raw_copy_from_user(), to always points to kernel memory, so no faults
 * on store should happen.  Interpretation of from is affected by set_fs().
 * For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to architectures whether they want to bother
 * with that.  They should not be used directly; they are used to implement
 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
 * that are used instead.  Out of those, the __... ones are inlined.  Plain
 * copy_{to,from}_user() might or might not be inlined.  If you want them
 * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
 * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
 * at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */

static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
        instrument_copy_from_user(to, from, n);
        check_object_size(to, n, false);
        return raw_copy_from_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
        might_fault();
        if (should_fail_usercopy())
                return n;
        instrument_copy_from_user(to, from, n);
        check_object_size(to, n, false);
        return raw_copy_from_user(to, from, n);
}

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned,
 * so that the access does not page-fault and sleep.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
        if (should_fail_usercopy())
                return n;
        instrument_copy_to_user(to, from, n);
        check_object_size(from, n, true);
        return raw_copy_to_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
        might_fault();
        if (should_fail_usercopy())
                return n;
        instrument_copy_to_user(to, from, n);
        check_object_size(from, n, true);
        return raw_copy_to_user(to, from, n);
}

#ifdef INLINE_COPY_FROM_USER
static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned long res = n;

        might_fault();
        if (!should_fail_usercopy() && likely(access_ok(from, n))) {
                instrument_copy_from_user(to, from, n);
                res = raw_copy_from_user(to, from, n);
        }
        if (unlikely(res))
                memset(to + (n - res), 0, res);
        return res;
}
#else
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        might_fault();
        if (should_fail_usercopy())
                return n;
        if (access_ok(to, n)) {
                instrument_copy_to_user(to, from, n);
                n = raw_copy_to_user(to, from, n);
        }
        return n;
}
#else
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
        if (likely(check_copy_size(to, n, false)))
                n = _copy_from_user(to, from, n);
        return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (likely(check_copy_size(from, n, true)))
                n = _copy_to_user(to, from, n);
        return n;
}
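
/*
 * Example (illustrative sketch): these helpers return the number of
 * bytes that could NOT be copied, so the usual caller-side idiom turns
 * any non-zero result into -EFAULT (uarg/karg are hypothetical names):
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 *	// karg is fully initialized here: on a short copy, copy_from_user()
 *	// zero-pads the tail (see the NOTE above) and returns non-zero.
 */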

#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        might_fault();
        if (access_ok(to, n) && access_ok(from, n))
                n = raw_copy_in_user(to, from, n);
        return n;
}
#endif

#ifndef copy_mc_to_kernel
/*
 * Without arch opt-in this generic copy_mc_to_kernel() will not handle
 * #MC (or arch equivalent) during source read.
 */
static inline unsigned long __must_check
copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
{
        memcpy(dst, src, cnt);
        return 0;
}
#endif

static __always_inline void pagefault_disabled_inc(void)
{
        current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
        current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler.  If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
        pagefault_disabled_inc();
        /*
         * Make sure the store has been issued before a pagefault
         * can hit.
         */
        barrier();
}

static inline void pagefault_enable(void)
{
        /*
         * Make sure those last loads/stores are issued before enabling
         * the pagefault handler again.
         */
        barrier();
        pagefault_disabled_dec();
}

/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
static inline bool pagefault_disabled(void)
{
        return current->pagefault_disabled != 0;
}

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This macro should only be used by the fault handlers.  Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler.  With
 * !CONFIG_PREEMPT_COUNT it is a NOP, so the handler won't be disabled, and
 * in_atomic() will report different values based on !CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
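
/*
 * Example (illustrative sketch): atomic contexts pair
 * pagefault_disable()/pagefault_enable() with the _inatomic copy
 * variants, so that a fault produces a short copy instead of sleeping:
 *
 *	unsigned long left;
 *
 *	pagefault_disable();
 *	left = __copy_from_user_inatomic(dst, usrc, len);
 *	pagefault_enable();
 *	if (left)
 *		// fall back to a sleeping copy, or fail
 */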

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
                                  unsigned long n)
{
        return __copy_from_user_inatomic(to, from, n);
}

#endif  /* ARCH_HAS_NOCACHE_UACCESS */

extern __must_check int check_zeroed_user(const void __user *from, size_t size);

/**
 * copy_struct_from_user: copy a struct from userspace
 * @dst:   Destination address, in kernel space. This buffer must be @ksize
 *         bytes long.
 * @ksize: Size of @dst struct.
 * @src:   Source address, in userspace.
 * @usize: (Alleged) size of @src struct.
 *
 * Copies a struct from userspace to kernel space, in a way that guarantees
 * backwards-compatibility for struct syscall arguments (as long as future
 * struct extensions are made such that all new fields are *appended* to the
 * old struct, and zeroed-out new fields have the same meaning as the old
 * struct).
 *
 * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
 * The recommended usage is something like the following:
 *
 *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
 *   {
 *      int err;
 *      struct foo karg = {};
 *
 *      if (usize > PAGE_SIZE)
 *        return -E2BIG;
 *      if (usize < FOO_SIZE_VER0)
 *        return -EINVAL;
 *
 *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
 *      if (err)
 *        return err;
 *
 *      // ...
 *   }
 *
 * There are three cases to consider:
 *  * If @usize == @ksize, then it's copied verbatim.
 *  * If @usize < @ksize, then userspace has passed an old struct to a
 *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
 *    are to be zero-filled.
 *  * If @usize > @ksize, then userspace has passed a new struct to an
 *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
 *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
 *
 * Returns (in all cases, some data may have been copied):
 *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
 *  * -EFAULT: access to userspace failed.
 */
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
                      size_t usize)
{
        size_t size = min(ksize, usize);
        size_t rest = max(ksize, usize) - size;

        /* Deal with trailing bytes. */
        if (usize < ksize) {
                memset(dst + size, 0, rest);
        } else if (usize > ksize) {
                int ret = check_zeroed_user(src + size, rest);
                if (ret <= 0)
                        return ret ?: -E2BIG;
        }
        /* Copy the interoperable parts of the struct. */
        if (copy_from_user(dst, src, size))
                return -EFAULT;
        return 0;
}

bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);

long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);

long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
long notrace copy_to_user_nofault(void __user *dst, const void *src,
                size_t size);

long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
                long count);

long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
                long count);
long strnlen_user_nofault(const void __user *unsafe_addr, long count);

/**
 * get_kernel_nofault(): safely attempt to read from a location
 * @val: read into this variable
 * @ptr: address to read from
 *
 * Returns 0 on success, or -EFAULT.
 */
#define get_kernel_nofault(val, ptr) ({                         \
        const typeof(val) *__gk_ptr = (ptr);                    \
        copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})

#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
#ifndef user_write_access_begin
#define user_write_access_begin user_access_begin
#define user_write_access_end user_access_end
#endif
#ifndef user_read_access_begin
#define user_read_access_begin user_access_begin
#define user_read_access_end user_access_end
#endif
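
/*
 * Example (illustrative sketch): the *_nofault() helpers above are for
 * contexts that must not fault or sleep, e.g. probing a possibly-invalid
 * kernel pointer from a debugging or tracing hook:
 *
 *	unsigned long word;
 *
 *	if (get_kernel_nofault(word, (unsigned long *)addr))
 *		return -EFAULT;	// address was not safely readable
 */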
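
/*
 * Example (illustrative sketch): the unsafe_*() accessors above are only
 * valid inside a user_access_begin()/user_access_end() section, with a
 * label to unwind on fault:
 *
 *	if (!user_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_get_user(val, uptr, Efault);
 *	user_access_end();
 *	return 0;
 *	Efault:
 *	user_access_end();
 *	return -EFAULT;
 */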

#ifdef CONFIG_HARDENED_USERCOPY
void usercopy_warn(const char *name, const char *detail, bool to_user,
                   unsigned long offset, unsigned long len);
void __noreturn usercopy_abort(const char *name, const char *detail,
                               bool to_user, unsigned long offset,
                               unsigned long len);
#endif

#endif  /* __LINUX_UACCESS_H__ */