/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/minmax.h>
#include <linux/sched.h>
#include <linux/thread_info.h>

#include <asm/uaccess.h>

/*
 * Force the uaccess routines to be wired up for actual userspace access,
 * overriding any possible set_fs(KERNEL_DS) still lingering around.  Undone
 * using force_uaccess_end below.
 */
static inline mm_segment_t force_uaccess_begin(void)
{
	mm_segment_t fs = get_fs();

	set_fs(USER_DS);
	return fs;
}

static inline void force_uaccess_end(mm_segment_t oldfs)
{
	set_fs(oldfs);
}

/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy.  They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to.  They must not fetch or store anything
 * outside of those areas.  Return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from.  All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0.  If some data cannot be
 * fetched, it is permitted to copy less than had been fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied.  In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 *
 * For raw_copy_from_user(), to always points to kernel memory and no faults
 * on store should happen.  Interpretation of from is affected by set_fs().
 * For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to architectures whether they want to bother
 * with that.  They should not be used directly; they are used to implement
 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
 * that are used instead.  Out of those, the __... ones are inlined.  Plain
 * copy_{to,from}_user() might or might not be inlined.  If you want them
 * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
 * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
 * at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */
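
/*
 * Illustrative sketch (not part of this header's API): what the contract
 * above means for an ordinary caller.  copy_from_user() returns the number
 * of bytes that could *not* be copied, so a nonzero result is normally
 * turned into -EFAULT; the "karg"/"uarg" names below are hypothetical:
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 *	// on a short copy, copy_from_user() has zero-padded the tail of karg
 */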

static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	instrument_copy_from_user(to, from, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	instrument_copy_from_user(to, from, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned,
 * so that we don't take a page fault and sleep.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

#ifdef INLINE_COPY_FROM_USER
static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	might_fault();
	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
		instrument_copy_from_user(to, from, n);
		res = raw_copy_from_user(to, from, n);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
#else
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
#else
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (likely(check_copy_size(to, n, false)))
		n = _copy_from_user(to, from, n);
	return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true)))
		n = _copy_to_user(to, from, n);
	return n;
}
#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (access_ok(to, n) && access_ok(from, n))
		n = raw_copy_in_user(to, from, n);
	return n;
}
#endif
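
/*
 * Illustrative sketch (not part of this header's API): the double-underscore
 * variants above skip access_ok(), so the caller must have validated the
 * range itself and must check the return value - unlike copy_from_user(),
 * __copy_from_user() does not zero-pad on a short copy:
 *
 *	if (!access_ok(uaddr, len))
 *		return -EFAULT;
 *	if (__copy_from_user(kbuf, uaddr, len))
 *		return -EFAULT;
 */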

#ifndef copy_mc_to_kernel
/*
 * Without arch opt-in this generic copy_mc_to_kernel() will not handle
 * #MC (or arch equivalent) during source read.
 */
static inline unsigned long __must_check
copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
{
	memcpy(dst, src, cnt);
	return 0;
}
#endif

static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler.  If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	pagefault_disabled_dec();
}

/*
 * Is the pagefault handler disabled?  If so, user access methods will not sleep.
 */
static inline bool pagefault_disabled(void)
{
	return current->pagefault_disabled != 0;
}

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This function should only be used by the fault handlers.  Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler: with
 * !CONFIG_PREEMPT_COUNT it is a no-op, so the handler would not actually be
 * disabled, and in_atomic() reports different values depending on
 * CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
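
/*
 * Illustrative sketch (not part of this header's API): the usual way these
 * helpers are paired with the _inatomic user-copy primitives - roughly the
 * pattern behind copy_from_user_nofault() - so that a fault produces an
 * error return instead of a sleep:
 *
 *	long ret = -EFAULT;
 *
 *	if (access_ok(src, size)) {
 *		pagefault_disable();
 *		ret = __copy_from_user_inatomic(dst, src, size);
 *		pagefault_enable();
 *	}
 *	if (ret)
 *		return -EFAULT;
 */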

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

#endif		/* ARCH_HAS_NOCACHE_UACCESS */

extern __must_check int check_zeroed_user(const void __user *from, size_t size);

/**
 * copy_struct_from_user: copy a struct from userspace
 * @dst:   Destination address, in kernel space. This buffer must be @ksize
 *         bytes long.
 * @ksize: Size of @dst struct.
 * @src:   Source address, in userspace.
 * @usize: (Alleged) size of @src struct.
 *
 * Copies a struct from userspace to kernel space, in a way that guarantees
 * backwards-compatibility for struct syscall arguments (as long as future
 * struct extensions are made such that all new fields are *appended* to the
 * old struct, and zeroed-out new fields have the same meaning as the old
 * struct).
 *
 * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
 * The recommended usage is something like the following:
 *
 *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
 *   {
 *      int err;
 *      struct foo karg = {};
 *
 *      if (usize > PAGE_SIZE)
 *        return -E2BIG;
 *      if (usize < FOO_SIZE_VER0)
 *        return -EINVAL;
 *
 *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
 *      if (err)
 *        return err;
 *
 *      // ...
 *   }
 *
 * There are three cases to consider:
 *  * If @usize == @ksize, then it's copied verbatim.
 *  * If @usize < @ksize, then userspace has passed an old struct to a
 *    newer kernel.  The rest of the trailing bytes in @dst (@ksize - @usize)
 *    are to be zero-filled.
 *  * If @usize > @ksize, then userspace has passed a new struct to an
 *    older kernel.  The trailing bytes unknown to the kernel (@usize - @ksize)
 *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
 *
 * Returns (in all cases, some data may have been copied):
 *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
 *  * -EFAULT: access to userspace failed.
 */
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
		      size_t usize)
{
	size_t size = min(ksize, usize);
	size_t rest = max(ksize, usize) - size;

	/* Deal with trailing bytes. */
	if (usize < ksize) {
		memset(dst + size, 0, rest);
	} else if (usize > ksize) {
		int ret = check_zeroed_user(src + size, rest);
		if (ret <= 0)
			return ret ?: -E2BIG;
	}
	/* Copy the interoperable parts of the struct. */
	if (copy_from_user(dst, src, size))
		return -EFAULT;
	return 0;
}

bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);

long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);

long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
long notrace copy_to_user_nofault(void __user *dst, const void *src,
		size_t size);

long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
		long count);

long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
		long count);
long strnlen_user_nofault(const void __user *unsafe_addr, long count);

/**
 * get_kernel_nofault(): safely attempt to read from a location
 * @val: read into this variable
 * @ptr: address to read from
 *
 * Returns 0 on success, or -EFAULT.
 */
#define get_kernel_nofault(val, ptr) ({				\
	const typeof(val) *__gk_ptr = (ptr);			\
	copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})
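
/*
 * Illustrative sketch (not part of this header's API): probing a possibly
 * invalid kernel pointer without risking an oops; "ptr" here is a
 * hypothetical pointer to an unsigned long:
 *
 *	unsigned long val;
 *
 *	if (get_kernel_nofault(val, ptr))
 *		return -EFAULT;
 */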

#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
#ifndef user_write_access_begin
#define user_write_access_begin user_access_begin
#define user_write_access_end user_access_end
#endif
#ifndef user_read_access_begin
#define user_read_access_begin user_access_begin
#define user_read_access_end user_access_end
#endif

#ifdef CONFIG_HARDENED_USERCOPY
void usercopy_warn(const char *name, const char *detail, bool to_user,
		   unsigned long offset, unsigned long len);
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len);
#endif

#endif		/* __LINUX_UACCESS_H__ */