/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/minmax.h>
#include <linux/nospec.h>
#include <linux/sched.h>
#include <linux/thread_info.h>

#include <asm/uaccess.h>

/*
 * Architectures that support memory tagging (assigning tags to memory regions,
 * embedding these tags into addresses that point to these memory regions, and
 * checking that the memory and the pointer tags match on memory accesses)
 * redefine this macro to strip tags from pointers.
 *
 * Passing down mm_struct makes it possible to define untagging rules on a
 * per-process basis.
 *
 * It's defined as a no-op for architectures that don't support memory tagging.
 */
#ifndef untagged_addr
#define untagged_addr(addr) (addr)
#endif

#ifndef untagged_addr_remote
#define untagged_addr_remote(mm, addr)	({		\
	mmap_assert_locked(mm);				\
	untagged_addr(addr);				\
})
#endif
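
/*
 * Example (illustrative only): on arm64, untagging amounts to
 * sign-extending the address from bit 55; a simplified sketch, not the
 * exact arch code:
 *
 *	#define untagged_addr(addr)	sign_extend64((u64)(addr), 55)
 */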

#ifdef masked_user_access_begin
 #define can_do_masked_user_access() 1
#else
 #define can_do_masked_user_access() 0
 #define masked_user_access_begin(src) NULL
#endif
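
/*
 * Example (illustrative only): callers that can use the cheaper
 * address-masking check typically follow a pattern along these lines:
 *
 *	if (can_do_masked_user_access())
 *		from = masked_user_access_begin(from);
 *	else if (!user_read_access_begin(from, sizeof(*from)))
 *		return -EFAULT;
 *	... unsafe_get_user() etc. ...
 *	user_read_access_end();
 */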

/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy.  They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to.  They must not fetch or store anything
 * outside of those areas.  Return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from.  All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0.  If some data cannot be
 * fetched, it is permitted to store less than what was fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied.  In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 *
 * For raw_copy_from_user(), to always points to kernel memory, so no faults
 * on store should happen.  Interpretation of from is affected by set_fs().
 * For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to architectures whether they want to bother
 * with that.  They should not be used directly; they are used to implement
 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
 * that are used instead.  Out of those, the __... ones are inlined.  Plain
 * copy_{to,from}_user() might or might not be inlined.  If you want them
 * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of a short
 * copy.  Neither __copy_from_user() nor __copy_from_user_inatomic() zero
 * anything at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */
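
/*
 * Worked example of the contract above (illustrative only): suppose
 * raw_copy_from_user(to, from, 128) hits a fault after fetching 100
 * bytes.  It may return any N with 28 <= N < 128: it need not store all
 * 100 fetched bytes, but since something could be copied it must not
 * return 128.  A return of N guarantees that the first 128 - N bytes at
 * to are valid and that everything past to + (128 - N) is untouched.
 */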

static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	unsigned long res;

	instrument_copy_from_user_before(to, from, n);
	check_object_size(to, n, false);
	res = raw_copy_from_user(to, from, n);
	instrument_copy_from_user_after(to, from, n, res);
	return res;
}

static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res;

	might_fault();
	instrument_copy_from_user_before(to, from, n);
	if (should_fail_usercopy())
		return n;
	check_object_size(to, n, false);
	res = raw_copy_from_user(to, from, n);
	instrument_copy_from_user_after(to, from, n, res);
	return res;
}
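
/*
 * Example (illustrative only): the __ variants do not zero-pad on a
 * short copy, so callers must check the return value before trusting
 * the destination buffer:
 *
 *	if (__copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */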

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned,
 * so that the access does not page fault and sleep.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

/*
 * Architectures that #define INLINE_COPY_TO_USER use this function
 * directly in the normal copy_to/from_user(); the other ones go
 * through an extern _copy_to/from_user(), which expands to the same
 * code here.
 *
 * Rust code always uses the extern definition.
 */
static inline __must_check unsigned long
_inline_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	might_fault();
	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
		/*
		 * Ensure that bad access_ok() speculation will not
		 * lead to nasty side effects *after* the copy is
		 * finished:
		 */
		barrier_nospec();
		instrument_copy_from_user_before(to, from, n);
		res = raw_copy_from_user(to, from, n);
		instrument_copy_from_user_after(to, from, n, res);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);

static inline __must_check unsigned long
_inline_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (!check_copy_size(to, n, false))
		return n;
#ifdef INLINE_COPY_FROM_USER
	return _inline_copy_from_user(to, from, n);
#else
	return _copy_from_user(to, from, n);
#endif
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (!check_copy_size(from, n, true))
		return n;

#ifdef INLINE_COPY_TO_USER
	return _inline_copy_to_user(to, from, n);
#else
	return _copy_to_user(to, from, n);
#endif
}
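
/*
 * Example (illustrative only): the canonical caller pattern treats any
 * non-zero return as a fault:
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 *	... operate on karg ...
 *	if (copy_to_user(uarg, &karg, sizeof(karg)))
 *		return -EFAULT;
 */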

#ifndef copy_mc_to_kernel
/*
 * Without arch opt-in, this generic copy_mc_to_kernel() will not handle
 * #MC (or arch equivalent) during source read.
 */
static inline unsigned long __must_check
copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
{
	memcpy(dst, src, cnt);
	return 0;
}
#endif
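
/*
 * Example (illustrative only): callers reading from possibly-poisoned
 * memory (e.g. pmem) check for a short copy instead of assuming success:
 *
 *	unsigned long rem = copy_mc_to_kernel(dst, src, len);
 *
 *	if (rem)
 *		return -EIO;	// only len - rem leading bytes are valid
 */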

static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler. If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	pagefault_disabled_dec();
}
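
/*
 * Example (illustrative only): a best-effort, non-sleeping peek at user
 * memory from a context that must not block:
 *
 *	pagefault_disable();
 *	ret = __copy_from_user_inatomic(dst, usrc, size);
 *	pagefault_enable();
 *	if (ret)
 *		... fall back to a sleeping path or give up ...
 */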

/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
static inline bool pagefault_disabled(void)
{
	return current->pagefault_disabled != 0;
}

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This function should only be used by the fault handlers. Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler. With
 * !CONFIG_PREEMPT_COUNT, it is a no-op, so the handler won't actually be
 * disabled; in_atomic() reports different values depending on
 * CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())

#ifndef CONFIG_ARCH_HAS_SUBPAGE_FAULTS

/**
 * probe_subpage_writeable: probe the user range for write faults at sub-page
 *			    granularity (e.g. arm64 MTE)
 * @uaddr: start of address range
 * @size: size of address range
 *
 * Returns 0 on success, or the number of bytes not probed on fault.
 *
 * It is expected that the caller has checked the write permission of each
 * page in the range, either via put_user() or GUP. The architecture port can
 * implement a more efficient get_user() probing if the same sub-page faults
 * are triggered by either a read or a write.
 */
static inline size_t probe_subpage_writeable(char __user *uaddr, size_t size)
{
	return 0;
}

#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */
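
/*
 * Example (illustrative only): on an MTE-capable system, a caller that
 * already faulted in the range with GUP or put_user() might re-probe at
 * sub-page granularity:
 *
 *	if (probe_subpage_writeable(uaddr, size))
 *		return -EFAULT;	// tag-check fault within the range
 */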

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

#endif		/* ARCH_HAS_NOCACHE_UACCESS */

extern __must_check int check_zeroed_user(const void __user *from, size_t size);

/**
 * copy_struct_from_user: copy a struct from userspace
 * @dst:   Destination address, in kernel space. This buffer must be @ksize
 *         bytes long.
 * @ksize: Size of @dst struct.
 * @src:   Source address, in userspace.
 * @usize: (Alleged) size of @src struct.
 *
 * Copies a struct from userspace to kernel space, in a way that guarantees
 * backwards-compatibility for struct syscall arguments (as long as future
 * struct extensions are made such that all new fields are *appended* to the
 * old struct, and zeroed-out new fields have the same meaning as the old
 * struct).
 *
 * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
 * The recommended usage is something like the following:
 *
 *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
 *   {
 *      int err;
 *      struct foo karg = {};
 *
 *      if (usize > PAGE_SIZE)
 *        return -E2BIG;
 *      if (usize < FOO_SIZE_VER0)
 *        return -EINVAL;
 *
 *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
 *      if (err)
 *        return err;
 *
 *      // ...
 *   }
 *
 * There are three cases to consider:
 *  * If @usize == @ksize, then it's copied verbatim.
 *  * If @usize < @ksize, then userspace has passed an old struct to a
 *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
 *    are to be zero-filled.
 *  * If @usize > @ksize, then userspace has passed a new struct to an
 *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
 *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
 *
 * Returns (in all cases, some data may have been copied):
 *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
 *  * -EFAULT: access to userspace failed.
 */
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
		      size_t usize)
{
	size_t size = min(ksize, usize);
	size_t rest = max(ksize, usize) - size;

	/* Double check if ksize is larger than a known object size. */
	if (WARN_ON_ONCE(ksize > __builtin_object_size(dst, 1)))
		return -E2BIG;

	/* Deal with trailing bytes. */
	if (usize < ksize) {
		memset(dst + size, 0, rest);
	} else if (usize > ksize) {
		int ret = check_zeroed_user(src + size, rest);
		if (ret <= 0)
			return ret ?: -E2BIG;
	}
	/* Copy the interoperable parts of the struct. */
	if (copy_from_user(dst, src, size))
		return -EFAULT;
	return 0;
}

bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);

long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);

long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
long notrace copy_to_user_nofault(void __user *dst, const void *src,
		size_t size);

long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
		long count);

long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
		long count);
long strnlen_user_nofault(const void __user *unsafe_addr, long count);
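
/*
 * Example (illustrative only): the *_nofault() variants are for contexts
 * where taking a page fault is not acceptable, e.g. tracing or NMI code:
 *
 *	if (copy_from_user_nofault(buf, usrc, size))
 *		return -EFAULT;	// fails instead of faulting in pages
 */
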
#ifndef __get_kernel_nofault
#define __get_kernel_nofault(dst, src, type, label)	\
do {							\
	type __user *p = (type __force __user *)(src);	\
	type data;					\
	if (__get_user(data, p))			\
		goto label;				\
	*(type *)dst = data;				\
} while (0)

#define __put_kernel_nofault(dst, src, type, label)	\
do {							\
	type __user *p = (type __force __user *)(dst);	\
	type data = *(type *)src;			\
	if (__put_user(data, p))			\
		goto label;				\
} while (0)
#endif

/**
 * get_kernel_nofault(): safely attempt to read from a location
 * @val: read into this variable
 * @ptr: address to read from
 *
 * Returns 0 on success, or -EFAULT.
 */
#define get_kernel_nofault(val, ptr) ({				\
	const typeof(val) *__gk_ptr = (ptr);			\
	copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})
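
/*
 * Example (illustrative only): dumping a possibly-bogus kernel address
 * without risking a recursive fault:
 *
 *	unsigned long word;
 *
 *	if (get_kernel_nofault(word, addr))
 *		pr_cont(" (efault)");
 *	else
 *		pr_cont(" %016lx", word);
 */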

#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
#define unsafe_copy_from_user(d,s,l,e) unsafe_op_wrap(__copy_from_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
#ifndef user_write_access_begin
#define user_write_access_begin user_access_begin
#define user_write_access_end user_access_end
#endif
#ifndef user_read_access_begin
#define user_read_access_begin user_access_begin
#define user_read_access_end user_access_end
#endif
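
/*
 * Example (illustrative only): the begin/end pairs bracket batches of
 * unsafe_*() accesses, with a shared error label for the fault case:
 *
 *	if (!user_read_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_get_user(val, uptr, Efault);
 *	user_read_access_end();
 *	return 0;
 * Efault:
 *	user_read_access_end();
 *	return -EFAULT;
 */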

#ifdef CONFIG_HARDENED_USERCOPY
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len);
#endif

#endif		/* __LINUX_UACCESS_H__ */