/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/instrumented.h>
#include <linux/sched.h>
#include <linux/thread_info.h>

#include <asm/uaccess.h>

/*
 * Force the uaccess routines to be wired up for actual userspace access,
 * overriding any possible set_fs(KERNEL_DS) still lingering around.  Undone
 * using force_uaccess_end below.
 */
static inline mm_segment_t force_uaccess_begin(void)
{
	mm_segment_t fs = get_fs();

	set_fs(USER_DS);
	return fs;
}

static inline void force_uaccess_end(mm_segment_t oldfs)
{
	set_fs(oldfs);
}

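/*
 * Example (a minimal sketch; buf, ubuf and len are illustrative): ensure a
 * uaccess really targets userspace even if some caller left set_fs(KERNEL_DS)
 * in effect, restoring the previous segment afterwards:
 *
 *	mm_segment_t old_fs = force_uaccess_begin();
 *
 *	ret = copy_from_user(buf, ubuf, len);
 *	force_uaccess_end(old_fs);
 */
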
/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy.  They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to.  They must not fetch or store anything
 * outside of those areas.  Return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from.  All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0.  If some data cannot be
 * fetched, it is permitted to copy less than had been fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied.  In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 *
 * For raw_copy_from_user(), the destination (to) always points to kernel
 * memory, so no faults on store should happen.  Interpretation of from is
 * affected by set_fs().  For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to architectures whether they want to bother
 * with that.  They should not be used directly; they are used to implement
 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
 * that are used instead.  Out of those, the __... ones are inlined.  Plain
 * copy_{to,from}_user() might or might not be inlined.  If you want them
 * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
 * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
 * at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */

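/*
 * Example (a minimal sketch of the zero-padding rule above; karg and uarg
 * are illustrative): on a short copy, copy_from_user() zero-fills the tail
 * of the kernel buffer, while __copy_from_user() leaves it undefined, so
 * its return value must always be acted upon:
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;	// any uncopied tail of karg was zeroed
 *
 *	if (__copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;	// tail of karg is stale, do not use it
 */
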
static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	instrument_copy_from_user(to, from, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	instrument_copy_from_user(to, from, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user-space pages are pinned so
 * that the access cannot fault and sleep.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

#ifdef INLINE_COPY_FROM_USER
static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	might_fault();
	if (likely(access_ok(from, n))) {
		instrument_copy_from_user(to, from, n);
		res = raw_copy_from_user(to, from, n);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
#else
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
#else
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (likely(check_copy_size(to, n, false)))
		n = _copy_from_user(to, from, n);
	return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true)))
		n = _copy_to_user(to, from, n);
	return n;
}
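
/*
 * Example (a typical usage sketch; struct foo and uarg are illustrative):
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 *	// ... operate on karg ...
 *	if (copy_to_user(uarg, &karg, sizeof(karg)))
 *		return -EFAULT;
 */
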
#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (access_ok(to, n) && access_ok(from, n))
		n = raw_copy_in_user(to, from, n);
	return n;
}
#endif

#ifndef copy_mc_to_kernel
/*
 * Without arch opt-in this generic copy_mc_to_kernel() will not handle
 * #MC (or arch equivalent) during source read.
 */
static inline unsigned long __must_check
copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
{
	memcpy(dst, src, cnt);
	return 0;
}
#endif
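
/*
 * Example (a sketch; dst, src and len are illustrative): like the raw copy
 * routines, copy_mc_to_kernel() returns the number of bytes left uncopied,
 * so a caller can truncate to the intact prefix instead of failing outright:
 *
 *	rem = copy_mc_to_kernel(dst, src, len);
 *	if (rem)
 *		len -= rem;	// only the first len - rem bytes are valid
 */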

static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler. If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	pagefault_disabled_dec();
}

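/*
 * Example (a minimal sketch; dst, src and size are illustrative): disable
 * pagefaults around an atomic user access, so that a fault fails fast via
 * the fixup table instead of sleeping:
 *
 *	pagefault_disable();
 *	ret = __copy_from_user_inatomic(dst, src, size);
 *	pagefault_enable();
 *	if (ret)
 *		// fall back to a sleeping copy_from_user() elsewhere
 */
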
/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
static inline bool pagefault_disabled(void)
{
	return current->pagefault_disabled != 0;
}

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This function should only be used by the fault handlers. Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler. With
 * !CONFIG_PREEMPT_COUNT it is a no-op, so the handler won't actually be
 * disabled, and in_atomic() will report different values depending on
 * CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
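
/*
 * Example (a sketch of the fault-handler-side check): bail out to the
 * exception fixup path instead of taking mmap_lock and possibly sleeping:
 *
 *	if (faulthandler_disabled() || !current->mm)
 *		goto no_context;
 */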

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

#endif		/* ARCH_HAS_NOCACHE_UACCESS */

extern __must_check int check_zeroed_user(const void __user *from, size_t size);

/**
 * copy_struct_from_user: copy a struct from userspace
 * @dst:   Destination address, in kernel space. This buffer must be @ksize
 *         bytes long.
 * @ksize: Size of @dst struct.
 * @src:   Source address, in userspace.
 * @usize: (Alleged) size of @src struct.
 *
 * Copies a struct from userspace to kernel space, in a way that guarantees
 * backwards-compatibility for struct syscall arguments (as long as future
 * struct extensions are made such that all new fields are *appended* to the
 * old struct, and zeroed-out new fields have the same meaning as the old
 * struct).
 *
 * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
 * The recommended usage is something like the following:
 *
 *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
 *   {
 *      int err;
 *      struct foo karg = {};
 *
 *      if (usize > PAGE_SIZE)
 *        return -E2BIG;
 *      if (usize < FOO_SIZE_VER0)
 *        return -EINVAL;
 *
 *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
 *      if (err)
 *        return err;
 *
 *      // ...
 *   }
 *
 * There are three cases to consider:
 *  * If @usize == @ksize, then it's copied verbatim.
 *  * If @usize < @ksize, then the userspace has passed an old struct to a
 *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
 *    are to be zero-filled.
 *  * If @usize > @ksize, then the userspace has passed a new struct to an
 *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
 *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
 *
 * Returns (in all cases, some data may have been copied):
 *  * 0:       the copy succeeded.
 *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
 *  * -EFAULT: access to userspace failed.
 */
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
		      size_t usize)
{
	size_t size = min(ksize, usize);
	size_t rest = max(ksize, usize) - size;

	/* Deal with trailing bytes. */
	if (usize < ksize) {
		memset(dst + size, 0, rest);
	} else if (usize > ksize) {
		int ret = check_zeroed_user(src + size, rest);
		if (ret <= 0)
			return ret ?: -E2BIG;
	}
	/* Copy the interoperable parts of the struct. */
	if (copy_from_user(dst, src, size))
		return -EFAULT;
	return 0;
}

bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);

long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);

long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
long notrace copy_to_user_nofault(void __user *dst, const void *src,
		size_t size);

long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
		long count);

long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
		long count);
long strnlen_user_nofault(const void __user *unsafe_addr, long count);

/**
 * get_kernel_nofault(): safely attempt to read from a location
 * @val: read into this variable
 * @ptr: address to read from
 *
 * Returns 0 on success, or -EFAULT.
 */
#define get_kernel_nofault(val, ptr) ({				\
	const typeof(val) *__gk_ptr = (ptr);			\
	copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})

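/*
 * Example (a minimal sketch; addr is illustrative): peek at a possibly
 * bogus kernel address without risking an oops, e.g. from a debugging
 * helper:
 *
 *	unsigned long word;
 *
 *	if (get_kernel_nofault(word, (unsigned long *)addr))
 *		return -EFAULT;
 */
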
#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
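
/*
 * Example (a sketch of the unsafe_*() pattern; uptr and its fields are
 * illustrative): open one access window, perform several accesses, and
 * jump to a label on the first fault:
 *
 *	if (!user_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_get_user(val, &uptr->field, Efault);
 *	unsafe_put_user(val, &uptr->other, Efault);
 *	user_access_end();
 *	return 0;
 * Efault:
 *	user_access_end();
 *	return -EFAULT;
 */
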
#ifndef user_write_access_begin
#define user_write_access_begin user_access_begin
#define user_write_access_end user_access_end
#endif
#ifndef user_read_access_begin
#define user_read_access_begin user_access_begin
#define user_read_access_end user_access_end
#endif

#ifdef CONFIG_HARDENED_USERCOPY
void usercopy_warn(const char *name, const char *detail, bool to_user,
		   unsigned long offset, unsigned long len);
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len);
#endif

#endif		/* __LINUX_UACCESS_H__ */