/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Berkeley style UIO structures	-	Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <crypto/hash.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

enum iter_type {
	/* set if ITER_BVEC doesn't hold a bv_page ref */
	ITER_BVEC_FLAG_NO_REF = 2,

	/* iter types */
	ITER_IOVEC = 4,
	ITER_KVEC = 8,
	ITER_BVEC = 16,
	ITER_PIPE = 32,
	ITER_DISCARD = 64,
};

struct iov_iter {
	/*
	 * Bit 0 is the read/write bit, set if we're writing.
	 * Bit 1 is the BVEC_FLAG_NO_REF bit, set if type is a bvec and
	 * the caller isn't expecting to drop a page reference when done.
	 */
	unsigned int type;
	size_t iov_offset;
	size_t count;
	union {
		const struct iovec *iov;
		const struct kvec *kvec;
		const struct bio_vec *bvec;
		struct pipe_inode_info *pipe;
	};
	union {
		unsigned long nr_segs;
		struct {
			int idx;
			int start_idx;
		};
	};
};
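
/*
 * Example (sketch, with kv/nr/len standing in for a caller's kvec array
 * and its size): after iov_iter_kvec(&i, READ, kv, nr, len) below,
 * i.type == (ITER_KVEC | READ), so iov_iter_type(&i) == ITER_KVEC and
 * iov_iter_rw(&i) == READ.
 */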

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->type & ~(READ | WRITE | ITER_BVEC_FLAG_NO_REF);
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->type & (READ | WRITE);
}

static inline bool iov_iter_bvec_no_ref(const struct iov_iter *i)
{
	return (i->type & ITER_BVEC_FLAG_NO_REF) != 0;
}

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, since the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}
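
/*
 * Example of the overflow hazard above (sketch): on a 32-bit box, two
 * segments of 0x80000000 bytes each sum to 0 in a size_t, so an
 * unvalidated iovec could make iov_length() report a tiny total.
 * Validation (as done by import_iovec() below) must reject such segment
 * lengths first.
 */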

static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
	return (struct iovec) {
		.iov_base = iter->iov->iov_base + iter->iov_offset,
		.iov_len = min(iter->count,
			       iter->iov->iov_len - iter->iov_offset),
	};
}
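
/*
 * Example (sketch, ITER_IOVEC only): walking the remaining segments of
 * an iterator by hand; process() is a hypothetical per-segment helper.
 *
 *	while (iov_iter_count(i)) {
 *		struct iovec v = iov_iter_iovec(i);
 *
 *		process(v.iov_base, v.iov_len);
 *		iov_iter_advance(i, v.iov_len);
 *	}
 */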

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
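
/*
 * Example (sketch of a read-side pattern, assuming an up-to-date page
 * cache page): copy up to the page boundary into a READ iterator;
 * copy_page_to_iter() advances the iterator and may return less than
 * asked for if a userspace destination faults.
 *
 *	size_t want = min_t(size_t, PAGE_SIZE - offset, iov_iter_count(i));
 *	size_t done = copy_page_to_iter(page, offset, want, i);
 *
 *	if (done < want)
 *		return done ? done : -EFAULT;
 */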

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_to_iter(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return false;
	else
		return _copy_from_iter_full(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_nocache(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return false;
	else
		return _copy_from_iter_full_nocache(addr, bytes, i);
}
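
/*
 * Example (sketch): handing klen bytes from a kernel buffer kbuf to a
 * READ iterator; copy_to_iter() advances the iterator and returns the
 * number of bytes actually copied, which may be short if part of the
 * destination faults.
 *
 *	size_t copied = copy_to_iter(kbuf, klen, i);
 *
 *	if (copied != klen)
 *		return -EFAULT;
 */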

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note: users like pmem that depend on copy_from_iter_flushcache() having
 * stricter semantics than copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif

#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_to_iter_mcsafe _copy_to_iter
#endif

static __always_inline __must_check
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_flushcache(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_to_iter_mcsafe(addr, bytes, i);
}
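
/*
 * Example (sketch of a pmem-style read path): with
 * CONFIG_ARCH_HAS_UACCESS_MCSAFE a short return from
 * copy_to_iter_mcsafe() means a machine check (poisoned source memory)
 * cut the copy short rather than a fault in the destination.
 *
 *	size_t copied = copy_to_iter_mcsafe(mem, len, i);
 *
 *	if (copied != len)
 *		return -EIO;	// poison consumed part of the source
 */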

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
			unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
			unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
			unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
			size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
			size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
			size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
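
/*
 * Example (sketch): wrapping a kernel buffer in a single-segment kvec
 * iterator so it can be handed to code that consumes an iov_iter; WRITE
 * marks the buffer as a data source.
 *
 *	struct kvec kv = { .iov_base = kbuf, .iov_len = klen };
 *	struct iov_iter from;
 *
 *	iov_iter_kvec(&from, WRITE, &kv, 1, klen);
 */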

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

/*
 * Cap the iov_iter by the given limit; note that the second argument is
 * *not* the new size - it's an upper limit for it.  Passing a value
 * greater than the amount of data in the iov_iter is fine - it'll just
 * do nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - the comparison extends both
	 * operands to u64 here, and any value that would be truncated by
	 * the conversion in the assignment is by definition greater than
	 * all values of size_t, including the old i->count.
	 */
	if (i->count > count)
		i->count = count;
}

/*
 * reexpand a previously truncated iterator; count must be no more than how much
 * we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}
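
/*
 * Example (sketch): the usual pairing of the two helpers above - clamp
 * the iterator for a bounded operation that consumes ret bytes, then
 * restore the remainder of the original count; do_bounded_op() is
 * hypothetical.
 *
 *	size_t old = iov_iter_count(i);
 *	ssize_t ret;
 *
 *	iov_iter_truncate(i, maxbytes);
 *	ret = do_bounded_op(i);
 *	iov_iter_reexpand(i, old - (ret > 0 ? ret : 0));
 */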

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i);

int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i);
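
/*
 * Example (sketch of the common readv()-style calling pattern, with
 * uvec/nr_segs coming from the caller): import_iovec() validates the
 * user iovec array, uses the stack array for small counts or allocates
 * a bigger one, and initializes the iterator.  On success *iov is
 * either NULL (stack array used) or the allocation to free, so the
 * unconditional kfree() is safe.
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	// ... consume &iter ...
 *	kfree(iov);
 */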

#ifdef CONFIG_COMPAT
struct compat_iovec;
int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i);
#endif

int import_single_range(int type, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i);

int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
			    int (*f)(struct kvec *vec, void *context),
			    void *context);
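
/*
 * Example (sketch): a callback for iov_iter_for_each_range(), which is
 * invoked once per contiguous kernel-mapped chunk; a nonzero return
 * stops the walk.  sum_bytes() and struct acc are hypothetical.
 *
 *	static int sum_bytes(struct kvec *vec, void *context)
 *	{
 *		struct acc *a = context;
 *
 *		a->total += vec->iov_len;
 *		return 0;
 *	}
 *
 *	...
 *	err = iov_iter_for_each_range(i, bytes, sum_bytes, &a);
 */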

#endif