/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Berkeley style UIO structures	-	Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <linux/mm_types.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

enum iter_type {
	/* iter types */
	ITER_IOVEC,
	ITER_KVEC,
	ITER_BVEC,
	ITER_PIPE,
	ITER_XARRAY,
	ITER_DISCARD,
	ITER_UBUF,
};

#define ITER_SOURCE	1	// == WRITE
#define ITER_DEST	0	// == READ
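
/*
 * The "direction" argument taken by the iov_iter initializers below is one
 * of these two aliases: ITER_SOURCE marks an iterator whose data will be
 * read out of it (the source of a write), ITER_DEST one that will be filled.
 * A minimal illustrative sketch, not taken from any particular caller:
 *
 *	iov_iter_ubuf(&iter, ITER_SOURCE, ubuf, len);	// write(2)-style path
 *	iov_iter_ubuf(&iter, ITER_DEST, ubuf, len);	// read(2)-style path
 */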

struct iov_iter_state {
	size_t iov_offset;
	size_t count;
	unsigned long nr_segs;
};

struct iov_iter {
	u8 iter_type;
	bool nofault;
	bool data_source;
	bool user_backed;
	union {
		size_t iov_offset;
		int last_offset;
	};
	size_t count;
	union {
		const struct iovec *iov;
		const struct kvec *kvec;
		const struct bio_vec *bvec;
		struct xarray *xarray;
		struct pipe_inode_info *pipe;
		void __user *ubuf;
	};
	union {
		unsigned long nr_segs;
		struct {
			unsigned int head;
			unsigned int start_head;
		};
		loff_t xarray_start;
	};
};

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->iter_type;
}

static inline void iov_iter_save_state(struct iov_iter *iter,
				       struct iov_iter_state *state)
{
	state->iov_offset = iter->iov_offset;
	state->count = iter->count;
	state->nr_segs = iter->nr_segs;
}
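
/*
 * iov_iter_save_state() pairs with iov_iter_restore() (declared below): take
 * a snapshot, attempt an operation, and roll the iterator back wholesale if
 * the operation must be retried.  An illustrative sketch, not taken from any
 * particular caller:
 *
 *	struct iov_iter_state state;
 *
 *	iov_iter_save_state(iter, &state);
 *	if (copy_to_iter(kbuf, len, iter) != len)
 *		iov_iter_restore(iter, &state);	// undo the partial advance
 */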

static inline bool iter_is_ubuf(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_UBUF;
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_XARRAY;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->data_source ? WRITE : READ;
}

static inline bool user_backed_iter(const struct iov_iter *i)
{
	return i->user_backed;
}

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}
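
/*
 * An illustrative sketch of the validation the NOTE above requires: bound
 * each segment length before summing so the total cannot wrap.  (The real
 * import helpers declared near the end of this header clamp the running
 * total to MAX_RW_COUNT instead of failing; this sketch simply rejects.)
 *
 *	size_t total = 0;
 *
 *	for (seg = 0; seg < nr_segs; seg++) {
 *		if (iov[seg].iov_len > MAX_RW_COUNT - total)
 *			return -EINVAL;	// sum would exceed the limit
 *		total += iov[seg].iov_len;
 *	}
 */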

static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
	return (struct iovec) {
		.iov_base = iter->iov->iov_base + iter->iov_offset,
		.iov_len = min(iter->count,
			       iter->iov->iov_len - iter->iov_offset),
	};
}

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
				  size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);

static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
		size_t bytes, struct iov_iter *i)
{
	return copy_page_to_iter(&folio->page, offset, bytes, i);
}

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, true))
		return _copy_to_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter(addr, bytes, i);

	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}
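
/*
 * Typical use of copy_from_iter_full() (an illustrative sketch; 'struct
 * my_hdr' is a hypothetical fixed-size header): either the whole object is
 * copied, or the iterator is left exactly where it was.
 *
 *	struct my_hdr hdr;
 *
 *	if (!copy_from_iter_full(&hdr, sizeof(hdr), from))
 *		return -EFAULT;
 */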

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter_nocache(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter_nocache(addr, bytes, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note: users like pmem that depend on the stricter semantics of
 * _copy_from_iter_flushcache() (compared with _copy_from_iter_nocache()) must
 * check for IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming
 * that the destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif
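
/*
 * Illustrative caller-side pattern for the note above (a sketch, not the
 * actual pmem code; flush_dst_explicitly() is a hypothetical stand-in for
 * whatever cache maintenance the platform needs):
 *
 *	copied = _copy_from_iter_flushcache(addr, bytes, i);
 *	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
 *		flush_dst_explicitly(addr, copied);
 */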

#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_mc_to_iter _copy_to_iter
#endif

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			unsigned len_mask);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
			unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
			unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
			unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
			size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
		     loff_t start, size_t count);
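
/*
 * Illustrative initialization of a kernel-memory iterator with the helpers
 * above (a minimal sketch; 'kbuf'/'len' are assumed to describe a valid
 * kernel buffer):
 *
 *	struct kvec vec = { .iov_base = kbuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, ITER_DEST, &vec, 1, len);
 *	// 'iter' can now be handed to a ->read_iter()-style consumer
 */
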
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
		size_t maxsize, unsigned maxpages, size_t *start,
		unsigned gup_flags);
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
			size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		struct page ***pages, size_t maxsize, size_t *start,
		unsigned gup_flags);
ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
			size_t maxsize, size_t *start);
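
/*
 * Illustrative use of iov_iter_get_pages2() (a sketch; error handling
 * mostly elided).  On success it returns the number of bytes covered, fills
 * 'pages', sets 'off' to the offset into the first page, and advances the
 * iterator; drop the page references (e.g. with put_page()) when done:
 *
 *	struct page *pages[16];
 *	size_t off;
 *	ssize_t n;
 *
 *	n = iov_iter_get_pages2(i, pages, bytes, ARRAY_SIZE(pages), &off);
 *	if (n < 0)
 *		return n;
 */
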
int iov_iter_npages(const struct iov_iter *i, int maxpages);
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

/*
 * Cap the iov_iter at the given limit; note that the second argument is
 * *not* the new size - it's an upper bound on it.  Passing a value
 * greater than the amount of data in the iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - the comparison extends both
	 * operands to u64 here, and any value that would be truncated by
	 * the conversion in the assignment is by definition greater than all
	 * values of size_t, including the old i->count.
	 */
	if (i->count > count)
		i->count = count;
}

/*
 * Reexpand a previously truncated iterator; count is the new total and must
 * be no more than what the iterator held before it was truncated (see
 * iov_iter_npages_cap() below for the usual truncate/reexpand pairing).
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}

static inline int
iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
{
	size_t shorted = 0;
	int npages;

	if (iov_iter_count(i) > max_bytes) {
		shorted = iov_iter_count(i) - max_bytes;
		iov_iter_truncate(i, max_bytes);
	}
	npages = iov_iter_npages(i, maxpages);
	if (shorted)
		iov_iter_reexpand(i, iov_iter_count(i) + shorted);

	return npages;
}

struct csum_state {
	__wsum csum;
	size_t off;
};

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);

static __always_inline __must_check
bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
				  __wsum *csum, struct iov_iter *i)
{
	size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}
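
/*
 * Illustrative checksumming copy (a sketch of a network-style caller;
 * 'kdata', 'len' and 'from' are assumed, and the checksum seed is caller
 * policy - real callers may start from a protocol-specific partial sum):
 *
 *	__wsum csum = 0;
 *
 *	if (!csum_and_copy_from_iter_full(kdata, len, &csum, from))
 *		return -EFAULT;
 *	// 'csum' now also folds in the bytes just copied
 */
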
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i);

struct iovec *iovec_from_user(const struct iovec __user *uvector,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat);
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat);
int import_single_range(int type, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i);
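
/*
 * Typical syscall-side use of import_iovec() (an illustrative sketch): it
 * validates the user iovec array, copies it into 'iovstack' (or a freshly
 * allocated array when nr_segs exceeds it) and initializes 'iter'.  When the
 * on-stack array was used, *iovp is set to NULL, so the kfree() below is
 * always safe:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(ITER_DEST, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	// ... consume 'iter' (ret is the total byte count) ...
 *	kfree(iov);
 */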

static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
			void __user *buf, size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_UBUF,
		.user_backed = true,
		.data_source = direction,
		.ubuf = buf,
		.count = count
	};
}
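
/*
 * Illustrative single-buffer setup (a sketch of a read(2)-style path; 'buf'
 * and 'count' are the user pointer and length from the caller):
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_ubuf(&iter, ITER_DEST, buf, count);
 *	// hand &iter to e.g. a ->read_iter() implementation
 */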

#endif