xref: /linux-6.15/include/linux/uio.h (revision b3e8701d)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Berkeley style UIO structures	-	Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <linux/mm_types.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

typedef unsigned int __bitwise iov_iter_extraction_t;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};
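
/*
 * Illustrative sketch (not part of this header): a kvec plays the role of
 * a struct iovec for kernel-space memory, so a kernel buffer can be fed
 * through the same iov_iter machinery as user data.  The buffer below is
 * made up for the example; iov_iter_kvec() and ITER_DEST are declared
 * further down in this file.
 *
 *	char buf[128];
 *	struct kvec kv = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, ITER_DEST, &kv, 1, sizeof(buf));
 *	// iter now describes buf as the destination of at most 128 bytes
 */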

enum iter_type {
	/* iter types */
	ITER_IOVEC,
	ITER_KVEC,
	ITER_BVEC,
	ITER_PIPE,
	ITER_XARRAY,
	ITER_DISCARD,
	ITER_UBUF,
};

#define ITER_SOURCE	1	// == WRITE
#define ITER_DEST	0	// == READ
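
/*
 * Illustrative sketch: the direction says which way data flows relative
 * to the iterator.  In a write(2)-style path the buffer described by the
 * iterator supplies the data, so it is initialized with ITER_SOURCE; a
 * read(2)-style path, where the buffer receives data, uses ITER_DEST.
 * import_ubuf() is declared further down in this file.
 *
 *	struct iov_iter iter;
 *	int ret;
 *
 *	// user memory is the source: the kernel will consume its contents
 *	ret = import_ubuf(ITER_SOURCE, ubuf, len, &iter);
 */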

struct iov_iter_state {
	size_t iov_offset;
	size_t count;
	unsigned long nr_segs;
};

struct iov_iter {
	u8 iter_type;
	bool nofault;
	bool data_source;
	bool user_backed;
	union {
		size_t iov_offset;
		int last_offset;
	};
	/*
	 * Hack alert: overlay ubuf_iovec with iovec + count, so
	 * that the members resolve correctly regardless of the type
	 * of iterator used. This means that you can use:
	 *
	 * &iter->__ubuf_iovec or iter->__iov
	 * interchangeably for the user_backed cases, hence simplifying
	 * some of the cases that need to deal with both.
	 */
	union {
		/*
		 * This really should be a const, but we cannot do that without
		 * also modifying any of the zero-filling iter init functions.
		 * Leave it non-const for now, but it should be treated as such.
		 */
		struct iovec __ubuf_iovec;
		struct {
			union {
				/* use iter_iov() to get the current vec */
				const struct iovec *__iov;
				const struct kvec *kvec;
				const struct bio_vec *bvec;
				struct xarray *xarray;
				struct pipe_inode_info *pipe;
				void __user *ubuf;
			};
			size_t count;
		};
	};
	union {
		unsigned long nr_segs;
		struct {
			unsigned int head;
			unsigned int start_head;
		};
		loff_t xarray_start;
	};
};

static inline const struct iovec *iter_iov(const struct iov_iter *iter)
{
	if (iter->iter_type == ITER_UBUF)
		return (const struct iovec *) &iter->__ubuf_iovec;
	return iter->__iov;
}

#define iter_iov_addr(iter)	(iter_iov(iter)->iov_base + (iter)->iov_offset)
#define iter_iov_len(iter)	(iter_iov(iter)->iov_len - (iter)->iov_offset)
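
/*
 * Illustrative sketch: iter_iov_addr()/iter_iov_len() expose the current
 * segment and iov_iter_advance() (declared below) consumes it, so a
 * manual walk over a user-backed iterator can look like the following.
 * process_seg() is a made-up stand-in for real per-segment work, and the
 * sketch assumes no zero-length segments.
 *
 *	while (iov_iter_count(iter)) {
 *		void __user *p = iter_iov_addr(iter);
 *		size_t len = iter_iov_len(iter);
 *
 *		process_seg(p, len);
 *		iov_iter_advance(iter, len);
 *	}
 */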

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->iter_type;
}

static inline void iov_iter_save_state(struct iov_iter *iter,
				       struct iov_iter_state *state)
{
	state->iov_offset = iter->iov_offset;
	state->count = iter->count;
	state->nr_segs = iter->nr_segs;
}
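
/*
 * Illustrative sketch: saving the state lets a caller attempt an
 * operation that consumes the iterator and roll it back if the attempt
 * must be retried.  iov_iter_restore() is declared further down;
 * do_io_attempt() is a made-up callee.
 *
 *	struct iov_iter_state state;
 *
 *	iov_iter_save_state(iter, &state);
 *	ret = do_io_attempt(iter);
 *	if (ret == -EAGAIN)
 *		iov_iter_restore(iter, &state);
 */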

static inline bool iter_is_ubuf(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_UBUF;
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_XARRAY;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->data_source ? WRITE : READ;
}

static inline bool user_backed_iter(const struct iov_iter *i)
{
	return i->user_backed;
}

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths
 * can overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}
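
/*
 * Illustrative sketch of the validation the NOTE above requires: bound
 * each segment while accumulating, so the total provably fits.  The use
 * of MAX_RW_COUNT as the cap mirrors what the read/write paths enforce,
 * but the loop shape here is only an example.
 *
 *	size_t total = 0;
 *
 *	for (seg = 0; seg < nr_segs; seg++) {
 *		if (iov[seg].iov_len > MAX_RW_COUNT - total)
 *			return -EINVAL;
 *		total += iov[seg].iov_len;
 *	}
 *	// now iov_length(iov, nr_segs) == total, with no overflow possible
 */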

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
				  size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);

static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
		size_t bytes, struct iov_iter *i)
{
	return copy_page_to_iter(&folio->page, offset, bytes, i);
}

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, true))
		return _copy_to_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter(addr, bytes, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}
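
/*
 * Illustrative sketch: the _full variant gives all-or-nothing semantics,
 * which suits fixed-size structures; on a short copy it has already
 * reverted the iterator, so the caller can simply fail.  struct foo_hdr
 * is a made-up type for the example.
 *
 *	struct foo_hdr hdr;
 *
 *	if (!copy_from_iter_full(&hdr, sizeof(hdr), iter))
 *		return -EFAULT;
 */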

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter_nocache(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter_nocache(addr, bytes, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on the stricter semantics of
 * _copy_from_iter_flushcache() than _copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif
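
/*
 * Illustrative sketch of the check described in the comment above: a
 * caller that relies on the destination being flushed must not fall
 * through to the _copy_from_iter_nocache() alias.
 *
 *	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
 *		return -EOPNOTSUPP;	// or take a path that flushes by hand
 *	rem = _copy_from_iter_flushcache(addr, bytes, i);
 */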

#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_mc_to_iter _copy_to_iter
#endif

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			unsigned len_mask);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
			unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
			unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
			unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
			size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
		     loff_t start, size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
		size_t maxsize, unsigned maxpages, size_t *start,
		iov_iter_extraction_t extraction_flags);
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
			size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		struct page ***pages, size_t maxsize, size_t *start,
		iov_iter_extraction_t extraction_flags);
ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
			size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);
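
/*
 * Illustrative sketch: iov_iter_get_pages2() fills a caller-supplied
 * array with up to maxpages referenced pages and, unlike the older
 * non-"2" variant, advances the iterator past the bytes it mapped.
 * *start receives the offset of the data within the first page, and the
 * references are dropped with put_page() (contrast with the pinning
 * extraction API near the end of this file).  The array size is made up.
 *
 *	struct page *pages[16];
 *	size_t offset;
 *	ssize_t got;
 *	int n;
 *
 *	got = iov_iter_get_pages2(iter, pages, SIZE_MAX, 16, &offset);
 *	if (got <= 0)
 *		return got;
 *	// ... data occupies [offset, offset + got) across the pages ...
 *	for (n = 0; n < DIV_ROUND_UP(offset + got, PAGE_SIZE); n++)
 *		put_page(pages[n]);
 */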

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

/*
 * Cap the iov_iter by given limit; note that the second argument is
 * *not* the new size - it's an upper limit for it.  Passing it a value
 * greater than the amount of data in iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - comparison extends both
	 * operands to u64 here and any value that would be truncated by
	 * conversion in assignment is by definition greater than all
	 * values of size_t, including old i->count.
	 */
	if (i->count > count)
		i->count = count;
}

/*
 * reexpand a previously truncated iterator; count must be no more than how much
 * we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}

static inline int
iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
{
	size_t shorted = 0;
	int npages;

	if (iov_iter_count(i) > max_bytes) {
		shorted = iov_iter_count(i) - max_bytes;
		iov_iter_truncate(i, max_bytes);
	}
	npages = iov_iter_npages(i, maxpages);
	if (shorted)
		iov_iter_reexpand(i, iov_iter_count(i) + shorted);

	return npages;
}

struct csum_state {
	__wsum csum;
	size_t off;
};

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);

static __always_inline __must_check
bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
				  __wsum *csum, struct iov_iter *i)
{
	size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}
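
/*
 * Illustrative sketch: the csum-and-copy helpers fold an Internet
 * checksum pass into the copy itself, the way the networking datagram
 * code consumes user buffers.  Seeding the checksum with 0 here is just
 * for the example; real callers fold it into a larger computation.
 *
 *	__wsum csum = 0;
 *
 *	if (!csum_and_copy_from_iter_full(buf, bytes, &csum, iter))
 *		return -EFAULT;
 *	// csum now covers the bytes copied into buf
 */
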
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i);

struct iovec *iovec_from_user(const struct iovec __user *uvector,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat);
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat);
int import_single_range(int type, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i);
int import_ubuf(int type, void __user *buf, size_t len, struct iov_iter *i);
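
/*
 * Illustrative sketch: the usual readv/writev-style import pattern.  A
 * small on-stack array serves short vectors; import_iovec() allocates a
 * copy when nr_segs exceeds fast_segs, NULLs *iovp when the stack array
 * was used, and the caller unconditionally kfree()s the result (kfree
 * of NULL is a no-op).
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(ITER_DEST, uvec, nr_segs, UIO_FASTIOV,
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	// ... use the iterator; ret is the total byte count ...
 *	kfree(iov);
 */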

static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
			void __user *buf, size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_UBUF,
		.user_backed = true,
		.data_source = direction,
		.ubuf = buf,
		.count = count,
		.nr_segs = 1
	};
}
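
/*
 * Illustrative sketch: ITER_UBUF is the single-segment fast path for one
 * plain user buffer, avoiding an iovec array entirely.  Here a kernel
 * buffer (kbuf, made up for the example) is copied out to user memory.
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_ubuf(&iter, ITER_DEST, buf, len);
 *	copied = copy_to_iter(kbuf, len, &iter);
 */
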
/* Flags for iov_iter_get/extract_pages*() */
/* Allow P2PDMA on the extracted pages */
#define ITER_ALLOW_P2PDMA	((__force iov_iter_extraction_t)0x01)

ssize_t iov_iter_extract_pages(struct iov_iter *i, struct page ***pages,
			       size_t maxsize, unsigned int maxpages,
			       iov_iter_extraction_t extraction_flags,
			       size_t *offset0);

/**
 * iov_iter_extract_will_pin - Indicate how pages from the iterator will be retained
 * @iter: The iterator
 *
 * Examine the iterator and indicate by returning true or false as to how, if
 * at all, pages extracted from the iterator will be retained by the extraction
 * function.
 *
 * %true indicates that the pages will have a pin placed in them that the
 * caller must unpin.  This must be done for DMA/async DIO to force fork()
 * to forcibly copy a page for the child (the parent must retain the original
 * page).
 *
 * %false indicates that no measures are taken and that it's up to the caller
 * to retain the pages.
 */
static inline bool iov_iter_extract_will_pin(const struct iov_iter *iter)
{
	return user_backed_iter(iter);
}
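
/*
 * Illustrative sketch: pairing extraction with cleanup.  If *pages is
 * NULL, iov_iter_extract_pages() allocates the page array itself (freed
 * with kvfree()); pages from a user-backed iterator carry a pin that is
 * dropped with unpin_user_page(), while kernel-backed iterators leave
 * nothing to release.  npages here stands for the count derived from
 * the returned byte total and the first-page offset.
 *
 *	struct page **pages = NULL;
 *	bool pinned = iov_iter_extract_will_pin(iter);
 *
 *	got = iov_iter_extract_pages(iter, &pages, maxsize, maxpages,
 *				     0, &offset);
 *	// ... perform I/O on the extracted pages ...
 *	if (pinned)
 *		for (n = 0; n < npages; n++)
 *			unpin_user_page(pages[n]);
 *	kvfree(pages);
 */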

#endif