/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Berkeley style UIO structures	-	Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <linux/mm_types.h>
#include <uapi/linux/uio.h>

struct page;
struct folio_queue;

typedef unsigned int __bitwise iov_iter_extraction_t;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};
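
/*
 * Example (editor's sketch, not in the original header): a kvec describes a
 * kernel-space buffer, and an array of kvecs can back an ITER_KVEC iterator
 * via iov_iter_kvec() below.  "kernel_buf" and "buf_len" are hypothetical:
 *
 *	struct kvec kv = { .iov_base = kernel_buf, .iov_len = buf_len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, ITER_SOURCE, &kv, 1, buf_len);
 *
 * ITER_SOURCE marks the iterator as the source of a transfer (data is copied
 * out of it); ITER_DEST marks it as the destination.
 */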

enum iter_type {
	/* iter types */
	ITER_UBUF,
	ITER_IOVEC,
	ITER_BVEC,
	ITER_KVEC,
	ITER_FOLIOQ,
	ITER_XARRAY,
	ITER_DISCARD,
};

#define ITER_SOURCE	1	// == WRITE
#define ITER_DEST	0	// == READ

struct iov_iter_state {
	size_t iov_offset;
	size_t count;
	unsigned long nr_segs;
};

struct iov_iter {
	u8 iter_type;
	bool nofault;
	bool data_source;
	size_t iov_offset;
	/*
	 * Hack alert: overlay ubuf_iovec with iovec + count, so
	 * that the members resolve correctly regardless of the type
	 * of iterator used. This means that you can use:
	 *
	 * &iter->__ubuf_iovec or iter->__iov
	 *
	 * interchangeably for the user_backed cases, hence simplifying
	 * some of the cases that need to deal with both.
	 */
	union {
		/*
		 * This really should be a const, but we cannot do that without
		 * also modifying any of the zero-filling iter init functions.
		 * Leave it non-const for now, but it should be treated as such.
		 */
		struct iovec __ubuf_iovec;
		struct {
			union {
				/* use iter_iov() to get the current vec */
				const struct iovec *__iov;
				const struct kvec *kvec;
				const struct bio_vec *bvec;
				const struct folio_queue *folioq;
				struct xarray *xarray;
				void __user *ubuf;
			};
			size_t count;
		};
	};
	union {
		unsigned long nr_segs;
		u8 folioq_slot;
		loff_t xarray_start;
	};
};

static inline const struct iovec *iter_iov(const struct iov_iter *iter)
{
	if (iter->iter_type == ITER_UBUF)
		return (const struct iovec *) &iter->__ubuf_iovec;
	return iter->__iov;
}

#define iter_iov_addr(iter)	(iter_iov(iter)->iov_base + (iter)->iov_offset)
#define iter_iov_len(iter)	(iter_iov(iter)->iov_len - (iter)->iov_offset)
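
/*
 * Example (editor's sketch, not in the original header): iter_iov_addr() and
 * iter_iov_len() describe only the unconsumed part of the *current* segment,
 * so callers usually loop segment by segment and advance as they go.  For
 * user-backed iterators the address is a userspace pointer and must be
 * accessed with the usual uaccess helpers.  "want" and "kbuf" are
 * hypothetical:
 *
 *	while (want && iov_iter_count(iter)) {
 *		size_t chunk = min(want, iter_iov_len(iter));
 *
 *		if (copy_from_user(kbuf, iter_iov_addr(iter), chunk))
 *			return -EFAULT;
 *		iov_iter_advance(iter, chunk);
 *		kbuf += chunk;
 *		want -= chunk;
 *	}
 */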

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->iter_type;
}

static inline void iov_iter_save_state(struct iov_iter *iter,
				       struct iov_iter_state *state)
{
	state->iov_offset = iter->iov_offset;
	state->count = iter->count;
	state->nr_segs = iter->nr_segs;
}
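
/*
 * Example (editor's sketch, not in the original header): pair
 * iov_iter_save_state() with iov_iter_restore(), declared below, to retry a
 * transfer from a known-good position, e.g. after a short or failed copy.
 * do_transfer() is a hypothetical stand-in for whatever consumes the
 * iterator:
 *
 *	struct iov_iter_state state;
 *
 *	iov_iter_save_state(iter, &state);
 *	ret = do_transfer(iter);
 *	if (ret == -EAGAIN) {
 *		iov_iter_restore(iter, &state);
 *		... queue for retry ...
 *	}
 */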

static inline bool iter_is_ubuf(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_UBUF;
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline bool iov_iter_is_folioq(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_FOLIOQ;
}

static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_XARRAY;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->data_source ? WRITE : READ;
}

static inline bool user_backed_iter(const struct iov_iter *i)
{
	return iter_is_ubuf(i) || iter_is_iovec(i);
}

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}
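
/*
 * Example (editor's sketch, not in the original header): one way to make the
 * summation above safe is to bound the running total while validating each
 * segment, in the spirit of what the import helpers below do (MAX_RW_COUNT
 * is from <linux/fs.h>):
 *
 *	size_t total = 0;
 *
 *	for (seg = 0; seg < nr_segs; seg++) {
 *		if (iov[seg].iov_len > MAX_RW_COUNT - total)
 *			return -EINVAL;
 *		total += iov[seg].iov_len;
 *	}
 *
 * After such a pass, iov_length() cannot overflow for this iovec.
 */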

size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
				  size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);

static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
		size_t bytes, struct iov_iter *i)
{
	return copy_page_to_iter(&folio->page, offset, bytes, i);
}

static inline size_t copy_folio_from_iter_atomic(struct folio *folio,
		size_t offset, size_t bytes, struct iov_iter *i)
{
	return copy_page_from_iter_atomic(&folio->page, offset, bytes, i);
}
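
/*
 * Example (editor's sketch, not in the original header): a read-style path
 * can hand pagecache data to the destination iterator one folio at a time;
 * a short return means the iterator faulted or ran out of space.  "pos" and
 * "bytes" are hypothetical; offset_in_folio() is from <linux/mm.h>:
 *
 *	size_t copied;
 *
 *	copied = copy_folio_to_iter(folio, offset_in_folio(folio, pos),
 *				    bytes, iter);
 *	if (copied < bytes)
 *		... stop or retry with the remainder ...
 */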

size_t copy_page_to_iter_nofault(struct page *page, unsigned offset,
				 size_t bytes, struct iov_iter *i);

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, true))
		return _copy_to_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter(addr, bytes, i);
	return 0;
}
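
/*
 * Example (editor's sketch, not in the original header): copy_to_iter() and
 * copy_from_iter() return the number of bytes actually copied, which may be
 * short, so callers account for partial progress.  A read_iter-style method
 * might do the following, where "kbuf" and "len" are hypothetical:
 *
 *	size_t n = copy_to_iter(kbuf, len, iter);
 *
 *	if (!n && len)
 *		return -EFAULT;
 *	return n;
 */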

static __always_inline __must_check
bool copy_to_iter_full(const void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_to_iter(addr, bytes, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter(addr, bytes, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}
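
/*
 * Example (editor's sketch, not in the original header): the _full variants
 * implement all-or-nothing semantics, reverting any partial copy, which
 * suits fixed-size headers.  "struct foo_header" is hypothetical:
 *
 *	struct foo_header hdr;
 *
 *	if (!copy_from_iter_full(&hdr, sizeof(hdr), iter))
 *		return -EFAULT;
 *
 * On failure the iterator is left where it started, so the caller can bail
 * out without worrying about partially consumed data.
 */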

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter_nocache(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter_nocache(addr, bytes, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note: users like pmem that depend on _copy_from_iter_flushcache() having
 * stricter semantics than _copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif

#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_mc_to_iter _copy_to_iter
#endif

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			unsigned len_mask);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
			unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
			unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
			unsigned long nr_segs, size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
void iov_iter_folio_queue(struct iov_iter *i, unsigned int direction,
			  const struct folio_queue *folioq,
			  unsigned int first_slot, unsigned int offset, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
		     loff_t start, size_t count);
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
			size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
			size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);
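
/*
 * Example (editor's sketch, not in the original header): each init helper
 * above pairs an iterator with a particular backing store.  A block-style
 * path that already holds a page might wrap it in a bio_vec; "page", "len"
 * and "offset" are hypothetical, bvec_set_page() is from <linux/bvec.h>:
 *
 *	struct bio_vec bv;
 *	struct iov_iter iter;
 *
 *	bvec_set_page(&bv, page, len, offset);
 *	iov_iter_bvec(&iter, ITER_DEST, &bv, 1, len);
 */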

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

/*
 * Cap the iov_iter by the given limit; note that the second argument is
 * *not* the new size - it's an upper limit for it.  Passing a value
 * greater than the amount of data in the iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - comparison extends both
	 * operands to u64 here and any value that would be truncated by
	 * conversion in assignment is by definition greater than all
	 * values of size_t, including old i->count.
	 */
	if (i->count > count)
		i->count = count;
}
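
/*
 * Example (editor's sketch, not in the original header): a classic use of
 * iov_iter_truncate() is capping a transfer to what the target can accept,
 * e.g. the space left before a size limit ("limit" and "pos" are
 * hypothetical):
 *
 *	iov_iter_truncate(iter, limit - pos);
 *
 * It pairs with iov_iter_reexpand() below when the caller needs the original
 * length back afterwards; iov_iter_npages_cap() below shows the full pattern.
 */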

/*
 * reexpand a previously truncated iterator; count must be no larger than the
 * iterator's size before it was truncated.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}

static inline int
iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
{
	size_t shorted = 0;
	int npages;

	if (iov_iter_count(i) > max_bytes) {
		shorted = iov_iter_count(i) - max_bytes;
		iov_iter_truncate(i, max_bytes);
	}
	npages = iov_iter_npages(i, maxpages);
	if (shorted)
		iov_iter_reexpand(i, iov_iter_count(i) + shorted);

	return npages;
}

struct iovec *iovec_from_user(const struct iovec __user *uvector,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat);
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat);
int import_ubuf(int type, void __user *buf, size_t len, struct iov_iter *i);
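
/*
 * Example (editor's sketch, not in the original header): a vectored-I/O
 * syscall path typically builds its iterator with import_iovec(), which
 * validates the user iovec array and copies it in ("uvec" and "nr_segs"
 * are hypothetical; UIO_FASTIOV is from <uapi/linux/uio.h>):
 *
 *	struct iovec iovstack[UIO_FASTIOV];
 *	struct iovec *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(ITER_DEST, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	... consume &iter ...
 *	kfree(iov);
 *
 * On return, iov is either NULL (the on-stack array was used) or points to a
 * heap copy, so the unconditional kfree() is safe.
 */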

static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
			void __user *buf, size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_UBUF,
		.data_source = direction,
		.ubuf = buf,
		.count = count,
		.nr_segs = 1
	};
}

/* Flags for iov_iter_get/extract_pages*() */
/* Allow P2PDMA on the extracted pages */
#define ITER_ALLOW_P2PDMA	((__force iov_iter_extraction_t)0x01)

ssize_t iov_iter_extract_pages(struct iov_iter *i, struct page ***pages,
			       size_t maxsize, unsigned int maxpages,
			       iov_iter_extraction_t extraction_flags,
			       size_t *offset0);

/**
 * iov_iter_extract_will_pin - Indicate how pages from the iterator will be retained
 * @iter: The iterator
 *
 * Examine the iterator and indicate by returning true or false as to how, if
 * at all, pages extracted from the iterator will be retained by the extraction
 * function.
 *
 * %true indicates that the pages will have a pin placed in them that the
 * caller must unpin.  This must be done for DMA/async DIO to force fork()
 * to forcibly copy a page for the child (the parent must retain the original
 * page).
 *
 * %false indicates that no measures are taken and that it's up to the caller
 * to retain the pages.
 */
static inline bool iov_iter_extract_will_pin(const struct iov_iter *iter)
{
	return user_backed_iter(iter);
}
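
/*
 * Example (editor's sketch, not in the original header): a typical
 * extract-then-release sequence.  Pages extracted from a user-backed
 * iterator are pinned and must be released with unpin_user_page() (from
 * <linux/mm.h>) once the I/O completes.  "want", "maxpages" and "npages"
 * (the page count derived from n and offset) are hypothetical:
 *
 *	struct page **pages = NULL;
 *	size_t offset;
 *	ssize_t n;
 *
 *	n = iov_iter_extract_pages(iter, &pages, want, maxpages, 0, &offset);
 *	if (n < 0)
 *		return n;
 *	... perform DMA on the pages ...
 *	if (iov_iter_extract_will_pin(iter))
 *		while (npages--)
 *			unpin_user_page(pages[npages]);
 *	kvfree(pages);
 */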

struct sg_table;
ssize_t extract_iter_to_sg(struct iov_iter *iter, size_t len,
			   struct sg_table *sgtable, unsigned int sg_max,
			   iov_iter_extraction_t extraction_flags);
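
/*
 * Example (editor's sketch, not in the original header): extract_iter_to_sg()
 * appends up to sg_max scatterlist entries describing the next len bytes of
 * the iterator.  A rough outline, assuming the caller has set up the table
 * (e.g. with sg_alloc_table() from <linux/scatterlist.h>) and releases any
 * pinned pages as per iov_iter_extract_will_pin().  "len", "sgt" and
 * "sg_max" are hypothetical:
 *
 *	ssize_t n;
 *
 *	n = extract_iter_to_sg(iter, len, &sgt, sg_max, 0);
 *	if (n < 0)
 *		return n;
 *	... map sgt for DMA ...
 */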

#endif /* __LINUX_UIO_H */