/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Berkeley style UIO structures - Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <linux/mm_types.h>
#include <uapi/linux/uio.h>

struct page;
struct folio_queue;

typedef unsigned int __bitwise iov_iter_extraction_t;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

enum iter_type {
	/* iter types */
	ITER_UBUF,
	ITER_IOVEC,
	ITER_BVEC,
	ITER_KVEC,
	ITER_FOLIOQ,
	ITER_XARRAY,
	ITER_DISCARD,
};

#define ITER_SOURCE	1	// == WRITE
#define ITER_DEST	0	// == READ

struct iov_iter_state {
	size_t iov_offset;
	size_t count;
	unsigned long nr_segs;
};

struct iov_iter {
	u8 iter_type;
	bool nofault;
	bool data_source;
	size_t iov_offset;
	/*
	 * Hack alert: overlay ubuf_iovec with iovec + count, so
	 * that the members resolve correctly regardless of the type
	 * of iterator used. This means that you can use:
	 *
	 * &iter->__ubuf_iovec or iter->__iov
	 *
	 * interchangeably for the user_backed cases, hence simplifying
	 * some of the cases that need to deal with both.
	 */
	union {
		/*
		 * This really should be a const, but we cannot do that without
		 * also modifying the zero-filling iter init functions.
		 * Leave it non-const for now, but it should be treated as such.
		 */
		struct iovec __ubuf_iovec;
		struct {
			union {
				/* use iter_iov() to get the current vec */
				const struct iovec *__iov;
				const struct kvec *kvec;
				const struct bio_vec *bvec;
				const struct folio_queue *folioq;
				struct xarray *xarray;
				void __user *ubuf;
			};
			size_t count;
		};
	};
	union {
		unsigned long nr_segs;
		u8 folioq_slot;
		loff_t xarray_start;
	};
};

typedef __u16 uio_meta_flags_t;

struct uio_meta {
	uio_meta_flags_t	flags;
	u16			app_tag;
	u64			seed;
	struct iov_iter		iter;
};

static inline const struct iovec *iter_iov(const struct iov_iter *iter)
{
	if (iter->iter_type == ITER_UBUF)
		return (const struct iovec *) &iter->__ubuf_iovec;
	return iter->__iov;
}

#define iter_iov_addr(iter)	(iter_iov(iter)->iov_base + (iter)->iov_offset)
#define iter_iov_len(iter)	(iter_iov(iter)->iov_len - (iter)->iov_offset)

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->iter_type;
}

static inline void iov_iter_save_state(struct iov_iter *iter,
				       struct iov_iter_state *state)
{
	state->iov_offset = iter->iov_offset;
	state->count = iter->count;
	state->nr_segs = iter->nr_segs;
}

static inline bool iter_is_ubuf(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_UBUF;
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline bool iov_iter_is_folioq(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_FOLIOQ;
}

static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_XARRAY;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->data_source ? WRITE : READ;
}

static inline bool user_backed_iter(const struct iov_iter *i)
{
	return iter_is_ubuf(i) || iter_is_iovec(i);
}

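/*
 * Illustrative sketch, not part of the API: thanks to the __ubuf_iovec
 * overlay above, iter_iov_addr()/iter_iov_len() describe the current
 * segment for both ITER_UBUF and ITER_IOVEC iterators.  A hypothetical
 * consumer peeking at the current user segment ('iter' and 'want' are
 * assumed to come from the caller) might do:
 *
 *	if (user_backed_iter(iter)) {
 *		void __user *p = iter_iov_addr(iter);
 *		size_t seg = min_t(size_t, iter_iov_len(iter), want);
 *
 *		...access up to seg bytes at p, then...
 *		iov_iter_advance(iter, seg);
 *	}
 */
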
/*
 * Total number of bytes covered by an iovec.
 *
 * Note that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}

size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
				  size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);

static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
					size_t bytes, struct iov_iter *i)
{
	return copy_page_to_iter(&folio->page, offset, bytes, i);
}

static inline size_t copy_folio_from_iter(struct folio *folio, size_t offset,
					  size_t bytes, struct iov_iter *i)
{
	return copy_page_from_iter(&folio->page, offset, bytes, i);
}

static inline size_t copy_folio_from_iter_atomic(struct folio *folio,
		size_t offset, size_t bytes, struct iov_iter *i)
{
	return copy_page_from_iter_atomic(&folio->page, offset, bytes, i);
}

size_t copy_page_to_iter_nofault(struct page *page, unsigned offset,
				 size_t bytes, struct iov_iter *i);

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, true))
		return _copy_to_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_to_iter_full(const void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_to_iter(addr, bytes, i);

	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter(addr, bytes, i);

	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

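/*
 * Illustrative sketch, not part of the API: the *_full() variants give
 * all-or-nothing semantics - on a short copy the iterator is reverted
 * to its original position.  A caller pulling a fixed-size header out
 * of a source iterator (the hypothetical 'struct foo_hdr' is just for
 * the example) can therefore simply do:
 *
 *	struct foo_hdr hdr;
 *
 *	if (!copy_from_iter_full(&hdr, sizeof(hdr), iter))
 *		return -EFAULT;		// iter's position is unchanged
 *	// on success, iter has advanced by sizeof(hdr)
 */
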
static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter_nocache(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter_nocache(addr, bytes, i);

	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note: users like pmem that depend on the stricter semantics of
 * _copy_from_iter_flushcache() compared to _copy_from_iter_nocache() must
 * check for IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming
 * that the destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif

#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_mc_to_iter _copy_to_iter
#endif

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			 unsigned len_mask);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
		   unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
void iov_iter_folio_queue(struct iov_iter *i, unsigned int direction,
			  const struct folio_queue *folioq,
			  unsigned int first_slot, unsigned int offset, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
		     loff_t start, size_t count);
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
			    size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
				  size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

/*
 * Cap the iov_iter by the given limit; note that the second argument is
 * *not* the new size - it's an upper limit for it. Passing a value
 * greater than the amount of data in the iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - comparison extends both
	 * operands to u64 here and any value that would be truncated by
	 * conversion in assignment is by definition greater than all
	 * values of size_t, including old i->count.
	 */
	if (i->count > count)
		i->count = count;
}

/*
 * Reexpand a previously truncated iterator; count must be no more than how much
 * we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}

static inline int
iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
{
	size_t shorted = 0;
	int npages;

	if (iov_iter_count(i) > max_bytes) {
		shorted = iov_iter_count(i) - max_bytes;
		iov_iter_truncate(i, max_bytes);
	}
	npages = iov_iter_npages(i, maxpages);
	if (shorted)
		iov_iter_reexpand(i, iov_iter_count(i) + shorted);

	return npages;
}

struct iovec *iovec_from_user(const struct iovec __user *uvector,
			      unsigned long nr_segs, unsigned long fast_segs,
			      struct iovec *fast_iov, bool compat);
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		     unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		     struct iov_iter *i);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		       unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		       struct iov_iter *i, bool compat);
int import_ubuf(int type, void __user *buf, size_t len, struct iov_iter *i);

static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
				 void __user *buf, size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_UBUF,
		.data_source = direction,
		.ubuf = buf,
		.count = count,
		.nr_segs = 1
	};
}

/* Flags for iov_iter_get/extract_pages*() */
/* Allow P2PDMA on the extracted pages */
#define ITER_ALLOW_P2PDMA	((__force iov_iter_extraction_t)0x01)

ssize_t iov_iter_extract_pages(struct iov_iter *i, struct page ***pages,
			       size_t maxsize, unsigned int maxpages,
			       iov_iter_extraction_t extraction_flags,
			       size_t *offset0);

/**
 * iov_iter_extract_will_pin - Indicate how pages from the iterator will be retained
 * @iter: The iterator
 *
 * Examine the iterator and indicate by returning true or false whether pages
 * extracted from it will be retained by the extraction function.
 *
 * %true indicates that the pages will have a pin placed in them that the
 * caller must unpin.  This must be done for DMA/async DIO to force fork()
 * to copy the page for the child (the parent must retain the original page).
 *
 * %false indicates that no measures are taken and that it's up to the caller
 * to retain the pages.
 */
static inline bool iov_iter_extract_will_pin(const struct iov_iter *iter)
{
	return user_backed_iter(iter);
}

struct sg_table;
ssize_t extract_iter_to_sg(struct iov_iter *iter, size_t len,
			   struct sg_table *sgtable, unsigned int sg_max,
			   iov_iter_extraction_t extraction_flags);

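/*
 * Illustrative sketch, not part of the API: a caller doing async DIO on
 * a user-backed iterator might extract, use and release pages roughly
 * like this ('maxsize', 'maxpages' and the I/O step are assumptions of
 * the example; with *pages == NULL the page array is allocated for the
 * caller and must be freed with kvfree()):
 *
 *	struct page **pages = NULL;
 *	size_t off;
 *	ssize_t len;
 *
 *	len = iov_iter_extract_pages(iter, &pages, maxsize, maxpages,
 *				     0, &off);
 *	if (len < 0)
 *		return len;
 *	...issue I/O on 'len' bytes starting at offset 'off' of pages[0]...
 *	if (iov_iter_extract_will_pin(iter))
 *		unpin_user_pages(pages, DIV_ROUND_UP(off + len, PAGE_SIZE));
 *	kvfree(pages);
 */
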
#endif