/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Berkeley style UIO structures	-	Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <linux/mm_types.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

typedef unsigned int __bitwise iov_iter_extraction_t;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

enum iter_type {
	/* iter types */
	ITER_IOVEC,
	ITER_KVEC,
	ITER_BVEC,
	ITER_PIPE,
	ITER_XARRAY,
	ITER_DISCARD,
	ITER_UBUF,
};

#define ITER_SOURCE	1	// == WRITE
#define ITER_DEST	0	// == READ

struct iov_iter_state {
	size_t iov_offset;
	size_t count;
	unsigned long nr_segs;
};

struct iov_iter {
	u8 iter_type;
	bool nofault;
	bool data_source;
	bool user_backed;
	union {
		size_t iov_offset;
		int last_offset;
	};
	size_t count;
	union {
		/* use iter_iov() to get the current vec */
		const struct iovec *__iov;
		const struct kvec *kvec;
		const struct bio_vec *bvec;
		struct xarray *xarray;
		struct pipe_inode_info *pipe;
		void __user *ubuf;
	};
	union {
		unsigned long nr_segs;
		struct {
			unsigned int head;
			unsigned int start_head;
		};
		loff_t xarray_start;
	};
};

#define iter_iov(iter)		(iter)->__iov
#define iter_iov_addr(iter)	(iter_iov(iter)->iov_base + (iter)->iov_offset)
#define iter_iov_len(iter)	(iter_iov(iter)->iov_len - (iter)->iov_offset)

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->iter_type;
}

static inline void iov_iter_save_state(struct iov_iter *iter,
				       struct iov_iter_state *state)
{
	state->iov_offset = iter->iov_offset;
	state->count = iter->count;
	state->nr_segs = iter->nr_segs;
}

static inline bool iter_is_ubuf(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_UBUF;
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_XARRAY;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->data_source ? WRITE : READ;
}

static inline bool user_backed_iter(const struct iov_iter *i)
{
	return i->user_backed;
}

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}
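
/*
 * Example (illustrative sketch only, not part of this header's API): an
 * in-kernel user typically wraps its own buffers in a kvec array and builds
 * an ITER_KVEC iterator over them with iov_iter_kvec(), declared further
 * down in this file.  The buffer names and the consumer below are made up
 * for the sketch.
 *
 *	struct kvec vec[2] = {
 *		{ .iov_base = hdr_buf,  .iov_len = hdr_len  },
 *		{ .iov_base = data_buf, .iov_len = data_len },
 *	};
 *	struct iov_iter iter;
 *
 *	// vec[] holds the data to be consumed, so this iterator is a source
 *	iov_iter_kvec(&iter, ITER_SOURCE, vec, ARRAY_SIZE(vec),
 *		      hdr_len + data_len);
 *	// ... pass &iter to a ->write_iter()-style consumer, which will
 *	// typically drain it with copy_from_iter() ...
 */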

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
				  size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);

static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
					size_t bytes, struct iov_iter *i)
{
	return copy_page_to_iter(&folio->page, offset, bytes, i);
}

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, true))
		return _copy_to_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter(addr, bytes, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter_nocache(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter_nocache(addr, bytes, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on the stricter semantics of
 * _copy_from_iter_flushcache() than _copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif

#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_mc_to_iter _copy_to_iter
#endif
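
/*
 * Example (illustrative sketch only): copy_from_iter_full(), defined above,
 * is the usual way to pull a fixed-size structure out of an iterator, since
 * it reverts the iterator on a short copy and so the caller never has to
 * track partial progress.  "struct foo_hdr" and "iter" are hypothetical
 * names for this sketch.
 *
 *	struct foo_hdr hdr;
 *
 *	if (!copy_from_iter_full(&hdr, sizeof(hdr), iter))
 *		return -EFAULT;	// iterator position is unchanged on failure
 *	// hdr is fully populated and iter has advanced by sizeof(hdr)
 */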

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			 unsigned len_mask);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
		   unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
		   unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
		   size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
		     loff_t start, size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
			   size_t maxsize, unsigned maxpages, size_t *start,
			   iov_iter_extraction_t extraction_flags);
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
			    size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
				 struct page ***pages, size_t maxsize, size_t *start,
				 iov_iter_extraction_t extraction_flags);
ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
				  size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

/*
 * Cap the iov_iter by given limit; note that the second argument is
 * *not* the new size - it's an upper limit for it.  Passing it a value
 * greater than the amount of data in iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - comparison extends both
	 * operands to u64 here and any value that would be truncated by
	 * conversion in assignment is by definition greater than all
	 * values of size_t, including old i->count.
	 */
	if (i->count > count)
		i->count = count;
}

/*
 * reexpand a previously truncated iterator; count must be no more than how much
 * we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}
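
/*
 * Example (illustrative sketch only): the usual pattern is to truncate the
 * iterator to a caller-imposed limit, do the work, and then reexpand it by
 * whatever was chopped off so the caller still sees the untouched tail;
 * iov_iter_npages_cap() just below follows exactly this pattern.  "limit",
 * "ret" and do_transfer() are hypothetical names for this sketch.
 *
 *	size_t shorted = 0;
 *
 *	if (iov_iter_count(iter) > limit) {
 *		shorted = iov_iter_count(iter) - limit;
 *		iov_iter_truncate(iter, limit);
 *	}
 *	ret = do_transfer(iter);	// consumes at most "limit" bytes
 *	if (shorted)
 *		iov_iter_reexpand(iter, iov_iter_count(iter) + shorted);
 */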

static inline int
iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
{
	size_t shorted = 0;
	int npages;

	if (iov_iter_count(i) > max_bytes) {
		shorted = iov_iter_count(i) - max_bytes;
		iov_iter_truncate(i, max_bytes);
	}
	npages = iov_iter_npages(i, maxpages);
	if (shorted)
		iov_iter_reexpand(i, iov_iter_count(i) + shorted);

	return npages;
}

struct csum_state {
	__wsum csum;
	size_t off;
};

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);

static __always_inline __must_check
bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
				  __wsum *csum, struct iov_iter *i)
{
	size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
			     struct iov_iter *i);

struct iovec *iovec_from_user(const struct iovec __user *uvector,
			      unsigned long nr_segs, unsigned long fast_segs,
			      struct iovec *fast_iov, bool compat);
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		     unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		     struct iov_iter *i);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		       unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		       struct iov_iter *i, bool compat);
int import_single_range(int type, void __user *buf, size_t len,
			struct iovec *iov, struct iov_iter *i);
int import_ubuf(int type, void __user *buf, size_t len, struct iov_iter *i);

static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
				 void __user *buf, size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_UBUF,
		.user_backed = true,
		.data_source = direction,
		.ubuf = buf,
		.count = count,
		.nr_segs = 1
	};
}
/* Flags for iov_iter_get/extract_pages*() */
/* Allow P2PDMA on the extracted pages */
#define ITER_ALLOW_P2PDMA	((__force iov_iter_extraction_t)0x01)

ssize_t iov_iter_extract_pages(struct iov_iter *i, struct page ***pages,
			       size_t maxsize, unsigned int maxpages,
			       iov_iter_extraction_t extraction_flags,
			       size_t *offset0);

/**
 * iov_iter_extract_will_pin - Indicate how pages from the iterator will be retained
 * @iter: The iterator
 *
 * Examine the iterator and indicate by returning true or false as to how, if
 * at all, pages extracted from the iterator will be retained by the extraction
 * function.
 *
 * %true indicates that the pages will have a pin placed in them that the
 * caller must unpin.  This must be done for DMA/async DIO to force fork()
 * to forcibly copy a page for the child (the parent must retain the original
 * page).
 *
 * %false indicates that no measures are taken and that it's up to the caller
 * to retain the pages.
 */
static inline bool iov_iter_extract_will_pin(const struct iov_iter *iter)
{
	return user_backed_iter(iter);
}

#endif
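
/*
 * Example (illustrative sketch only): a caller that extracts pages for DMA is
 * expected to consult iov_iter_extract_will_pin() to decide how to release
 * them afterwards.  unpin_user_pages() comes from <linux/mm.h>; "page_array",
 * "maxsize", "maxpages" and the DMA step are hypothetical in this sketch.
 *
 *	struct page **pages = page_array;	// caller-provided array, maxpages entries
 *	unsigned int npages;
 *	size_t offset;
 *	ssize_t got;
 *
 *	got = iov_iter_extract_pages(iter, &pages, maxsize, maxpages, 0, &offset);
 *	if (got > 0) {
 *		npages = DIV_ROUND_UP(offset + got, PAGE_SIZE);
 *		// ... perform DMA to or from pages[0..npages-1] ...
 *		if (iov_iter_extract_will_pin(iter))
 *			unpin_user_pages(pages, npages);	// drop the pins taken at extraction
 *		// otherwise no reference was taken and nothing needs releasing
 *	}
 */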