/*
 * Berkeley style UIO structures - Alan Cox 1994.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <crypto/hash.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

enum iter_type {
	ITER_IOVEC = 0,
	ITER_KVEC = 2,
	ITER_BVEC = 4,
	ITER_PIPE = 8,
	ITER_DISCARD = 16,
};

struct iov_iter {
	unsigned int type;
	size_t iov_offset;
	size_t count;
	union {
		const struct iovec *iov;
		const struct kvec *kvec;
		const struct bio_vec *bvec;
		struct pipe_inode_info *pipe;
	};
	union {
		unsigned long nr_segs;
		struct {
			int idx;
			int start_idx;
		};
	};
};

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->type & ~(READ | WRITE);
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->type & (READ | WRITE);
}
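
/*
 * Example (illustrative sketch, not an in-tree user): a consumer that can
 * only fill kernel-backed iterators while servicing a read might combine the
 * predicates above with iov_iter_rw(); the function name is made up for the
 * sketch:
 *
 *	static bool hypothetical_can_service(const struct iov_iter *i)
 *	{
 *		if (!iov_iter_is_kvec(i) && !iov_iter_is_bvec(i))
 *			return false;
 *		return iov_iter_rw(i) == READ;
 *	}
 */
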
/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}

static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
	return (struct iovec) {
		.iov_base = iter->iov->iov_base + iter->iov_offset,
		.iov_len = min(iter->count,
			       iter->iov->iov_len - iter->iov_offset),
	};
}

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_to_iter(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return false;
	else
		return _copy_from_iter_full(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_nocache(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return false;
	else
		return _copy_from_iter_full_nocache(addr, bytes, i);
}
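
/*
 * Example (sketch only, not from in-tree code): a producer typically copies
 * into the caller's iterator with copy_to_iter() and treats a short copy as a
 * fault; "buf" and "len" stand in for whatever data the caller supplies:
 *
 *	size_t copied = copy_to_iter(buf, len, to);
 *
 *	if (copied != len)
 *		return -EFAULT;
 *	return copied;
 */
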
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on the stricter semantics of
 * copy_from_iter_flushcache() (relative to copy_from_iter_nocache()) must
 * check for IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming
 * that the destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif

#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_to_iter_mcsafe _copy_to_iter
#endif

static __always_inline __must_check
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_flushcache(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_to_iter_mcsafe(addr, bytes, i);
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
			unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
			unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
			unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
			size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
			size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
			size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

/*
 * Cap the iov_iter to the given limit; note that the second argument is
 * *not* the new size - it's an upper limit for it.  Passing it a value
 * greater than the amount of data in the iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - comparison extends both
	 * operands to u64 here and any value that would be truncated by
	 * conversion in assignment is by definition greater than all
	 * values of size_t, including old i->count.
	 */
	if (i->count > count)
		i->count = count;
}
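
/*
 * Example (sketch only): to expose at most "limit" bytes of an iterator to a
 * helper and then restore whatever the truncation hid, remember the old count
 * and pair iov_iter_truncate() with iov_iter_reexpand() below; "limit" and
 * hypothetical_consume() are placeholders for the sketch:
 *
 *	size_t old_count = iov_iter_count(iter);
 *
 *	iov_iter_truncate(iter, limit);
 *	hypothetical_consume(iter);
 *	if (old_count > limit)
 *		iov_iter_reexpand(iter, iov_iter_count(iter) + old_count - limit);
 */
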
/*
 * reexpand a previously truncated iterator; count must be no more than how much
 * we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i);

int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i);

#ifdef CONFIG_COMPAT
struct compat_iovec;
int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i);
#endif

int import_single_range(int type, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i);

int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
			    int (*f)(struct kvec *vec, void *context),
			    void *context);

#endif
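
/*
 * Example (sketch, mirroring the usual readv()-style calling convention for
 * import_iovec()): the caller passes a small on-stack iovec array for the
 * common case and must kfree() the returned pointer afterwards; import_iovec()
 * leaves it NULL when the on-stack array was used, so an unconditional kfree()
 * is safe.  "uvec", "nr_segs" and hypothetical_do_read() are placeholders:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack), &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = hypothetical_do_read(&iter);
 *	kfree(iov);
 *	return ret;
 */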