1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
27 /* All Rights Reserved */
28
29 /*
30 * University Copyright- Copyright (c) 1982, 1986, 1988
31 * The Regents of the University of California
32 * All Rights Reserved
33 *
34 * University Acknowledgment- Portions of this document are derived from
35 * software developed by the University of California, Berkeley, and its
36 * contributors.
37 */
38 /*
39 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
40 */
41
42 #ifdef _KERNEL
43
44 #include <sys/types.h>
45 #include <sys/uio_impl.h>
46 #include <sys/sysmacros.h>
47 #include <sys/strings.h>
48 #include <linux/kmap_compat.h>
49 #include <linux/uaccess.h>
50
/*
 * Move "n" bytes at byte address "p"; "rw" indicates the direction
 * of the move, and the I/O parameters are provided in "uio", which is
 * updated to reflect the data which was moved. Returns 0 on success or
 * a non-zero errno on failure.
 */
static int
uiomove_iov(void *p, size_t n, enum uio_rw rw, struct uio *uio)
{
	const struct iovec *iov = uio->uio_iov;
	size_t skip = uio->uio_skip;	/* byte offset into the current iovec */
	ulong_t cnt;

	while (n && uio->uio_resid) {
		/* Copy at most what remains in the current iovec segment. */
		cnt = MIN(iov->iov_len - skip, n);
		switch (uio->uio_segflg) {
		case UIO_USERSPACE:
			/*
			 * p = kernel data pointer
			 * iov->iov_base = user data pointer
			 */
			if (rw == UIO_READ) {
				if (copy_to_user(iov->iov_base+skip, p, cnt))
					return (EFAULT);
			} else {
				unsigned long b_left = 0;
				if (uio->uio_fault_disable) {
					/*
					 * Caller must not take page faults
					 * here; verify the user range first,
					 * then copy with faults disabled.  A
					 * fault leaves b_left > 0.
					 */
					if (!zfs_access_ok(VERIFY_READ,
					    (iov->iov_base + skip), cnt)) {
						return (EFAULT);
					}
					pagefault_disable();
					b_left =
					    __copy_from_user_inatomic(p,
					    (iov->iov_base + skip), cnt);
					pagefault_enable();
				} else {
					b_left =
					    copy_from_user(p,
					    (iov->iov_base + skip), cnt);
				}
				if (b_left > 0) {
					/*
					 * Partial copy: credit the bytes
					 * that were transferred before
					 * reporting the fault, so the uio
					 * reflects actual progress.
					 */
					unsigned long c_bytes =
					    cnt - b_left;
					uio->uio_skip += c_bytes;
					ASSERT3U(uio->uio_skip, <,
					    iov->iov_len);
					uio->uio_resid -= c_bytes;
					uio->uio_loffset += c_bytes;
					return (EFAULT);
				}
			}
			break;
		case UIO_SYSSPACE:
			/* Both source and destination are kernel addresses. */
			if (rw == UIO_READ)
				bcopy(p, iov->iov_base + skip, cnt);
			else
				bcopy(iov->iov_base + skip, p, cnt);
			break;
		default:
			ASSERT(0);
		}
		/* Advance, moving to the next iovec once this one is full. */
		skip += cnt;
		if (skip == iov->iov_len) {
			skip = 0;
			uio->uio_iov = (++iov);
			uio->uio_iovcnt--;
		}
		uio->uio_skip = skip;
		uio->uio_resid -= cnt;
		uio->uio_loffset += cnt;
		p = (caddr_t)p + cnt;
		n -= cnt;
	}
	return (0);
}
127
128 static int
uiomove_bvec(void * p,size_t n,enum uio_rw rw,struct uio * uio)129 uiomove_bvec(void *p, size_t n, enum uio_rw rw, struct uio *uio)
130 {
131 const struct bio_vec *bv = uio->uio_bvec;
132 size_t skip = uio->uio_skip;
133 ulong_t cnt;
134
135 while (n && uio->uio_resid) {
136 void *paddr;
137 cnt = MIN(bv->bv_len - skip, n);
138
139 paddr = zfs_kmap_atomic(bv->bv_page, KM_USER1);
140 if (rw == UIO_READ)
141 bcopy(p, paddr + bv->bv_offset + skip, cnt);
142 else
143 bcopy(paddr + bv->bv_offset + skip, p, cnt);
144 zfs_kunmap_atomic(paddr, KM_USER1);
145
146 skip += cnt;
147 if (skip == bv->bv_len) {
148 skip = 0;
149 uio->uio_bvec = (++bv);
150 uio->uio_iovcnt--;
151 }
152 uio->uio_skip = skip;
153 uio->uio_resid -= cnt;
154 uio->uio_loffset += cnt;
155 p = (caddr_t)p + cnt;
156 n -= cnt;
157 }
158 return (0);
159 }
160
161 #if defined(HAVE_VFS_IOV_ITER)
162 static int
uiomove_iter(void * p,size_t n,enum uio_rw rw,struct uio * uio,boolean_t revert)163 uiomove_iter(void *p, size_t n, enum uio_rw rw, struct uio *uio,
164 boolean_t revert)
165 {
166 size_t cnt = MIN(n, uio->uio_resid);
167
168 if (uio->uio_skip)
169 iov_iter_advance(uio->uio_iter, uio->uio_skip);
170
171 if (rw == UIO_READ)
172 cnt = copy_to_iter(p, cnt, uio->uio_iter);
173 else
174 cnt = copy_from_iter(p, cnt, uio->uio_iter);
175
176 /*
177 * When operating on a full pipe no bytes are processed.
178 * In which case return EFAULT which is converted to EAGAIN
179 * by the kernel's generic_file_splice_read() function.
180 */
181 if (cnt == 0)
182 return (EFAULT);
183
184 /*
185 * Revert advancing the uio_iter. This is set by uiocopy()
186 * to avoid consuming the uio and its iov_iter structure.
187 */
188 if (revert)
189 iov_iter_revert(uio->uio_iter, cnt);
190
191 uio->uio_resid -= cnt;
192 uio->uio_loffset += cnt;
193
194 return (0);
195 }
196 #endif
197
198 int
uiomove(void * p,size_t n,enum uio_rw rw,struct uio * uio)199 uiomove(void *p, size_t n, enum uio_rw rw, struct uio *uio)
200 {
201 if (uio->uio_segflg == UIO_BVEC)
202 return (uiomove_bvec(p, n, rw, uio));
203 #if defined(HAVE_VFS_IOV_ITER)
204 else if (uio->uio_segflg == UIO_ITER)
205 return (uiomove_iter(p, n, rw, uio, B_FALSE));
206 #endif
207 else
208 return (uiomove_iov(p, n, rw, uio));
209 }
210 EXPORT_SYMBOL(uiomove);
211
212 int
uio_prefaultpages(ssize_t n,struct uio * uio)213 uio_prefaultpages(ssize_t n, struct uio *uio)
214 {
215 struct iov_iter iter, *iterp = NULL;
216
217 #if defined(HAVE_IOV_ITER_FAULT_IN_READABLE)
218 if (uio->uio_segflg == UIO_USERSPACE) {
219 iterp = &iter;
220 iov_iter_init_compat(iterp, READ, uio->uio_iov,
221 uio->uio_iovcnt, uio->uio_resid);
222 #if defined(HAVE_VFS_IOV_ITER)
223 } else if (uio->uio_segflg == UIO_ITER) {
224 iterp = uio->uio_iter;
225 #endif
226 }
227
228 if (iterp && iov_iter_fault_in_readable(iterp, n))
229 return (EFAULT);
230 #endif
231 return (0);
232 }
233 EXPORT_SYMBOL(uio_prefaultpages);
234
235 /*
236 * The same as uiomove() but doesn't modify uio structure.
237 * return in cbytes how many bytes were copied.
238 */
239 int
uiocopy(void * p,size_t n,enum uio_rw rw,struct uio * uio,size_t * cbytes)240 uiocopy(void *p, size_t n, enum uio_rw rw, struct uio *uio, size_t *cbytes)
241 {
242 struct uio uio_copy;
243 int ret;
244
245 bcopy(uio, &uio_copy, sizeof (struct uio));
246
247 if (uio->uio_segflg == UIO_BVEC)
248 ret = uiomove_bvec(p, n, rw, &uio_copy);
249 #if defined(HAVE_VFS_IOV_ITER)
250 else if (uio->uio_segflg == UIO_ITER)
251 ret = uiomove_iter(p, n, rw, &uio_copy, B_TRUE);
252 #endif
253 else
254 ret = uiomove_iov(p, n, rw, &uio_copy);
255
256 *cbytes = uio->uio_resid - uio_copy.uio_resid;
257
258 return (ret);
259 }
260 EXPORT_SYMBOL(uiocopy);
261
262 /*
263 * Drop the next n chars out of *uio.
264 */
265 void
uioskip(uio_t * uio,size_t n)266 uioskip(uio_t *uio, size_t n)
267 {
268 if (n > uio->uio_resid)
269 return;
270
271 if (uio->uio_segflg == UIO_BVEC) {
272 uio->uio_skip += n;
273 while (uio->uio_iovcnt &&
274 uio->uio_skip >= uio->uio_bvec->bv_len) {
275 uio->uio_skip -= uio->uio_bvec->bv_len;
276 uio->uio_bvec++;
277 uio->uio_iovcnt--;
278 }
279 #if defined(HAVE_VFS_IOV_ITER)
280 } else if (uio->uio_segflg == UIO_ITER) {
281 iov_iter_advance(uio->uio_iter, n);
282 #endif
283 } else {
284 uio->uio_skip += n;
285 while (uio->uio_iovcnt &&
286 uio->uio_skip >= uio->uio_iov->iov_len) {
287 uio->uio_skip -= uio->uio_iov->iov_len;
288 uio->uio_iov++;
289 uio->uio_iovcnt--;
290 }
291 }
292 uio->uio_loffset += n;
293 uio->uio_resid -= n;
294 }
295 EXPORT_SYMBOL(uioskip);
296 #endif /* _KERNEL */
297