xref: /f-stack/lib/ff_kern_subr.c (revision ebf5cedb)
1 /*
2  * Copyright (c) 2010 Kip Macy. All rights reserved.
3  * Copyright (C) 2017 THL A29 Limited, a Tencent company.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice, this
10  *   list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  *   this list of conditions and the following disclaimer in the documentation
13  *   and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18  * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
19  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  *
26  * Derived in part from libplebnet's pn_kern_subr.c.
27  */
28 
29 #include <sys/cdefs.h>
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/ktr.h>
34 #include <sys/limits.h>
35 #include <sys/lock.h>
36 #include <sys/mutex.h>
37 #include <sys/proc.h>
38 #include <sys/malloc.h>
39 #include <sys/resourcevar.h>
40 #include <sys/sched.h>
41 #include <sys/sysctl.h>
42 #include <sys/uio.h>
43 
44 #include "ff_host_interface.h"
45 
46 
47 /*
48  * General routine to allocate a hash table with control of memory flags.
49  */
50 void *
51 hashinit_flags(int elements, struct malloc_type *type, u_long *hashmask,
52     int flags)
53 {
54     long hashsize;
55     LIST_HEAD(generic, generic) *hashtbl;
56     int i;
57 
58     if (elements <= 0)
59         panic("hashinit: bad elements");
60 
61     /* Exactly one of HASH_WAITOK and HASH_NOWAIT must be set. */
62     KASSERT((flags & HASH_WAITOK) ^ (flags & HASH_NOWAIT),
63         ("Bad flags (0x%x) passed to hashinit_flags", flags));
64 
65     for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
66         continue;
67     hashsize >>= 1;
68 
69     if (flags & HASH_NOWAIT)
70         hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl),
71             type, M_NOWAIT);
72     else
73         hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl),
74             type, M_WAITOK);
75 
76     if (hashtbl != NULL) {
77         for (i = 0; i < hashsize; i++)
78             LIST_INIT(&hashtbl[i]);
79         *hashmask = hashsize - 1;
80     }
81     return (hashtbl);
82 }
83 
84 /*
85  * Allocate and initialize a hash table with default flag: may sleep.
86  */
87 void *
88 hashinit(int elements, struct malloc_type *type, u_long *hashmask)
89 {
90     return (hashinit_flags(elements, type, hashmask, HASH_WAITOK));
91 }
92 
93 void
94 hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
95 {
96     LIST_HEAD(generic, generic) *hashtbl, *hp;
97 
98     hashtbl = vhashtbl;
99     for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
100         KASSERT(LIST_EMPTY(hp), ("%s: hashtbl %p not empty "
101             "(malloc type %s)", __func__, hashtbl, type->ks_shortdesc));
102     free(hashtbl, type);
103 }
104 
/*
 * Deliberate no-op: presumably the f-stack userspace port has no kernel
 * scheduler to yield to, so uiomove()'s CPU-hog check degrades to nothing.
 * NOTE(review): confirm this is intentional rather than a stub awaiting a
 * host-level yield (e.g. sched_yield()).
 */
static void
uio_yield(void)
{

}
110 
/*
 * Transfer up to 'n' bytes between the kernel buffer 'cp' and the scattered
 * buffer described by 'uio', advancing the uio cursors (iov, resid, offset)
 * as data moves. Direction is given by uio->uio_rw: UIO_READ copies from
 * 'cp' out to the uio, UIO_WRITE copies from the uio into 'cp'.
 *
 * Returns 0 on success, or the errno from a failed copyin()/copyout().
 * On error the uio reflects the partial progress made so far.
 */
int
uiomove(void *cp, int n, struct uio *uio)
{
    struct thread *td = curthread;
    struct iovec *iov;
    u_int cnt;
    int error = 0;
    int save = 0;

    KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
        ("uiomove: mode"));
    KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
        ("uiomove proc"));
    WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
        "Calling uiomove()");

    /*
     * Save the caller's deadlock-treatment bit and set it for the duration
     * of the copy; restored below only if it was clear on entry.
     */
    save = td->td_pflags & TDP_DEADLKTREAT;
    td->td_pflags |= TDP_DEADLKTREAT;

    while (n > 0 && uio->uio_resid) {
        iov = uio->uio_iov;
        cnt = iov->iov_len;
        if (cnt == 0) {
            /* Exhausted segment: step to the next iovec. */
            uio->uio_iov++;
            uio->uio_iovcnt--;
            continue;
        }
        if (cnt > n)
            cnt = n;

        switch (uio->uio_segflg) {

        case UIO_USERSPACE:
            /* Yield the CPU if we have run too long (stub here). */
            if (ticks - PCPU_GET(switchticks) >= hogticks)
                uio_yield();
            if (uio->uio_rw == UIO_READ)
                error = copyout(cp, iov->iov_base, cnt);
            else
                error = copyin(iov->iov_base, cp, cnt);
            if (error)
                goto out;
            break;

        case UIO_SYSSPACE:
            /* Kernel-to-kernel move: plain bcopy, cannot fail. */
            if (uio->uio_rw == UIO_READ)
                bcopy(cp, iov->iov_base, cnt);
            else
                bcopy(iov->iov_base, cp, cnt);
            break;
        case UIO_NOCOPY:
            /* Advance the cursors without moving any bytes. */
            break;
        }
        /* Account for 'cnt' bytes in both the segment and the transfer. */
        iov->iov_base = (char *)iov->iov_base + cnt;
        iov->iov_len -= cnt;
        uio->uio_resid -= cnt;
        uio->uio_offset += cnt;
        cp = (char *)cp + cnt;
        n -= cnt;
    }
out:
    if (save == 0)
        td->td_pflags &= ~TDP_DEADLKTREAT;
    return (error);
}
175 
176 int
177 copyinuio(const struct iovec *iovp, u_int iovcnt, struct uio **uiop)
178 {
179     struct iovec *iov;
180     struct uio *uio;
181     u_int iovlen;
182     int error, i;
183 
184     *uiop = NULL;
185     if (iovcnt > UIO_MAXIOV)
186         return (EINVAL);
187     iovlen = iovcnt * sizeof (struct iovec);
188     uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK);
189     if (uio == NULL) {
190         return (ENOMEM);
191     }
192     iov = (struct iovec *)(uio + 1);
193     error = copyin(iovp, iov, iovlen);
194     if (error) {
195         free(uio, M_IOV);
196         return (error);
197     }
198     uio->uio_iov = iov;
199     uio->uio_iovcnt = iovcnt;
200     uio->uio_segflg = UIO_USERSPACE;
201     uio->uio_offset = -1;
202     uio->uio_resid = 0;
203     for (i = 0; i < iovcnt; i++) {
204         if (iov->iov_len > INT_MAX - uio->uio_resid) {
205             free(uio, M_IOV);
206             return (EINVAL);
207         }
208         uio->uio_resid += iov->iov_len;
209         iov++;
210     }
211     *uiop = uio;
212     return (0);
213 }
214 
215 int
216 copyout_nofault(const void *kaddr, void *udaddr, size_t len)
217 {
218     return copyout(kaddr, udaddr, len);
219 }
220 
221