/*
 * Copyright (c) 2010 Kip Macy. All rights reserved.
 * Copyright (C) 2017-2021 THL A29 Limited, a Tencent company.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Derived in part from libplebnet's pn_kern_subr.c.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include "ff_host_interface.h"

static __inline int
hash_mflags(int flags)
{
	return ((flags & HASH_NOWAIT) ? M_NOWAIT : M_WAITOK);
}

/*
 * General routine to allocate a hash table with control of memory flags.
 */
void *
hashinit_flags(int elements, struct malloc_type *type, u_long *hashmask,
    int flags)
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("hashinit: bad elements");

	/* Exactly one of HASH_WAITOK and HASH_NOWAIT must be set. */
	KASSERT((flags & HASH_WAITOK) ^ (flags & HASH_NOWAIT),
	    ("Bad flags (0x%x) passed to hashinit_flags", flags));

	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
		continue;
	hashsize >>= 1;

	hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type,
	    hash_mflags(flags));

	if (hashtbl != NULL) {
		for (i = 0; i < hashsize; i++)
			LIST_INIT(&hashtbl[i]);
		*hashmask = hashsize - 1;
	}
	return (hashtbl);
}

/*
 * Allocate and initialize a hash table with default flag: may sleep.
 */
void *
hashinit(int elements, struct malloc_type *type, u_long *hashmask)
{
	return (hashinit_flags(elements, type, hashmask, HASH_WAITOK));
}

void
hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
{
	LIST_HEAD(generic, generic) *hashtbl, *hp;

	hashtbl = vhashtbl;
	for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
		KASSERT(LIST_EMPTY(hp), ("%s: hashtbl %p not empty "
		    "(malloc type %s)", __func__, hashtbl, type->ks_shortdesc));
	free(hashtbl, type);
}
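
/*
 * Usage sketch (compiled out, illustrative only): a caller sizes the table
 * with hashinit(), masks a hash value with the returned hashmask to pick a
 * bucket, and tears the table down with hashdestroy() once every bucket is
 * empty.  The "entry"/"exhash" names and M_TEMP usage below are hypothetical,
 * not part of this file's API.
 */
#if 0
struct entry {
	LIST_ENTRY(entry) link;
	u_long key;
};
static LIST_HEAD(entryhead, entry) *exhash;
static u_long exhashmask;

static void
example_hash_usage(struct entry *e)
{
	/* 128 elements yields a power-of-two bucket count; hashmask = buckets - 1. */
	exhash = hashinit(128, M_TEMP, &exhashmask);
	LIST_INSERT_HEAD(&exhash[e->key & exhashmask], e, link);
	LIST_REMOVE(e, link);
	/* Every bucket must be empty again, per the KASSERT in hashdestroy(). */
	hashdestroy(exhash, M_TEMP, exhashmask);
}
#endif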

static const int primes[] = { 1, 13, 31, 61, 127, 251, 509, 761, 1021, 1531,
			2039, 2557, 3067, 3583, 4093, 4603, 5119, 5623, 6143,
			6653, 7159, 7673, 8191, 12281, 16381, 24571, 32749 };
#define NPRIMES nitems(primes)

/*
 * General routine to allocate a prime number sized hash table with control of
 * memory flags.
 */
void *
phashinit_flags(int elements, struct malloc_type *type, u_long *nentries, int flags)
{
	long hashsize, i;
	LIST_HEAD(generic, generic) *hashtbl;

	KASSERT(elements > 0, ("%s: bad elements", __func__));
	/* Exactly one of HASH_WAITOK and HASH_NOWAIT must be set. */
	KASSERT((flags & HASH_WAITOK) ^ (flags & HASH_NOWAIT),
	    ("Bad flags (0x%x) passed to phashinit_flags", flags));

	for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
		i++;
		if (i == NPRIMES)
			break;
		hashsize = primes[i];
	}
	hashsize = primes[i - 1];

	hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type,
	    hash_mflags(flags));
	if (hashtbl == NULL)
		return (NULL);

	for (i = 0; i < hashsize; i++)
		LIST_INIT(&hashtbl[i]);
	*nentries = hashsize;
	return (hashtbl);
}

/*
 * Allocate and initialize a prime number sized hash table with default flag:
 * may sleep.
 */
void *
phashinit(int elements, struct malloc_type *type, u_long *nentries)
{

	return (phashinit_flags(elements, type, nentries, HASH_WAITOK));
}
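
/*
 * Usage sketch (compiled out, illustrative only): unlike hashinit(),
 * phashinit() reports the bucket count itself rather than a mask, so callers
 * index with a modulo instead of a bitwise AND.  The names below are
 * hypothetical.
 */
#if 0
static u_long
example_phash_bucket(u_long key)
{
	static LIST_HEAD(pentryhead, entry) *ptbl;
	static u_long nbuckets;

	if (ptbl == NULL)
		/* Picks the largest table prime not exceeding 100, i.e. 61. */
		ptbl = phashinit(100, M_TEMP, &nbuckets);
	return (key % nbuckets);	/* prime-sized table: modulo, not mask */
}
#endif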

static void
uio_yield(void)
{

}

int
uiomove(void *cp, int n, struct uio *uio)
{
	struct thread *td = curthread;
	struct iovec *iov;
	u_int cnt;
	int error = 0;
	int save = 0;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomove: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomove proc"));
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "Calling uiomove()");

	save = td->td_pflags & TDP_DEADLKTREAT;
	td->td_pflags |= TDP_DEADLKTREAT;

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
			if (ticks - PCPU_GET(switchticks) >= hogticks)
				uio_yield();
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			if (error)
				goto out;
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy(cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp = (char *)cp + cnt;
		n -= cnt;
	}
out:
	if (save == 0)
		td->td_pflags &= ~TDP_DEADLKTREAT;
	return (error);
}
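
/*
 * Usage sketch (compiled out, illustrative only): a caller describes its
 * buffers with an iovec array and a struct uio, then lets uiomove() walk the
 * segments.  Here a kernel buffer "src" is copied into "dst" through a
 * single-segment UIO_SYSSPACE uio; all names are hypothetical.
 */
#if 0
static int
example_uiomove(void *src, void *dst, int len)
{
	struct iovec iov;
	struct uio uio;

	iov.iov_base = dst;
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = len;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;		/* UIO_READ: data flows from src into the iovec */
	uio.uio_td = curthread;

	return (uiomove(src, len, &uio));
}
#endif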

int
copyinuio(const struct iovec *iovp, u_int iovcnt, struct uio **uiop)
{
	struct iovec *iov;
	struct uio *uio;
	u_int iovlen;
	int error, i;

	*uiop = NULL;
	if (iovcnt > UIO_MAXIOV)
		return (EINVAL);
	iovlen = iovcnt * sizeof (struct iovec);
	uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK);
	if (uio == NULL) {
		return (ENOMEM);
	}
	iov = (struct iovec *)(uio + 1);
	error = copyin(iovp, iov, iovlen);
	if (error) {
		free(uio, M_IOV);
		return (error);
	}
	uio->uio_iov = iov;
	uio->uio_iovcnt = iovcnt;
	uio->uio_segflg = UIO_USERSPACE;
	uio->uio_offset = -1;
	uio->uio_resid = 0;
	for (i = 0; i < iovcnt; i++) {
		if (iov->iov_len > INT_MAX - uio->uio_resid) {
			free(uio, M_IOV);
			return (EINVAL);
		}
		uio->uio_resid += iov->iov_len;
		iov++;
	}
	*uiop = uio;
	return (0);
}
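
/*
 * Usage sketch (compiled out, illustrative only): copyinuio() is the usual
 * front end for readv()/writev()-style paths.  The caller owns the returned
 * uio, fills in the fields copyinuio() leaves unset, and must release it with
 * free(uio, M_IOV).  The names below are hypothetical.
 */
#if 0
static int
example_copyinuio(const struct iovec *uiov, u_int iovcnt)
{
	struct uio *auio;
	int error;

	error = copyinuio(uiov, iovcnt, &auio);
	if (error != 0)
		return (error);
	auio->uio_rw = UIO_READ;	/* caller supplies direction, offset, thread */
	auio->uio_offset = 0;
	auio->uio_td = curthread;
	/* ... hand auio to the I/O path ... */
	free(auio, M_IOV);
	return (0);
}
#endif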

int
copyout_nofault(const void *kaddr, void *udaddr, size_t len)
{
	return copyout(kaddr, udaddr, len);
}
