/*
 * Copyright (c) 2010 Kip Macy. All rights reserved.
 * Copyright (C) 2017 THL A29 Limited, a Tencent company.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *   list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Derived in part from libplebnet's pn_glue.c.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/event.h>
#include <sys/jail.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/time.h>
#include <sys/ucred.h>
#include <sys/uio.h>
#include <sys/bus.h>
#include <sys/buf.h>
#include <sys/file.h>
#include <sys/vmem.h>
#include <sys/mbuf.h>
#include <sys/smp.h>
#include <sys/sched.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include "ff_host_interface.h"

int kstack_pages = KSTACK_PAGES;
SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0,
    "Kernel stack size in pages");

int bootverbose;

SYSCTL_ROOT_NODE(0, sysctl, CTLFLAG_RW, 0, "Sysctl internal magic");

SYSCTL_ROOT_NODE(CTL_VFS, vfs, CTLFLAG_RW, 0, "File system");

SYSCTL_ROOT_NODE(CTL_KERN, kern, CTLFLAG_RW, 0, "High kernel, proc, limits &c");

SYSCTL_ROOT_NODE(CTL_NET, net, CTLFLAG_RW, 0, "Network, (see socket.h)");

SYSCTL_ROOT_NODE(CTL_MACHDEP, machdep, CTLFLAG_RW, 0, "machine dependent");

SYSCTL_ROOT_NODE(CTL_VM, vm, CTLFLAG_RW, 0, "Virtual memory");

SYSCTL_ROOT_NODE(CTL_DEBUG, debug, CTLFLAG_RW, 0, "Debugging");

SYSCTL_ROOT_NODE(OID_AUTO, security, CTLFLAG_RW, 0, "Security");

SYSCTL_NODE(_kern, OID_AUTO, features, CTLFLAG_RD, 0, "Kernel Features");

SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table");

MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
static MALLOC_DEFINE(M_CRED, "cred", "credentials");
static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

static void configure_final(void *dummy);

SYSINIT(configure3, SI_SUB_CONFIGURE, SI_ORDER_ANY, configure_final, NULL);

volatile int ticks;
int cpu_disable_deep_sleep;

/* This is used in modules that need to work in both SMP and UP. */
cpuset_t all_cpus;

int mp_ncpus;
/* export this for libkvm consumers. */
int mp_maxcpus = MAXCPU;

volatile int smp_started;
u_int mp_maxid;

long first_page = 0;

struct vmmeter vm_cnt;
vm_map_t kernel_map = NULL;
vm_map_t kmem_map = NULL;

vmem_t *kernel_arena = NULL;
vmem_t *kmem_arena = NULL;

struct vm_object kernel_object_store;
struct vm_object kmem_object_store;

struct filterops fs_filtops;
struct filterops sig_filtops;

int cold = 1;

int unmapped_buf_allowed = 1;

int cpu_deepest_sleep = 0;    /* Deepest Cx state available. */
int cpu_disable_c2_sleep = 0; /* Timer dies in C2. */
int cpu_disable_c3_sleep = 0; /* Timer dies in C3. */

static void timevalfix(struct timeval *);

void
procinit(void)
{
    sx_init(&allproc_lock, "allproc");
    LIST_INIT(&allproc);
}

/*
 * Find a prison that is a descendant of mypr.  Returns a locked prison or NULL.
 */
struct prison *
prison_find_child(struct prison *mypr, int prid)
{
    return (NULL);
}

void
prison_free(struct prison *pr)
{

}

void
prison_hold_locked(struct prison *pr)
{

}

int
prison_if(struct ucred *cred, struct sockaddr *sa)
{
    return (0);
}

int
prison_check_af(struct ucred *cred, int af)
{
    return (0);
}

int
prison_check_ip4(const struct ucred *cred, const struct in_addr *ia)
{
    return (0);
}

int
prison_equal_ip4(struct prison *pr1, struct prison *pr2)
{
    return (1);
}

#ifdef INET6
int
prison_check_ip6(struct ucred *cred, struct in6_addr *ia)
{
    return (0);
}

int
prison_equal_ip6(struct prison *pr1, struct prison *pr2)
{
    return (1);
}
#endif

/*
 * See if a prison has the specific flag set.
 */
int
prison_flag(struct ucred *cred, unsigned flag)
{
    /* This is an atomic read, so no locking is necessary. */
    return (flag & PR_HOST);
}

int
prison_get_ip4(struct ucred *cred, struct in_addr *ia)
{
    return (0);
}

int
prison_local_ip4(struct ucred *cred, struct in_addr *ia)
{
    return (0);
}

int
prison_remote_ip4(struct ucred *cred, struct in_addr *ia)
{
    return (0);
}

#ifdef INET6
int
prison_get_ip6(struct ucred *cred, struct in6_addr *ia)
{
    return (0);
}

int
prison_local_ip6(struct ucred *cred, struct in6_addr *ia, int other)
{
    return (0);
}

int
prison_remote_ip6(struct ucred *cred, struct in6_addr *ia)
{
    return (0);
}
#endif

int
prison_saddrsel_ip4(struct ucred *cred, struct in_addr *ia)
{
    /* not jailed */
    return (1);
}

#ifdef INET6
int
prison_saddrsel_ip6(struct ucred *cred, struct in6_addr *ia)
{
    /* not jailed */
    return (1);
}
#endif

int
jailed(struct ucred *cred)
{
    return (0);
}

/*
 * Return 1 if the passed credential is in a jail and that jail does not
 * have its own virtual network stack, otherwise 0.
 */
int
jailed_without_vnet(struct ucred *cred)
{
    return (0);
}

int
priv_check(struct thread *td, int priv)
{
    return (0);
}

int
priv_check_cred(struct ucred *cred, int priv, int flags)
{
    return (0);
}

int
vslock(void *addr, size_t len)
{
    return (0);
}

void
vsunlock(void *addr, size_t len)
{

}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable, and
 * fix it to have at least minimal value (i.e. if it is less
 * than the resolution of the clock, round it up.)
 */
int
itimerfix(struct timeval *tv)
{

    if (tv->tv_sec < 0 || tv->tv_usec < 0 || tv->tv_usec >= 1000000)
        return (EINVAL);
    if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick)
        tv->tv_usec = tick;
    return (0);
}
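
/*
 * Illustrative sketch (added example, compiled out; the function below is
 * hypothetical): how itimerfix() normalizes a proposed timer value.
 * Assuming hz = 100, the global 'tick' is 10000 usec, so a 1 usec request
 * is rounded up to one clock tick, while an out-of-range tv_usec is
 * rejected with EINVAL.
 */
#if 0
static void
itimerfix_example(void)
{
    struct timeval tv = { 0, 1 };    /* 1 usec: below clock resolution */

    if (itimerfix(&tv) == 0)
        printf("rounded up to %ld usec\n", (long)tv.tv_usec); /* == tick */

    tv.tv_usec = 1000000;            /* invalid: tv_usec must be < 1000000 */
    if (itimerfix(&tv) != EINVAL)
        printf("unexpected: out-of-range tv_usec accepted\n");
}
#endif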

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
int
itimerdecr(struct itimerval *itp, int usec)
{
    if (itp->it_value.tv_usec < usec) {
        if (itp->it_value.tv_sec == 0) {
            /* expired, and already in next interval */
            usec -= itp->it_value.tv_usec;
            goto expire;
        }
        itp->it_value.tv_usec += 1000000;
        itp->it_value.tv_sec--;
    }
    itp->it_value.tv_usec -= usec;
    usec = 0;
    if (timevalisset(&itp->it_value))
        return (1);
    /* expired, exactly at end of interval */
expire:
    if (timevalisset(&itp->it_interval)) {
        itp->it_value = itp->it_interval;
        itp->it_value.tv_usec -= usec;
        if (itp->it_value.tv_usec < 0) {
            itp->it_value.tv_usec += 1000000;
            itp->it_value.tv_sec--;
        }
    } else
        itp->it_value.tv_usec = 0;        /* sec is already 0 */
    return (0);
}
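
/*
 * Illustrative sketch (added example, compiled out; the function below is
 * hypothetical): a worked itimerdecr() call.  Decrementing 30000 usec
 * against a 20000 usec remaining value expires the timer; the 10000 usec
 * overshoot is subtracted from the reloaded 100000 usec interval, so the
 * timer does not drift.
 */
#if 0
static void
itimerdecr_example(void)
{
    struct itimerval it;

    it.it_value.tv_sec = 0;
    it.it_value.tv_usec = 20000;        /* 20 ms left */
    it.it_interval.tv_sec = 0;
    it.it_interval.tv_usec = 100000;    /* 100 ms period */

    if (itimerdecr(&it, 30000) == 0)    /* 0: expired and reloaded */
        printf("reloaded to %ld usec\n", /* 100000 - 10000 = 90000 */
            (long)it.it_value.tv_usec);
}
#endif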

/*
 * Add and subtract routines for timevals.
 * N.B.: subtract routine doesn't deal with
 * results which are before the beginning,
 * it just gets very confused in this case.
 * Caveat emptor.
 */
void
timevaladd(struct timeval *t1, const struct timeval *t2)
{
    t1->tv_sec += t2->tv_sec;
    t1->tv_usec += t2->tv_usec;
    timevalfix(t1);
}

void
timevalsub(struct timeval *t1, const struct timeval *t2)
{
    t1->tv_sec -= t2->tv_sec;
    t1->tv_usec -= t2->tv_usec;
    timevalfix(t1);
}

static void
timevalfix(struct timeval *t1)
{
    if (t1->tv_usec < 0) {
        t1->tv_sec--;
        t1->tv_usec += 1000000;
    }
    if (t1->tv_usec >= 1000000) {
        t1->tv_sec++;
        t1->tv_usec -= 1000000;
    }
}
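
/*
 * Illustrative sketch (added example, compiled out; the function below is
 * hypothetical): timevalfix() keeps tv_usec in [0, 1000000) after an add
 * or subtract, carrying into or borrowing from tv_sec.
 */
#if 0
static void
timeval_example(void)
{
    struct timeval a = { 1, 900000 };
    struct timeval b = { 0, 200000 };

    timevaladd(&a, &b);    /* a = { 2, 100000 }: usec carried into sec */
    timevalsub(&a, &b);    /* a = { 1, 900000 }: usec borrowed back */
}
#endif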

/*
 * ratecheck(): simple time-based rate-limit checking.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
    struct timeval tv, delta;
    int rv = 0;

    getmicrouptime(&tv);        /* NB: 10ms precision */
    delta = tv;
    timevalsub(&delta, lasttime);

    /*
     * The check for 0,0 ensures the message is seen at least once,
     * even if the interval is huge.
     */
    if (timevalcmp(&delta, mininterval, >=) ||
        (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
        *lasttime = tv;
        rv = 1;
    }

    return (rv);
}
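
/*
 * Illustrative sketch (added example, compiled out; the function below is
 * hypothetical): the usual ratecheck() idiom for throttling a diagnostic
 * to at most one message per second.  'lasttime' must persist across
 * calls, hence the statics.
 */
#if 0
static void
ratecheck_example(void)
{
    static struct timeval lasttime;
    static const struct timeval onesec = { 1, 0 };

    if (ratecheck(&lasttime, &onesec))
        printf("something noteworthy happened\n");
}
#endif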

/*
 * ppsratecheck(): packets (or events) per second limitation.
 *
 * Return 0 if the limit is to be enforced (e.g. the caller
 * should drop a packet because of the rate limitation).
 *
 * maxpps of 0 always causes zero to be returned.  maxpps of -1
 * always causes 1 to be returned; this effectively defeats rate
 * limiting.
 *
 * Note that we maintain the struct timeval for compatibility
 * with other bsd systems.  We reuse the storage and just monitor
 * clock ticks for minimal overhead.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
    int now;

    /*
     * Reset the last time and counter if this is the first call
     * or more than a second has passed since the last update of
     * lasttime.
     */
    now = ticks;
    if (lasttime->tv_sec == 0 || (u_int)(now - lasttime->tv_sec) >= hz) {
        lasttime->tv_sec = now;
        *curpps = 1;
        return (maxpps != 0);
    } else {
        (*curpps)++;        /* NB: ignore potential overflow */
        return (maxpps < 0 || *curpps < maxpps);
    }
}
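
/*
 * Illustrative sketch (added example, compiled out; the function below is
 * hypothetical): the classic ppsratecheck() pattern for enforcing a
 * per-second event budget.  A maxpps of -1 would disable the limit and a
 * maxpps of 0 would refuse every event.
 */
#if 0
static int
ppsratecheck_example(void)
{
    static struct timeval lasttime;
    static int curpps;

    if (!ppsratecheck(&lasttime, &curpps, 100))  /* allow 100 events/s */
        return (0);    /* over budget: caller should drop the packet */
    return (1);
}
#endif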

/*
 * Compute number of ticks in the specified amount of time.
 */
int
tvtohz(struct timeval *tv)
{
    unsigned long ticks;
    long sec, usec;

    /*
     * If the number of usecs in the whole seconds part of the time
     * difference fits in a long, then the total number of usecs will
     * fit in an unsigned long.  Compute the total and convert it to
     * ticks, rounding up and adding 1 to allow for the current tick
     * to expire.  Rounding also depends on unsigned long arithmetic
     * to avoid overflow.
     *
     * Otherwise, if the number of ticks in the whole seconds part of
     * the time difference fits in a long, then convert the parts to
     * ticks separately and add, using similar rounding methods and
     * overflow avoidance.  This method would work in the previous
     * case but it is slightly slower and assumes that hz is integral.
     *
     * Otherwise, round the time difference down to the maximum
     * representable value.
     *
     * If ints have 32 bits, then the maximum value for any timeout in
     * 10ms ticks is 248 days.
     */
    sec = tv->tv_sec;
    usec = tv->tv_usec;
    if (usec < 0) {
        sec--;
        usec += 1000000;
    }
    if (sec < 0) {
#ifdef DIAGNOSTIC
        if (usec > 0) {
            sec++;
            usec -= 1000000;
        }
        printf("tvtohz: negative time difference %ld sec %ld usec\n",
               sec, usec);
#endif
        ticks = 1;
    } else if (sec <= LONG_MAX / 1000000)
        ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
            / tick + 1;
    else if (sec <= LONG_MAX / hz)
        ticks = sec * hz
            + ((unsigned long)usec + (tick - 1)) / tick + 1;
    else
        ticks = LONG_MAX;
    if (ticks > INT_MAX)
        ticks = INT_MAX;
    return ((int)ticks);
}
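
/*
 * Illustrative sketch (added example, compiled out; the function below is
 * hypothetical): assuming hz = 100 (tick = 10000 usec), a 1.5 second
 * timeout converts to (1500000 + 9999) / 10000 + 1 = 151 ticks: rounded
 * up, plus one tick for the partially elapsed current tick.
 */
#if 0
static void
tvtohz_example(void)
{
    struct timeval tv = { 1, 500000 };

    printf("1.5s is %d ticks\n", tvtohz(&tv));    /* 151 with hz = 100 */
}
#endif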

int
copyin(const void *uaddr, void *kaddr, size_t len)
{
    memcpy(kaddr, uaddr, len);
    return (0);
}

int
copyout(const void *kaddr, void *uaddr, size_t len)
{
    memcpy(uaddr, kaddr, len);
    return (0);
}

int
copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *done)
{
    size_t bytes;

    bytes = strlcpy(kdaddr, kfaddr, len);
    if (done != NULL)
        *done = bytes;

    return (0);
}

int
copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
{
    size_t bytes;

    bytes = strlcpy(kaddr, uaddr, len);
    if (done != NULL)
        *done = bytes;

    return (0);
}
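
/*
 * Illustrative sketch (added example, compiled out; the function below is
 * hypothetical): f-stack runs the "kernel" in the application's address
 * space, so the copyin()/copyout() family reduces to memcpy()/strlcpy()
 * and always returns 0.  Note that strlcpy() reports the source length
 * excluding the terminating NUL, which differs from the in-kernel
 * copyinstr() convention of counting the terminator.
 */
#if 0
static void
copy_example(void)
{
    char src[] = "hello";
    char dst[sizeof(src)];
    size_t done;

    copyinstr(src, dst, sizeof(dst), &done);    /* done == 5 */
}
#endif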

int
copyiniov(const struct iovec *iovp, u_int iovcnt, struct iovec **iov, int error)
{
    u_int iovlen;

    *iov = NULL;
    if (iovcnt > UIO_MAXIOV)
        return (error);
    iovlen = iovcnt * sizeof(struct iovec);
    *iov = malloc(iovlen, M_IOV, M_WAITOK);
    error = copyin(iovp, *iov, iovlen);
    if (error) {
        free(*iov, M_IOV);
        *iov = NULL;
    }
    return (error);
}

int
subyte(volatile void *base, int byte)
{
    *(volatile char *)base = (uint8_t)byte;
    return (0);
}

static inline int
chglimit(struct uidinfo *uip, long *limit, int diff, rlim_t max, const char *name)
{
    /* Don't allow them to exceed max, but allow subtraction. */
    if (diff > 0 && max != 0) {
        if (atomic_fetchadd_long(limit, (long)diff) + diff > max) {
            atomic_subtract_long(limit, (long)diff);
            return (0);
        }
    } else {
        atomic_add_long(limit, (long)diff);
        if (*limit < 0)
            printf("negative %s for uid = %d\n", name, uip->ui_uid);
    }
    return (1);
}
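
/*
 * Illustrative sketch (added example, compiled out; the function below is
 * hypothetical): per-uid accounting on top of chglimit().  A positive
 * diff that would push the counter past 'max' is rolled back and refused;
 * max == 0 disables the check, and negative diffs always succeed.
 */
#if 0
static void
chgproccnt_example(struct uidinfo *uip)
{
    if (!chgproccnt(uip, 1, 100))    /* would exceed 100: refused */
        return;
    /* ... do work charged to this uid ... */
    (void)chgproccnt(uip, -1, 0);    /* release: never fails */
}
#endif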

/*
 * Change the count associated with number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(struct uidinfo *uip, int diff, rlim_t max)
{
    return (chglimit(uip, &uip->ui_proccnt, diff, max, "proccnt"));
}

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_int *hiwat, u_int to, rlim_t max)
{
    int diff, rv;

    diff = to - *hiwat;
    if (diff > 0 && max == 0) {
        rv = 0;
    } else {
        rv = chglimit(uip, &uip->ui_sbsize, diff, max, "sbsize");
        if (rv != 0)
            *hiwat = to;
    }
    return (rv);
}

/*
 * Change the count associated with number of pseudo-terminals
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgptscnt(struct uidinfo *uip, int diff, rlim_t max)
{
    return (chglimit(uip, &uip->ui_ptscnt, diff, max, "ptscnt"));
}

int
chgkqcnt(struct uidinfo *uip, int diff, rlim_t max)
{
    return (chglimit(uip, &uip->ui_kqcnt, diff, max, "kqcnt"));
}

int
chgumtxcnt(struct uidinfo *uip, int diff, rlim_t max)
{
    return (chglimit(uip, &uip->ui_umtxcnt, diff, max, "umtxcnt"));
}

/*
 * Allocate a new resource limits structure and initialize its
 * reference count and mutex pointer.
 */
struct plimit *
lim_alloc(void)
{
    struct plimit *limp;

    limp = malloc(sizeof(struct plimit), M_PLIMIT, M_WAITOK);
    refcount_init(&limp->pl_refcnt, 1);
    return (limp);
}

struct plimit *
lim_hold(struct plimit *limp)
{
    refcount_acquire(&limp->pl_refcnt);
    return (limp);
}

/*
 * Return the current (soft) limit for a particular system resource.
 * The 'which' parameter specifies the index into the rlimit array.
 */
rlim_t
lim_cur(struct thread *td, int which)
{
    struct rlimit rl;

    lim_rlimit(td, which, &rl);
    return (rl.rlim_cur);
}

rlim_t
lim_cur_proc(struct proc *p, int which)
{
    struct rlimit rl;

    lim_rlimit_proc(p, which, &rl);
    return (rl.rlim_cur);
}

/*
 * Return a copy of the entire rlimit structure for the system limit
 * specified by 'which' in the rlimit structure pointed to by 'rlp'.
 */
void
lim_rlimit(struct thread *td, int which, struct rlimit *rlp)
{
    struct proc *p = td->td_proc;

    MPASS(td == curthread);
    KASSERT(which >= 0 && which < RLIM_NLIMITS,
        ("request for invalid resource limit"));
    *rlp = p->p_limit->pl_rlimit[which];
    if (p->p_sysent->sv_fixlimit != NULL)
        p->p_sysent->sv_fixlimit(rlp, which);
}

void
lim_rlimit_proc(struct proc *p, int which, struct rlimit *rlp)
{
    PROC_LOCK_ASSERT(p, MA_OWNED);
    KASSERT(which >= 0 && which < RLIM_NLIMITS,
        ("request for invalid resource limit"));
    *rlp = p->p_limit->pl_rlimit[which];
    if (p->p_sysent->sv_fixlimit != NULL)
        p->p_sysent->sv_fixlimit(rlp, which);
}
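
/*
 * Illustrative sketch (added example, compiled out; the function below is
 * hypothetical): reading the soft file-descriptor limit for the current
 * thread with lim_cur(), which copies the entry out of
 * td->td_proc->p_limit->pl_rlimit.
 */
#if 0
static void
lim_example(struct thread *td)
{
    rlim_t nofile;

    nofile = lim_cur(td, RLIMIT_NOFILE);    /* soft limit */
    if (nofile != RLIM_INFINITY)
        printf("fd limit: %ju\n", (uintmax_t)nofile);
}
#endif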

int
useracc(void *addr, int len, int rw)
{
    return (1);
}

struct pgrp *
pgfind(pid_t pgid)
{
    return (NULL);
}

struct proc *
zpfind(pid_t pid)
{
    return (NULL);
}

int
p_cansee(struct thread *td, struct proc *p)
{
    return (0);
}

struct proc *
pfind(pid_t pid)
{
    return (NULL);
}

int
pget(pid_t pid, int flags, struct proc **pp)
{
    return (ESRCH);
}

struct uidinfo uid0;

struct uidinfo *
uifind(uid_t uid)
{
    return (&uid0);
}

/*
 * Allocate a zeroed cred structure.
 */
struct ucred *
crget(void)
{
    struct ucred *cr;

    cr = malloc(sizeof(*cr), M_CRED, M_WAITOK | M_ZERO);
    refcount_init(&cr->cr_ref, 1);

    return (cr);
}

/*
 * Claim another reference to a ucred structure.
 */
struct ucred *
crhold(struct ucred *cr)
{
    refcount_acquire(&cr->cr_ref);
    return (cr);
}

/*
 * Free a cred structure.  Throws away space when ref count gets to 0.
 */
void
crfree(struct ucred *cr)
{
    KASSERT(cr->cr_ref > 0, ("bad ucred refcount: %d", cr->cr_ref));
    KASSERT(cr->cr_ref != 0xdeadc0de, ("dangling reference to ucred"));
    if (refcount_release(&cr->cr_ref))
        free(cr, M_CRED);
}
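
/*
 * Illustrative sketch (added example, compiled out; the function below is
 * hypothetical): the reference-counted ucred life cycle.  crget() returns
 * a zeroed cred holding one reference, crhold() adds one, and crfree()
 * drops one, freeing the structure when the count reaches zero.
 */
#if 0
static void
cred_example(void)
{
    struct ucred *cr;

    cr = crget();        /* refcount == 1 */
    (void)crhold(cr);    /* refcount == 2 */
    crfree(cr);          /* refcount == 1 */
    crfree(cr);          /* refcount == 0: freed */
}
#endif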

/*
 * Fill in a struct xucred based on a struct ucred.
 */
void
cru2x(struct ucred *cr, struct xucred *xcr)
{
#if 0
    int ngroups;

    bzero(xcr, sizeof(*xcr));
    xcr->cr_version = XUCRED_VERSION;
    xcr->cr_uid = cr->cr_uid;

    ngroups = MIN(cr->cr_ngroups, XU_NGROUPS);
    xcr->cr_ngroups = ngroups;
    bcopy(cr->cr_groups, xcr->cr_groups,
        ngroups * sizeof(*cr->cr_groups));
#endif
}

int
cr_cansee(struct ucred *u1, struct ucred *u2)
{
    return (0);
}

int
cr_canseesocket(struct ucred *cred, struct socket *so)
{
    return (0);
}

int
cr_canseeinpcb(struct ucred *cred, struct inpcb *inp)
{
    return (0);
}

int
securelevel_gt(struct ucred *cr, int level)
{
    return (0);
}

/**
 * @brief Send a 'notification' to userland, using standard ways
 */
void
devctl_notify(const char *system, const char *subsystem, const char *type,
    const char *data)
{

}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

}

static void
configure_final(void *dummy)
{
    cold = 0;
}

/*
 * Send a SIGIO or SIGURG signal to a process or process group using stored
 * credentials rather than those of the current process.
 */
void
pgsigio(struct sigio **sigiop, int sig, int checkctty)
{
    panic("SIGIO not supported yet\n");
#ifdef notyet
    ksiginfo_t ksi;
    struct sigio *sigio;

    ksiginfo_init(&ksi);
    ksi.ksi_signo = sig;
    ksi.ksi_code = SI_KERNEL;

    SIGIO_LOCK();
    sigio = *sigiop;
    if (sigio == NULL) {
        SIGIO_UNLOCK();
        return;
    }
    if (sigio->sio_pgid > 0) {
        PROC_LOCK(sigio->sio_proc);
        if (CANSIGIO(sigio->sio_ucred, sigio->sio_proc->p_ucred))
            psignal(sigio->sio_proc, sig);
        PROC_UNLOCK(sigio->sio_proc);
    } else if (sigio->sio_pgid < 0) {
        struct proc *p;

        PGRP_LOCK(sigio->sio_pgrp);
        LIST_FOREACH(p, &sigio->sio_pgrp->pg_members, p_pglist) {
            PROC_LOCK(p);
            if (CANSIGIO(sigio->sio_ucred, p->p_ucred) &&
                (checkctty == 0 || (p->p_flag & P_CONTROLT)))
                psignal(p, sig);
            PROC_UNLOCK(p);
        }
        PGRP_UNLOCK(sigio->sio_pgrp);
    }
    SIGIO_UNLOCK();
#endif
}

void
kproc_exit(int ecode)
{
    panic("kproc_exit unsupported");
}

vm_offset_t
kmem_malloc(struct vmem *vmem, vm_size_t bytes, int flags)
{
    void *alloc;

    alloc = ff_mmap(NULL, bytes, ff_PROT_READ | ff_PROT_WRITE,
        ff_MAP_ANON | ff_MAP_PRIVATE, -1, 0);
    if ((flags & M_ZERO) && alloc != NULL)
        bzero(alloc, bytes);
    return ((vm_offset_t)alloc);
}

void
kmem_free(struct vmem *vmem, vm_offset_t addr, vm_size_t size)
{
    ff_munmap((void *)addr, size);
}
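
/*
 * Illustrative sketch (added example, compiled out; the function below is
 * hypothetical): kernel "kmem" requests are backed by anonymous mappings
 * from the ff_* host interface, so a round trip looks like ordinary
 * page-backed allocation.
 */
#if 0
static void
kmem_example(void)
{
    vm_offset_t va;

    va = kmem_malloc(NULL, 4096, M_ZERO);    /* zeroed pages via ff_mmap() */
    if (va != 0)
        kmem_free(NULL, va, 4096);           /* released via ff_munmap() */
}
#endif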

vm_offset_t
kmem_alloc_contig(struct vmem *vmem, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary, vm_memattr_t memattr)
{
    return (kmem_malloc(vmem, size, flags));
}

void
malloc_init(void *data)
{
    /* Nothing to do here */
}

void
malloc_uninit(void *data)
{
    /* Nothing to do here */
}

void *
malloc(unsigned long size, struct malloc_type *type, int flags)
{
    void *alloc;

    do {
        alloc = ff_malloc(size);
        if (alloc || !(flags & M_WAITOK))
            break;

        pause("malloc", hz / 100);
    } while (alloc == NULL);

    if ((flags & M_ZERO) && alloc != NULL)
        bzero(alloc, size);
    return (alloc);
}

void
free(void *addr, struct malloc_type *type)
{
    ff_free(addr);
}

void *
realloc(void *addr, unsigned long size, struct malloc_type *type,
    int flags)
{
    return (ff_realloc(addr, size));
}

void *
reallocf(void *addr, unsigned long size, struct malloc_type *type,
    int flags)
{
    void *mem;

    if ((mem = ff_realloc(addr, size)) == NULL)
        ff_free(addr);

    return (mem);
}
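
/*
 * Illustrative sketch (added example, compiled out; the function below is
 * hypothetical): reallocf() is the free-on-failure variant, avoiding the
 * classic leak when a failed realloc() return overwrites the only pointer
 * to the old block.
 */
#if 0
static void
reallocf_example(void)
{
    char *buf;

    buf = malloc(64, M_TEMP, M_WAITOK | M_ZERO);
    buf = reallocf(buf, 128, M_TEMP, M_NOWAIT); /* old block freed on failure */
    if (buf != NULL)
        free(buf, M_TEMP);
}
#endif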

void
DELAY(int delay)
{
    struct timespec rqt;

    if (delay < 1000)
        return;

    rqt.tv_nsec = 1000 * ((unsigned long)delay);
    rqt.tv_sec = 0;
    /*
     * FIXME: We shouldn't sleep in dpdk apps.
     */
    //nanosleep(&rqt, NULL);
}

void
bwillwrite(void)
{

}

off_t
foffset_lock(struct file *fp, int flags)
{
    struct mtx *mtxp;
    off_t res;

1045 
1046 #if OFF_MAX <= LONG_MAX
1047     /*
1048      * Caller only wants the current f_offset value.  Assume that
1049      * the long and shorter integer types reads are atomic.
1050      */
1051     if ((flags & FOF_NOLOCK) != 0)
1052         return (fp->f_offset);
1053 #endif

    /*
     * According to McKusick the vn lock was protecting f_offset here.
     * It is now protected by the FOFFSET_LOCKED flag.
     */
    mtxp = mtx_pool_find(mtxpool_sleep, fp);
    mtx_lock(mtxp);
    if ((flags & FOF_NOLOCK) == 0) {
        while (fp->f_vnread_flags & FOFFSET_LOCKED) {
            fp->f_vnread_flags |= FOFFSET_LOCK_WAITING;
            msleep(&fp->f_vnread_flags, mtxp, PUSER - 1,
                "vofflock", 0);
        }
        fp->f_vnread_flags |= FOFFSET_LOCKED;
    }
    res = fp->f_offset;
    mtx_unlock(mtxp);
    return (res);
}

void
sf_ext_free(void *arg1, void *arg2)
{
    panic("sf_ext_free not implemented.\n");
}

void
sf_ext_free_nocache(void *arg1, void *arg2)
{
    panic("sf_ext_free_nocache not implemented.\n");
}

void
sched_bind(struct thread *td, int cpu)
{

}

void
sched_unbind(struct thread *td)
{

}