1 /*-
2 * SPDX-License-Identifier: (Beerware AND BSD-3-Clause)
3 *
4 * ----------------------------------------------------------------------------
5 * "THE BEER-WARE LICENSE" (Revision 42):
6 * <[email protected]> wrote this file. As long as you retain this notice you
7 * can do whatever you want with this stuff. If we meet some day, and you think
8 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
9 * ----------------------------------------------------------------------------
10 *
11 * $FreeBSD$
12 *
13 */
14
15 /*-
16 * The following functions are based on the vn(4) driver: mdstart_swap(),
17 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
18 * and as such under the following copyright:
19 *
20 * Copyright (c) 1988 University of Utah.
21 * Copyright (c) 1990, 1993
22 * The Regents of the University of California. All rights reserved.
23 * Copyright (c) 2013 The FreeBSD Foundation
24 * All rights reserved.
25 *
26 * This code is derived from software contributed to Berkeley by
27 * the Systems Programming Group of the University of Utah Computer
28 * Science Department.
29 *
30 * Portions of this software were developed by Konstantin Belousov
31 * under sponsorship from the FreeBSD Foundation.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. Neither the name of the University nor the names of its contributors
42 * may be used to endorse or promote products derived from this software
43 * without specific prior written permission.
44 *
45 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55 * SUCH DAMAGE.
56 *
57 * from: Utah Hdr: vn.c 1.13 94/04/02
58 *
59 * from: @(#)vn.c 8.6 (Berkeley) 4/1/94
60 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
61 */
62
63 #include "opt_rootdevname.h"
64 #include "opt_geom.h"
65 #include "opt_md.h"
66
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/bio.h>
70 #include <sys/buf.h>
71 #include <sys/conf.h>
72 #include <sys/devicestat.h>
73 #include <sys/fcntl.h>
74 #include <sys/kernel.h>
75 #include <sys/kthread.h>
76 #include <sys/limits.h>
77 #include <sys/linker.h>
78 #include <sys/lock.h>
79 #include <sys/malloc.h>
80 #include <sys/mdioctl.h>
81 #include <sys/mount.h>
82 #include <sys/mutex.h>
83 #include <sys/sx.h>
84 #include <sys/namei.h>
85 #include <sys/proc.h>
86 #include <sys/queue.h>
87 #include <sys/rwlock.h>
88 #include <sys/sbuf.h>
89 #include <sys/sched.h>
90 #include <sys/sf_buf.h>
91 #include <sys/sysctl.h>
92 #include <sys/uio.h>
93 #include <sys/vnode.h>
94 #include <sys/disk.h>
95
96 #include <geom/geom.h>
97 #include <geom/geom_int.h>
98
99 #include <vm/vm.h>
100 #include <vm/vm_param.h>
101 #include <vm/vm_object.h>
102 #include <vm/vm_page.h>
103 #include <vm/vm_pager.h>
104 #include <vm/swap_pager.h>
105 #include <vm/uma.h>
106
107 #include <machine/bus.h>
108
109 #define MD_MODVER 1
110
111 #define MD_SHUTDOWN 0x10000 /* Tell worker thread to terminate. */
112 #define MD_EXITING 0x20000 /* Worker thread is exiting. */
113 #define MD_PROVIDERGONE 0x40000 /* Safe to free the softc */
114
115 #ifndef MD_NSECT
116 #define MD_NSECT (10000 * 2)
117 #endif
118
119 struct md_req {
120 unsigned md_unit; /* unit number */
121 enum md_types md_type; /* type of disk */
122 off_t md_mediasize; /* size of disk in bytes */
123 unsigned md_sectorsize; /* sectorsize */
124 unsigned md_options; /* options */
125 int md_fwheads; /* firmware heads */
126 int md_fwsectors; /* firmware sectors */
127 char *md_file; /* pathname of file to mount */
128 enum uio_seg md_file_seg; /* location of md_file */
129 char *md_label; /* label of the device (userspace) */
130 int *md_units; /* pointer to units array (kernel) */
131 size_t md_units_nitems; /* items in md_units array */
132 };
133
134 #ifdef COMPAT_FREEBSD32
135 struct md_ioctl32 {
136 unsigned md_version;
137 unsigned md_unit;
138 enum md_types md_type;
139 uint32_t md_file;
140 off_t md_mediasize;
141 unsigned md_sectorsize;
142 unsigned md_options;
143 uint64_t md_base;
144 int md_fwheads;
145 int md_fwsectors;
146 uint32_t md_label;
147 int md_pad[MDNPAD];
148 } __attribute__((__packed__));
149 CTASSERT((sizeof(struct md_ioctl32)) == 436);
150
151 #define MDIOCATTACH_32 _IOC_NEWTYPE(MDIOCATTACH, struct md_ioctl32)
152 #define MDIOCDETACH_32 _IOC_NEWTYPE(MDIOCDETACH, struct md_ioctl32)
153 #define MDIOCQUERY_32 _IOC_NEWTYPE(MDIOCQUERY, struct md_ioctl32)
154 #define MDIOCLIST_32 _IOC_NEWTYPE(MDIOCLIST, struct md_ioctl32)
155 #define MDIOCRESIZE_32 _IOC_NEWTYPE(MDIOCRESIZE, struct md_ioctl32)
156 #endif /* COMPAT_FREEBSD32 */
157
158 static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
159 static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");
160
161 static int md_debug;
162 SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0,
163 "Enable md(4) debug messages");
164 static int md_malloc_wait;
165 SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0,
166 "Allow malloc to wait for memory allocations");
167
168 #if defined(MD_ROOT) && !defined(MD_ROOT_FSTYPE)
169 #define MD_ROOT_FSTYPE "ufs"
170 #endif
171
172 #if defined(MD_ROOT)
173 /*
174 * Preloaded image gets put here.
175 */
176 #if defined(MD_ROOT_SIZE)
177 /*
178 * We put the mfs_root symbol into the oldmfs section of the kernel object file.
179 * Applications that patch the object with the image can determine
180 * the size by looking at the oldmfs section size within the kernel.
181 */
182 u_char mfs_root[MD_ROOT_SIZE*1024] __attribute__ ((section ("oldmfs")));
183 const int mfs_root_size = sizeof(mfs_root);
184 #elif defined(MD_ROOT_MEM)
185 /* MD region already mapped in the memory */
186 u_char *mfs_root;
187 int mfs_root_size;
188 #else
189 extern volatile u_char __weak_symbol mfs_root;
190 extern volatile u_char __weak_symbol mfs_root_end;
191 __GLOBL(mfs_root);
192 __GLOBL(mfs_root_end);
193 #define mfs_root_size ((uintptr_t)(&mfs_root_end - &mfs_root))
194 #endif
195 #endif
196
197 static g_init_t g_md_init;
198 static g_fini_t g_md_fini;
199 static g_start_t g_md_start;
200 static g_access_t g_md_access;
201 static void g_md_dumpconf(struct sbuf *sb, const char *indent,
202 struct g_geom *gp, struct g_consumer *cp __unused, struct g_provider *pp);
203 static g_provgone_t g_md_providergone;
204
205 static struct cdev *status_dev = NULL;
206 static struct sx md_sx;
207 static struct unrhdr *md_uh;
208
209 static d_ioctl_t mdctlioctl;
210
211 static struct cdevsw mdctl_cdevsw = {
212 .d_version = D_VERSION,
213 .d_ioctl = mdctlioctl,
214 .d_name = MD_NAME,
215 };
216
217 struct g_class g_md_class = {
218 .name = "MD",
219 .version = G_VERSION,
220 .init = g_md_init,
221 .fini = g_md_fini,
222 .start = g_md_start,
223 .access = g_md_access,
224 .dumpconf = g_md_dumpconf,
225 .providergone = g_md_providergone,
226 };
227
228 DECLARE_GEOM_CLASS(g_md_class, g_md);
229
230
231 static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(md_softc_list);
232
233 #define NINDIR (PAGE_SIZE / sizeof(uintptr_t))
234 #define NMASK (NINDIR-1)
235 static int nshift;
236
237 static int md_vnode_pbuf_freecnt;
238
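/*
 * Sector-indexed radix tree used by malloc-backed devices.  Interior nodes
 * point at child indirs; leaf entries hold either a pointer to a malloc'ed
 * sector (values > 255) or, when MD_COMPRESS has collapsed a uniform sector,
 * the fill byte itself (values <= 255).  A zero entry means the sector was
 * never written and reads back as zeroes.
 */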
239 struct indir {
240 uintptr_t *array;
241 u_int total;
242 u_int used;
243 u_int shift;
244 };
245
246 struct md_s {
247 int unit;
248 LIST_ENTRY(md_s) list;
249 struct bio_queue_head bio_queue;
250 struct mtx queue_mtx;
251 struct mtx stat_mtx;
252 struct cdev *dev;
253 enum md_types type;
254 off_t mediasize;
255 unsigned sectorsize;
256 unsigned opencount;
257 unsigned fwheads;
258 unsigned fwsectors;
259 char ident[32];
260 unsigned flags;
261 char name[20];
262 struct proc *procp;
263 struct g_geom *gp;
264 struct g_provider *pp;
265 int (*start)(struct md_s *sc, struct bio *bp);
266 struct devstat *devstat;
267
268 /* MD_MALLOC related fields */
269 struct indir *indir;
270 uma_zone_t uma;
271
272 /* MD_PRELOAD related fields */
273 u_char *pl_ptr;
274 size_t pl_len;
275
276 /* MD_VNODE related fields */
277 struct vnode *vnode;
278 char file[PATH_MAX];
279 char label[PATH_MAX];
280 struct ucred *cred;
281
282 /* MD_SWAP related fields */
283 vm_object_t object;
284 };
285
286 static struct indir *
287 new_indir(u_int shift)
288 {
289 struct indir *ip;
290
291 ip = malloc(sizeof *ip, M_MD, (md_malloc_wait ? M_WAITOK : M_NOWAIT)
292 | M_ZERO);
293 if (ip == NULL)
294 return (NULL);
295 ip->array = malloc(sizeof(uintptr_t) * NINDIR,
296 M_MDSECT, (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
297 if (ip->array == NULL) {
298 free(ip, M_MD);
299 return (NULL);
300 }
301 ip->total = NINDIR;
302 ip->shift = shift;
303 return (ip);
304 }
305
306 static void
307 del_indir(struct indir *ip)
308 {
309
310 free(ip->array, M_MDSECT);
311 free(ip, M_MD);
312 }
313
314 static void
315 destroy_indir(struct md_s *sc, struct indir *ip)
316 {
317 int i;
318
319 for (i = 0; i < NINDIR; i++) {
320 if (!ip->array[i])
321 continue;
322 if (ip->shift)
323 destroy_indir(sc, (struct indir*)(ip->array[i]));
324 else if (ip->array[i] > 255)
325 uma_zfree(sc->uma, (void *)(ip->array[i]));
326 }
327 del_indir(ip);
328 }
329
330 /*
331 * This function does the math and allocates the top level "indir" structure
332 * for a device of "size" sectors.
333 */
334
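/*
 * Worked example (assuming 4 KiB pages and 8-byte pointers, so NINDIR == 512
 * and nshift == 9): a 1 GiB device with 512-byte sectors has 2^21 sectors,
 * which gives layer == 2 and a top-level shift of 18, i.e. three tree levels
 * each consuming 9 bits of the sector number.
 */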
335 static struct indir *
336 dimension(off_t size)
337 {
338 off_t rcnt;
339 struct indir *ip;
340 int layer;
341
342 rcnt = size;
343 layer = 0;
344 while (rcnt > NINDIR) {
345 rcnt /= NINDIR;
346 layer++;
347 }
348
349 /*
350 * XXX: the top layer is probably not fully populated, so we allocate
351 * too much space for ip->array in here.
352 */
353 ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
354 ip->array = malloc(sizeof(uintptr_t) * NINDIR,
355 M_MDSECT, M_WAITOK | M_ZERO);
356 ip->total = NINDIR;
357 ip->shift = layer * nshift;
358 return (ip);
359 }
360
361 /*
362 * Read a given sector
363 */
364
365 static uintptr_t
366 s_read(struct indir *ip, off_t offset)
367 {
368 struct indir *cip;
369 int idx;
370 uintptr_t up;
371
372 if (md_debug > 1)
373 printf("s_read(%jd)\n", (intmax_t)offset);
374 up = 0;
375 for (cip = ip; cip != NULL;) {
376 if (cip->shift) {
377 idx = (offset >> cip->shift) & NMASK;
378 up = cip->array[idx];
379 cip = (struct indir *)up;
380 continue;
381 }
382 idx = offset & NMASK;
383 return (cip->array[idx]);
384 }
385 return (0);
386 }
387
388 /*
389 * Write a given sector, prune the tree if the value is 0
390 */
391
392 static int
393 s_write(struct indir *ip, off_t offset, uintptr_t ptr)
394 {
395 struct indir *cip, *lip[10];
396 int idx, li;
397 uintptr_t up;
398
399 if (md_debug > 1)
400 printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
401 up = 0;
402 li = 0;
403 cip = ip;
404 for (;;) {
405 lip[li++] = cip;
406 if (cip->shift) {
407 idx = (offset >> cip->shift) & NMASK;
408 up = cip->array[idx];
409 if (up != 0) {
410 cip = (struct indir *)up;
411 continue;
412 }
413 /* Allocate branch */
414 cip->array[idx] =
415 (uintptr_t)new_indir(cip->shift - nshift);
416 if (cip->array[idx] == 0)
417 return (ENOSPC);
418 cip->used++;
419 up = cip->array[idx];
420 cip = (struct indir *)up;
421 continue;
422 }
423 /* leafnode */
424 idx = offset & NMASK;
425 up = cip->array[idx];
426 if (up != 0)
427 cip->used--;
428 cip->array[idx] = ptr;
429 if (ptr != 0)
430 cip->used++;
431 break;
432 }
433 if (cip->used != 0 || li == 1)
434 return (0);
435 li--;
436 while (cip->used == 0 && cip != ip) {
437 li--;
438 idx = (offset >> lip[li]->shift) & NMASK;
439 up = lip[li]->array[idx];
440 KASSERT(up == (uintptr_t)cip, ("md screwed up"));
441 del_indir(cip);
442 lip[li]->array[idx] = 0;
443 lip[li]->used--;
444 cip = lip[li];
445 }
446 return (0);
447 }
448
449
450 static int
451 g_md_access(struct g_provider *pp, int r, int w, int e)
452 {
453 struct md_s *sc;
454
455 sc = pp->geom->softc;
456 if (sc == NULL) {
457 if (r <= 0 && w <= 0 && e <= 0)
458 return (0);
459 return (ENXIO);
460 }
461 r += pp->acr;
462 w += pp->acw;
463 e += pp->ace;
464 if ((sc->flags & MD_READONLY) != 0 && w > 0)
465 return (EROFS);
466 if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
467 sc->opencount = 1;
468 } else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
469 sc->opencount = 0;
470 }
471 return (0);
472 }
473
474 static void
475 g_md_start(struct bio *bp)
476 {
477 struct md_s *sc;
478
479 sc = bp->bio_to->geom->softc;
480 if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE)) {
481 mtx_lock(&sc->stat_mtx);
482 devstat_start_transaction_bio(sc->devstat, bp);
483 mtx_unlock(&sc->stat_mtx);
484 }
485 mtx_lock(&sc->queue_mtx);
486 bioq_disksort(&sc->bio_queue, bp);
487 wakeup(sc);
488 mtx_unlock(&sc->queue_mtx);
489 }
490
491 #define MD_MALLOC_MOVE_ZERO 1
492 #define MD_MALLOC_MOVE_FILL 2
493 #define MD_MALLOC_MOVE_READ 3
494 #define MD_MALLOC_MOVE_WRITE 4
495 #define MD_MALLOC_MOVE_CMP 5
496
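/*
 * Move one sector's worth of data between the malloc backing store and an
 * unmapped bio's page array.  Each page is mapped through a CPU-private
 * sf_buf; "op" selects zeroing, filling with a constant byte, copying in
 * either direction, or checking whether the sector is a single repeated
 * byte (used by MD_COMPRESS).
 */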
497 static int
498 md_malloc_move_ma(vm_page_t **mp, int *ma_offs, unsigned sectorsize,
499 void *ptr, u_char fill, int op)
500 {
501 struct sf_buf *sf;
502 vm_page_t m, *mp1;
503 char *p, first;
504 off_t *uc;
505 unsigned n;
506 int error, i, ma_offs1, sz, first_read;
507
508 m = NULL;
509 error = 0;
510 sf = NULL;
511 /* if (op == MD_MALLOC_MOVE_CMP) { gcc */
512 first = 0;
513 first_read = 0;
514 uc = ptr;
515 mp1 = *mp;
516 ma_offs1 = *ma_offs;
517 /* } */
518 sched_pin();
519 for (n = sectorsize; n != 0; n -= sz) {
520 sz = imin(PAGE_SIZE - *ma_offs, n);
521 if (m != **mp) {
522 if (sf != NULL)
523 sf_buf_free(sf);
524 m = **mp;
525 sf = sf_buf_alloc(m, SFB_CPUPRIVATE |
526 (md_malloc_wait ? 0 : SFB_NOWAIT));
527 if (sf == NULL) {
528 error = ENOMEM;
529 break;
530 }
531 }
532 p = (char *)sf_buf_kva(sf) + *ma_offs;
533 switch (op) {
534 case MD_MALLOC_MOVE_ZERO:
535 bzero(p, sz);
536 break;
537 case MD_MALLOC_MOVE_FILL:
538 memset(p, fill, sz);
539 break;
540 case MD_MALLOC_MOVE_READ:
541 bcopy(ptr, p, sz);
542 cpu_flush_dcache(p, sz);
543 break;
544 case MD_MALLOC_MOVE_WRITE:
545 bcopy(p, ptr, sz);
546 break;
547 case MD_MALLOC_MOVE_CMP:
548 for (i = 0; i < sz; i++, p++) {
549 if (!first_read) {
550 *uc = (u_char)*p;
551 first = *p;
552 first_read = 1;
553 } else if (*p != first) {
554 error = EDOOFUS;
555 break;
556 }
557 }
558 break;
559 default:
560 KASSERT(0, ("md_malloc_move_ma unknown op %d\n", op));
561 break;
562 }
563 if (error != 0)
564 break;
565 *ma_offs += sz;
566 *ma_offs %= PAGE_SIZE;
567 if (*ma_offs == 0)
568 (*mp)++;
569 ptr = (char *)ptr + sz;
570 }
571
572 if (sf != NULL)
573 sf_buf_free(sf);
574 sched_unpin();
575 if (op == MD_MALLOC_MOVE_CMP && error != 0) {
576 *mp = mp1;
577 *ma_offs = ma_offs1;
578 }
579 return (error);
580 }
581
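/*
 * Same operations as md_malloc_move_ma(), but for BIO_VLIST bios whose
 * bio_data is an array of bus_dma_segment_t describing already-mapped
 * kernel addresses.
 */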
582 static int
583 md_malloc_move_vlist(bus_dma_segment_t **pvlist, int *pma_offs,
584 unsigned len, void *ptr, u_char fill, int op)
585 {
586 bus_dma_segment_t *vlist;
587 uint8_t *p, *end, first;
588 off_t *uc;
589 int ma_offs, seg_len;
590
591 vlist = *pvlist;
592 ma_offs = *pma_offs;
593 uc = ptr;
594
595 for (; len != 0; len -= seg_len) {
596 seg_len = imin(vlist->ds_len - ma_offs, len);
597 p = (uint8_t *)(uintptr_t)vlist->ds_addr + ma_offs;
598 switch (op) {
599 case MD_MALLOC_MOVE_ZERO:
600 bzero(p, seg_len);
601 break;
602 case MD_MALLOC_MOVE_FILL:
603 memset(p, fill, seg_len);
604 break;
605 case MD_MALLOC_MOVE_READ:
606 bcopy(ptr, p, seg_len);
607 cpu_flush_dcache(p, seg_len);
608 break;
609 case MD_MALLOC_MOVE_WRITE:
610 bcopy(p, ptr, seg_len);
611 break;
612 case MD_MALLOC_MOVE_CMP:
613 end = p + seg_len;
614 first = *uc = *p;
615 /* Confirm all following bytes match the first */
616 while (++p < end) {
617 if (*p != first)
618 return (EDOOFUS);
619 }
620 break;
621 default:
622 KASSERT(0, ("md_malloc_move_vlist unknown op %d\n", op));
623 break;
624 }
625
626 ma_offs += seg_len;
627 if (ma_offs == vlist->ds_len) {
628 ma_offs = 0;
629 vlist++;
630 }
631 ptr = (uint8_t *)ptr + seg_len;
632 }
633 *pvlist = vlist;
634 *pma_offs = ma_offs;
635
636 return (0);
637 }
638
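/*
 * I/O routine for malloc-backed devices.  Walks the indirect tree one sector
 * at a time: BIO_DELETE releases sectors, BIO_READ synthesizes zeroes or a
 * fill byte for unallocated/compressed sectors, and BIO_WRITE stores either
 * a single fill byte (when MD_COMPRESS finds a uniform sector) or a sector
 * allocated from the per-device UMA zone.
 */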
639 static int
640 mdstart_malloc(struct md_s *sc, struct bio *bp)
641 {
642 u_char *dst;
643 vm_page_t *m;
644 bus_dma_segment_t *vlist;
645 int i, error, error1, ma_offs, notmapped;
646 off_t secno, nsec, uc;
647 uintptr_t sp, osp;
648
649 switch (bp->bio_cmd) {
650 case BIO_READ:
651 case BIO_WRITE:
652 case BIO_DELETE:
653 break;
654 default:
655 return (EOPNOTSUPP);
656 }
657
658 notmapped = (bp->bio_flags & BIO_UNMAPPED) != 0;
659 vlist = (bp->bio_flags & BIO_VLIST) != 0 ?
660 (bus_dma_segment_t *)bp->bio_data : NULL;
661 if (notmapped) {
662 m = bp->bio_ma;
663 ma_offs = bp->bio_ma_offset;
664 dst = NULL;
665 KASSERT(vlist == NULL, ("vlists cannot be unmapped"));
666 } else if (vlist != NULL) {
667 ma_offs = bp->bio_ma_offset;
668 dst = NULL;
669 } else {
670 dst = bp->bio_data;
671 }
672
673 nsec = bp->bio_length / sc->sectorsize;
674 secno = bp->bio_offset / sc->sectorsize;
675 error = 0;
676 while (nsec--) {
677 osp = s_read(sc->indir, secno);
678 if (bp->bio_cmd == BIO_DELETE) {
679 if (osp != 0)
680 error = s_write(sc->indir, secno, 0);
681 } else if (bp->bio_cmd == BIO_READ) {
682 if (osp == 0) {
683 if (notmapped) {
684 error = md_malloc_move_ma(&m, &ma_offs,
685 sc->sectorsize, NULL, 0,
686 MD_MALLOC_MOVE_ZERO);
687 } else if (vlist != NULL) {
688 error = md_malloc_move_vlist(&vlist,
689 &ma_offs, sc->sectorsize, NULL, 0,
690 MD_MALLOC_MOVE_ZERO);
691 } else
692 bzero(dst, sc->sectorsize);
693 } else if (osp <= 255) {
694 if (notmapped) {
695 error = md_malloc_move_ma(&m, &ma_offs,
696 sc->sectorsize, NULL, osp,
697 MD_MALLOC_MOVE_FILL);
698 } else if (vlist != NULL) {
699 error = md_malloc_move_vlist(&vlist,
700 &ma_offs, sc->sectorsize, NULL, osp,
701 MD_MALLOC_MOVE_FILL);
702 } else
703 memset(dst, osp, sc->sectorsize);
704 } else {
705 if (notmapped) {
706 error = md_malloc_move_ma(&m, &ma_offs,
707 sc->sectorsize, (void *)osp, 0,
708 MD_MALLOC_MOVE_READ);
709 } else if (vlist != NULL) {
710 error = md_malloc_move_vlist(&vlist,
711 &ma_offs, sc->sectorsize,
712 (void *)osp, 0,
713 MD_MALLOC_MOVE_READ);
714 } else {
715 bcopy((void *)osp, dst, sc->sectorsize);
716 cpu_flush_dcache(dst, sc->sectorsize);
717 }
718 }
719 osp = 0;
720 } else if (bp->bio_cmd == BIO_WRITE) {
721 if (sc->flags & MD_COMPRESS) {
722 if (notmapped) {
723 error1 = md_malloc_move_ma(&m, &ma_offs,
724 sc->sectorsize, &uc, 0,
725 MD_MALLOC_MOVE_CMP);
726 i = error1 == 0 ? sc->sectorsize : 0;
727 } else if (vlist != NULL) {
728 error1 = md_malloc_move_vlist(&vlist,
729 &ma_offs, sc->sectorsize, &uc, 0,
730 MD_MALLOC_MOVE_CMP);
731 i = error1 == 0 ? sc->sectorsize : 0;
732 } else {
733 uc = dst[0];
734 for (i = 1; i < sc->sectorsize; i++) {
735 if (dst[i] != uc)
736 break;
737 }
738 }
739 } else {
740 i = 0;
741 uc = 0;
742 }
743 if (i == sc->sectorsize) {
744 if (osp != uc)
745 error = s_write(sc->indir, secno, uc);
746 } else {
747 if (osp <= 255) {
748 sp = (uintptr_t)uma_zalloc(sc->uma,
749 md_malloc_wait ? M_WAITOK :
750 M_NOWAIT);
751 if (sp == 0) {
752 error = ENOSPC;
753 break;
754 }
755 if (notmapped) {
756 error = md_malloc_move_ma(&m,
757 &ma_offs, sc->sectorsize,
758 (void *)sp, 0,
759 MD_MALLOC_MOVE_WRITE);
760 } else if (vlist != NULL) {
761 error = md_malloc_move_vlist(
762 &vlist, &ma_offs,
763 sc->sectorsize, (void *)sp,
764 0, MD_MALLOC_MOVE_WRITE);
765 } else {
766 bcopy(dst, (void *)sp,
767 sc->sectorsize);
768 }
769 error = s_write(sc->indir, secno, sp);
770 } else {
771 if (notmapped) {
772 error = md_malloc_move_ma(&m,
773 &ma_offs, sc->sectorsize,
774 (void *)osp, 0,
775 MD_MALLOC_MOVE_WRITE);
776 } else if (vlist != NULL) {
777 error = md_malloc_move_vlist(
778 &vlist, &ma_offs,
779 sc->sectorsize, (void *)osp,
780 0, MD_MALLOC_MOVE_WRITE);
781 } else {
782 bcopy(dst, (void *)osp,
783 sc->sectorsize);
784 }
785 osp = 0;
786 }
787 }
788 } else {
789 error = EOPNOTSUPP;
790 }
791 if (osp > 255)
792 uma_zfree(sc->uma, (void*)osp);
793 if (error != 0)
794 break;
795 secno++;
796 if (!notmapped && vlist == NULL)
797 dst += sc->sectorsize;
798 }
799 bp->bio_resid = 0;
800 return (error);
801 }
802
803 static void
804 mdcopyto_vlist(void *src, bus_dma_segment_t *vlist, off_t offset, off_t len)
805 {
806 off_t seg_len;
807
808 while (offset >= vlist->ds_len) {
809 offset -= vlist->ds_len;
810 vlist++;
811 }
812
813 while (len != 0) {
814 seg_len = omin(len, vlist->ds_len - offset);
815 bcopy(src, (void *)(uintptr_t)(vlist->ds_addr + offset),
816 seg_len);
817 offset = 0;
818 src = (uint8_t *)src + seg_len;
819 len -= seg_len;
820 vlist++;
821 }
822 }
823
824 static void
825 mdcopyfrom_vlist(bus_dma_segment_t *vlist, off_t offset, void *dst, off_t len)
826 {
827 off_t seg_len;
828
829 while (offset >= vlist->ds_len) {
830 offset -= vlist->ds_len;
831 vlist++;
832 }
833
834 while (len != 0) {
835 seg_len = omin(len, vlist->ds_len - offset);
836 bcopy((void *)(uintptr_t)(vlist->ds_addr + offset), dst,
837 seg_len);
838 offset = 0;
839 dst = (uint8_t *)dst + seg_len;
840 len -= seg_len;
841 vlist++;
842 }
843 }
844
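/*
 * I/O routine for preloaded images: requests are satisfied by copying
 * directly to or from the wired image at sc->pl_ptr.
 */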
845 static int
846 mdstart_preload(struct md_s *sc, struct bio *bp)
847 {
848 uint8_t *p;
849
850 p = sc->pl_ptr + bp->bio_offset;
851 switch (bp->bio_cmd) {
852 case BIO_READ:
853 if ((bp->bio_flags & BIO_VLIST) != 0) {
854 mdcopyto_vlist(p, (bus_dma_segment_t *)bp->bio_data,
855 bp->bio_ma_offset, bp->bio_length);
856 } else {
857 bcopy(p, bp->bio_data, bp->bio_length);
858 }
859 cpu_flush_dcache(bp->bio_data, bp->bio_length);
860 break;
861 case BIO_WRITE:
862 if ((bp->bio_flags & BIO_VLIST) != 0) {
863 mdcopyfrom_vlist((bus_dma_segment_t *)bp->bio_data,
864 bp->bio_ma_offset, p, bp->bio_length);
865 } else {
866 bcopy(bp->bio_data, p, bp->bio_length);
867 }
868 break;
869 }
870 bp->bio_resid = 0;
871 return (0);
872 }
873
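/*
 * I/O routine for vnode-backed devices.  The bio is translated into a uio
 * and handed to VOP_READ()/VOP_WRITE() on the backing vnode; BIO_FLUSH maps
 * to VOP_FSYNC() and BIO_DELETE is emulated by writing from the zero region.
 * Unmapped bios are mapped through a pbuf, at most MAXPHYS bytes at a time.
 */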
874 static int
875 mdstart_vnode(struct md_s *sc, struct bio *bp)
876 {
877 int error;
878 struct uio auio;
879 struct iovec aiov;
880 struct iovec *piov;
881 struct mount *mp;
882 struct vnode *vp;
883 struct buf *pb;
884 bus_dma_segment_t *vlist;
885 struct thread *td;
886 off_t iolen, len, zerosize;
887 int ma_offs, npages;
888
889 switch (bp->bio_cmd) {
890 case BIO_READ:
891 auio.uio_rw = UIO_READ;
892 break;
893 case BIO_WRITE:
894 case BIO_DELETE:
895 auio.uio_rw = UIO_WRITE;
896 break;
897 case BIO_FLUSH:
898 break;
899 default:
900 return (EOPNOTSUPP);
901 }
902
903 td = curthread;
904 vp = sc->vnode;
905 pb = NULL;
906 piov = NULL;
907 ma_offs = bp->bio_ma_offset;
908 len = bp->bio_length;
909
910 /*
911 * VNODE I/O
912 *
913 * If an error occurs, we set BIO_ERROR but we do not set
914 * B_INVAL because (for a write anyway), the buffer is
915 * still valid.
916 */
917
918 if (bp->bio_cmd == BIO_FLUSH) {
919 (void) vn_start_write(vp, &mp, V_WAIT);
920 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
921 error = VOP_FSYNC(vp, MNT_WAIT, td);
922 VOP_UNLOCK(vp, 0);
923 vn_finished_write(mp);
924 return (error);
925 }
926
927 auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
928 auio.uio_resid = bp->bio_length;
929 auio.uio_segflg = UIO_SYSSPACE;
930 auio.uio_td = td;
931
932 if (bp->bio_cmd == BIO_DELETE) {
933 /*
934 * Emulate BIO_DELETE by writing zeros.
935 */
936 zerosize = ZERO_REGION_SIZE -
937 (ZERO_REGION_SIZE % sc->sectorsize);
938 auio.uio_iovcnt = howmany(bp->bio_length, zerosize);
939 piov = malloc(sizeof(*piov) * auio.uio_iovcnt, M_MD, M_WAITOK);
940 auio.uio_iov = piov;
941 while (len > 0) {
942 piov->iov_base = __DECONST(void *, zero_region);
943 piov->iov_len = len;
944 if (len > zerosize)
945 piov->iov_len = zerosize;
946 len -= piov->iov_len;
947 piov++;
948 }
949 piov = auio.uio_iov;
950 } else if ((bp->bio_flags & BIO_VLIST) != 0) {
951 piov = malloc(sizeof(*piov) * bp->bio_ma_n, M_MD, M_WAITOK);
952 auio.uio_iov = piov;
953 vlist = (bus_dma_segment_t *)bp->bio_data;
954 while (len > 0) {
955 piov->iov_base = (void *)(uintptr_t)(vlist->ds_addr +
956 ma_offs);
957 piov->iov_len = vlist->ds_len - ma_offs;
958 if (piov->iov_len > len)
959 piov->iov_len = len;
960 len -= piov->iov_len;
961 ma_offs = 0;
962 vlist++;
963 piov++;
964 }
965 auio.uio_iovcnt = piov - auio.uio_iov;
966 piov = auio.uio_iov;
967 } else if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
968 pb = getpbuf(&md_vnode_pbuf_freecnt);
969 bp->bio_resid = len;
970 unmapped_step:
971 npages = atop(min(MAXPHYS, round_page(len + (ma_offs &
972 PAGE_MASK))));
973 iolen = min(ptoa(npages) - (ma_offs & PAGE_MASK), len);
974 KASSERT(iolen > 0, ("zero iolen"));
975 pmap_qenter((vm_offset_t)pb->b_data,
976 &bp->bio_ma[atop(ma_offs)], npages);
977 aiov.iov_base = (void *)((vm_offset_t)pb->b_data +
978 (ma_offs & PAGE_MASK));
979 aiov.iov_len = iolen;
980 auio.uio_iov = &aiov;
981 auio.uio_iovcnt = 1;
982 auio.uio_resid = iolen;
983 } else {
984 aiov.iov_base = bp->bio_data;
985 aiov.iov_len = bp->bio_length;
986 auio.uio_iov = &aiov;
987 auio.uio_iovcnt = 1;
988 }
989 /*
990 * When reading, set IO_DIRECT to try to avoid double-caching
991 * the data. When writing, IO_DIRECT is not optimal.
992 */
993 if (auio.uio_rw == UIO_READ) {
994 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
995 error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
996 VOP_UNLOCK(vp, 0);
997 } else {
998 (void) vn_start_write(vp, &mp, V_WAIT);
999 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1000 error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
1001 sc->cred);
1002 VOP_UNLOCK(vp, 0);
1003 vn_finished_write(mp);
1004 if (error == 0)
1005 sc->flags &= ~MD_VERIFY;
1006 }
1007
1008 if (pb != NULL) {
1009 pmap_qremove((vm_offset_t)pb->b_data, npages);
1010 if (error == 0) {
1011 len -= iolen;
1012 bp->bio_resid -= iolen;
1013 ma_offs += iolen;
1014 if (len > 0)
1015 goto unmapped_step;
1016 }
1017 relpbuf(pb, &md_vnode_pbuf_freecnt);
1018 }
1019
1020 free(piov, M_MD);
1021 if (pb == NULL)
1022 bp->bio_resid = auio.uio_resid;
1023 return (error);
1024 }
1025
1026 static void
1027 md_swap_page_free(vm_page_t m)
1028 {
1029
1030 vm_page_xunbusy(m);
1031 vm_page_lock(m);
1032 vm_page_free(m);
1033 vm_page_unlock(m);
1034 }
1035
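/*
 * I/O routine for swap-backed devices.  Operates directly on the pages of
 * the OBJT_SWAP object: pages are grabbed (and paged in when a read or a
 * partial write needs the old contents), copied to or from the bio, dirtied
 * on write, and zeroed or freed on BIO_DELETE.
 */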
1036 static int
1037 mdstart_swap(struct md_s *sc, struct bio *bp)
1038 {
1039 vm_page_t m;
1040 u_char *p;
1041 vm_pindex_t i, lastp;
1042 bus_dma_segment_t *vlist;
1043 int rv, ma_offs, offs, len, lastend;
1044
1045 switch (bp->bio_cmd) {
1046 case BIO_READ:
1047 case BIO_WRITE:
1048 case BIO_DELETE:
1049 break;
1050 default:
1051 return (EOPNOTSUPP);
1052 }
1053
1054 p = bp->bio_data;
1055 ma_offs = (bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0 ?
1056 bp->bio_ma_offset : 0;
1057 vlist = (bp->bio_flags & BIO_VLIST) != 0 ?
1058 (bus_dma_segment_t *)bp->bio_data : NULL;
1059
1060 /*
1061 * offs is the offset at which to start operating on the
1062 * next (ie, first) page. lastp is the last page on
1063 * which we're going to operate. lastend is the ending
1064 * position within that last page (ie, PAGE_SIZE if
1065 * we're operating on complete aligned pages).
1066 */
1067 offs = bp->bio_offset % PAGE_SIZE;
1068 lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
1069 lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;
1070
1071 rv = VM_PAGER_OK;
1072 VM_OBJECT_WLOCK(sc->object);
1073 vm_object_pip_add(sc->object, 1);
1074 for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
1075 len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
1076 m = vm_page_grab(sc->object, i, VM_ALLOC_SYSTEM);
1077 if (bp->bio_cmd == BIO_READ) {
1078 if (m->valid == VM_PAGE_BITS_ALL)
1079 rv = VM_PAGER_OK;
1080 else
1081 rv = vm_pager_get_pages(sc->object, &m, 1,
1082 NULL, NULL);
1083 if (rv == VM_PAGER_ERROR) {
1084 md_swap_page_free(m);
1085 break;
1086 } else if (rv == VM_PAGER_FAIL) {
1087 /*
1088 * Pager does not have the page. Zero
1089 * the allocated page, and mark it as
1090 * valid. Do not set dirty, the page
1091 * can be recreated if thrown out.
1092 */
1093 pmap_zero_page(m);
1094 m->valid = VM_PAGE_BITS_ALL;
1095 }
1096 if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
1097 pmap_copy_pages(&m, offs, bp->bio_ma,
1098 ma_offs, len);
1099 } else if ((bp->bio_flags & BIO_VLIST) != 0) {
1100 physcopyout_vlist(VM_PAGE_TO_PHYS(m) + offs,
1101 vlist, ma_offs, len);
1102 cpu_flush_dcache(p, len);
1103 } else {
1104 physcopyout(VM_PAGE_TO_PHYS(m) + offs, p, len);
1105 cpu_flush_dcache(p, len);
1106 }
1107 } else if (bp->bio_cmd == BIO_WRITE) {
1108 if (len == PAGE_SIZE || m->valid == VM_PAGE_BITS_ALL)
1109 rv = VM_PAGER_OK;
1110 else
1111 rv = vm_pager_get_pages(sc->object, &m, 1,
1112 NULL, NULL);
1113 if (rv == VM_PAGER_ERROR) {
1114 md_swap_page_free(m);
1115 break;
1116 } else if (rv == VM_PAGER_FAIL)
1117 pmap_zero_page(m);
1118
1119 if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
1120 pmap_copy_pages(bp->bio_ma, ma_offs, &m,
1121 offs, len);
1122 } else if ((bp->bio_flags & BIO_VLIST) != 0) {
1123 physcopyin_vlist(vlist, ma_offs,
1124 VM_PAGE_TO_PHYS(m) + offs, len);
1125 } else {
1126 physcopyin(p, VM_PAGE_TO_PHYS(m) + offs, len);
1127 }
1128
1129 m->valid = VM_PAGE_BITS_ALL;
1130 if (m->dirty != VM_PAGE_BITS_ALL) {
1131 vm_page_dirty(m);
1132 vm_pager_page_unswapped(m);
1133 }
1134 } else if (bp->bio_cmd == BIO_DELETE) {
1135 if (len == PAGE_SIZE || m->valid == VM_PAGE_BITS_ALL)
1136 rv = VM_PAGER_OK;
1137 else
1138 rv = vm_pager_get_pages(sc->object, &m, 1,
1139 NULL, NULL);
1140 if (rv == VM_PAGER_ERROR) {
1141 md_swap_page_free(m);
1142 break;
1143 } else if (rv == VM_PAGER_FAIL) {
1144 md_swap_page_free(m);
1145 m = NULL;
1146 } else {
1147 /* Page is valid. */
1148 if (len != PAGE_SIZE) {
1149 pmap_zero_page_area(m, offs, len);
1150 if (m->dirty != VM_PAGE_BITS_ALL) {
1151 vm_page_dirty(m);
1152 vm_pager_page_unswapped(m);
1153 }
1154 } else {
1155 vm_pager_page_unswapped(m);
1156 md_swap_page_free(m);
1157 m = NULL;
1158 }
1159 }
1160 }
1161 if (m != NULL) {
1162 vm_page_xunbusy(m);
1163 vm_page_lock(m);
1164 if (vm_page_active(m))
1165 vm_page_reference(m);
1166 else
1167 vm_page_activate(m);
1168 vm_page_unlock(m);
1169 }
1170
1171 /* Actions on further pages start at offset 0 */
1172 p += PAGE_SIZE - offs;
1173 offs = 0;
1174 ma_offs += len;
1175 }
1176 vm_object_pip_wakeup(sc->object);
1177 VM_OBJECT_WUNLOCK(sc->object);
1178 return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
1179 }
1180
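/*
 * I/O routine for null devices: reads return zeroes and writes are accepted
 * but discarded.
 */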
1181 static int
1182 mdstart_null(struct md_s *sc, struct bio *bp)
1183 {
1184
1185 switch (bp->bio_cmd) {
1186 case BIO_READ:
1187 bzero(bp->bio_data, bp->bio_length);
1188 cpu_flush_dcache(bp->bio_data, bp->bio_length);
1189 break;
1190 case BIO_WRITE:
1191 break;
1192 }
1193 bp->bio_resid = 0;
1194 return (0);
1195 }
1196
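/*
 * Per-device worker thread.  Pulls bios off the queue, answers BIO_GETATTR
 * requests (firmware geometry, candelete, ident) directly, and passes
 * everything else to the backend's start routine before completing the bio.
 * Exits when mddestroy() sets MD_SHUTDOWN.
 */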
1197 static void
1198 md_kthread(void *arg)
1199 {
1200 struct md_s *sc;
1201 struct bio *bp;
1202 int error;
1203
1204 sc = arg;
1205 thread_lock(curthread);
1206 sched_prio(curthread, PRIBIO);
1207 thread_unlock(curthread);
1208 if (sc->type == MD_VNODE)
1209 curthread->td_pflags |= TDP_NORUNNINGBUF;
1210
1211 for (;;) {
1212 mtx_lock(&sc->queue_mtx);
1213 if (sc->flags & MD_SHUTDOWN) {
1214 sc->flags |= MD_EXITING;
1215 mtx_unlock(&sc->queue_mtx);
1216 kproc_exit(0);
1217 }
1218 bp = bioq_takefirst(&sc->bio_queue);
1219 if (!bp) {
1220 msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
1221 continue;
1222 }
1223 mtx_unlock(&sc->queue_mtx);
1224 if (bp->bio_cmd == BIO_GETATTR) {
1225 int isv = ((sc->flags & MD_VERIFY) != 0);
1226
1227 if ((sc->fwsectors && sc->fwheads &&
1228 (g_handleattr_int(bp, "GEOM::fwsectors",
1229 sc->fwsectors) ||
1230 g_handleattr_int(bp, "GEOM::fwheads",
1231 sc->fwheads))) ||
1232 g_handleattr_int(bp, "GEOM::candelete", 1))
1233 error = -1;
1234 else if (sc->ident[0] != '\0' &&
1235 g_handleattr_str(bp, "GEOM::ident", sc->ident))
1236 error = -1;
1237 else if (g_handleattr_int(bp, "MNT::verified", isv))
1238 error = -1;
1239 else
1240 error = EOPNOTSUPP;
1241 } else {
1242 error = sc->start(sc, bp);
1243 }
1244
1245 if (error != -1) {
1246 bp->bio_completed = bp->bio_length;
1247 if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
1248 devstat_end_transaction_bio(sc->devstat, bp);
1249 g_io_deliver(bp, error);
1250 }
1251 }
1252 }
1253
1254 static struct md_s *
1255 mdfind(int unit)
1256 {
1257 struct md_s *sc;
1258
1259 LIST_FOREACH(sc, &md_softc_list, list) {
1260 if (sc->unit == unit)
1261 break;
1262 }
1263 return (sc);
1264 }
1265
1266 static struct md_s *
1267 mdnew(int unit, int *errp, enum md_types type)
1268 {
1269 struct md_s *sc;
1270 int error;
1271
1272 *errp = 0;
1273 if (unit == -1)
1274 unit = alloc_unr(md_uh);
1275 else
1276 unit = alloc_unr_specific(md_uh, unit);
1277
1278 if (unit == -1) {
1279 *errp = EBUSY;
1280 return (NULL);
1281 }
1282
1283 sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
1284 sc->type = type;
1285 bioq_init(&sc->bio_queue);
1286 mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
1287 mtx_init(&sc->stat_mtx, "md stat", NULL, MTX_DEF);
1288 sc->unit = unit;
1289 sprintf(sc->name, "md%d", unit);
1290 LIST_INSERT_HEAD(&md_softc_list, sc, list);
1291 error = kproc_create(md_kthread, sc, &sc->procp, 0, 0,"%s", sc->name);
1292 if (error == 0)
1293 return (sc);
1294 LIST_REMOVE(sc, list);
1295 mtx_destroy(&sc->stat_mtx);
1296 mtx_destroy(&sc->queue_mtx);
1297 free_unr(md_uh, sc->unit);
1298 free(sc, M_MD);
1299 *errp = error;
1300 return (NULL);
1301 }
1302
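/*
 * Create the GEOM geom and provider for a configured device and register
 * its devstat entry.  Malloc, vnode and swap backed devices also accept
 * unmapped bios.
 */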
1303 static void
1304 mdinit(struct md_s *sc)
1305 {
1306 struct g_geom *gp;
1307 struct g_provider *pp;
1308
1309 g_topology_lock();
1310 gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
1311 gp->softc = sc;
1312 pp = g_new_providerf(gp, "md%d", sc->unit);
1313 pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
1314 pp->mediasize = sc->mediasize;
1315 pp->sectorsize = sc->sectorsize;
1316 switch (sc->type) {
1317 case MD_MALLOC:
1318 case MD_VNODE:
1319 case MD_SWAP:
1320 pp->flags |= G_PF_ACCEPT_UNMAPPED;
1321 break;
1322 case MD_PRELOAD:
1323 case MD_NULL:
1324 break;
1325 }
1326 sc->gp = gp;
1327 sc->pp = pp;
1328 g_error_provider(pp, 0);
1329 g_topology_unlock();
1330 sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
1331 DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
1332 }
1333
1334 static int
1335 mdcreate_malloc(struct md_s *sc, struct md_req *mdr)
1336 {
1337 uintptr_t sp;
1338 int error;
1339 off_t u;
1340
1341 error = 0;
1342 if (mdr->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
1343 return (EINVAL);
1344 if (mdr->md_sectorsize != 0 && !powerof2(mdr->md_sectorsize))
1345 return (EINVAL);
1346 /* Compression doesn't make sense if we have reserved space */
1347 if (mdr->md_options & MD_RESERVE)
1348 mdr->md_options &= ~MD_COMPRESS;
1349 if (mdr->md_fwsectors != 0)
1350 sc->fwsectors = mdr->md_fwsectors;
1351 if (mdr->md_fwheads != 0)
1352 sc->fwheads = mdr->md_fwheads;
1353 sc->flags = mdr->md_options & (MD_COMPRESS | MD_FORCE);
1354 sc->indir = dimension(sc->mediasize / sc->sectorsize);
1355 sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
1356 0x1ff, 0);
1357 if (mdr->md_options & MD_RESERVE) {
1358 off_t nsectors;
1359
1360 nsectors = sc->mediasize / sc->sectorsize;
1361 for (u = 0; u < nsectors; u++) {
1362 sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
1363 M_WAITOK : M_NOWAIT) | M_ZERO);
1364 if (sp != 0)
1365 error = s_write(sc->indir, u, sp);
1366 else
1367 error = ENOMEM;
1368 if (error != 0)
1369 break;
1370 }
1371 }
1372 return (error);
1373 }
1374
1375
1376 static int
1377 mdsetcred(struct md_s *sc, struct ucred *cred)
1378 {
1379 char *tmpbuf;
1380 int error = 0;
1381
1382 /*
1383 * Set credentials in our softc
1384 */
1385
1386 if (sc->cred)
1387 crfree(sc->cred);
1388 sc->cred = crhold(cred);
1389
1390 /*
1391 * Horrible kludge to establish credentials for NFS XXX.
1392 */
1393
1394 if (sc->vnode) {
1395 struct uio auio;
1396 struct iovec aiov;
1397
1398 tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
1399 bzero(&auio, sizeof(auio));
1400
1401 aiov.iov_base = tmpbuf;
1402 aiov.iov_len = sc->sectorsize;
1403 auio.uio_iov = &aiov;
1404 auio.uio_iovcnt = 1;
1405 auio.uio_offset = 0;
1406 auio.uio_rw = UIO_READ;
1407 auio.uio_segflg = UIO_SYSSPACE;
1408 auio.uio_resid = aiov.iov_len;
1409 vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
1410 error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
1411 VOP_UNLOCK(sc->vnode, 0);
1412 free(tmpbuf, M_TEMP);
1413 }
1414 return (error);
1415 }
1416
1417 static int
1418 mdcreate_vnode(struct md_s *sc, struct md_req *mdr, struct thread *td)
1419 {
1420 struct vattr vattr;
1421 struct nameidata nd;
1422 char *fname;
1423 int error, flags;
1424
1425 fname = mdr->md_file;
1426 if (mdr->md_file_seg == UIO_USERSPACE) {
1427 error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
1428 if (error != 0)
1429 return (error);
1430 } else if (mdr->md_file_seg == UIO_SYSSPACE)
1431 strlcpy(sc->file, fname, sizeof(sc->file));
1432 else
1433 return (EDOOFUS);
1434
1435 /*
1436 * If the user specified that this is a read only device, don't
1437 * set the FWRITE mask before trying to open the backing store.
1438 */
1439 flags = FREAD | ((mdr->md_options & MD_READONLY) ? 0 : FWRITE) \
1440 | ((mdr->md_options & MD_VERIFY) ? O_VERIFY : 0);
1441 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->file, td);
1442 error = vn_open(&nd, &flags, 0, NULL);
1443 if (error != 0)
1444 return (error);
1445 NDFREE(&nd, NDF_ONLY_PNBUF);
1446 if (nd.ni_vp->v_type != VREG) {
1447 error = EINVAL;
1448 goto bad;
1449 }
1450 error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
1451 if (error != 0)
1452 goto bad;
1453 if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
1454 vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
1455 if (nd.ni_vp->v_iflag & VI_DOOMED) {
1456 /* Forced unmount. */
1457 error = EBADF;
1458 goto bad;
1459 }
1460 }
1461 nd.ni_vp->v_vflag |= VV_MD;
1462 VOP_UNLOCK(nd.ni_vp, 0);
1463
1464 if (mdr->md_fwsectors != 0)
1465 sc->fwsectors = mdr->md_fwsectors;
1466 if (mdr->md_fwheads != 0)
1467 sc->fwheads = mdr->md_fwheads;
1468 snprintf(sc->ident, sizeof(sc->ident), "MD-DEV%ju-INO%ju",
1469 (uintmax_t)vattr.va_fsid, (uintmax_t)vattr.va_fileid);
1470 sc->flags = mdr->md_options & (MD_FORCE | MD_ASYNC | MD_VERIFY);
1471 if (!(flags & FWRITE))
1472 sc->flags |= MD_READONLY;
1473 sc->vnode = nd.ni_vp;
1474
1475 error = mdsetcred(sc, td->td_ucred);
1476 if (error != 0) {
1477 sc->vnode = NULL;
1478 vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
1479 nd.ni_vp->v_vflag &= ~VV_MD;
1480 goto bad;
1481 }
1482 return (0);
1483 bad:
1484 VOP_UNLOCK(nd.ni_vp, 0);
1485 (void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
1486 return (error);
1487 }
1488
1489 static void
1490 g_md_providergone(struct g_provider *pp)
1491 {
1492 struct md_s *sc = pp->geom->softc;
1493
1494 mtx_lock(&sc->queue_mtx);
1495 sc->flags |= MD_PROVIDERGONE;
1496 wakeup(&sc->flags);
1497 mtx_unlock(&sc->queue_mtx);
1498 }
1499
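/*
 * Tear down a device: wither its geom and wait for the provider to go away,
 * tell the worker thread to exit, then release whatever backs the device
 * (vnode, swap object or indirect tree) along with the unit number.
 */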
1500 static int
1501 mddestroy(struct md_s *sc, struct thread *td)
1502 {
1503
1504 if (sc->gp) {
1505 g_topology_lock();
1506 g_wither_geom(sc->gp, ENXIO);
1507 g_topology_unlock();
1508
1509 mtx_lock(&sc->queue_mtx);
1510 while (!(sc->flags & MD_PROVIDERGONE))
1511 msleep(&sc->flags, &sc->queue_mtx, PRIBIO, "mddestroy", 0);
1512 mtx_unlock(&sc->queue_mtx);
1513 }
1514 if (sc->devstat) {
1515 devstat_remove_entry(sc->devstat);
1516 sc->devstat = NULL;
1517 }
1518 mtx_lock(&sc->queue_mtx);
1519 sc->flags |= MD_SHUTDOWN;
1520 wakeup(sc);
1521 while (!(sc->flags & MD_EXITING))
1522 msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
1523 mtx_unlock(&sc->queue_mtx);
1524 mtx_destroy(&sc->stat_mtx);
1525 mtx_destroy(&sc->queue_mtx);
1526 if (sc->vnode != NULL) {
1527 vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
1528 sc->vnode->v_vflag &= ~VV_MD;
1529 VOP_UNLOCK(sc->vnode, 0);
1530 (void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
1531 FREAD : (FREAD|FWRITE), sc->cred, td);
1532 }
1533 if (sc->cred != NULL)
1534 crfree(sc->cred);
1535 if (sc->object != NULL)
1536 vm_object_deallocate(sc->object);
1537 if (sc->indir)
1538 destroy_indir(sc, sc->indir);
1539 if (sc->uma)
1540 uma_zdestroy(sc->uma);
1541
1542 LIST_REMOVE(sc, list);
1543 free_unr(md_uh, sc->unit);
1544 free(sc, M_MD);
1545 return (0);
1546 }
1547
1548 static int
1549 mdresize(struct md_s *sc, struct md_req *mdr)
1550 {
1551 int error, res;
1552 vm_pindex_t oldpages, newpages;
1553
1554 switch (sc->type) {
1555 case MD_VNODE:
1556 case MD_NULL:
1557 break;
1558 case MD_SWAP:
1559 if (mdr->md_mediasize <= 0 ||
1560 (mdr->md_mediasize % PAGE_SIZE) != 0)
1561 return (EDOM);
1562 oldpages = OFF_TO_IDX(round_page(sc->mediasize));
1563 newpages = OFF_TO_IDX(round_page(mdr->md_mediasize));
1564 if (newpages < oldpages) {
1565 VM_OBJECT_WLOCK(sc->object);
1566 vm_object_page_remove(sc->object, newpages, 0, 0);
1567 swap_pager_freespace(sc->object, newpages,
1568 oldpages - newpages);
1569 swap_release_by_cred(IDX_TO_OFF(oldpages -
1570 newpages), sc->cred);
1571 sc->object->charge = IDX_TO_OFF(newpages);
1572 sc->object->size = newpages;
1573 VM_OBJECT_WUNLOCK(sc->object);
1574 } else if (newpages > oldpages) {
1575 res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
1576 oldpages), sc->cred);
1577 if (!res)
1578 return (ENOMEM);
1579 if ((mdr->md_options & MD_RESERVE) ||
1580 (sc->flags & MD_RESERVE)) {
1581 error = swap_pager_reserve(sc->object,
1582 oldpages, newpages - oldpages);
1583 if (error < 0) {
1584 swap_release_by_cred(
1585 IDX_TO_OFF(newpages - oldpages),
1586 sc->cred);
1587 return (EDOM);
1588 }
1589 }
1590 VM_OBJECT_WLOCK(sc->object);
1591 sc->object->charge = IDX_TO_OFF(newpages);
1592 sc->object->size = newpages;
1593 VM_OBJECT_WUNLOCK(sc->object);
1594 }
1595 break;
1596 default:
1597 return (EOPNOTSUPP);
1598 }
1599
1600 sc->mediasize = mdr->md_mediasize;
1601 g_topology_lock();
1602 g_resize_provider(sc->pp, sc->mediasize);
1603 g_topology_unlock();
1604 return (0);
1605 }
1606
1607 static int
1608 mdcreate_swap(struct md_s *sc, struct md_req *mdr, struct thread *td)
1609 {
1610 vm_ooffset_t npage;
1611 int error;
1612
1613 /*
1614 * Range check. Disallow negative sizes and sizes that are not a
1615 * multiple of the page size.
1616 */
1617 if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
1618 return (EDOM);
1619
1620 /*
1621 * Allocate an OBJT_SWAP object.
1622 *
1623 * Note the truncation.
1624 */
1625
1626 if ((mdr->md_options & MD_VERIFY) != 0)
1627 return (EINVAL);
1628 npage = mdr->md_mediasize / PAGE_SIZE;
1629 if (mdr->md_fwsectors != 0)
1630 sc->fwsectors = mdr->md_fwsectors;
1631 if (mdr->md_fwheads != 0)
1632 sc->fwheads = mdr->md_fwheads;
1633 sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
1634 VM_PROT_DEFAULT, 0, td->td_ucred);
1635 if (sc->object == NULL)
1636 return (ENOMEM);
1637 sc->flags = mdr->md_options & (MD_FORCE | MD_RESERVE);
1638 if (mdr->md_options & MD_RESERVE) {
1639 if (swap_pager_reserve(sc->object, 0, npage) < 0) {
1640 error = EDOM;
1641 goto finish;
1642 }
1643 }
1644 error = mdsetcred(sc, td->td_ucred);
1645 finish:
1646 if (error != 0) {
1647 vm_object_deallocate(sc->object);
1648 sc->object = NULL;
1649 }
1650 return (error);
1651 }
1652
1653 static int
1654 mdcreate_null(struct md_s *sc, struct md_req *mdr, struct thread *td)
1655 {
1656
1657 /*
1658 * Range check. Disallow negative sizes and sizes that are not a
1659 * multiple of the page size.
1660 */
1661 if (sc->mediasize <= 0 || (sc->mediasize % PAGE_SIZE) != 0)
1662 return (EDOM);
1663
1664 return (0);
1665 }
1666
1667 static int
1668 kern_mdattach_locked(struct thread *td, struct md_req *mdr)
1669 {
1670 struct md_s *sc;
1671 unsigned sectsize;
1672 int error, i;
1673
1674 sx_assert(&md_sx, SA_XLOCKED);
1675
1676 switch (mdr->md_type) {
1677 case MD_MALLOC:
1678 case MD_PRELOAD:
1679 case MD_VNODE:
1680 case MD_SWAP:
1681 case MD_NULL:
1682 break;
1683 default:
1684 return (EINVAL);
1685 }
1686 if (mdr->md_sectorsize == 0)
1687 sectsize = DEV_BSIZE;
1688 else
1689 sectsize = mdr->md_sectorsize;
1690 if (sectsize > MAXPHYS || mdr->md_mediasize < sectsize)
1691 return (EINVAL);
1692 if (mdr->md_options & MD_AUTOUNIT)
1693 sc = mdnew(-1, &error, mdr->md_type);
1694 else {
1695 if (mdr->md_unit > INT_MAX)
1696 return (EINVAL);
1697 sc = mdnew(mdr->md_unit, &error, mdr->md_type);
1698 }
1699 if (sc == NULL)
1700 return (error);
1701 if (mdr->md_label != NULL)
1702 error = copyinstr(mdr->md_label, sc->label,
1703 sizeof(sc->label), NULL);
1704 if (error != 0)
1705 goto err_after_new;
1706 if (mdr->md_options & MD_AUTOUNIT)
1707 mdr->md_unit = sc->unit;
1708 sc->mediasize = mdr->md_mediasize;
1709 sc->sectorsize = sectsize;
1710 error = EDOOFUS;
1711 switch (sc->type) {
1712 case MD_MALLOC:
1713 sc->start = mdstart_malloc;
1714 error = mdcreate_malloc(sc, mdr);
1715 break;
1716 case MD_PRELOAD:
1717 /*
1718 * We disallow attaching preloaded memory disks via
1719 * ioctl. Preloaded memory disks are automatically
1720 * attached in g_md_init().
1721 */
1722 error = EOPNOTSUPP;
1723 break;
1724 case MD_VNODE:
1725 sc->start = mdstart_vnode;
1726 error = mdcreate_vnode(sc, mdr, td);
1727 break;
1728 case MD_SWAP:
1729 sc->start = mdstart_swap;
1730 error = mdcreate_swap(sc, mdr, td);
1731 break;
1732 case MD_NULL:
1733 sc->start = mdstart_null;
1734 error = mdcreate_null(sc, mdr, td);
1735 break;
1736 }
1737 err_after_new:
1738 if (error != 0) {
1739 mddestroy(sc, td);
1740 return (error);
1741 }
1742
1743 /* Prune off any residual fractional sector */
1744 i = sc->mediasize % sc->sectorsize;
1745 sc->mediasize -= i;
1746
1747 mdinit(sc);
1748 return (0);
1749 }
1750
1751 static int
1752 kern_mdattach(struct thread *td, struct md_req *mdr)
1753 {
1754 int error;
1755
1756 sx_xlock(&md_sx);
1757 error = kern_mdattach_locked(td, mdr);
1758 sx_xunlock(&md_sx);
1759 return (error);
1760 }
1761
1762 static int
1763 kern_mddetach_locked(struct thread *td, struct md_req *mdr)
1764 {
1765 struct md_s *sc;
1766
1767 sx_assert(&md_sx, SA_XLOCKED);
1768
1769 if (mdr->md_mediasize != 0 ||
1770 (mdr->md_options & ~MD_FORCE) != 0)
1771 return (EINVAL);
1772
1773 sc = mdfind(mdr->md_unit);
1774 if (sc == NULL)
1775 return (ENOENT);
1776 if (sc->opencount != 0 && !(sc->flags & MD_FORCE) &&
1777 !(mdr->md_options & MD_FORCE))
1778 return (EBUSY);
1779 return (mddestroy(sc, td));
1780 }
1781
1782 static int
1783 kern_mddetach(struct thread *td, struct md_req *mdr)
1784 {
1785 int error;
1786
1787 sx_xlock(&md_sx);
1788 error = kern_mddetach_locked(td, mdr);
1789 sx_xunlock(&md_sx);
1790 return (error);
1791 }
1792
1793 static int
1794 kern_mdresize_locked(struct md_req *mdr)
1795 {
1796 struct md_s *sc;
1797
1798 sx_assert(&md_sx, SA_XLOCKED);
1799
1800 if ((mdr->md_options & ~(MD_FORCE | MD_RESERVE)) != 0)
1801 return (EINVAL);
1802
1803 sc = mdfind(mdr->md_unit);
1804 if (sc == NULL)
1805 return (ENOENT);
1806 if (mdr->md_mediasize < sc->sectorsize)
1807 return (EINVAL);
1808 if (mdr->md_mediasize < sc->mediasize &&
1809 !(sc->flags & MD_FORCE) &&
1810 !(mdr->md_options & MD_FORCE))
1811 return (EBUSY);
1812 return (mdresize(sc, mdr));
1813 }
1814
1815 static int
1816 kern_mdresize(struct md_req *mdr)
1817 {
1818 int error;
1819
1820 sx_xlock(&md_sx);
1821 error = kern_mdresize_locked(mdr);
1822 sx_xunlock(&md_sx);
1823 return (error);
1824 }
1825
1826 static int
1827 kern_mdquery_locked(struct md_req *mdr)
1828 {
1829 struct md_s *sc;
1830 int error;
1831
1832 sx_assert(&md_sx, SA_XLOCKED);
1833
1834 sc = mdfind(mdr->md_unit);
1835 if (sc == NULL)
1836 return (ENOENT);
1837 mdr->md_type = sc->type;
1838 mdr->md_options = sc->flags;
1839 mdr->md_mediasize = sc->mediasize;
1840 mdr->md_sectorsize = sc->sectorsize;
1841 error = 0;
1842 if (mdr->md_label != NULL) {
1843 error = copyout(sc->label, mdr->md_label,
1844 strlen(sc->label) + 1);
1845 if (error != 0)
1846 return (error);
1847 }
1848 if (sc->type == MD_VNODE ||
1849 (sc->type == MD_PRELOAD && mdr->md_file != NULL))
1850 error = copyout(sc->file, mdr->md_file,
1851 strlen(sc->file) + 1);
1852 return (error);
1853 }
1854
1855 static int
1856 kern_mdquery(struct md_req *mdr)
1857 {
1858 int error;
1859
1860 sx_xlock(&md_sx);
1861 error = kern_mdquery_locked(mdr);
1862 sx_xunlock(&md_sx);
1863 return (error);
1864 }
1865
1866 static int
1867 kern_mdlist_locked(struct md_req *mdr)
1868 {
1869 struct md_s *sc;
1870 int i;
1871
1872 sx_assert(&md_sx, SA_XLOCKED);
1873
1874 /*
1875 * Write the number of md devices to mdr->md_units[0].
1876 * Write the unit number of the first (mdr->md_units_nitems - 2)
1877 * units to mdr->md_units[1::(mdr->md_units_nitems - 2)] and terminate the
1878 * list with -1.
1879 *
1880 * XXX: There is currently no mechanism to retrieve unit
1881 * numbers for more than (MDNPAD - 2) units.
1882 *
1883 * XXX: Due to the use of LIST_INSERT_HEAD in mdnew(), the
1884 * list of visible unit numbers is not stable.
1885 */
1886 i = 1;
1887 LIST_FOREACH(sc, &md_softc_list, list) {
1888 if (i < mdr->md_units_nitems - 1)
1889 mdr->md_units[i] = sc->unit;
1890 i++;
1891 }
1892 mdr->md_units[MIN(i, mdr->md_units_nitems - 1)] = -1;
1893 mdr->md_units[0] = i - 1;
1894 return (0);
1895 }
1896
1897 static int
1898 kern_mdlist(struct md_req *mdr)
1899 {
1900 int error;
1901
1902 sx_xlock(&md_sx);
1903 error = kern_mdlist_locked(mdr);
1904 sx_xunlock(&md_sx);
1905 return (error);
1906 }
1907
1908 /* Copy members that are not userspace pointers. */
1909 #define MD_IOCTL2REQ(mdio, mdr) do { \
1910 (mdr)->md_unit = (mdio)->md_unit; \
1911 (mdr)->md_type = (mdio)->md_type; \
1912 (mdr)->md_mediasize = (mdio)->md_mediasize; \
1913 (mdr)->md_sectorsize = (mdio)->md_sectorsize; \
1914 (mdr)->md_options = (mdio)->md_options; \
1915 (mdr)->md_fwheads = (mdio)->md_fwheads; \
1916 (mdr)->md_fwsectors = (mdio)->md_fwsectors; \
1917 (mdr)->md_units = &(mdio)->md_pad[0]; \
1918 (mdr)->md_units_nitems = nitems((mdio)->md_pad); \
1919 } while(0)
1920
1921 /* Copy members that might have been updated */
1922 #define MD_REQ2IOCTL(mdr, mdio) do { \
1923 (mdio)->md_unit = (mdr)->md_unit; \
1924 (mdio)->md_type = (mdr)->md_type; \
1925 (mdio)->md_mediasize = (mdr)->md_mediasize; \
1926 (mdio)->md_sectorsize = (mdr)->md_sectorsize; \
1927 (mdio)->md_options = (mdr)->md_options; \
1928 (mdio)->md_fwheads = (mdr)->md_fwheads; \
1929 (mdio)->md_fwsectors = (mdr)->md_fwsectors; \
1930 } while(0)
1931
1932 static int
1933 mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
1934 struct thread *td)
1935 {
1936 struct md_req mdr;
1937 int error;
1938
1939 if (md_debug)
1940 printf("mdctlioctl(%s %lx %p %x %p)\n",
1941 devtoname(dev), cmd, addr, flags, td);
1942
1943 bzero(&mdr, sizeof(mdr));
1944 switch (cmd) {
1945 case MDIOCATTACH:
1946 case MDIOCDETACH:
1947 case MDIOCRESIZE:
1948 case MDIOCQUERY:
1949 case MDIOCLIST: {
1950 struct md_ioctl *mdio = (struct md_ioctl *)addr;
1951 if (mdio->md_version != MDIOVERSION)
1952 return (EINVAL);
1953 MD_IOCTL2REQ(mdio, &mdr);
1954 mdr.md_file = mdio->md_file;
1955 mdr.md_file_seg = UIO_USERSPACE;
1956 /* If the file is adjacent to the md_ioctl it's in kernel memory. */
1957 if ((void *)mdio->md_file == (void *)(mdio + 1))
1958 mdr.md_file_seg = UIO_SYSSPACE;
1959 mdr.md_label = mdio->md_label;
1960 break;
1961 }
1962 #ifdef COMPAT_FREEBSD32
1963 case MDIOCATTACH_32:
1964 case MDIOCDETACH_32:
1965 case MDIOCRESIZE_32:
1966 case MDIOCQUERY_32:
1967 case MDIOCLIST_32: {
1968 struct md_ioctl32 *mdio = (struct md_ioctl32 *)addr;
1969 if (mdio->md_version != MDIOVERSION)
1970 return (EINVAL);
1971 MD_IOCTL2REQ(mdio, &mdr);
1972 mdr.md_file = (void *)(uintptr_t)mdio->md_file;
1973 mdr.md_file_seg = UIO_USERSPACE;
1974 mdr.md_label = (void *)(uintptr_t)mdio->md_label;
1975 break;
1976 }
1977 #endif
1978 default:
1979 /* Fall through to handler switch. */
1980 break;
1981 }
1982
1983 error = 0;
1984 switch (cmd) {
1985 case MDIOCATTACH:
1986 #ifdef COMPAT_FREEBSD32
1987 case MDIOCATTACH_32:
1988 #endif
1989 error = kern_mdattach(td, &mdr);
1990 break;
1991 case MDIOCDETACH:
1992 #ifdef COMPAT_FREEBSD32
1993 case MDIOCDETACH_32:
1994 #endif
1995 error = kern_mddetach(td, &mdr);
1996 break;
1997 case MDIOCRESIZE:
1998 #ifdef COMPAT_FREEBSD32
1999 case MDIOCRESIZE_32:
2000 #endif
2001 error = kern_mdresize(&mdr);
2002 break;
2003 case MDIOCQUERY:
2004 #ifdef COMPAT_FREEBSD32
2005 case MDIOCQUERY_32:
2006 #endif
2007 error = kern_mdquery(&mdr);
2008 break;
2009 case MDIOCLIST:
2010 #ifdef COMPAT_FREEBSD32
2011 case MDIOCLIST_32:
2012 #endif
2013 error = kern_mdlist(&mdr);
2014 break;
2015 default:
2016 error = ENOIOCTL;
2017 }
2018
2019 switch (cmd) {
2020 case MDIOCATTACH:
2021 case MDIOCQUERY: {
2022 struct md_ioctl *mdio = (struct md_ioctl *)addr;
2023 MD_REQ2IOCTL(&mdr, mdio);
2024 break;
2025 }
2026 #ifdef COMPAT_FREEBSD32
2027 case MDIOCATTACH_32:
2028 case MDIOCQUERY_32: {
2029 struct md_ioctl32 *mdio = (struct md_ioctl32 *)addr;
2030 MD_REQ2IOCTL(&mdr, mdio);
2031 break;
2032 }
2033 #endif
2034 default:
2035 /* Other commands do not alter mdr. */
2036 break;
2037 }
2038
2039 return (error);
2040 }
2041
2042 static void
2043 md_preloaded(u_char *image, size_t length, const char *name)
2044 {
2045 struct md_s *sc;
2046 int error;
2047
2048 sc = mdnew(-1, &error, MD_PRELOAD);
2049 if (sc == NULL)
2050 return;
2051 sc->mediasize = length;
2052 sc->sectorsize = DEV_BSIZE;
2053 sc->pl_ptr = image;
2054 sc->pl_len = length;
2055 sc->start = mdstart_preload;
2056 if (name != NULL)
2057 strlcpy(sc->file, name, sizeof(sc->file));
2058 #ifdef MD_ROOT
2059 if (sc->unit == 0) {
2060 #ifndef ROOTDEVNAME
2061 rootdevnames[0] = MD_ROOT_FSTYPE ":/dev/md0";
2062 #endif
2063 #ifdef MD_ROOT_READONLY
2064 sc->flags |= MD_READONLY;
2065 #endif
2066 }
2067 #endif
2068 mdinit(sc);
2069 if (name != NULL) {
2070 printf("%s%d: Preloaded image <%s> %zd bytes at %p\n",
2071 MD_NAME, sc->unit, name, length, image);
2072 } else {
2073 printf("%s%d: Embedded image %zd bytes at %p\n",
2074 MD_NAME, sc->unit, length, image);
2075 }
2076 }
2077
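/*
 * GEOM class initialization: attach the compiled-in root image (MD_ROOT)
 * and any loader-supplied "md_image"/"mfs_root" modules as preloaded
 * devices, then create the /dev/mdctl control device.
 */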
2078 static void
2079 g_md_init(struct g_class *mp __unused)
2080 {
2081 caddr_t mod;
2082 u_char *ptr, *name, *type;
2083 unsigned len;
2084 int i;
2085
2086 /* figure out log2(NINDIR) */
2087 for (i = NINDIR, nshift = -1; i; nshift++)
2088 i >>= 1;
2089
2090 mod = NULL;
2091 sx_init(&md_sx, "MD config lock");
2092 g_topology_unlock();
2093 md_uh = new_unrhdr(0, INT_MAX, NULL);
2094 #ifdef MD_ROOT
2095 if (mfs_root_size != 0) {
2096 sx_xlock(&md_sx);
2097 #ifdef MD_ROOT_MEM
2098 md_preloaded(mfs_root, mfs_root_size, NULL);
2099 #else
2100 md_preloaded(__DEVOLATILE(u_char *, &mfs_root), mfs_root_size,
2101 NULL);
2102 #endif
2103 sx_xunlock(&md_sx);
2104 }
2105 #endif
2106 /* XXX: are preload_* static or do they need Giant ? */
2107 while ((mod = preload_search_next_name(mod)) != NULL) {
2108 name = (char *)preload_search_info(mod, MODINFO_NAME);
2109 if (name == NULL)
2110 continue;
2111 type = (char *)preload_search_info(mod, MODINFO_TYPE);
2112 if (type == NULL)
2113 continue;
2114 if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
2115 continue;
2116 ptr = preload_fetch_addr(mod);
2117 len = preload_fetch_size(mod);
2118 if (ptr != NULL && len != 0) {
2119 sx_xlock(&md_sx);
2120 md_preloaded(ptr, len, name);
2121 sx_xunlock(&md_sx);
2122 }
2123 }
2124 md_vnode_pbuf_freecnt = nswbuf / 10;
2125 status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
2126 0600, MDCTL_NAME);
2127 g_topology_lock();
2128 }
2129
2130 static void
2131 g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
2132 struct g_consumer *cp __unused, struct g_provider *pp)
2133 {
2134 struct md_s *mp;
2135 char *type;
2136
2137 mp = gp->softc;
2138 if (mp == NULL)
2139 return;
2140
2141 switch (mp->type) {
2142 case MD_MALLOC:
2143 type = "malloc";
2144 break;
2145 case MD_PRELOAD:
2146 type = "preload";
2147 break;
2148 case MD_VNODE:
2149 type = "vnode";
2150 break;
2151 case MD_SWAP:
2152 type = "swap";
2153 break;
2154 case MD_NULL:
2155 type = "null";
2156 break;
2157 default:
2158 type = "unknown";
2159 break;
2160 }
2161
2162 if (pp != NULL) {
2163 if (indent == NULL) {
2164 sbuf_printf(sb, " u %d", mp->unit);
2165 sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
2166 sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
2167 sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
2168 sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
2169 sbuf_printf(sb, " t %s", type);
2170 if ((mp->type == MD_VNODE && mp->vnode != NULL) ||
2171 (mp->type == MD_PRELOAD && mp->file[0] != '\0'))
2172 sbuf_printf(sb, " file %s", mp->file);
2173 sbuf_printf(sb, " label %s", mp->label);
2174 } else {
2175 sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
2176 mp->unit);
2177 sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
2178 indent, (uintmax_t) mp->sectorsize);
2179 sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
2180 indent, (uintmax_t) mp->fwheads);
2181 sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
2182 indent, (uintmax_t) mp->fwsectors);
2183 if (mp->ident[0] != '\0') {
2184 sbuf_printf(sb, "%s<ident>", indent);
2185 g_conf_printf_escaped(sb, "%s", mp->ident);
2186 sbuf_printf(sb, "</ident>\n");
2187 }
2188 sbuf_printf(sb, "%s<length>%ju</length>\n",
2189 indent, (uintmax_t) mp->mediasize);
2190 sbuf_printf(sb, "%s<compression>%s</compression>\n", indent,
2191 (mp->flags & MD_COMPRESS) == 0 ? "off": "on");
2192 sbuf_printf(sb, "%s<access>%s</access>\n", indent,
2193 (mp->flags & MD_READONLY) == 0 ? "read-write":
2194 "read-only");
2195 sbuf_printf(sb, "%s<type>%s</type>\n", indent,
2196 type);
2197 if ((mp->type == MD_VNODE && mp->vnode != NULL) ||
2198 (mp->type == MD_PRELOAD && mp->file[0] != '\0')) {
2199 sbuf_printf(sb, "%s<file>", indent);
2200 g_conf_printf_escaped(sb, "%s", mp->file);
2201 sbuf_printf(sb, "</file>\n");
2202 }
2203 sbuf_printf(sb, "%s<label>", indent);
2204 g_conf_printf_escaped(sb, "%s", mp->label);
2205 sbuf_printf(sb, "</label>\n");
2206 }
2207 }
2208 }
2209
2210 static void
2211 g_md_fini(struct g_class *mp __unused)
2212 {
2213
2214 sx_destroy(&md_sx);
2215 if (status_dev != NULL)
2216 destroy_dev(status_dev);
2217 delete_unrhdr(md_uh);
2218 }
2219