/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003, 2008 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2014-2017 Alexander Motin <[email protected]>
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
 */
/*
 * CAM Target Layer black hole and RAM disk backend.
 *
 * Author: Ken Merry <[email protected]>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/condvar.h>
#include <sys/types.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/nv.h>
#include <sys/dnv.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_error.h>

#define	PRIV(io)	\
    ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND])
#define	ARGS(io)	\
    ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN])

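/*
 * Geometry of the indirection tree: PPP is the number of page pointers that
 * fit into one PAGE_SIZE indirection page, PPPS is log2(PPP), and SGPP is
 * the number of scatter/gather entries that fit into one page.
 */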
#define	PPP	(PAGE_SIZE / sizeof(uint8_t **))
#ifdef __LP64__
#define	PPPS	(PAGE_SHIFT - 3)
#else
#define	PPPS	(PAGE_SHIFT - 2)
#endif
#define	SGPP	(PAGE_SIZE / sizeof(struct ctl_sg_entry))

#define	P_UNMAPPED	NULL			/* Page is unmapped. */
#define	P_ANCHORED	((void *)(uintptr_t)1)	/* Page is anchored. */

typedef enum {
	GP_READ,	/* Return data page or zero page. */
	GP_WRITE,	/* Return data page, try to allocate one if none. */
	GP_ANCHOR,	/* Return data page, try to anchor one if none. */
	GP_OTHER,	/* Return whatever is present; do not allocate/anchor. */
} getpage_op_t;

typedef enum {
	CTL_BE_RAMDISK_LUN_UNCONFIGURED	= 0x01,
	CTL_BE_RAMDISK_LUN_WAITING	= 0x04
} ctl_be_ramdisk_lun_flags;

struct ctl_be_ramdisk_lun {
	struct ctl_be_lun	cbe_lun;	/* Must be first element. */
	struct ctl_lun_create_params params;
	int			indir;
	uint8_t			**pages;
	uint8_t			*zero_page;
	struct sx		page_lock;
	u_int			pblocksize;
	u_int			pblockmul;
	uint64_t		size_bytes;
	uint64_t		size_blocks;
	uint64_t		cap_bytes;
	uint64_t		cap_used;
	struct ctl_be_ramdisk_softc *softc;
	ctl_be_ramdisk_lun_flags flags;
	SLIST_ENTRY(ctl_be_ramdisk_lun) links;
	struct taskqueue	*io_taskqueue;
	struct task		io_task;
	STAILQ_HEAD(, ctl_io_hdr) cont_queue;
	struct mtx_padalign	queue_lock;
};

struct ctl_be_ramdisk_softc {
	struct sx modify_lock;
	struct mtx lock;
	int num_luns;
	SLIST_HEAD(, ctl_be_ramdisk_lun) lun_list;
};

static struct ctl_be_ramdisk_softc rd_softc;
extern struct ctl_softc *control_softc;

static int ctl_backend_ramdisk_init(void);
static int ctl_backend_ramdisk_shutdown(void);
static int ctl_backend_ramdisk_move_done(union ctl_io *io, bool samethr);
static void ctl_backend_ramdisk_compare(union ctl_io *io);
static void ctl_backend_ramdisk_rw(union ctl_io *io);
static int ctl_backend_ramdisk_submit(union ctl_io *io);
static void ctl_backend_ramdisk_worker(void *context, int pending);
static int ctl_backend_ramdisk_config_read(union ctl_io *io);
static int ctl_backend_ramdisk_config_write(union ctl_io *io);
static uint64_t ctl_backend_ramdisk_lun_attr(struct ctl_be_lun *cbe_lun,
					     const char *attrname);
static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
				     caddr_t addr, int flag, struct thread *td);
static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
				  struct ctl_lun_req *req);
static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
				      struct ctl_lun_req *req);
static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
				      struct ctl_lun_req *req);
static void ctl_backend_ramdisk_lun_shutdown(struct ctl_be_lun *cbe_lun);

static struct ctl_backend_driver ctl_be_ramdisk_driver =
{
	.name = "ramdisk",
	.flags = CTL_BE_FLAG_HAS_CONFIG,
	.init = ctl_backend_ramdisk_init,
	.shutdown = ctl_backend_ramdisk_shutdown,
	.data_submit = ctl_backend_ramdisk_submit,
	.config_read = ctl_backend_ramdisk_config_read,
	.config_write = ctl_backend_ramdisk_config_write,
	.ioctl = ctl_backend_ramdisk_ioctl,
	.lun_attr = ctl_backend_ramdisk_lun_attr,
};

MALLOC_DEFINE(M_RAMDISK, "ctlramdisk", "Memory used for CTL RAMdisk");
CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);

static int
ctl_backend_ramdisk_init(void)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;

	memset(softc, 0, sizeof(*softc));
	sx_init(&softc->modify_lock, "ctlrammod");
	mtx_init(&softc->lock, "ctlram", NULL, MTX_DEF);
	SLIST_INIT(&softc->lun_list);
	return (0);
}

static int
ctl_backend_ramdisk_shutdown(void)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;
	struct ctl_be_ramdisk_lun *lun;

	mtx_lock(&softc->lock);
	while ((lun = SLIST_FIRST(&softc->lun_list)) != NULL) {
		SLIST_REMOVE_HEAD(&softc->lun_list, links);
		softc->num_luns--;
		/*
		 * Drop our lock here.  Since ctl_remove_lun() can call
		 * back into us, this could potentially lead to a recursive
		 * lock of the same mutex, which would cause a hang.
		 */
		mtx_unlock(&softc->lock);
		ctl_remove_lun(&lun->cbe_lun);
		mtx_lock(&softc->lock);
	}
	mtx_unlock(&softc->lock);
	mtx_destroy(&softc->lock);
	sx_destroy(&softc->modify_lock);
	return (0);
}

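/*
 * Look up the backing page for page number "pn", walking the tree of
 * indirection pages.  Depending on "op", a missing page may be allocated
 * (GP_WRITE), anchored (GP_ANCHOR), substituted with the shared zero page
 * (GP_READ) or simply reported as P_UNMAPPED/P_ANCHORED (GP_OTHER).  For
 * capacity-less LUNs (cap_bytes == 0) a single shared page backs all writes.
 */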
static uint8_t *
ctl_backend_ramdisk_getpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn,
    getpage_op_t op)
{
	uint8_t **p, ***pp;
	off_t i;
	int s;

	if (be_lun->cap_bytes == 0) {
		switch (op) {
		case GP_READ:
			return (be_lun->zero_page);
		case GP_WRITE:
			return ((uint8_t *)be_lun->pages);
		case GP_ANCHOR:
			return (P_ANCHORED);
		default:
			return (P_UNMAPPED);
		}
	}
	if (op == GP_WRITE || op == GP_ANCHOR) {
		sx_xlock(&be_lun->page_lock);
		pp = &be_lun->pages;
		for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
			if (*pp == NULL) {
				*pp = malloc(PAGE_SIZE, M_RAMDISK,
				    M_WAITOK|M_ZERO);
			}
			i = pn >> s;
			pp = (uint8_t ***)&(*pp)[i];
			pn -= i << s;
		}
		if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) {
			if (op == GP_WRITE) {
				*pp = malloc(be_lun->pblocksize, M_RAMDISK,
				    M_WAITOK|M_ZERO);
			} else
				*pp = P_ANCHORED;
			be_lun->cap_used += be_lun->pblocksize;
		} else if (*pp == P_ANCHORED && op == GP_WRITE) {
			*pp = malloc(be_lun->pblocksize, M_RAMDISK,
			    M_WAITOK|M_ZERO);
		}
		sx_xunlock(&be_lun->page_lock);
		return ((uint8_t *)*pp);
	} else {
		sx_slock(&be_lun->page_lock);
		p = be_lun->pages;
		for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
			if (p == NULL)
				break;
			i = pn >> s;
			p = (uint8_t **)p[i];
			pn -= i << s;
		}
		sx_sunlock(&be_lun->page_lock);
		if ((p == P_UNMAPPED || p == P_ANCHORED) && op == GP_READ)
			return (be_lun->zero_page);
		return ((uint8_t *)p);
	}
}

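/*
 * Release the backing page for page number "pn" (if any) and mark the slot
 * unmapped, returning the space to the capacity pool.
 */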
static void
ctl_backend_ramdisk_unmappage(struct ctl_be_ramdisk_lun *be_lun, off_t pn)
{
	uint8_t ***pp;
	off_t i;
	int s;

	if (be_lun->cap_bytes == 0)
		return;
	sx_xlock(&be_lun->page_lock);
	pp = &be_lun->pages;
	for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
		if (*pp == NULL)
			goto noindir;
		i = pn >> s;
		pp = (uint8_t ***)&(*pp)[i];
		pn -= i << s;
	}
	if (*pp == P_ANCHORED) {
		be_lun->cap_used -= be_lun->pblocksize;
		*pp = P_UNMAPPED;
	} else if (*pp != P_UNMAPPED) {
		free(*pp, M_RAMDISK);
		be_lun->cap_used -= be_lun->pblocksize;
		*pp = P_UNMAPPED;
	}
noindir:
	sx_xunlock(&be_lun->page_lock);
}

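/*
 * Mark page number "pn" anchored: keep capacity reserved for it without
 * keeping any data, freeing the existing backing page if one is mapped.
 */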
static void
ctl_backend_ramdisk_anchorpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn)
{
	uint8_t ***pp;
	off_t i;
	int s;

	if (be_lun->cap_bytes == 0)
		return;
	sx_xlock(&be_lun->page_lock);
	pp = &be_lun->pages;
	for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
		if (*pp == NULL)
			goto noindir;
		i = pn >> s;
		pp = (uint8_t ***)&(*pp)[i];
		pn -= i << s;
	}
	if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) {
		be_lun->cap_used += be_lun->pblocksize;
		*pp = P_ANCHORED;
	} else if (*pp != P_ANCHORED) {
		free(*pp, M_RAMDISK);
		*pp = P_ANCHORED;
	}
noindir:
	sx_xunlock(&be_lun->page_lock);
}

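/*
 * Recursively free an indirection tree of data pages, "indir" levels deep.
 */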
static void
ctl_backend_ramdisk_freeallpages(uint8_t **p, int indir)
{
	int i;

	if (p == NULL)
		return;
	if (indir == 0) {
		free(p, M_RAMDISK);
		return;
	}
	for (i = 0; i < PPP; i++) {
		if (p[i] == NULL)
			continue;
		ctl_backend_ramdisk_freeallpages((uint8_t **)p[i], indir - 1);
	}
	free(p, M_RAMDISK);
}

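/*
 * Compare two buffers and return the offset of the first differing byte,
 * or "size" if they are equal.
 */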
static size_t
cmp(uint8_t *a, uint8_t *b, size_t size)
{
	size_t i;

	for (i = 0; i < size; i++) {
		if (a[i] != b[i])
			break;
	}
	return (i);
}

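/*
 * Verify the data received for a COMPARE command against the backing store.
 * On a miscompare, set MISCOMPARE sense with the offset of the first
 * mismatching byte in the INFORMATION field and return non-zero.
 */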
static int
ctl_backend_ramdisk_cmp(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	uint8_t *page;
	uint8_t info[8];
	uint64_t lba;
	u_int lbaoff, lbas, res, off;

	lbas = io->scsiio.kern_data_len / cbe_lun->blocksize;
	lba = ARGS(io)->lba + PRIV(io)->len - lbas;
	off = 0;
	for (; lbas > 0; lbas--, lba++) {
		page = ctl_backend_ramdisk_getpage(be_lun,
		    lba >> cbe_lun->pblockexp, GP_READ);
		lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
		page += lbaoff * cbe_lun->blocksize;
		res = cmp(io->scsiio.kern_data_ptr + off, page,
		    cbe_lun->blocksize);
		off += res;
		if (res < cbe_lun->blocksize)
			break;
	}
	if (lbas > 0) {
		off += io->scsiio.kern_rel_offset - io->scsiio.kern_data_len;
		scsi_u64to8b(off, info);
		ctl_set_sense(&io->scsiio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_MISCOMPARE,
		    /*asc*/ 0x1D, /*ascq*/ 0x00,
		    /*type*/ SSD_ELEM_INFO,
		    /*size*/ sizeof(info), /*data*/ &info,
		    /*type*/ SSD_ELEM_NONE);
		return (1);
	}
	return (0);
}

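/*
 * Data movement callback, invoked when a transfer to/from the initiator has
 * completed.  Free any temporary S/G list, run the COMPARE check if
 * requested, then either requeue the I/O for its next chunk or complete it.
 */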
static int
ctl_backend_ramdisk_move_done(union ctl_io *io, bool samethr)
{
	struct ctl_be_ramdisk_lun *be_lun =
	    (struct ctl_be_ramdisk_lun *)CTL_BACKEND_LUN(io);

	CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
	if (io->scsiio.kern_sg_entries > 0)
		free(io->scsiio.kern_data_ptr, M_RAMDISK);
	io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
	if ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
	    (io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE) {
		if (ARGS(io)->flags & CTL_LLF_COMPARE) {
			/* We have a data block ready for comparison. */
			if (ctl_backend_ramdisk_cmp(io))
				goto done;
		}
		if (ARGS(io)->len > PRIV(io)->len) {
			mtx_lock(&be_lun->queue_lock);
			STAILQ_INSERT_TAIL(&be_lun->cont_queue,
			    &io->io_hdr, links);
			mtx_unlock(&be_lun->queue_lock);
			taskqueue_enqueue(be_lun->io_taskqueue,
			    &be_lun->io_task);
			return (0);
		}
		ctl_set_success(&io->scsiio);
	}
done:
	ctl_data_submit_done(io);
	return (0);
}

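/*
 * Start (or continue) a COMPARE request: allocate a bounce buffer for the
 * next chunk of initiator data and kick off the data movement; the actual
 * comparison happens in the move_done callback.
 */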
static void
ctl_backend_ramdisk_compare(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	u_int lbas, len;

	lbas = ARGS(io)->len - PRIV(io)->len;
	lbas = MIN(lbas, 131072 / cbe_lun->blocksize);
	len = lbas * cbe_lun->blocksize;

	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
	io->scsiio.kern_data_ptr = malloc(len, M_RAMDISK, M_WAITOK);
	io->scsiio.kern_data_len = len;
	io->scsiio.kern_sg_entries = 0;
	io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	PRIV(io)->len += lbas;
	ctl_datamove(io);
}

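/*
 * Start (or continue) a READ/WRITE request: map the next run of backing
 * pages directly into the I/O as a scatter/gather list (allocating pages
 * for writes) and hand it to ctl_datamove(); report a space allocation
 * failure if a write cannot get a backing page.
 */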
static void
ctl_backend_ramdisk_rw(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	struct ctl_sg_entry *sg_entries;
	uint8_t *page;
	uint64_t lba;
	u_int i, len, lbaoff, lbas, sgs, off;
	getpage_op_t op;

	lba = ARGS(io)->lba + PRIV(io)->len;
	lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
	lbas = ARGS(io)->len - PRIV(io)->len;
	lbas = MIN(lbas, (SGPP << cbe_lun->pblockexp) - lbaoff);
	sgs = (lbas + lbaoff + be_lun->pblockmul - 1) >> cbe_lun->pblockexp;
	off = lbaoff * cbe_lun->blocksize;
	op = (ARGS(io)->flags & CTL_LLF_WRITE) ? GP_WRITE : GP_READ;
	if (sgs > 1) {
		io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
		    sgs, M_RAMDISK, M_WAITOK);
		sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
		len = lbas * cbe_lun->blocksize;
		for (i = 0; i < sgs; i++) {
			page = ctl_backend_ramdisk_getpage(be_lun,
			    (lba >> cbe_lun->pblockexp) + i, op);
			if (page == P_UNMAPPED || page == P_ANCHORED) {
				free(io->scsiio.kern_data_ptr, M_RAMDISK);
nospc:
				ctl_set_space_alloc_fail(&io->scsiio);
				ctl_data_submit_done(io);
				return;
			}
			sg_entries[i].addr = page + off;
			sg_entries[i].len = MIN(len, be_lun->pblocksize - off);
			len -= sg_entries[i].len;
			off = 0;
		}
	} else {
		page = ctl_backend_ramdisk_getpage(be_lun,
		    lba >> cbe_lun->pblockexp, op);
		if (page == P_UNMAPPED || page == P_ANCHORED)
			goto nospc;
		sgs = 0;
		io->scsiio.kern_data_ptr = page + off;
	}

	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
	io->scsiio.kern_data_len = lbas * cbe_lun->blocksize;
	io->scsiio.kern_sg_entries = sgs;
	io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	PRIV(io)->len += lbas;
	if ((ARGS(io)->flags & CTL_LLF_READ) &&
	    ARGS(io)->len <= PRIV(io)->len) {
		ctl_set_success(&io->scsiio);
		if (cbe_lun->serseq >= CTL_LUN_SERSEQ_SOFT)
			ctl_serseq_done(io);
	}
	ctl_datamove(io);
}

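/*
 * Backend entry point for data I/O: VERIFY succeeds immediately, while
 * COMPARE and READ/WRITE are dispatched to their respective handlers.
 */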
static int
ctl_backend_ramdisk_submit(union ctl_io *io)
{
	struct ctl_lba_len_flags *lbalen = ARGS(io);

	if (lbalen->flags & CTL_LLF_VERIFY) {
		ctl_set_success(&io->scsiio);
		ctl_data_submit_done(io);
		return (CTL_RETVAL_COMPLETE);
	}
	PRIV(io)->len = 0;
	if (lbalen->flags & CTL_LLF_COMPARE)
		ctl_backend_ramdisk_compare(io);
	else
		ctl_backend_ramdisk_rw(io);
	return (CTL_RETVAL_COMPLETE);
}

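/*
 * Taskqueue worker: continue I/Os that were queued by the move_done
 * callback because they still have data left to transfer.
 */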
static void
ctl_backend_ramdisk_worker(void *context, int pending)
{
	struct ctl_be_ramdisk_lun *be_lun;
	union ctl_io *io;

	be_lun = (struct ctl_be_ramdisk_lun *)context;
	mtx_lock(&be_lun->queue_lock);
	for (;;) {
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&be_lun->cont_queue, links);
			mtx_unlock(&be_lun->queue_lock);
			if (ARGS(io)->flags & CTL_LLF_COMPARE)
				ctl_backend_ramdisk_compare(io);
			else
				ctl_backend_ramdisk_rw(io);
			mtx_lock(&be_lun->queue_lock);
			continue;
		}

		/*
		 * If we get here, there is no work left in the queues, so
		 * just break out and let the task queue go to sleep.
		 */
		break;
	}
	mtx_unlock(&be_lun->queue_lock);
}

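/*
 * GET LBA STATUS: report whether the physical block containing the
 * requested LBA is mapped (0), deallocated (1) or anchored (2).
 */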
static int
ctl_backend_ramdisk_gls(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	struct scsi_get_lba_status_data *data;
	uint8_t *page;
	u_int lbaoff;

	data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr;
	scsi_u64to8b(ARGS(io)->lba, data->descr[0].addr);
	lbaoff = ARGS(io)->lba & ~(UINT_MAX << cbe_lun->pblockexp);
	scsi_ulto4b(be_lun->pblockmul - lbaoff, data->descr[0].length);
	page = ctl_backend_ramdisk_getpage(be_lun,
	    ARGS(io)->lba >> cbe_lun->pblockexp, GP_OTHER);
	if (page == P_UNMAPPED)
		data->descr[0].status = 1;
	else if (page == P_ANCHORED)
		data->descr[0].status = 2;
	else
		data->descr[0].status = 0;
	ctl_config_read_done(io);
	return (CTL_RETVAL_COMPLETE);
}

static int
ctl_backend_ramdisk_config_read(union ctl_io *io)
{
	int retval = 0;

	switch (io->scsiio.cdb[0]) {
	case SERVICE_ACTION_IN:
		if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
			retval = ctl_backend_ramdisk_gls(io);
			break;
		}
		ctl_set_invalid_field(&io->scsiio,
		    /*sks_valid*/ 1,
		    /*command*/ 1,
		    /*field*/ 1,
		    /*bit_valid*/ 1,
		    /*bit*/ 4);
		ctl_config_read_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_read_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}
	return (retval);
}

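/*
 * Deallocate (or anchor) the LBA range [lba, lba + len): partial physical
 * blocks at either end are zeroed in place, full blocks in between are
 * unmapped or anchored.
 */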
static void
ctl_backend_ramdisk_delete(struct ctl_be_lun *cbe_lun, off_t lba, off_t len,
    int anchor)
{
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	uint8_t *page;
	uint64_t p, lp;
	u_int lbaoff;
	getpage_op_t op = anchor ? GP_ANCHOR : GP_OTHER;

	/* Partially zero first partial page. */
	p = lba >> cbe_lun->pblockexp;
	lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
	if (lbaoff != 0) {
		page = ctl_backend_ramdisk_getpage(be_lun, p, op);
		if (page != P_UNMAPPED && page != P_ANCHORED) {
			memset(page + lbaoff * cbe_lun->blocksize, 0,
			    min(len, be_lun->pblockmul - lbaoff) *
			    cbe_lun->blocksize);
		}
		p++;
	}

	/* Partially zero last partial page. */
	lp = (lba + len) >> cbe_lun->pblockexp;
	lbaoff = (lba + len) & ~(UINT_MAX << cbe_lun->pblockexp);
	if (p <= lp && lbaoff != 0) {
		page = ctl_backend_ramdisk_getpage(be_lun, lp, op);
		if (page != P_UNMAPPED && page != P_ANCHORED)
			memset(page, 0, lbaoff * cbe_lun->blocksize);
	}

	/* Delete remaining full pages. */
	if (anchor) {
		for (; p < lp; p++)
			ctl_backend_ramdisk_anchorpage(be_lun, p);
	} else {
		for (; p < lp; p++)
			ctl_backend_ramdisk_unmappage(be_lun, p);
	}
}

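/*
 * WRITE SAME: with the UNMAP bit this deallocates (or anchors) the range;
 * otherwise the single block of data (or zeroes for NDOB) is replicated
 * across every LBA in the range.
 */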
static void
ctl_backend_ramdisk_ws(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	struct ctl_lba_len_flags *lbalen = ARGS(io);
	uint8_t *page;
	uint64_t lba;
	u_int lbaoff, lbas;

	if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR | SWS_NDOB)) {
		ctl_set_invalid_field(&io->scsiio,
		    /*sks_valid*/ 1,
		    /*command*/ 1,
		    /*field*/ 1,
		    /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_config_write_done(io);
		return;
	}
	if (lbalen->flags & SWS_UNMAP) {
		ctl_backend_ramdisk_delete(cbe_lun, lbalen->lba, lbalen->len,
		    (lbalen->flags & SWS_ANCHOR) != 0);
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		return;
	}

	for (lba = lbalen->lba, lbas = lbalen->len; lbas > 0; lba++, lbas--) {
		page = ctl_backend_ramdisk_getpage(be_lun,
		    lba >> cbe_lun->pblockexp, GP_WRITE);
		if (page == P_UNMAPPED || page == P_ANCHORED) {
			ctl_set_space_alloc_fail(&io->scsiio);
			ctl_data_submit_done(io);
			return;
		}
		lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
		page += lbaoff * cbe_lun->blocksize;
		if (lbalen->flags & SWS_NDOB) {
			memset(page, 0, cbe_lun->blocksize);
		} else {
			memcpy(page, io->scsiio.kern_data_ptr,
			    cbe_lun->blocksize);
		}
		if (lbalen->flags & SWS_LBDATA)
			scsi_ulto4b(lba, page);
	}
	ctl_set_success(&io->scsiio);
	ctl_config_write_done(io);
}

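/*
 * UNMAP: walk the block descriptor list and deallocate (or anchor) each
 * described LBA range.
 */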
static void
ctl_backend_ramdisk_unmap(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_ptr_len_flags *ptrlen = (struct ctl_ptr_len_flags *)ARGS(io);
	struct scsi_unmap_desc *buf, *end;

	if ((ptrlen->flags & ~SU_ANCHOR) != 0) {
		ctl_set_invalid_field(&io->scsiio,
		    /*sks_valid*/ 0,
		    /*command*/ 0,
		    /*field*/ 0,
		    /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_config_write_done(io);
		return;
	}

	buf = (struct scsi_unmap_desc *)ptrlen->ptr;
	end = buf + ptrlen->len / sizeof(*buf);
	for (; buf < end; buf++) {
		ctl_backend_ramdisk_delete(cbe_lun,
		    scsi_8btou64(buf->lba), scsi_4btoul(buf->length),
		    (ptrlen->flags & SU_ANCHOR) != 0);
	}

	ctl_set_success(&io->scsiio);
	ctl_config_write_done(io);
}

static int
ctl_backend_ramdisk_config_write(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	int retval = 0;

	switch (io->scsiio.cdb[0]) {
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		/* We have no cache to flush. */
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	case START_STOP_UNIT: {
		struct scsi_start_stop_unit *cdb;

		cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
		if ((cdb->how & SSS_PC_MASK) != 0) {
			ctl_set_success(&io->scsiio);
			ctl_config_write_done(io);
			break;
		}
		if (cdb->how & SSS_START) {
			if (cdb->how & SSS_LOEJ)
				ctl_lun_has_media(cbe_lun);
			ctl_start_lun(cbe_lun);
		} else {
			ctl_stop_lun(cbe_lun);
			if (cdb->how & SSS_LOEJ)
				ctl_lun_ejected(cbe_lun);
		}
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	}
	case PREVENT_ALLOW:
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	case WRITE_SAME_10:
	case WRITE_SAME_16:
		ctl_backend_ramdisk_ws(io);
		break;
	case UNMAP:
		ctl_backend_ramdisk_unmap(io);
		break;
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_write_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}

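/*
 * Report the "blocksused" and "blocksavail" LUN attributes from the
 * capacity accounting (only meaningful when a capacity limit is set).
 */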
static uint64_t
ctl_backend_ramdisk_lun_attr(struct ctl_be_lun *cbe_lun, const char *attrname)
{
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	uint64_t val;

	val = UINT64_MAX;
	if (be_lun->cap_bytes == 0)
		return (val);
	sx_slock(&be_lun->page_lock);
	if (strcmp(attrname, "blocksused") == 0) {
		val = be_lun->cap_used / be_lun->cbe_lun.blocksize;
	} else if (strcmp(attrname, "blocksavail") == 0) {
		val = (be_lun->cap_bytes - be_lun->cap_used) /
		    be_lun->cbe_lun.blocksize;
	}
	sx_sunlock(&be_lun->page_lock);
	return (val);
}

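/*
 * Character device ioctl handler: dispatch CTL_LUN_REQ create, remove and
 * modify requests from userland.
 */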
static int
ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
    int flag, struct thread *td)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;
	struct ctl_lun_req *lun_req;
	int retval;

	retval = 0;
	switch (cmd) {
	case CTL_LUN_REQ:
		lun_req = (struct ctl_lun_req *)addr;
		switch (lun_req->reqtype) {
		case CTL_LUNREQ_CREATE:
			retval = ctl_backend_ramdisk_create(softc, lun_req);
			break;
		case CTL_LUNREQ_RM:
			retval = ctl_backend_ramdisk_rm(softc, lun_req);
			break;
		case CTL_LUNREQ_MODIFY:
			retval = ctl_backend_ramdisk_modify(softc, lun_req);
			break;
		default:
			lun_req->status = CTL_LUN_ERROR;
			snprintf(lun_req->error_str, sizeof(lun_req->error_str),
			    "%s: invalid LUN request type %d", __func__,
			    lun_req->reqtype);
			break;
		}
		break;
	default:
		retval = ENOTTY;
		break;
	}

	return (retval);
}

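/*
 * Remove a LUN: detach it from the backend's list, ask CTL to remove it,
 * and wait for the shutdown callback to mark it unconfigured before
 * freeing it.
 */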
static int
ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_lun_rm_params *params;
	int retval;

	params = &req->reqdata.rm;
	sx_xlock(&softc->modify_lock);
	mtx_lock(&softc->lock);
	SLIST_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->cbe_lun.lun_id == params->lun_id) {
			SLIST_REMOVE(&softc->lun_list, be_lun,
			    ctl_be_ramdisk_lun, links);
			softc->num_luns--;
			break;
		}
	}
	mtx_unlock(&softc->lock);
	sx_xunlock(&softc->modify_lock);
	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: LUN %u is not managed by the ramdisk backend",
		    __func__, params->lun_id);
		goto bailout_error;
	}

	/*
	 * Set the waiting flag before we invalidate the LUN.  Our shutdown
	 * routine can be called any time after we invalidate the LUN,
	 * and can be called from our context.
	 *
	 * This tells the shutdown routine that we're waiting, or we're
	 * going to wait for the shutdown to happen.
	 */
	mtx_lock(&softc->lock);
	be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
	mtx_unlock(&softc->lock);

	retval = ctl_remove_lun(&be_lun->cbe_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: error %d returned from ctl_remove_lun() for "
		    "LUN %d", __func__, retval, params->lun_id);
		mtx_lock(&softc->lock);
		be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
		mtx_unlock(&softc->lock);
		goto bailout_error;
	}

	mtx_lock(&softc->lock);
	while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlramrm", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
	if (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) {
		mtx_unlock(&softc->lock);
		free(be_lun, M_RAMDISK);
	} else {
		mtx_unlock(&softc->lock);
		return (EINTR);
	}

	req->status = CTL_LUN_OK;
	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;
	return (0);
}

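/*
 * Create a new LUN: parse the creation parameters and nvlist options
 * (pblocksize, capacity, unmap, readonly, serseq, ha_role), size the
 * indirection tree, set up the per-LUN taskqueue and register the LUN
 * with CTL.
 */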
static int
ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_be_lun *cbe_lun;
	struct ctl_lun_create_params *params;
	const char *value;
	char tmpstr[32];
	uint64_t t;
	int retval;

	retval = 0;
	params = &req->reqdata.create;

	be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | M_WAITOK);
	cbe_lun = &be_lun->cbe_lun;
	cbe_lun->options = nvlist_clone(req->args_nvl);
	be_lun->params = req->reqdata.create;
	be_lun->softc = softc;

	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
		cbe_lun->lun_type = params->device_type;
	else
		cbe_lun->lun_type = T_DIRECT;
	be_lun->flags = 0;
	cbe_lun->flags = 0;
	value = dnvlist_get_string(cbe_lun->options, "ha_role", NULL);
	if (value != NULL) {
		if (strcmp(value, "primary") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
	} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
		cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;

	be_lun->pblocksize = PAGE_SIZE;
	value = dnvlist_get_string(cbe_lun->options, "pblocksize", NULL);
	if (value != NULL) {
		ctl_expand_number(value, &t);
		be_lun->pblocksize = t;
	}
	if (be_lun->pblocksize < 512 || be_lun->pblocksize > 131072) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: unsupported pblocksize %u", __func__,
		    be_lun->pblocksize);
		goto bailout_error;
	}

	if (cbe_lun->lun_type == T_DIRECT ||
	    cbe_lun->lun_type == T_CDROM) {
		if (params->blocksize_bytes != 0)
			cbe_lun->blocksize = params->blocksize_bytes;
		else if (cbe_lun->lun_type == T_CDROM)
			cbe_lun->blocksize = 2048;
		else
			cbe_lun->blocksize = 512;
		be_lun->pblockmul = be_lun->pblocksize / cbe_lun->blocksize;
		if (be_lun->pblockmul < 1 || !powerof2(be_lun->pblockmul)) {
			snprintf(req->error_str, sizeof(req->error_str),
			    "%s: pblocksize %u not exp2 of blocksize %u",
			    __func__,
			    be_lun->pblocksize, cbe_lun->blocksize);
			goto bailout_error;
		}
		if (params->lun_size_bytes < cbe_lun->blocksize) {
			snprintf(req->error_str, sizeof(req->error_str),
			    "%s: LUN size %ju < blocksize %u", __func__,
			    params->lun_size_bytes, cbe_lun->blocksize);
			goto bailout_error;
		}
		be_lun->size_blocks = params->lun_size_bytes / cbe_lun->blocksize;
		be_lun->size_bytes = be_lun->size_blocks * cbe_lun->blocksize;
		be_lun->indir = 0;
		t = be_lun->size_bytes / be_lun->pblocksize;
		while (t > 1) {
			t /= PPP;
			be_lun->indir++;
		}
		cbe_lun->maxlba = be_lun->size_blocks - 1;
		cbe_lun->pblockexp = fls(be_lun->pblockmul) - 1;
		cbe_lun->pblockoff = 0;
		cbe_lun->ublockexp = cbe_lun->pblockexp;
		cbe_lun->ublockoff = 0;
		cbe_lun->atomicblock = be_lun->pblocksize;
		cbe_lun->opttxferlen = SGPP * be_lun->pblocksize;
		value = dnvlist_get_string(cbe_lun->options, "capacity", NULL);
		if (value != NULL)
			ctl_expand_number(value, &be_lun->cap_bytes);
	} else {
		be_lun->pblockmul = 1;
		cbe_lun->pblockexp = 0;
	}

	/* Tell the user the blocksize we ended up using */
	params->blocksize_bytes = cbe_lun->blocksize;
	params->lun_size_bytes = be_lun->size_bytes;

	value = dnvlist_get_string(cbe_lun->options, "unmap", NULL);
	if (value == NULL || strcmp(value, "off") != 0)
		cbe_lun->flags |= CTL_LUN_FLAG_UNMAP;
	value = dnvlist_get_string(cbe_lun->options, "readonly", NULL);
	if (value != NULL) {
		if (strcmp(value, "on") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
	} else if (cbe_lun->lun_type != T_DIRECT)
		cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
	cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
	value = dnvlist_get_string(cbe_lun->options, "serseq", NULL);
	if (value != NULL && strcmp(value, "on") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_ON;
	else if (value != NULL && strcmp(value, "read") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_READ;
	else if (value != NULL && strcmp(value, "soft") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_SOFT;
	else if (value != NULL && strcmp(value, "off") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;

	if (params->flags & CTL_LUN_FLAG_ID_REQ) {
		cbe_lun->req_lun_id = params->req_lun_id;
		cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ;
	} else
		cbe_lun->req_lun_id = 0;

	cbe_lun->lun_shutdown = ctl_backend_ramdisk_lun_shutdown;
	cbe_lun->be = &ctl_be_ramdisk_driver;
	if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%04d",
		    softc->num_luns);
		strncpy((char *)cbe_lun->serial_num, tmpstr,
		    MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr)));

		/* Tell the user what we used for a serial number */
		strncpy((char *)params->serial_num, tmpstr,
		    MIN(sizeof(params->serial_num), sizeof(tmpstr)));
	} else {
		strncpy((char *)cbe_lun->serial_num, params->serial_num,
		    MIN(sizeof(cbe_lun->serial_num),
		    sizeof(params->serial_num)));
	}
	if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%04d", softc->num_luns);
		strncpy((char *)cbe_lun->device_id, tmpstr,
		    MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr)));

		/* Tell the user what we used for a device ID */
		strncpy((char *)params->device_id, tmpstr,
		    MIN(sizeof(params->device_id), sizeof(tmpstr)));
	} else {
		strncpy((char *)cbe_lun->device_id, params->device_id,
		    MIN(sizeof(cbe_lun->device_id),
		    sizeof(params->device_id)));
	}

	STAILQ_INIT(&be_lun->cont_queue);
	sx_init(&be_lun->page_lock, "ctlram page");
	if (be_lun->cap_bytes == 0) {
		be_lun->indir = 0;
		be_lun->pages = malloc(be_lun->pblocksize, M_RAMDISK, M_WAITOK);
	}
	be_lun->zero_page = malloc(be_lun->pblocksize, M_RAMDISK,
	    M_WAITOK|M_ZERO);
	mtx_init(&be_lun->queue_lock, "ctlram queue", NULL, MTX_DEF);
	TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker,
	    be_lun);

	be_lun->io_taskqueue = taskqueue_create("ctlramtq", M_WAITOK,
	    taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
	if (be_lun->io_taskqueue == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: Unable to create taskqueue", __func__);
		goto bailout_error;
	}

	retval = taskqueue_start_threads_in_proc(&be_lun->io_taskqueue,
					 /*num threads*/1,
					 /*priority*/PUSER,
					 /*proc*/control_softc->ctl_proc,
					 /*thread name*/"ramdisk");
	if (retval != 0)
		goto bailout_error;

	retval = ctl_add_lun(&be_lun->cbe_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: ctl_add_lun() returned error %d, see dmesg for "
		    "details", __func__, retval);
		retval = 0;
		goto bailout_error;
	}

	mtx_lock(&softc->lock);
	softc->num_luns++;
	SLIST_INSERT_HEAD(&softc->lun_list, be_lun, links);
	mtx_unlock(&softc->lock);

	params->req_lun_id = cbe_lun->lun_id;

	req->status = CTL_LUN_OK;
	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;
	if (be_lun != NULL) {
		if (be_lun->io_taskqueue != NULL)
			taskqueue_free(be_lun->io_taskqueue);
		nvlist_destroy(cbe_lun->options);
		free(be_lun->zero_page, M_RAMDISK);
		ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir);
		sx_destroy(&be_lun->page_lock);
		mtx_destroy(&be_lun->queue_lock);
		free(be_lun, M_RAMDISK);
	}
	return (retval);
}

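/*
 * Modify an existing LUN: re-apply the nvlist options, update the HA
 * primary/secondary role if it changed, and resize the LUN, notifying CTL
 * of the new capacity.
 */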
static int
ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_be_lun *cbe_lun;
	struct ctl_lun_modify_params *params;
	const char *value;
	uint32_t blocksize;
	int wasprim;

	params = &req->reqdata.modify;
	sx_xlock(&softc->modify_lock);
	mtx_lock(&softc->lock);
	SLIST_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->cbe_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);
	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: LUN %u is not managed by the ramdisk backend",
		    __func__, params->lun_id);
		goto bailout_error;
	}
	cbe_lun = &be_lun->cbe_lun;

	if (params->lun_size_bytes != 0)
		be_lun->params.lun_size_bytes = params->lun_size_bytes;

	if (req->args_nvl != NULL) {
		nvlist_destroy(cbe_lun->options);
		cbe_lun->options = nvlist_clone(req->args_nvl);
	}

	wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY);
	value = dnvlist_get_string(cbe_lun->options, "ha_role", NULL);
	if (value != NULL) {
		if (strcmp(value, "primary") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
		else
			cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
	} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
		cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
	else
		cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
	if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) {
		if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)
			ctl_lun_primary(cbe_lun);
		else
			ctl_lun_secondary(cbe_lun);
	}

	blocksize = be_lun->cbe_lun.blocksize;
	if (be_lun->params.lun_size_bytes < blocksize) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: LUN size %ju < blocksize %u", __func__,
		    be_lun->params.lun_size_bytes, blocksize);
		goto bailout_error;
	}
	be_lun->size_blocks = be_lun->params.lun_size_bytes / blocksize;
	be_lun->size_bytes = be_lun->size_blocks * blocksize;
	be_lun->cbe_lun.maxlba = be_lun->size_blocks - 1;
	ctl_lun_capacity_changed(&be_lun->cbe_lun);

	/* Tell the user the exact size we ended up using */
	params->lun_size_bytes = be_lun->size_bytes;

	sx_xunlock(&softc->modify_lock);
	req->status = CTL_LUN_OK;
	return (0);

bailout_error:
	sx_xunlock(&softc->modify_lock);
	req->status = CTL_LUN_ERROR;
	return (0);
}

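/*
 * Shutdown callback invoked by CTL once the LUN is gone: release all
 * backing pages and per-LUN resources, then either wake up a waiting
 * remover or free the LUN structure here.
 */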
static void
ctl_backend_ramdisk_lun_shutdown(struct ctl_be_lun *cbe_lun)
{
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	struct ctl_be_ramdisk_softc *softc = be_lun->softc;

	taskqueue_drain_all(be_lun->io_taskqueue);
	taskqueue_free(be_lun->io_taskqueue);
	nvlist_destroy(be_lun->cbe_lun.options);
	free(be_lun->zero_page, M_RAMDISK);
	ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir);
	sx_destroy(&be_lun->page_lock);
	mtx_destroy(&be_lun->queue_lock);

	mtx_lock(&softc->lock);
	be_lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;
	if (be_lun->flags & CTL_BE_RAMDISK_LUN_WAITING)
		wakeup(be_lun);
	else
		free(be_lun, M_RAMDISK);
	mtx_unlock(&softc->lock);
}