/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003, 2008 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2014-2017 Alexander Motin <[email protected]>
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
 */
/*
 * CAM Target Layer black hole and RAM disk backend.
 *
 * Author: Ken Merry <[email protected]>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/condvar.h>
#include <sys/types.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/nv.h>
#include <sys/dnv.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_error.h>

#define PRIV(io)	\
    ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND])
#define ARGS(io)	\
    ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN])

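/*
 * PPP is the number of page pointers that fit in one directory page,
 * PPPS is its base-2 logarithm, and SGPP is the number of scatter/gather
 * entries that fit in one page.
 */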
#define	PPP	(PAGE_SIZE / sizeof(uint8_t **))
#ifdef __LP64__
#define	PPPS	(PAGE_SHIFT - 3)
#else
#define	PPPS	(PAGE_SHIFT - 2)
#endif
#define	SGPP	(PAGE_SIZE / sizeof(struct ctl_sg_entry))

#define	P_UNMAPPED	NULL			/* Page is unmapped. */
#define	P_ANCHORED	((void *)(uintptr_t)1)	/* Page is anchored. */

typedef enum {
	GP_READ,	/* Return data page or zero page. */
	GP_WRITE,	/* Return data page, try to allocate if none. */
	GP_ANCHOR,	/* Return data page, try to anchor if none. */
	GP_OTHER,	/* Return whatever is present; do not allocate or anchor. */
} getpage_op_t;

typedef enum {
	CTL_BE_RAMDISK_LUN_UNCONFIGURED	= 0x01,
	CTL_BE_RAMDISK_LUN_CONFIG_ERR	= 0x02,
	CTL_BE_RAMDISK_LUN_WAITING	= 0x04
} ctl_be_ramdisk_lun_flags;

struct ctl_be_ramdisk_lun {
	struct ctl_lun_create_params params;
	char lunname[32];
	int indir;
	uint8_t **pages;
	uint8_t *zero_page;
	struct sx page_lock;
	u_int pblocksize;
	u_int pblockmul;
	uint64_t size_bytes;
	uint64_t size_blocks;
	uint64_t cap_bytes;
	uint64_t cap_used;
	struct ctl_be_ramdisk_softc *softc;
	ctl_be_ramdisk_lun_flags flags;
	STAILQ_ENTRY(ctl_be_ramdisk_lun) links;
	struct ctl_be_lun cbe_lun;
	struct taskqueue *io_taskqueue;
	struct task io_task;
	STAILQ_HEAD(, ctl_io_hdr) cont_queue;
	struct mtx_padalign queue_lock;
};

struct ctl_be_ramdisk_softc {
	struct mtx lock;
	int num_luns;
	STAILQ_HEAD(, ctl_be_ramdisk_lun) lun_list;
};

static struct ctl_be_ramdisk_softc rd_softc;
extern struct ctl_softc *control_softc;

static int ctl_backend_ramdisk_init(void);
static int ctl_backend_ramdisk_shutdown(void);
static int ctl_backend_ramdisk_move_done(union ctl_io *io);
static void ctl_backend_ramdisk_compare(union ctl_io *io);
static void ctl_backend_ramdisk_rw(union ctl_io *io);
static int ctl_backend_ramdisk_submit(union ctl_io *io);
static void ctl_backend_ramdisk_worker(void *context, int pending);
static int ctl_backend_ramdisk_config_read(union ctl_io *io);
static int ctl_backend_ramdisk_config_write(union ctl_io *io);
static uint64_t ctl_backend_ramdisk_lun_attr(void *be_lun, const char *attrname);
static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
    caddr_t addr, int flag, struct thread *td);
static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req);
static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req);
static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req);
static void ctl_backend_ramdisk_lun_shutdown(void *be_lun);
static void ctl_backend_ramdisk_lun_config_status(void *be_lun,
    ctl_lun_config_status status);

static struct ctl_backend_driver ctl_be_ramdisk_driver =
{
	.name = "ramdisk",
	.flags = CTL_BE_FLAG_HAS_CONFIG,
	.init = ctl_backend_ramdisk_init,
	.shutdown = ctl_backend_ramdisk_shutdown,
	.data_submit = ctl_backend_ramdisk_submit,
	.data_move_done = ctl_backend_ramdisk_move_done,
	.config_read = ctl_backend_ramdisk_config_read,
	.config_write = ctl_backend_ramdisk_config_write,
	.ioctl = ctl_backend_ramdisk_ioctl,
	.lun_attr = ctl_backend_ramdisk_lun_attr,
};

MALLOC_DEFINE(M_RAMDISK, "ramdisk", "Memory used for CTL RAMdisk");
CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);

static int
ctl_backend_ramdisk_init(void)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;

	memset(softc, 0, sizeof(*softc));
	mtx_init(&softc->lock, "ctlramdisk", NULL, MTX_DEF);
	STAILQ_INIT(&softc->lun_list);
	return (0);
}

static int
ctl_backend_ramdisk_shutdown(void)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;
	struct ctl_be_ramdisk_lun *lun, *next_lun;

	mtx_lock(&softc->lock);
	STAILQ_FOREACH_SAFE(lun, &softc->lun_list, links, next_lun) {
		/*
		 * Drop our lock here.  Since ctl_invalidate_lun() can call
		 * back into us, this could potentially lead to a recursive
		 * lock of the same mutex, which would cause a hang.
		 */
		mtx_unlock(&softc->lock);
		ctl_disable_lun(&lun->cbe_lun);
		ctl_invalidate_lun(&lun->cbe_lun);
		mtx_lock(&softc->lock);
	}
	mtx_unlock(&softc->lock);
	mtx_destroy(&softc->lock);
	return (0);
}

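/*
 * Return the backing page for page number "pn", allocating or anchoring it
 * as requested by "op".  LUNs created without a "capacity" option store no
 * data: all writes land in a single scratch page and reads return the shared
 * zero page.  LUNs with a capacity keep their backing pages in a radix tree
 * with "indir" levels of indirection.
 */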
static uint8_t *
ctl_backend_ramdisk_getpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn,
    getpage_op_t op)
{
	uint8_t **p, ***pp;
	off_t i;
	int s;

	if (be_lun->cap_bytes == 0) {
		switch (op) {
		case GP_READ:
			return (be_lun->zero_page);
		case GP_WRITE:
			return ((uint8_t *)be_lun->pages);
		case GP_ANCHOR:
			return (P_ANCHORED);
		default:
			return (P_UNMAPPED);
		}
	}
	if (op == GP_WRITE || op == GP_ANCHOR) {
		sx_xlock(&be_lun->page_lock);
		pp = &be_lun->pages;
		for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
			if (*pp == NULL) {
				*pp = malloc(PAGE_SIZE, M_RAMDISK,
				    M_WAITOK|M_ZERO);
			}
			i = pn >> s;
			pp = (uint8_t ***)&(*pp)[i];
			pn -= i << s;
		}
		if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) {
			if (op == GP_WRITE) {
				*pp = malloc(be_lun->pblocksize, M_RAMDISK,
				    M_WAITOK|M_ZERO);
			} else
				*pp = P_ANCHORED;
			be_lun->cap_used += be_lun->pblocksize;
		} else if (*pp == P_ANCHORED && op == GP_WRITE) {
			*pp = malloc(be_lun->pblocksize, M_RAMDISK,
			    M_WAITOK|M_ZERO);
		}
		sx_xunlock(&be_lun->page_lock);
		return ((uint8_t *)*pp);
	} else {
		sx_slock(&be_lun->page_lock);
		p = be_lun->pages;
		for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
			if (p == NULL)
				break;
			i = pn >> s;
			p = (uint8_t **)p[i];
			pn -= i << s;
		}
		sx_sunlock(&be_lun->page_lock);
		if ((p == P_UNMAPPED || p == P_ANCHORED) && op == GP_READ)
			return (be_lun->zero_page);
		return ((uint8_t *)p);
	}
};

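/*
 * Release the backing page (or anchor) for page number "pn", if any, and
 * return its space to the LUN capacity.
 */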
static void
ctl_backend_ramdisk_unmappage(struct ctl_be_ramdisk_lun *be_lun, off_t pn)
{
	uint8_t ***pp;
	off_t i;
	int s;

	if (be_lun->cap_bytes == 0)
		return;
	sx_xlock(&be_lun->page_lock);
	pp = &be_lun->pages;
	for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
		if (*pp == NULL)
			goto noindir;
		i = pn >> s;
		pp = (uint8_t ***)&(*pp)[i];
		pn -= i << s;
	}
	if (*pp == P_ANCHORED) {
		be_lun->cap_used -= be_lun->pblocksize;
		*pp = P_UNMAPPED;
	} else if (*pp != P_UNMAPPED) {
		free(*pp, M_RAMDISK);
		be_lun->cap_used -= be_lun->pblocksize;
		*pp = P_UNMAPPED;
	}
noindir:
	sx_xunlock(&be_lun->page_lock);
};

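/*
 * Replace the backing page for page number "pn" with an anchor, charging the
 * LUN capacity for it if the page was previously unmapped.
 */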
static void
ctl_backend_ramdisk_anchorpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn)
{
	uint8_t ***pp;
	off_t i;
	int s;

	if (be_lun->cap_bytes == 0)
		return;
	sx_xlock(&be_lun->page_lock);
	pp = &be_lun->pages;
	for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
		if (*pp == NULL)
			goto noindir;
		i = pn >> s;
		pp = (uint8_t ***)&(*pp)[i];
		pn -= i << s;
	}
	if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) {
		be_lun->cap_used += be_lun->pblocksize;
		*pp = P_ANCHORED;
	} else if (*pp != P_ANCHORED) {
		free(*pp, M_RAMDISK);
		*pp = P_ANCHORED;
	}
noindir:
	sx_xunlock(&be_lun->page_lock);
};

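/*
 * Recursively free a page directory with "indir" levels of indirection and
 * all data pages below it.
 */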
static void
ctl_backend_ramdisk_freeallpages(uint8_t **p, int indir)
{
	int i;

	if (p == NULL)
		return;
	if (indir == 0) {
		free(p, M_RAMDISK);
		return;
	}
	for (i = 0; i < PPP; i++) {
		if (p[i] == NULL)
			continue;
		ctl_backend_ramdisk_freeallpages((uint8_t **)p[i], indir - 1);
	}
	free(p, M_RAMDISK);
};

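/* Return the number of leading bytes that are equal in "a" and "b". */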
static size_t
cmp(uint8_t *a, uint8_t *b, size_t size)
{
	size_t i;

	for (i = 0; i < size; i++) {
		if (a[i] != b[i])
			break;
	}
	return (i);
}

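/*
 * Compare the data just received from the initiator with the RAM disk
 * contents.  On mismatch, set MISCOMPARE sense with the offset of the first
 * differing byte and return non-zero.
 */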
static int
ctl_backend_ramdisk_cmp(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
	uint8_t *page;
	uint8_t info[8];
	uint64_t lba;
	u_int lbaoff, lbas, res, off;

	lbas = io->scsiio.kern_data_len / cbe_lun->blocksize;
	lba = ARGS(io)->lba + PRIV(io)->len - lbas;
	off = 0;
	for (; lbas > 0; lbas--, lba++) {
		page = ctl_backend_ramdisk_getpage(be_lun,
		    lba >> cbe_lun->pblockexp, GP_READ);
		lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
		page += lbaoff * cbe_lun->blocksize;
		res = cmp(io->scsiio.kern_data_ptr + off, page,
		    cbe_lun->blocksize);
		off += res;
		if (res < cbe_lun->blocksize)
			break;
	}
	if (lbas > 0) {
		off += io->scsiio.kern_rel_offset - io->scsiio.kern_data_len;
		scsi_u64to8b(off, info);
		ctl_set_sense(&io->scsiio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_MISCOMPARE,
		    /*asc*/ 0x1D, /*ascq*/ 0x00,
		    /*type*/ SSD_ELEM_INFO,
		    /*size*/ sizeof(info), /*data*/ &info,
		    /*type*/ SSD_ELEM_NONE);
		return (1);
	}
	return (0);
}

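/*
 * Called when a DMA to or from the initiator has completed: account for the
 * transfer, check for errors, run any pending COMPARE check, and either
 * queue the next chunk of the I/O to the worker thread or complete it.
 */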
static int
ctl_backend_ramdisk_move_done(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
#ifdef CTL_TIME_IO
	struct bintime cur_bt;
#endif

	CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
#ifdef CTL_TIME_IO
	getbinuptime(&cur_bt);
	bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
	bintime_add(&io->io_hdr.dma_bt, &cur_bt);
#endif
	io->io_hdr.num_dmas++;
	if (io->scsiio.kern_sg_entries > 0)
		free(io->scsiio.kern_data_ptr, M_RAMDISK);
	io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
	if (io->io_hdr.flags & CTL_FLAG_ABORT) {
		;
	} else if (io->io_hdr.port_status != 0 &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
		ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1,
		    /*retry_count*/ io->io_hdr.port_status);
	} else if (io->scsiio.kern_data_resid != 0 &&
	    (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
		ctl_set_invalid_field_ciu(&io->scsiio);
	} else if ((io->io_hdr.port_status == 0) &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
		if (ARGS(io)->flags & CTL_LLF_COMPARE) {
			/* We have a data block ready for comparison. */
			if (ctl_backend_ramdisk_cmp(io))
				goto done;
		}
		if (ARGS(io)->len > PRIV(io)->len) {
			mtx_lock(&be_lun->queue_lock);
			STAILQ_INSERT_TAIL(&be_lun->cont_queue,
			    &io->io_hdr, links);
			mtx_unlock(&be_lun->queue_lock);
			taskqueue_enqueue(be_lun->io_taskqueue,
			    &be_lun->io_task);
			return (0);
		}
		ctl_set_success(&io->scsiio);
	}
done:
	ctl_data_submit_done(io);
	return (0);
}

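/*
 * Start the next chunk of a COMPARE operation: allocate a bounce buffer and
 * request the data to compare from the initiator; the actual comparison is
 * done in ctl_backend_ramdisk_move_done().
 */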
static void
ctl_backend_ramdisk_compare(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	u_int lbas, len;

	lbas = ARGS(io)->len - PRIV(io)->len;
	lbas = MIN(lbas, 131072 / cbe_lun->blocksize);
	len = lbas * cbe_lun->blocksize;

	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
	io->scsiio.kern_data_ptr = malloc(len, M_RAMDISK, M_WAITOK);
	io->scsiio.kern_data_len = len;
	io->scsiio.kern_sg_entries = 0;
	io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	PRIV(io)->len += lbas;
#ifdef CTL_TIME_IO
	getbinuptime(&io->io_hdr.dma_start_bt);
#endif
	ctl_datamove(io);
}

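/*
 * Start the next chunk of a READ or WRITE: look up (and for writes allocate)
 * the backing pages and pass them to ctl_datamove(), either directly or as a
 * scatter/gather list when the chunk spans several pages.
 */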
static void
ctl_backend_ramdisk_rw(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
	struct ctl_sg_entry *sg_entries;
	uint8_t *page;
	uint64_t lba;
	u_int i, len, lbaoff, lbas, sgs, off;
	getpage_op_t op;

	lba = ARGS(io)->lba + PRIV(io)->len;
	lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
	lbas = ARGS(io)->len - PRIV(io)->len;
	lbas = MIN(lbas, (SGPP << cbe_lun->pblockexp) - lbaoff);
	sgs = (lbas + lbaoff + be_lun->pblockmul - 1) >> cbe_lun->pblockexp;
	off = lbaoff * cbe_lun->blocksize;
	op = (ARGS(io)->flags & CTL_LLF_WRITE) ? GP_WRITE : GP_READ;
	if (sgs > 1) {
		io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
		    sgs, M_RAMDISK, M_WAITOK);
		sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
		len = lbas * cbe_lun->blocksize;
		for (i = 0; i < sgs; i++) {
			page = ctl_backend_ramdisk_getpage(be_lun,
			    (lba >> cbe_lun->pblockexp) + i, op);
			if (page == P_UNMAPPED || page == P_ANCHORED) {
				free(io->scsiio.kern_data_ptr, M_RAMDISK);
nospc:
				ctl_set_space_alloc_fail(&io->scsiio);
				ctl_data_submit_done(io);
				return;
			}
			sg_entries[i].addr = page + off;
			sg_entries[i].len = MIN(len, be_lun->pblocksize - off);
			len -= sg_entries[i].len;
			off = 0;
		}
	} else {
		page = ctl_backend_ramdisk_getpage(be_lun,
		    lba >> cbe_lun->pblockexp, op);
		if (page == P_UNMAPPED || page == P_ANCHORED)
			goto nospc;
		sgs = 0;
		io->scsiio.kern_data_ptr = page + off;
	}

	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
	io->scsiio.kern_data_len = lbas * cbe_lun->blocksize;
	io->scsiio.kern_sg_entries = sgs;
	io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	PRIV(io)->len += lbas;
	if ((ARGS(io)->flags & CTL_LLF_READ) &&
	    ARGS(io)->len <= PRIV(io)->len) {
		ctl_set_success(&io->scsiio);
		ctl_serseq_done(io);
	}
#ifdef CTL_TIME_IO
	getbinuptime(&io->io_hdr.dma_start_bt);
#endif
	ctl_datamove(io);
}

static int
ctl_backend_ramdisk_submit(union ctl_io *io)
{
	struct ctl_lba_len_flags *lbalen = ARGS(io);

	if (lbalen->flags & CTL_LLF_VERIFY) {
		ctl_set_success(&io->scsiio);
		ctl_data_submit_done(io);
		return (CTL_RETVAL_COMPLETE);
	}
	PRIV(io)->len = 0;
	if (lbalen->flags & CTL_LLF_COMPARE)
		ctl_backend_ramdisk_compare(io);
	else
		ctl_backend_ramdisk_rw(io);
	return (CTL_RETVAL_COMPLETE);
}

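/*
 * Task queue worker: continue I/Os that ctl_backend_ramdisk_move_done()
 * queued because they still have data left to transfer.
 */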
static void
ctl_backend_ramdisk_worker(void *context, int pending)
{
	struct ctl_be_ramdisk_lun *be_lun;
	union ctl_io *io;

	be_lun = (struct ctl_be_ramdisk_lun *)context;
	mtx_lock(&be_lun->queue_lock);
	for (;;) {
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
		if (io != NULL) {
			STAILQ_REMOVE(&be_lun->cont_queue, &io->io_hdr,
			    ctl_io_hdr, links);
			mtx_unlock(&be_lun->queue_lock);
			if (ARGS(io)->flags & CTL_LLF_COMPARE)
				ctl_backend_ramdisk_compare(io);
			else
				ctl_backend_ramdisk_rw(io);
			mtx_lock(&be_lun->queue_lock);
			continue;
		}

		/*
		 * If we get here, there is no work left in the queues, so
		 * just break out and let the task queue go to sleep.
		 */
		break;
	}
	mtx_unlock(&be_lun->queue_lock);
}

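/*
 * Handle the GET LBA STATUS service action: report whether the backing page
 * of the requested LBA is mapped (0), deallocated (1) or anchored (2).
 */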
static int
ctl_backend_ramdisk_gls(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
	struct scsi_get_lba_status_data *data;
	uint8_t *page;
	u_int lbaoff;

	data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr;
	scsi_u64to8b(ARGS(io)->lba, data->descr[0].addr);
	lbaoff = ARGS(io)->lba & ~(UINT_MAX << cbe_lun->pblockexp);
	scsi_ulto4b(be_lun->pblockmul - lbaoff, data->descr[0].length);
	page = ctl_backend_ramdisk_getpage(be_lun,
	    ARGS(io)->lba >> cbe_lun->pblockexp, GP_OTHER);
	if (page == P_UNMAPPED)
		data->descr[0].status = 1;
	else if (page == P_ANCHORED)
		data->descr[0].status = 2;
	else
		data->descr[0].status = 0;
	ctl_config_read_done(io);
	return (CTL_RETVAL_COMPLETE);
}

static int
ctl_backend_ramdisk_config_read(union ctl_io *io)
{
	int retval = 0;

	switch (io->scsiio.cdb[0]) {
	case SERVICE_ACTION_IN:
		if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
			retval = ctl_backend_ramdisk_gls(io);
			break;
		}
		ctl_set_invalid_field(&io->scsiio,
		    /*sks_valid*/ 1,
		    /*command*/ 1,
		    /*field*/ 1,
		    /*bit_valid*/ 1,
		    /*bit*/ 4);
		ctl_config_read_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_read_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}
	return (retval);
}

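/*
 * Delete (unmap) or anchor a range of LBAs: zero the partial pages at either
 * end of the range and unmap or anchor every whole page in between.
 */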
static void
ctl_backend_ramdisk_delete(struct ctl_be_lun *cbe_lun, off_t lba, off_t len,
    int anchor)
{
	struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
	uint8_t *page;
	uint64_t p, lp;
	u_int lbaoff;
	getpage_op_t op = anchor ? GP_ANCHOR : GP_OTHER;

	/* Partially zero first partial page. */
	p = lba >> cbe_lun->pblockexp;
	lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
	if (lbaoff != 0) {
		page = ctl_backend_ramdisk_getpage(be_lun, p, op);
		if (page != P_UNMAPPED && page != P_ANCHORED) {
			memset(page + lbaoff * cbe_lun->blocksize, 0,
			    min(len, be_lun->pblockmul - lbaoff) *
			    cbe_lun->blocksize);
		}
		p++;
	}

	/* Partially zero last partial page. */
	lp = (lba + len) >> cbe_lun->pblockexp;
	lbaoff = (lba + len) & ~(UINT_MAX << cbe_lun->pblockexp);
	if (p <= lp && lbaoff != 0) {
		page = ctl_backend_ramdisk_getpage(be_lun, lp, op);
		if (page != P_UNMAPPED && page != P_ANCHORED)
			memset(page, 0, lbaoff * cbe_lun->blocksize);
	}

	/* Delete remaining full pages. */
	if (anchor) {
		for (; p < lp; p++)
			ctl_backend_ramdisk_anchorpage(be_lun, p);
	} else {
		for (; p < lp; p++)
			ctl_backend_ramdisk_unmappage(be_lun, p);
	}
}

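/*
 * Handle WRITE SAME: with the UNMAP bit just delete or anchor the range,
 * otherwise write the single-block pattern (or zeroes for NDOB) to every LBA.
 */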
static void
ctl_backend_ramdisk_ws(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
	struct ctl_lba_len_flags *lbalen = ARGS(io);
	uint8_t *page;
	uint64_t lba;
	u_int lbaoff, lbas;

	if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR | SWS_NDOB)) {
		ctl_set_invalid_field(&io->scsiio,
		    /*sks_valid*/ 1,
		    /*command*/ 1,
		    /*field*/ 1,
		    /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_config_write_done(io);
		return;
	}
	if (lbalen->flags & SWS_UNMAP) {
		ctl_backend_ramdisk_delete(cbe_lun, lbalen->lba, lbalen->len,
		    (lbalen->flags & SWS_ANCHOR) != 0);
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		return;
	}

	for (lba = lbalen->lba, lbas = lbalen->len; lbas > 0; lba++, lbas--) {
		page = ctl_backend_ramdisk_getpage(be_lun,
		    lba >> cbe_lun->pblockexp, GP_WRITE);
		if (page == P_UNMAPPED || page == P_ANCHORED) {
			ctl_set_space_alloc_fail(&io->scsiio);
			ctl_data_submit_done(io);
			return;
		}
		lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
		page += lbaoff * cbe_lun->blocksize;
		if (lbalen->flags & SWS_NDOB) {
			memset(page, 0, cbe_lun->blocksize);
		} else {
			memcpy(page, io->scsiio.kern_data_ptr,
			    cbe_lun->blocksize);
		}
		if (lbalen->flags & SWS_LBDATA)
			scsi_ulto4b(lba, page);
	}
	ctl_set_success(&io->scsiio);
	ctl_config_write_done(io);
}

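/*
 * Handle UNMAP: walk the descriptor list and delete (or anchor) each LBA
 * range it names.
 */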
static void
ctl_backend_ramdisk_unmap(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_ptr_len_flags *ptrlen = (struct ctl_ptr_len_flags *)ARGS(io);
	struct scsi_unmap_desc *buf, *end;

	if ((ptrlen->flags & ~SU_ANCHOR) != 0) {
		ctl_set_invalid_field(&io->scsiio,
		    /*sks_valid*/ 0,
		    /*command*/ 0,
		    /*field*/ 0,
		    /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_config_write_done(io);
		return;
	}

	buf = (struct scsi_unmap_desc *)ptrlen->ptr;
	end = buf + ptrlen->len / sizeof(*buf);
	for (; buf < end; buf++) {
		ctl_backend_ramdisk_delete(cbe_lun,
		    scsi_8btou64(buf->lba), scsi_4btoul(buf->length),
		    (ptrlen->flags & SU_ANCHOR) != 0);
	}

	ctl_set_success(&io->scsiio);
	ctl_config_write_done(io);
}

static int
ctl_backend_ramdisk_config_write(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	int retval = 0;

	switch (io->scsiio.cdb[0]) {
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		/* We have no cache to flush. */
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	case START_STOP_UNIT: {
		struct scsi_start_stop_unit *cdb;

		cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
		if ((cdb->how & SSS_PC_MASK) != 0) {
			ctl_set_success(&io->scsiio);
			ctl_config_write_done(io);
			break;
		}
		if (cdb->how & SSS_START) {
			if (cdb->how & SSS_LOEJ)
				ctl_lun_has_media(cbe_lun);
			ctl_start_lun(cbe_lun);
		} else {
			ctl_stop_lun(cbe_lun);
			if (cdb->how & SSS_LOEJ)
				ctl_lun_ejected(cbe_lun);
		}
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	}
	case PREVENT_ALLOW:
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	case WRITE_SAME_10:
	case WRITE_SAME_16:
		ctl_backend_ramdisk_ws(io);
		break;
	case UNMAP:
		ctl_backend_ramdisk_unmap(io);
		break;
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_write_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}

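/*
 * Report the "blocksused" and "blocksavail" LUN attributes for LUNs created
 * with a capacity limit.
 */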
static uint64_t
ctl_backend_ramdisk_lun_attr(void *arg, const char *attrname)
{
	struct ctl_be_ramdisk_lun *be_lun = arg;
	uint64_t val;

	val = UINT64_MAX;
	if (be_lun->cap_bytes == 0)
		return (val);
	sx_slock(&be_lun->page_lock);
	if (strcmp(attrname, "blocksused") == 0) {
		val = be_lun->cap_used / be_lun->cbe_lun.blocksize;
	} else if (strcmp(attrname, "blocksavail") == 0) {
		val = (be_lun->cap_bytes - be_lun->cap_used) /
		    be_lun->cbe_lun.blocksize;
	}
	sx_sunlock(&be_lun->page_lock);
	return (val);
}

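/*
 * Character device ioctl entry point: handle CTL_LUN_REQ requests from
 * userland by dispatching LUN create, remove and modify operations.
 */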
static int
ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
    int flag, struct thread *td)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;
	struct ctl_lun_req *lun_req;
	int retval;

	retval = 0;
	switch (cmd) {
	case CTL_LUN_REQ:
		lun_req = (struct ctl_lun_req *)addr;
		switch (lun_req->reqtype) {
		case CTL_LUNREQ_CREATE:
			retval = ctl_backend_ramdisk_create(softc, lun_req);
			break;
		case CTL_LUNREQ_RM:
			retval = ctl_backend_ramdisk_rm(softc, lun_req);
			break;
		case CTL_LUNREQ_MODIFY:
			retval = ctl_backend_ramdisk_modify(softc, lun_req);
			break;
		default:
			lun_req->status = CTL_LUN_ERROR;
			snprintf(lun_req->error_str, sizeof(lun_req->error_str),
			    "%s: invalid LUN request type %d", __func__,
			    lun_req->reqtype);
			break;
		}
		break;
	default:
		retval = ENOTTY;
		break;
	}

	return (retval);
}

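/*
 * Remove a LUN: disable and invalidate it, wait for the shutdown callback,
 * then tear down the task queue and free all backing pages.
 */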
static int
ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_lun_rm_params *params;
	int retval;

	params = &req->reqdata.rm;
	mtx_lock(&softc->lock);
	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->cbe_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);
	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: LUN %u is not managed by the ramdisk backend",
		    __func__, params->lun_id);
		goto bailout_error;
	}

	retval = ctl_disable_lun(&be_lun->cbe_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: error %d returned from ctl_disable_lun() for "
		    "LUN %d", __func__, retval, params->lun_id);
		goto bailout_error;
	}

	/*
	 * Set the waiting flag before we invalidate the LUN.  Our shutdown
	 * routine can be called any time after we invalidate the LUN,
	 * and can be called from our context.
	 *
	 * This tells the shutdown routine that we are waiting, or are about
	 * to wait, for the shutdown to happen.
	 */
	mtx_lock(&softc->lock);
	be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
	mtx_unlock(&softc->lock);

	retval = ctl_invalidate_lun(&be_lun->cbe_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: error %d returned from ctl_invalidate_lun() for "
		    "LUN %d", __func__, retval, params->lun_id);
		mtx_lock(&softc->lock);
		be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
		mtx_unlock(&softc->lock);
		goto bailout_error;
	}

	mtx_lock(&softc->lock);
	while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

	/*
	 * We only remove this LUN from the list and free it (below) if
	 * retval == 0.  If the user interrupted the wait, we just bail out
	 * without actually freeing the LUN.  We let the shutdown routine
	 * free the LUN if that happens.
	 */
	if (retval == 0) {
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
		    links);
		softc->num_luns--;
	}

	mtx_unlock(&softc->lock);

	if (retval == 0) {
		taskqueue_drain_all(be_lun->io_taskqueue);
		taskqueue_free(be_lun->io_taskqueue);
		nvlist_destroy(be_lun->cbe_lun.options);
		free(be_lun->zero_page, M_RAMDISK);
		ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir);
		sx_destroy(&be_lun->page_lock);
		mtx_destroy(&be_lun->queue_lock);
		free(be_lun, M_RAMDISK);
	}

	req->status = CTL_LUN_OK;
	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;
	return (0);
}

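/*
 * Create a new RAM disk (or black hole) LUN from the request parameters and
 * options, register it with CTL and wait for its configuration to complete.
 */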
static int
ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_be_lun *cbe_lun;
	struct ctl_lun_create_params *params;
	const char *value;
	char tmpstr[32];
	uint64_t t;
	int retval;

	retval = 0;
	params = &req->reqdata.create;

	be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | M_WAITOK);
	cbe_lun = &be_lun->cbe_lun;
	cbe_lun->be_lun = be_lun;
	cbe_lun->options = nvlist_clone(req->args_nvl);
	be_lun->params = req->reqdata.create;
	be_lun->softc = softc;
	sprintf(be_lun->lunname, "cram%d", softc->num_luns);

	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
		cbe_lun->lun_type = params->device_type;
	else
		cbe_lun->lun_type = T_DIRECT;
	be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED;
	cbe_lun->flags = 0;
	value = dnvlist_get_string(cbe_lun->options, "ha_role", NULL);
	if (value != NULL) {
		if (strcmp(value, "primary") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
	} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
		cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;

	be_lun->pblocksize = PAGE_SIZE;
	value = dnvlist_get_string(cbe_lun->options, "pblocksize", NULL);
	if (value != NULL) {
		ctl_expand_number(value, &t);
		be_lun->pblocksize = t;
	}
	if (be_lun->pblocksize < 512 || be_lun->pblocksize > 131072) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: unsupported pblocksize %u", __func__,
		    be_lun->pblocksize);
		goto bailout_error;
	}

	if (cbe_lun->lun_type == T_DIRECT ||
	    cbe_lun->lun_type == T_CDROM) {
		if (params->blocksize_bytes != 0)
			cbe_lun->blocksize = params->blocksize_bytes;
		else if (cbe_lun->lun_type == T_CDROM)
			cbe_lun->blocksize = 2048;
		else
			cbe_lun->blocksize = 512;
		be_lun->pblockmul = be_lun->pblocksize / cbe_lun->blocksize;
		if (be_lun->pblockmul < 1 || !powerof2(be_lun->pblockmul)) {
			snprintf(req->error_str, sizeof(req->error_str),
			    "%s: pblocksize %u not exp2 of blocksize %u",
			    __func__,
			    be_lun->pblocksize, cbe_lun->blocksize);
			goto bailout_error;
		}
		if (params->lun_size_bytes < cbe_lun->blocksize) {
			snprintf(req->error_str, sizeof(req->error_str),
			    "%s: LUN size %ju < blocksize %u", __func__,
			    params->lun_size_bytes, cbe_lun->blocksize);
			goto bailout_error;
		}
		be_lun->size_blocks = params->lun_size_bytes / cbe_lun->blocksize;
		be_lun->size_bytes = be_lun->size_blocks * cbe_lun->blocksize;
		be_lun->indir = 0;
		t = be_lun->size_bytes / be_lun->pblocksize;
		while (t > 1) {
			t /= PPP;
			be_lun->indir++;
		}
		cbe_lun->maxlba = be_lun->size_blocks - 1;
		cbe_lun->pblockexp = fls(be_lun->pblockmul) - 1;
		cbe_lun->pblockoff = 0;
		cbe_lun->ublockexp = cbe_lun->pblockexp;
		cbe_lun->ublockoff = 0;
		cbe_lun->atomicblock = be_lun->pblocksize;
		cbe_lun->opttxferlen = SGPP * be_lun->pblocksize;
		value = dnvlist_get_string(cbe_lun->options, "capacity", NULL);
		if (value != NULL)
			ctl_expand_number(value, &be_lun->cap_bytes);
	} else {
		be_lun->pblockmul = 1;
		cbe_lun->pblockexp = 0;
	}

	/* Tell the user the blocksize we ended up using */
	params->blocksize_bytes = cbe_lun->blocksize;
	params->lun_size_bytes = be_lun->size_bytes;

	value = dnvlist_get_string(cbe_lun->options, "unmap", NULL);
	if (value == NULL || strcmp(value, "off") != 0)
		cbe_lun->flags |= CTL_LUN_FLAG_UNMAP;
	value = dnvlist_get_string(cbe_lun->options, "readonly", NULL);
	if (value != NULL) {
		if (strcmp(value, "on") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
	} else if (cbe_lun->lun_type != T_DIRECT)
		cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
	cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
	value = dnvlist_get_string(cbe_lun->options, "serseq", NULL);
	if (value != NULL && strcmp(value, "on") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_ON;
	else if (value != NULL && strcmp(value, "read") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_READ;
	else if (value != NULL && strcmp(value, "off") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;

	if (params->flags & CTL_LUN_FLAG_ID_REQ) {
		cbe_lun->req_lun_id = params->req_lun_id;
		cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ;
	} else
		cbe_lun->req_lun_id = 0;

	cbe_lun->lun_shutdown = ctl_backend_ramdisk_lun_shutdown;
	cbe_lun->lun_config_status = ctl_backend_ramdisk_lun_config_status;
	cbe_lun->be = &ctl_be_ramdisk_driver;
	if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%04d",
		    softc->num_luns);
		strncpy((char *)cbe_lun->serial_num, tmpstr,
		    MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr)));

		/* Tell the user what we used for a serial number */
		strncpy((char *)params->serial_num, tmpstr,
		    MIN(sizeof(params->serial_num), sizeof(tmpstr)));
	} else {
		strncpy((char *)cbe_lun->serial_num, params->serial_num,
		    MIN(sizeof(cbe_lun->serial_num),
		    sizeof(params->serial_num)));
	}
	if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%04d", softc->num_luns);
		strncpy((char *)cbe_lun->device_id, tmpstr,
		    MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr)));

		/* Tell the user what we used for a device ID */
		strncpy((char *)params->device_id, tmpstr,
		    MIN(sizeof(params->device_id), sizeof(tmpstr)));
	} else {
		strncpy((char *)cbe_lun->device_id, params->device_id,
		    MIN(sizeof(cbe_lun->device_id),
		    sizeof(params->device_id)));
	}

	STAILQ_INIT(&be_lun->cont_queue);
	sx_init(&be_lun->page_lock, "cram page lock");
	if (be_lun->cap_bytes == 0) {
		be_lun->indir = 0;
		be_lun->pages = malloc(be_lun->pblocksize, M_RAMDISK, M_WAITOK);
	}
	be_lun->zero_page = malloc(be_lun->pblocksize, M_RAMDISK,
	    M_WAITOK|M_ZERO);
	mtx_init(&be_lun->queue_lock, "cram queue lock", NULL, MTX_DEF);
	TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker,
	    be_lun);

	be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
	    taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
	if (be_lun->io_taskqueue == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: Unable to create taskqueue", __func__);
		goto bailout_error;
	}

	retval = taskqueue_start_threads(&be_lun->io_taskqueue,
					 /*num threads*/1,
					 /*priority*/PUSER,
					 /*thread name*/
					 "%s taskq", be_lun->lunname);
	if (retval != 0)
		goto bailout_error;

	mtx_lock(&softc->lock);
	softc->num_luns++;
	STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);
	mtx_unlock(&softc->lock);

	retval = ctl_add_lun(&be_lun->cbe_lun);
	if (retval != 0) {
		mtx_lock(&softc->lock);
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
		    links);
		softc->num_luns--;
		mtx_unlock(&softc->lock);
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: ctl_add_lun() returned error %d, see dmesg for "
		    "details", __func__, retval);
		retval = 0;
		goto bailout_error;
	}

	mtx_lock(&softc->lock);

	/*
	 * Tell the config_status routine that we're waiting so it won't
	 * clean up the LUN in the event of an error.
	 */
	be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;

	while (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

	if (be_lun->flags & CTL_BE_RAMDISK_LUN_CONFIG_ERR) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: LUN configuration error, see dmesg for details",
		    __func__);
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
		    links);
		softc->num_luns--;
		mtx_unlock(&softc->lock);
		goto bailout_error;
	} else {
		params->req_lun_id = cbe_lun->lun_id;
	}
	mtx_unlock(&softc->lock);

	req->status = CTL_LUN_OK;
	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;
	if (be_lun != NULL) {
		if (be_lun->io_taskqueue != NULL)
			taskqueue_free(be_lun->io_taskqueue);
		nvlist_destroy(cbe_lun->options);
		free(be_lun->zero_page, M_RAMDISK);
		ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir);
		sx_destroy(&be_lun->page_lock);
		mtx_destroy(&be_lun->queue_lock);
		free(be_lun, M_RAMDISK);
	}
	return (retval);
}

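/*
 * Modify an existing LUN: update its HA role and size from the request.
 */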
static int
ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_be_lun *cbe_lun;
	struct ctl_lun_modify_params *params;
	const char *value;
	uint32_t blocksize;
	int wasprim;

	params = &req->reqdata.modify;

	mtx_lock(&softc->lock);
	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->cbe_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);
	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: LUN %u is not managed by the ramdisk backend",
		    __func__, params->lun_id);
		goto bailout_error;
	}
	cbe_lun = &be_lun->cbe_lun;

	if (params->lun_size_bytes != 0)
		be_lun->params.lun_size_bytes = params->lun_size_bytes;

	nvlist_destroy(cbe_lun->options);
	cbe_lun->options = nvlist_clone(req->args_nvl);

	wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY);
	value = dnvlist_get_string(cbe_lun->options, "ha_role", NULL);
	if (value != NULL) {
		if (strcmp(value, "primary") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
		else
			cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
	} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
		cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
	else
		cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
	if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) {
		if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)
			ctl_lun_primary(cbe_lun);
		else
			ctl_lun_secondary(cbe_lun);
	}

	blocksize = be_lun->cbe_lun.blocksize;
	if (be_lun->params.lun_size_bytes < blocksize) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: LUN size %ju < blocksize %u", __func__,
		    be_lun->params.lun_size_bytes, blocksize);
		goto bailout_error;
	}
	be_lun->size_blocks = be_lun->params.lun_size_bytes / blocksize;
	be_lun->size_bytes = be_lun->size_blocks * blocksize;
	be_lun->cbe_lun.maxlba = be_lun->size_blocks - 1;
	ctl_lun_capacity_changed(&be_lun->cbe_lun);

	/* Tell the user the exact size we ended up using */
	params->lun_size_bytes = be_lun->size_bytes;

	req->status = CTL_LUN_OK;
	return (0);

bailout_error:
	req->status = CTL_LUN_ERROR;
	return (0);
}

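/*
 * Called by CTL once the LUN has been invalidated: wake up a waiting thread
 * if there is one, otherwise free the LUN here.
 */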
static void
ctl_backend_ramdisk_lun_shutdown(void *be_lun)
{
	struct ctl_be_ramdisk_lun *lun = be_lun;
	struct ctl_be_ramdisk_softc *softc = lun->softc;

	mtx_lock(&softc->lock);
	lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;
	if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
		wakeup(lun);
	} else {
		STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
		    links);
		softc->num_luns--;
		free(be_lun, M_RAMDISK);
	}
	mtx_unlock(&softc->lock);
}

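/*
 * Called by CTL when LUN configuration completes: enable the LUN on success;
 * on error let the waiting thread (or this routine) clean up.
 */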
static void
ctl_backend_ramdisk_lun_config_status(void *be_lun,
    ctl_lun_config_status status)
{
	struct ctl_be_ramdisk_lun *lun;
	struct ctl_be_ramdisk_softc *softc;

	lun = (struct ctl_be_ramdisk_lun *)be_lun;
	softc = lun->softc;

	if (status == CTL_LUN_CONFIG_OK) {
		mtx_lock(&softc->lock);
		lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;
		if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING)
			wakeup(lun);
		mtx_unlock(&softc->lock);

		/*
		 * We successfully added the LUN, attempt to enable it.
		 */
		if (ctl_enable_lun(&lun->cbe_lun) != 0) {
			printf("%s: ctl_enable_lun() failed!\n", __func__);
			if (ctl_invalidate_lun(&lun->cbe_lun) != 0) {
				printf("%s: ctl_invalidate_lun() failed!\n",
				    __func__);
			}
		}

		return;
	}

	mtx_lock(&softc->lock);
	lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;

	/*
	 * If we have a user waiting, let him handle the cleanup.  If not,
	 * clean things up here.
	 */
	if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
		lun->flags |= CTL_BE_RAMDISK_LUN_CONFIG_ERR;
		wakeup(lun);
	} else {
		STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
		    links);
		softc->num_luns--;
		free(lun, M_RAMDISK);
	}
	mtx_unlock(&softc->lock);
}