/*******************************************************************************
 *
 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
 * for emulated SAS initiator ports
 *
 * © Copyright 2011-2013 Datera, Inc.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Author: Nicholas A. Bellinger <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "tcm_loop.h"

/* Map the struct device embedded in tcm_loop_hba back to its container */
#define to_tcm_loop_hba(hba)	container_of(hba, struct tcm_loop_hba, dev)

/* Workqueue used to defer command submission out of interrupt context */
static struct workqueue_struct *tcm_loop_workqueue;
/* Slab cache for struct tcm_loop_cmd allocations */
static struct kmem_cache *tcm_loop_cmd_cache;

/* Monotonic counter used to name emulated adapters; never decremented */
static int tcm_loop_hba_no_cnt;

static int tcm_loop_queue_status(struct se_cmd *se_cmd);

/*
 * Called from struct target_core_fabric_ops->check_stop_free()
 */
static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
{
	/*
	 * Do not release struct se_cmd's containing a valid TMR
	 * pointer.  These will be released directly in tcm_loop_device_reset()
	 * with transport_generic_free_cmd().
	 */
	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
		return 0;
	/*
	 * Release the struct se_cmd, which will make a callback to release
	 * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
	 */
	transport_generic_free_cmd(se_cmd, 0);
	return 1;
}

/* Fabric ->release_cmd(): return the tcm_loop_cmd to its slab cache */
static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);

	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
}

/* ->show_info() for the emulated Scsi_Host (procfs output) */
static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	seq_printf(m, "tcm_loop_proc_info()\n");
	return 0;
}

static int tcm_loop_driver_probe(struct device *);
static int tcm_loop_driver_remove(struct device *);

/* Every device on the pseudo bus matches the single tcm_loop driver */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type tcm_loop_lld_bus = {
	.name			= "tcm_loop_bus",
	.match			= pseudo_lld_bus_match,
	.probe			= tcm_loop_driver_probe,
	.remove			= tcm_loop_driver_remove,
};

static struct device_driver tcm_loop_driverfs = {
	.name			= "tcm_loop",
	.bus			= &tcm_loop_lld_bus,
};
/*
 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
 */
static struct device *tcm_loop_primary;

/*
 * Deferred work that maps an incoming struct scsi_cmnd onto a target-core
 * struct se_cmd and submits it; runs in process context via
 * tcm_loop_workqueue (queued from tcm_loop_queuecommand()).
 */
static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0, transfer_length;
	int rc;

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		goto out_done;
	}
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
				" does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}
	if (scsi_bidi_cmnd(sc)) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;

	}

	transfer_length = scsi_transfer_length(sc);
	if (!scsi_prot_sg_count(sc) &&
	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
		se_cmd->prot_pto = true;
		/*
		 * loopback transport doesn't support
		 * WRITE_GENERATE, READ_STRIP protection
		 * information operations, go ahead unprotected.
		 */
		transfer_length = scsi_bufflen(sc);
	}

	se_cmd->tag = tl_cmd->sc_cmd_tag;
	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
			transfer_length, TCM_SIMPLE_TAG,
			sc->sc_data_direction, 0,
			scsi_sglist(sc), scsi_sg_count(sc),
			sgl_bidi, sgl_bidi_count,
			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
	if (rc < 0) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	return;

out_done:
	/* Submission never happened: free tl_cmd here and complete to SCSI ML */
	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	sc->scsi_done(sc);
	return;
}

/*
 * ->queuecommand can be and usually is called from interrupt context, so
 * defer the actual submission to a workqueue.
 */
static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
	struct tcm_loop_cmd *tl_cmd;

	pr_debug("tcm_loop_queuecommand() %d:%d:%d:%llu got CDB: 0x%02x"
		" scsi_buf_len: %u\n", sc->device->host->host_no,
		sc->device->id, sc->device->channel, sc->device->lun,
		sc->cmnd[0], scsi_bufflen(sc));

	/* GFP_ATOMIC: this path may run in interrupt context */
	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
	if (!tl_cmd) {
		pr_err("Unable to allocate struct tcm_loop_cmd\n");
		set_host_byte(sc, DID_ERROR);
		sc->scsi_done(sc);
		return 0;
	}

	tl_cmd->sc = sc;
	tl_cmd->sc_cmd_tag = sc->request->tag;
	INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
	queue_work(tcm_loop_workqueue, &tl_cmd->work);
	return 0;
}

/*
 * Called from SCSI EH process context to issue a LUN_RESET TMR
 * to struct scsi_device
 */
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
			      u64 lun, int task, enum tcm_tmreq_table tmr)
{
	struct se_cmd *se_cmd = NULL;
	struct se_session *se_sess;
	struct se_portal_group *se_tpg;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_cmd *tl_cmd = NULL;
	struct tcm_loop_tmr *tl_tmr = NULL;
	int ret = TMR_FUNCTION_FAILED, rc;

	/*
	 * Locate the tl_nexus and se_sess pointers
	 */
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		pr_err("Unable to perform device reset without"
				" active I_T Nexus\n");
		return ret;
	}

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
	if (!tl_cmd) {
		pr_err("Unable to allocate memory for tl_cmd\n");
		return ret;
	}

	tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
	if (!tl_tmr) {
		pr_err("Unable to allocate memory for tl_tmr\n");
		goto release;
	}
	init_waitqueue_head(&tl_tmr->tl_tmr_wait);

	se_cmd = &tl_cmd->tl_se_cmd;
	se_tpg = &tl_tpg->tl_se_tpg;
	se_sess = tl_tpg->tl_nexus->se_sess;
	/*
	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
				DMA_NONE, TCM_SIMPLE_TAG,
				&tl_cmd->tl_sense_buf[0]);

	rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
	if (rc < 0)
		goto release;

	if (tmr == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = task;

	/*
	 * Locate the underlying TCM struct se_lun
	 */
	if (transport_lookup_tmr_lun(se_cmd, lun) < 0) {
		ret = TMR_LUN_DOES_NOT_EXIST;
		goto release;
	}
	/*
	 * Queue the TMR to TCM Core and sleep waiting for
	 * tcm_loop_queue_tm_rsp() to wake us up.
	 */
	transport_generic_handle_tmr(se_cmd);
	wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
	/*
	 * The TMR LUN_RESET has completed, check the response status and
	 * then release allocations.
	 */
	ret = se_cmd->se_tmr_req->response;
release:
	/*
	 * se_cmd is non-NULL only once transport_init_se_cmd() has run;
	 * after that point target-core owns the teardown of tl_cmd.
	 */
	if (se_cmd)
		transport_generic_free_cmd(se_cmd, 1);
	else
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	kfree(tl_tmr);
	return ret;
}

/* SCSI EH ->eh_abort_handler: issue an ABORT_TASK TMR for sc's request tag */
static int tcm_loop_abort_task(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	int ret = FAILED;

	/*
	 * Locate the tcm_loop_hba_t pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
				 sc->request->tag, TMR_ABORT_TASK);
	return (ret == TMR_FUNCTION_COMPLETE) ?
SUCCESS : FAILED; 308 } 309 310 /* 311 * Called from SCSI EH process context to issue a LUN_RESET TMR 312 * to struct scsi_device 313 */ 314 static int tcm_loop_device_reset(struct scsi_cmnd *sc) 315 { 316 struct tcm_loop_hba *tl_hba; 317 struct tcm_loop_tpg *tl_tpg; 318 int ret = FAILED; 319 320 /* 321 * Locate the tcm_loop_hba_t pointer 322 */ 323 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); 324 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; 325 326 ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun, 327 0, TMR_LUN_RESET); 328 return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED; 329 } 330 331 static int tcm_loop_target_reset(struct scsi_cmnd *sc) 332 { 333 struct tcm_loop_hba *tl_hba; 334 struct tcm_loop_tpg *tl_tpg; 335 336 /* 337 * Locate the tcm_loop_hba_t pointer 338 */ 339 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); 340 if (!tl_hba) { 341 pr_err("Unable to perform device reset without" 342 " active I_T Nexus\n"); 343 return FAILED; 344 } 345 /* 346 * Locate the tl_tpg pointer from TargetID in sc->device->id 347 */ 348 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; 349 if (tl_tpg) { 350 tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE; 351 return SUCCESS; 352 } 353 return FAILED; 354 } 355 356 static int tcm_loop_slave_alloc(struct scsi_device *sd) 357 { 358 set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags); 359 return 0; 360 } 361 362 static struct scsi_host_template tcm_loop_driver_template = { 363 .show_info = tcm_loop_show_info, 364 .proc_name = "tcm_loopback", 365 .name = "TCM_Loopback", 366 .queuecommand = tcm_loop_queuecommand, 367 .change_queue_depth = scsi_change_queue_depth, 368 .eh_abort_handler = tcm_loop_abort_task, 369 .eh_device_reset_handler = tcm_loop_device_reset, 370 .eh_target_reset_handler = tcm_loop_target_reset, 371 .can_queue = 1024, 372 .this_id = -1, 373 .sg_tablesize = 256, 374 .cmd_per_lun = 1024, 375 .max_sectors = 0xFFFF, 376 .use_clustering = DISABLE_CLUSTERING, 377 
.slave_alloc = tcm_loop_slave_alloc, 378 .module = THIS_MODULE, 379 .track_queue_depth = 1, 380 }; 381 382 static int tcm_loop_driver_probe(struct device *dev) 383 { 384 struct tcm_loop_hba *tl_hba; 385 struct Scsi_Host *sh; 386 int error, host_prot; 387 388 tl_hba = to_tcm_loop_hba(dev); 389 390 sh = scsi_host_alloc(&tcm_loop_driver_template, 391 sizeof(struct tcm_loop_hba)); 392 if (!sh) { 393 pr_err("Unable to allocate struct scsi_host\n"); 394 return -ENODEV; 395 } 396 tl_hba->sh = sh; 397 398 /* 399 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata 400 */ 401 *((struct tcm_loop_hba **)sh->hostdata) = tl_hba; 402 /* 403 * Setup single ID, Channel and LUN for now.. 404 */ 405 sh->max_id = 2; 406 sh->max_lun = 0; 407 sh->max_channel = 0; 408 sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE; 409 410 host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | 411 SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | 412 SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION; 413 414 scsi_host_set_prot(sh, host_prot); 415 scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC); 416 417 error = scsi_add_host(sh, &tl_hba->dev); 418 if (error) { 419 pr_err("%s: scsi_add_host failed\n", __func__); 420 scsi_host_put(sh); 421 return -ENODEV; 422 } 423 return 0; 424 } 425 426 static int tcm_loop_driver_remove(struct device *dev) 427 { 428 struct tcm_loop_hba *tl_hba; 429 struct Scsi_Host *sh; 430 431 tl_hba = to_tcm_loop_hba(dev); 432 sh = tl_hba->sh; 433 434 scsi_remove_host(sh); 435 scsi_host_put(sh); 436 return 0; 437 } 438 439 static void tcm_loop_release_adapter(struct device *dev) 440 { 441 struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev); 442 443 kfree(tl_hba); 444 } 445 446 /* 447 * Called from tcm_loop_make_scsi_hba() in tcm_loop_configfs.c 448 */ 449 static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id) 450 { 451 int ret; 452 453 tl_hba->dev.bus = &tcm_loop_lld_bus; 454 tl_hba->dev.parent = tcm_loop_primary; 455 
tl_hba->dev.release = &tcm_loop_release_adapter; 456 dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id); 457 458 ret = device_register(&tl_hba->dev); 459 if (ret) { 460 pr_err("device_register() failed for" 461 " tl_hba->dev: %d\n", ret); 462 return -ENODEV; 463 } 464 465 return 0; 466 } 467 468 /* 469 * Called from tcm_loop_fabric_init() in tcl_loop_fabric.c to load the emulated 470 * tcm_loop SCSI bus. 471 */ 472 static int tcm_loop_alloc_core_bus(void) 473 { 474 int ret; 475 476 tcm_loop_primary = root_device_register("tcm_loop_0"); 477 if (IS_ERR(tcm_loop_primary)) { 478 pr_err("Unable to allocate tcm_loop_primary\n"); 479 return PTR_ERR(tcm_loop_primary); 480 } 481 482 ret = bus_register(&tcm_loop_lld_bus); 483 if (ret) { 484 pr_err("bus_register() failed for tcm_loop_lld_bus\n"); 485 goto dev_unreg; 486 } 487 488 ret = driver_register(&tcm_loop_driverfs); 489 if (ret) { 490 pr_err("driver_register() failed for" 491 "tcm_loop_driverfs\n"); 492 goto bus_unreg; 493 } 494 495 pr_debug("Initialized TCM Loop Core Bus\n"); 496 return ret; 497 498 bus_unreg: 499 bus_unregister(&tcm_loop_lld_bus); 500 dev_unreg: 501 root_device_unregister(tcm_loop_primary); 502 return ret; 503 } 504 505 static void tcm_loop_release_core_bus(void) 506 { 507 driver_unregister(&tcm_loop_driverfs); 508 bus_unregister(&tcm_loop_lld_bus); 509 root_device_unregister(tcm_loop_primary); 510 511 pr_debug("Releasing TCM Loop Core BUS\n"); 512 } 513 514 static char *tcm_loop_get_fabric_name(void) 515 { 516 return "loopback"; 517 } 518 519 static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg) 520 { 521 return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg); 522 } 523 524 static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg) 525 { 526 /* 527 * Return the passed NAA identifier for the Target Port 528 */ 529 return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0]; 530 } 531 532 static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg) 533 { 534 
	/*
	 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
	 * to represent the SCSI Target Port.
	 */
	return tl_tpg(se_tpg)->tl_tpgt;
}

/*
 * Returning (1) here allows for target_core_mod struct se_node_acl to be generated
 * based upon the incoming fabric dependent SCSI Initiator Port
 */
static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
{
	return 1;
}

static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
	return 0;
}

/*
 * Allow I_T Nexus full READ-WRITE access without explict Initiator Node ACLs for
 * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
 */
static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
{
	return 0;
}

/*
 * Because TCM_Loop does not use explict ACLs and MappedLUNs, this will
 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
 * It has been added here as a nop for target_fabric_tf_ops_check()
 */
static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
{
	return 0;
}

/* Report the configfs-set fabric protection type for this TPG */
static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);
	return tl_tpg->tl_fabric_prot_type;
}

static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
{
	return 1;
}

/* Nop: tcm_loop keeps target-core default node attributes */
static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
{
	return;
}

/* Fabric ->get_cmd_state(): expose the cached SCSI command state */
static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
			struct tcm_loop_cmd, tl_se_cmd);

	return tl_cmd->sc_cmd_state;
}

/* Nop: nexus teardown is handled in tcm_loop_drop_nexus() */
static void tcm_loop_close_session(struct se_session *se_sess)
{
	return;
};

static int tcm_loop_write_pending(struct se_cmd *se_cmd)
{
	/*
	 * Since Linux/SCSI has already sent down a struct scsi_cmnd
	 * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array
	 * memory, and memory has already been mapped to struct se_cmd->t_mem_list
	 * format with transport_generic_map_mem_to_cmd().
	 *
	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
	 * object execution queue.
	 */
	target_execute_cmd(se_cmd);
	return 0;
}

static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

/* Completion path for READ payloads: hand data back to the SCSI midlayer */
static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
		     " cdb: 0x%02x\n", sc, sc->cmnd[0]);

	sc->result = SAM_STAT_GOOD;
	set_host_byte(sc, DID_OK);
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}

/* Completion path for SCSI status (and optional sense data) */
static int tcm_loop_queue_status(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
			" cdb: 0x%02x\n", sc, sc->cmnd[0]);

	if (se_cmd->sense_buffer &&
	   ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {

		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
				SCSI_SENSE_BUFFERSIZE);
		sc->result = SAM_STAT_CHECK_CONDITION;
		set_driver_byte(sc, DRIVER_SENSE);
	} else
		sc->result = se_cmd->scsi_status;

	set_host_byte(sc, DID_OK);
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags &
SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}

/* TMR completion: wake the EH thread sleeping in tcm_loop_issue_tmr() */
static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
	/*
	 * The SCSI EH thread will be sleeping on se_tmr->tl_tmr_wait, go ahead
	 * and wake up the wait_queue_head_t in tcm_loop_device_reset()
	 */
	atomic_set(&tl_tmr->tmr_complete, 1);
	wake_up(&tl_tmr->tl_tmr_wait);
}

static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

/* Human-readable name for the emulated fabric protocol */
static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
{
	switch (tl_hba->tl_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}

/* Start items for tcm_loop_port_cit */

static int tcm_loop_port_link(
	struct se_portal_group *se_tpg,
	struct se_lun *lun)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
	/*
	 * Add Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);

	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
	return 0;
}

static void tcm_loop_port_unlink(
	struct se_portal_group *se_tpg,
	struct se_lun *se_lun)
{
	struct scsi_device *sd;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
	tl_hba = tl_tpg->tl_hba;

	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
				se_lun->unpacked_lun);
	if (!sd) {
		pr_err("Unable to locate struct scsi_device for %d:%d:"
			"%llu\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
		return;
	}
	/*
	 * Remove Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_remove_device(sd);
	scsi_device_put(sd);

	atomic_dec_mb(&tl_tpg->tl_tpg_port_count);

	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
}

/* End items for tcm_loop_port_cit */

static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
		struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);

	return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
}

static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
		struct config_item *item, const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);
	unsigned long val;
	int ret = kstrtoul(page, 0, &val);

	if (ret) {
		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
		return ret;
	}
	/* Accepted DIF types: 0 (off), 1 and 3 -- type 2 is rejected */
	if (val != 0 && val != 1 && val != 3) {
		pr_err("Invalid qla2xxx fabric_prot_type: %lu\n", val);
		return -EINVAL;
	}
	tl_tpg->tl_fabric_prot_type = val;

	return count;
}

CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);

static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
	&tcm_loop_tpg_attrib_attr_fabric_prot_type,
	NULL,
};

/* Start items for tcm_loop_nexus_cit */

/* Session-allocation callback: publish the new nexus on the TPG */
static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
				  struct se_session *se_sess, void *p)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
					struct tcm_loop_tpg, tl_se_tpg);

	tl_tpg->tl_nexus = p;
	return 0;
}

static int tcm_loop_make_nexus(
	struct tcm_loop_tpg *tl_tpg,
	const char *name)
{
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	struct tcm_loop_nexus *tl_nexus;
	int ret;

	if (tl_tpg->tl_nexus) {
		pr_debug("tl_tpg->tl_nexus already exists\n");
		return -EEXIST;
	}

	tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
	if (!tl_nexus) {
		pr_err("Unable to allocate struct tcm_loop_nexus\n");
		return -ENOMEM;
	}

	tl_nexus->se_sess = target_alloc_session(&tl_tpg->tl_se_tpg, 0, 0,
					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
					name, tl_nexus, tcm_loop_alloc_sess_cb);
	if (IS_ERR(tl_nexus->se_sess)) {
		ret = PTR_ERR(tl_nexus->se_sess);
		kfree(tl_nexus);
		return ret;
	}

	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
		name);
	return 0;
}

static int tcm_loop_drop_nexus(
	struct tcm_loop_tpg *tpg)
{
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus;

	tl_nexus = tpg->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	se_sess = tl_nexus->se_sess;
	if (!se_sess)
		return -ENODEV;

	/* Refuse teardown while LUN links still reference this TPG */
	if (atomic_read(&tpg->tl_tpg_port_count)) {
		pr_err("Unable to remove TCM_Loop I_T Nexus with"
			" active TPG port count: %d\n",
			atomic_read(&tpg->tl_tpg_port_count));
		return -EPERM;
	}

	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
		tl_nexus->se_sess->se_node_acl->initiatorname);
	/*
	 * Release the SCSI I_T Nexus to the emulated Target Port
	 */
	transport_deregister_session(tl_nexus->se_sess);
	tpg->tl_nexus = NULL;
	kfree(tl_nexus);
	return 0;
}

/* End items for tcm_loop_nexus_cit */

static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_nexus *tl_nexus;
	ssize_t ret;

	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	ret = snprintf(page, PAGE_SIZE, "%s\n",
		tl_nexus->se_sess->se_node_acl->initiatorname);

	return ret;
}

static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed..
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = tcm_loop_drop_nexus(tl_tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
	 * tcm_loop_make_nexus()
	 */
	if (strlen(page) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA Sas Address: %s, exceeds"
				" max: %d\n", page, TL_WWN_ADDR_LEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port:"
			" %s\n", i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	const char *status = NULL;
	ssize_t ret = -EINVAL;

	switch (tl_tpg->tl_transport_status) {
	case TCM_TRANSPORT_ONLINE:
		status = "online";
		break;
	case TCM_TRANSPORT_OFFLINE:
		status = "offline";
		break;
	default:
		break;
	}

	if (status)
		ret = snprintf(page, PAGE_SIZE, "%s\n", status);

	return ret;
}

static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);

	if (!strncmp(page, "online", 6)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
		return count;
	}
	if (!strncmp(page, "offline", 7)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
		if (tl_tpg->tl_nexus) {
			struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;

core_allocate_nexus_loss_ua(tl_sess->se_node_acl); 1018 } 1019 return count; 1020 } 1021 return -EINVAL; 1022 } 1023 1024 static ssize_t tcm_loop_tpg_address_show(struct config_item *item, 1025 char *page) 1026 { 1027 struct se_portal_group *se_tpg = to_tpg(item); 1028 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, 1029 struct tcm_loop_tpg, tl_se_tpg); 1030 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 1031 1032 return snprintf(page, PAGE_SIZE, "%d:0:%d\n", 1033 tl_hba->sh->host_no, tl_tpg->tl_tpgt); 1034 } 1035 1036 CONFIGFS_ATTR(tcm_loop_tpg_, nexus); 1037 CONFIGFS_ATTR(tcm_loop_tpg_, transport_status); 1038 CONFIGFS_ATTR_RO(tcm_loop_tpg_, address); 1039 1040 static struct configfs_attribute *tcm_loop_tpg_attrs[] = { 1041 &tcm_loop_tpg_attr_nexus, 1042 &tcm_loop_tpg_attr_transport_status, 1043 &tcm_loop_tpg_attr_address, 1044 NULL, 1045 }; 1046 1047 /* Start items for tcm_loop_naa_cit */ 1048 1049 static struct se_portal_group *tcm_loop_make_naa_tpg( 1050 struct se_wwn *wwn, 1051 struct config_group *group, 1052 const char *name) 1053 { 1054 struct tcm_loop_hba *tl_hba = container_of(wwn, 1055 struct tcm_loop_hba, tl_hba_wwn); 1056 struct tcm_loop_tpg *tl_tpg; 1057 int ret; 1058 unsigned long tpgt; 1059 1060 if (strstr(name, "tpgt_") != name) { 1061 pr_err("Unable to locate \"tpgt_#\" directory" 1062 " group\n"); 1063 return ERR_PTR(-EINVAL); 1064 } 1065 if (kstrtoul(name+5, 10, &tpgt)) 1066 return ERR_PTR(-EINVAL); 1067 1068 if (tpgt >= TL_TPGS_PER_HBA) { 1069 pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA:" 1070 " %u\n", tpgt, TL_TPGS_PER_HBA); 1071 return ERR_PTR(-EINVAL); 1072 } 1073 tl_tpg = &tl_hba->tl_hba_tpgs[tpgt]; 1074 tl_tpg->tl_hba = tl_hba; 1075 tl_tpg->tl_tpgt = tpgt; 1076 /* 1077 * Register the tl_tpg as a emulated TCM Target Endpoint 1078 */ 1079 ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id); 1080 if (ret < 0) 1081 return ERR_PTR(-ENOMEM); 1082 1083 pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s" 1084 " Target 
Port %s,t,0x%04lx\n", tcm_loop_dump_proto_id(tl_hba), 1085 config_item_name(&wwn->wwn_group.cg_item), tpgt); 1086 1087 return &tl_tpg->tl_se_tpg; 1088 } 1089 1090 static void tcm_loop_drop_naa_tpg( 1091 struct se_portal_group *se_tpg) 1092 { 1093 struct se_wwn *wwn = se_tpg->se_tpg_wwn; 1094 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, 1095 struct tcm_loop_tpg, tl_se_tpg); 1096 struct tcm_loop_hba *tl_hba; 1097 unsigned short tpgt; 1098 1099 tl_hba = tl_tpg->tl_hba; 1100 tpgt = tl_tpg->tl_tpgt; 1101 /* 1102 * Release the I_T Nexus for the Virtual target link if present 1103 */ 1104 tcm_loop_drop_nexus(tl_tpg); 1105 /* 1106 * Deregister the tl_tpg as a emulated TCM Target Endpoint 1107 */ 1108 core_tpg_deregister(se_tpg); 1109 1110 tl_tpg->tl_hba = NULL; 1111 tl_tpg->tl_tpgt = 0; 1112 1113 pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s" 1114 " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba), 1115 config_item_name(&wwn->wwn_group.cg_item), tpgt); 1116 } 1117 1118 /* End items for tcm_loop_naa_cit */ 1119 1120 /* Start items for tcm_loop_cit */ 1121 1122 static struct se_wwn *tcm_loop_make_scsi_hba( 1123 struct target_fabric_configfs *tf, 1124 struct config_group *group, 1125 const char *name) 1126 { 1127 struct tcm_loop_hba *tl_hba; 1128 struct Scsi_Host *sh; 1129 char *ptr; 1130 int ret, off = 0; 1131 1132 tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL); 1133 if (!tl_hba) { 1134 pr_err("Unable to allocate struct tcm_loop_hba\n"); 1135 return ERR_PTR(-ENOMEM); 1136 } 1137 /* 1138 * Determine the emulated Protocol Identifier and Target Port Name 1139 * based on the incoming configfs directory name. 1140 */ 1141 ptr = strstr(name, "naa."); 1142 if (ptr) { 1143 tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS; 1144 goto check_len; 1145 } 1146 ptr = strstr(name, "fc."); 1147 if (ptr) { 1148 tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP; 1149 off = 3; /* Skip over "fc." 
*/ 1150 goto check_len; 1151 } 1152 ptr = strstr(name, "iqn."); 1153 if (!ptr) { 1154 pr_err("Unable to locate prefix for emulated Target " 1155 "Port: %s\n", name); 1156 ret = -EINVAL; 1157 goto out; 1158 } 1159 tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI; 1160 1161 check_len: 1162 if (strlen(name) >= TL_WWN_ADDR_LEN) { 1163 pr_err("Emulated NAA %s Address: %s, exceeds" 1164 " max: %d\n", name, tcm_loop_dump_proto_id(tl_hba), 1165 TL_WWN_ADDR_LEN); 1166 ret = -EINVAL; 1167 goto out; 1168 } 1169 snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]); 1170 1171 /* 1172 * Call device_register(tl_hba->dev) to register the emulated 1173 * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after 1174 * device_register() callbacks in tcm_loop_driver_probe() 1175 */ 1176 ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt); 1177 if (ret) 1178 goto out; 1179 1180 sh = tl_hba->sh; 1181 tcm_loop_hba_no_cnt++; 1182 pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target" 1183 " %s Address: %s at Linux/SCSI Host ID: %d\n", 1184 tcm_loop_dump_proto_id(tl_hba), name, sh->host_no); 1185 1186 return &tl_hba->tl_hba_wwn; 1187 out: 1188 kfree(tl_hba); 1189 return ERR_PTR(ret); 1190 } 1191 1192 static void tcm_loop_drop_scsi_hba( 1193 struct se_wwn *wwn) 1194 { 1195 struct tcm_loop_hba *tl_hba = container_of(wwn, 1196 struct tcm_loop_hba, tl_hba_wwn); 1197 1198 pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target" 1199 " %s Address: %s at Linux/SCSI Host ID: %d\n", 1200 tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address, 1201 tl_hba->sh->host_no); 1202 /* 1203 * Call device_unregister() on the original tl_hba->dev. 
1204 * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will 1205 * release *tl_hba; 1206 */ 1207 device_unregister(&tl_hba->dev); 1208 } 1209 1210 /* Start items for tcm_loop_cit */ 1211 static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page) 1212 { 1213 return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION); 1214 } 1215 1216 CONFIGFS_ATTR_RO(tcm_loop_wwn_, version); 1217 1218 static struct configfs_attribute *tcm_loop_wwn_attrs[] = { 1219 &tcm_loop_wwn_attr_version, 1220 NULL, 1221 }; 1222 1223 /* End items for tcm_loop_cit */ 1224 1225 static const struct target_core_fabric_ops loop_ops = { 1226 .module = THIS_MODULE, 1227 .name = "loopback", 1228 .get_fabric_name = tcm_loop_get_fabric_name, 1229 .tpg_get_wwn = tcm_loop_get_endpoint_wwn, 1230 .tpg_get_tag = tcm_loop_get_tag, 1231 .tpg_check_demo_mode = tcm_loop_check_demo_mode, 1232 .tpg_check_demo_mode_cache = tcm_loop_check_demo_mode_cache, 1233 .tpg_check_demo_mode_write_protect = 1234 tcm_loop_check_demo_mode_write_protect, 1235 .tpg_check_prod_mode_write_protect = 1236 tcm_loop_check_prod_mode_write_protect, 1237 .tpg_check_prot_fabric_only = tcm_loop_check_prot_fabric_only, 1238 .tpg_get_inst_index = tcm_loop_get_inst_index, 1239 .check_stop_free = tcm_loop_check_stop_free, 1240 .release_cmd = tcm_loop_release_cmd, 1241 .close_session = tcm_loop_close_session, 1242 .sess_get_index = tcm_loop_sess_get_index, 1243 .write_pending = tcm_loop_write_pending, 1244 .write_pending_status = tcm_loop_write_pending_status, 1245 .set_default_node_attributes = tcm_loop_set_default_node_attributes, 1246 .get_cmd_state = tcm_loop_get_cmd_state, 1247 .queue_data_in = tcm_loop_queue_data_in, 1248 .queue_status = tcm_loop_queue_status, 1249 .queue_tm_rsp = tcm_loop_queue_tm_rsp, 1250 .aborted_task = tcm_loop_aborted_task, 1251 .fabric_make_wwn = tcm_loop_make_scsi_hba, 1252 .fabric_drop_wwn = tcm_loop_drop_scsi_hba, 1253 .fabric_make_tpg = tcm_loop_make_naa_tpg, 1254 
	.fabric_drop_tpg		= tcm_loop_drop_naa_tpg,
	.fabric_post_link		= tcm_loop_port_link,
	.fabric_pre_unlink		= tcm_loop_port_unlink,
	.tfc_wwn_attrs			= tcm_loop_wwn_attrs,
	.tfc_tpg_base_attrs		= tcm_loop_tpg_attrs,
	.tfc_tpg_attrib_attrs		= tcm_loop_tpg_attrib_attrs,
};

/*
 * Module init: allocate the command submission workqueue and the
 * tcm_loop_cmd slab cache, register the virtual LLD bus/driver pair,
 * then register the fabric template with target core.  Each failure
 * path unwinds exactly what was set up before it, in reverse order.
 */
static int __init tcm_loop_fabric_init(void)
{
	int ret = -ENOMEM;

	tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
	if (!tcm_loop_workqueue)
		goto out;

	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
				sizeof(struct tcm_loop_cmd),
				__alignof__(struct tcm_loop_cmd),
				0, NULL);
	if (!tcm_loop_cmd_cache) {
		pr_debug("kmem_cache_create() for"
			" tcm_loop_cmd_cache failed\n");
		goto out_destroy_workqueue;
	}

	/* Registers tcm_loop_lld_bus and tcm_loop_driverfs (see top of file). */
	ret = tcm_loop_alloc_core_bus();
	if (ret)
		goto out_destroy_cache;

	ret = target_register_template(&loop_ops);
	if (ret)
		goto out_release_core_bus;

	return 0;

out_release_core_bus:
	tcm_loop_release_core_bus();
out_destroy_cache:
	kmem_cache_destroy(tcm_loop_cmd_cache);
out_destroy_workqueue:
	destroy_workqueue(tcm_loop_workqueue);
out:
	return ret;
}

/*
 * Module exit: tear down in strict reverse order of tcm_loop_fabric_init().
 */
static void __exit tcm_loop_fabric_exit(void)
{
	target_unregister_template(&loop_ops);
	tcm_loop_release_core_bus();
	kmem_cache_destroy(tcm_loop_cmd_cache);
	destroy_workqueue(tcm_loop_workqueue);
}

MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
MODULE_AUTHOR("Nicholas A. Bellinger <[email protected]>");
MODULE_LICENSE("GPL");
module_init(tcm_loop_fabric_init);
module_exit(tcm_loop_fabric_exit);