1 /* $FreeBSD$ */
2 /*
3  * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
4  *
5  * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice immediately at the beginning of the file, without modification,
12  *    this list of conditions, and the following disclaimer.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 #include <dev/isp/isp_freebsd.h>
29 #include <sys/unistd.h>
30 #include <sys/kthread.h>
31 #include <machine/stdarg.h>	/* for use by isp_prt below */
32 #include <sys/conf.h>
33 #include <sys/ioccom.h>
34 #include <dev/isp/isp_ioctl.h>
35 
36 
37 static d_ioctl_t ispioctl;
38 static void isp_intr_enable(void *);
39 static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
40 static void isp_poll(struct cam_sim *);
41 #if	0
42 static void isp_relsim(void *);
43 #endif
44 static timeout_t isp_watchdog;
45 static void isp_kthread(void *);
46 static void isp_action(struct cam_sim *, union ccb *);
47 
48 
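/*
 * Character device switch for the management node created in isp_attach().
 * Only ioctl is backed by real code (ispioctl below); every other entry
 * point is a null/no-op stub, so the node exists purely for management
 * ioctls such as setting the debug level, resetting the HBA, forcing an
 * FC rescan or LIP, and reading port database entries.
 */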
49 #define ISP_CDEV_MAJOR	248
50 static struct cdevsw isp_cdevsw = {
51 	/* open */	nullopen,
52 	/* close */	nullclose,
53 	/* read */	noread,
54 	/* write */	nowrite,
55 	/* ioctl */	ispioctl,
56 	/* poll */	nopoll,
57 	/* mmap */	nommap,
58 	/* strategy */	nostrategy,
59 	/* name */	"isp",
60 	/* maj */	ISP_CDEV_MAJOR,
61 	/* dump */	nodump,
62 	/* psize */	nopsize,
63 	/* flags */	D_TAPE,
64 };
65 
66 static struct ispsoftc *isplist = NULL;
67 
68 void
69 isp_attach(struct ispsoftc *isp)
70 {
71 	int primary, secondary;
72 	struct ccb_setasync csa;
73 	struct cam_devq *devq;
74 	struct cam_sim *sim;
75 	struct cam_path *path;
76 
77 	/*
78 	 * Establish (in case of 12X0) which bus is the primary.
79 	 */
80 
81 	primary = 0;
82 	secondary = 1;
83 
84 	/*
85 	 * Create the device queue for our SIM(s).
86 	 */
87 	devq = cam_simq_alloc(isp->isp_maxcmds);
88 	if (devq == NULL) {
89 		return;
90 	}
91 
92 	/*
93 	 * Construct our SIM entry.
94 	 */
95 	ISPLOCK_2_CAMLOCK(isp);
96 	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
97 	    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
98 	if (sim == NULL) {
99 		cam_simq_free(devq);
100 		CAMLOCK_2_ISPLOCK(isp);
101 		return;
102 	}
103 	CAMLOCK_2_ISPLOCK(isp);
104 
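	/*
	 * Interrupts are not enabled here; instead, register a
	 * config_intrhook so that isp_intr_enable() turns them on once the
	 * system is far enough along in autoconfiguration to service them.
	 */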
105 	isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
106 	isp->isp_osinfo.ehook.ich_arg = isp;
107 	ISPLOCK_2_CAMLOCK(isp);
108 	if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
109 		cam_sim_free(sim, TRUE);
110 		CAMLOCK_2_ISPLOCK(isp);
111 		isp_prt(isp, ISP_LOGERR,
112 		    "could not establish interrupt enable hook");
113 		return;
114 	}
115 
116 	if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
117 		cam_sim_free(sim, TRUE);
118 		CAMLOCK_2_ISPLOCK(isp);
119 		return;
120 	}
121 
122 	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
123 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
124 		xpt_bus_deregister(cam_sim_path(sim));
125 		cam_sim_free(sim, TRUE);
126 		config_intrhook_disestablish(&isp->isp_osinfo.ehook);
127 		CAMLOCK_2_ISPLOCK(isp);
128 		return;
129 	}
130 
131 	xpt_setup_ccb(&csa.ccb_h, path, 5);
132 	csa.ccb_h.func_code = XPT_SASYNC_CB;
133 	csa.event_enable = AC_LOST_DEVICE;
134 	csa.callback = isp_cam_async;
135 	csa.callback_arg = sim;
136 	xpt_action((union ccb *)&csa);
137 	CAMLOCK_2_ISPLOCK(isp);
138 	isp->isp_sim = sim;
139 	isp->isp_path = path;
140 	/*
141 	 * Create a kernel thread for fibre channel instances. We
142 	 * don't have dual channel FC cards.
143 	 */
144 	if (IS_FC(isp)) {
145 		ISPLOCK_2_CAMLOCK(isp);
146 		/* XXX: LOCK VIOLATION */
147 		cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv");
148 		if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc,
149 		    RFHIGHPID, "%s: fc_thrd",
150 		    device_get_nameunit(isp->isp_dev))) {
151 			xpt_bus_deregister(cam_sim_path(sim));
152 			cam_sim_free(sim, TRUE);
153 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
154 			CAMLOCK_2_ISPLOCK(isp);
155 			isp_prt(isp, ISP_LOGERR, "could not create kthread");
156 			return;
157 		}
158 		CAMLOCK_2_ISPLOCK(isp);
159 	}
160 
161 
162 	/*
163 	 * If we have a second channel, construct SIM entry for that.
164 	 */
165 	if (IS_DUALBUS(isp)) {
166 		ISPLOCK_2_CAMLOCK(isp);
167 		sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
168 		    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
169 		if (sim == NULL) {
170 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
171 			xpt_free_path(isp->isp_path);
172 			cam_simq_free(devq);
173 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
174 			return;
175 		}
176 		if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
177 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
178 			xpt_free_path(isp->isp_path);
179 			cam_sim_free(sim, TRUE);
180 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
181 			CAMLOCK_2_ISPLOCK(isp);
182 			return;
183 		}
184 
185 		if (xpt_create_path(&path, NULL, cam_sim_path(sim),
186 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
187 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
188 			xpt_free_path(isp->isp_path);
189 			xpt_bus_deregister(cam_sim_path(sim));
190 			cam_sim_free(sim, TRUE);
191 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
192 			CAMLOCK_2_ISPLOCK(isp);
193 			return;
194 		}
195 
196 		xpt_setup_ccb(&csa.ccb_h, path, 5);
197 		csa.ccb_h.func_code = XPT_SASYNC_CB;
198 		csa.event_enable = AC_LOST_DEVICE;
199 		csa.callback = isp_cam_async;
200 		csa.callback_arg = sim;
201 		xpt_action((union ccb *)&csa);
202 		CAMLOCK_2_ISPLOCK(isp);
203 		isp->isp_sim2 = sim;
204 		isp->isp_path2 = path;
205 	}
206 
207 	/*
208 	 * Create device nodes
209 	 */
210 	(void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT,
211 	    GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev));
212 
213 	if (isp->isp_role != ISP_ROLE_NONE) {
214 		isp->isp_state = ISP_RUNSTATE;
215 		ENABLE_INTS(isp);
216 	}
217 	if (isplist == NULL) {
218 		isplist = isp;
219 	} else {
220 		struct ispsoftc *tmp = isplist;
221 		while (tmp->isp_osinfo.next) {
222 			tmp = tmp->isp_osinfo.next;
223 		}
224 		tmp->isp_osinfo.next = isp;
225 	}
226 
227 }
228 
229 static int
230 ispioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
231 {
232 	struct ispsoftc *isp;
233 	int retval = ENOTTY;
234 
235 	isp = isplist;
236 	while (isp) {
237 		if (minor(dev) == device_get_unit(isp->isp_dev)) {
238 			break;
239 		}
240 		isp = isp->isp_osinfo.next;
241 	}
242 	if (isp == NULL)
243 		return (ENXIO);
244 
245 	switch (cmd) {
246 	case ISP_SDBLEV:
247 	{
248 		int olddblev = isp->isp_dblev;
249 		isp->isp_dblev = *(int *)addr;
250 		*(int *)addr = olddblev;
251 		retval = 0;
252 		break;
253 	}
254 	case ISP_RESETHBA:
255 		ISP_LOCK(isp);
256 		isp_reinit(isp);
257 		ISP_UNLOCK(isp);
258 		retval = 0;
259 		break;
260 	case ISP_FC_RESCAN:
261 		if (IS_FC(isp)) {
262 			ISP_LOCK(isp);
263 			if (isp_fc_runstate(isp, 5 * 1000000)) {
264 				retval = EIO;
265 			} else {
266 				retval = 0;
267 			}
268 			ISP_UNLOCK(isp);
269 		}
270 		break;
271 	case ISP_FC_LIP:
272 		if (IS_FC(isp)) {
273 			ISP_LOCK(isp);
274 			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
275 				retval = EIO;
276 			} else {
277 				retval = 0;
278 			}
279 			ISP_UNLOCK(isp);
280 		}
281 		break;
282 	case ISP_FC_GETDINFO:
283 	{
284 		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
285 		struct lportdb *lp;
286 
287 		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
288 			retval = EINVAL;
289 			break;
290 		}
291 		ISP_LOCK(isp);
292 		lp = &FCPARAM(isp)->portdb[ifc->loopid];
293 		if (lp->valid) {
294 			ifc->loopid = lp->loopid;
295 			ifc->portid = lp->portid;
296 			ifc->node_wwn = lp->node_wwn;
297 			ifc->port_wwn = lp->port_wwn;
298 			retval = 0;
299 		} else {
300 			retval = ENODEV;
301 		}
302 		ISP_UNLOCK(isp);
303 		break;
304 	}
305 	default:
306 		break;
307 	}
308 	return (retval);
309 }
310 
311 static void
312 isp_intr_enable(void *arg)
313 {
314 	struct ispsoftc *isp = arg;
315 	if (isp->isp_role != ISP_ROLE_NONE) {
316 		ENABLE_INTS(isp);
317 		isp->isp_osinfo.intsok = 1;
318 	}
319 	/* Release our hook so that the boot can continue. */
320 	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
321 }
322 
323 /*
324  * Put the target mode functions here, because some are inlines
325  */
326 
327 #ifdef	ISP_TARGET_MODE
328 
329 static __inline int is_lun_enabled(struct ispsoftc *, int, lun_id_t);
330 static __inline int are_any_luns_enabled(struct ispsoftc *, int);
331 static __inline tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t);
332 static __inline void rls_lun_statep(struct ispsoftc *, tstate_t *);
333 static __inline int isp_psema_sig_rqe(struct ispsoftc *);
334 static __inline int isp_cv_wait_timed_rqe(struct ispsoftc *, int);
335 static __inline void isp_cv_signal_rqe(struct ispsoftc *, int);
336 static __inline void isp_vsema_rqe(struct ispsoftc *);
337 static cam_status
338 create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **);
339 static void destroy_lun_state(struct ispsoftc *, tstate_t *);
340 static void isp_en_lun(struct ispsoftc *, union ccb *);
341 static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
342 static timeout_t isp_refire_putback_atio;
343 static void isp_complete_ctio(union ccb *);
344 static void isp_target_putback_atio(union ccb *);
345 static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
346 static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
347 static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
348 static int isp_handle_platform_ctio(struct ispsoftc *, void *);
349 
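/*
 * Per-lun target mode state (tstate_t) is kept in the lun_hash[] buckets of
 * the softc, chained through the ->next pointer. The wildcard lun maps to
 * the per-bus default state in tsdflt[], and the hold count taken by
 * get_lun_statep()/create_lun_state() keeps a state from being destroyed
 * while it is still being referenced.
 */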
350 static __inline int
351 is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun)
352 {
353 	tstate_t *tptr;
354 	tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
355 	if (tptr == NULL) {
356 		ISP_UNLOCK(isp);
357 		return (0);
358 	}
359 	do {
360 		if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
361 			ISP_UNLOCK(isp);
362 			return (1);
363 		}
364 	} while ((tptr = tptr->next) != NULL);
365 	return (0);
366 }
367 
368 static __inline int
369 are_any_luns_enabled(struct ispsoftc *isp, int port)
370 {
371 	int lo, hi;
372 	if (IS_DUALBUS(isp)) {
373 		lo = (port * (LUN_HASH_SIZE >> 1));
374 		hi = lo + (LUN_HASH_SIZE >> 1);
375 	} else {
376 		lo = 0;
377 		hi = LUN_HASH_SIZE;
378 	}
379 	for (; lo < hi; lo++) {
380 		if (isp->isp_osinfo.lun_hash[lo]) {
381 			return (1);
382 		}
383 	}
384 	return (0);
385 }
386 
387 static __inline tstate_t *
388 get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun)
389 {
390 	tstate_t *tptr;
391 
392 	if (lun == CAM_LUN_WILDCARD) {
393 		tptr = &isp->isp_osinfo.tsdflt[bus];
394 		tptr->hold++;
395 		return (tptr);
396 	} else {
397 		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
398 	}
399 	if (tptr == NULL) {
400 		return (NULL);
401 	}
402 
403 	do {
404 		if (tptr->lun == lun && tptr->bus == bus) {
405 			tptr->hold++;
406 			return (tptr);
407 		}
408 	} while ((tptr = tptr->next) != NULL);
409 	return (tptr);
410 }
411 
412 static __inline void
413 rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
414 {
415 	if (tptr->hold)
416 		tptr->hold--;
417 }
418 
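/*
 * The TM_BUSY/TM_WANTED flags implement a simple sleep-based semaphore so
 * that only one ENABLE/MODIFY/DISABLE LUN exchange with the firmware is
 * outstanding at a time; isp_cv_wait_timed_rqe() then sleeps for the
 * response, which the completion path hands back via isp_cv_signal_rqe().
 */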
419 static __inline int
420 isp_psema_sig_rqe(struct ispsoftc *isp)
421 {
422 	while (isp->isp_osinfo.tmflags & TM_BUSY) {
423 		isp->isp_osinfo.tmflags |= TM_WANTED;
424 		if (tsleep(&isp->isp_osinfo.tmflags, PRIBIO|PCATCH, "i0", 0)) {
425 			return (-1);
426 		}
427 		isp->isp_osinfo.tmflags |= TM_BUSY;
428 	}
429 	return (0);
430 }
431 
432 static __inline int
433 isp_cv_wait_timed_rqe(struct ispsoftc *isp, int timo)
434 {
435 	if (tsleep(&isp->isp_osinfo.rstatus, PRIBIO, "qt1", timo)) {
436 		ISP_UNLOCK(isp);
437 		return (-1);
438 	}
439 	return (0);
440 }
441 
442 static __inline void
443 isp_cv_signal_rqe(struct ispsoftc *isp, int status)
444 {
445 	isp->isp_osinfo.rstatus = status;
446 	wakeup(&isp->isp_osinfo.rstatus);
447 }
448 
449 static __inline void
450 isp_vsema_rqe(struct ispsoftc *isp)
451 {
452 	if (isp->isp_osinfo.tmflags & TM_WANTED) {
453 		isp->isp_osinfo.tmflags &= ~TM_WANTED;
454 		wakeup(&isp->isp_osinfo.tmflags);
455 	}
456 	isp->isp_osinfo.tmflags &= ~TM_BUSY;
457 }
458 
459 static cam_status
460 create_lun_state(struct ispsoftc *isp, int bus,
461     struct cam_path *path, tstate_t **rslt)
462 {
463 	cam_status status;
464 	lun_id_t lun;
465 	int hfx;
466 	tstate_t *tptr, *new;
467 
468 	lun = xpt_path_lun_id(path);
469 	if (lun < 0) {
470 		return (CAM_LUN_INVALID);
471 	}
472 	if (is_lun_enabled(isp, bus, lun)) {
473 		return (CAM_LUN_ALRDY_ENA);
474 	}
475 	new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
476 	if (new == NULL) {
477 		return (CAM_RESRC_UNAVAIL);
478 	}
479 
480 	status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
481 	    xpt_path_target_id(path), xpt_path_lun_id(path));
482 	if (status != CAM_REQ_CMP) {
483 		free(new, M_DEVBUF);
484 		return (status);
485 	}
486 	new->bus = bus;
487 	new->lun = lun;
488 	SLIST_INIT(&new->atios);
489 	SLIST_INIT(&new->inots);
490 	new->hold = 1;
491 
492 	hfx = LUN_HASH_FUNC(isp, new->bus, new->lun);
493 	tptr = isp->isp_osinfo.lun_hash[hfx];
494 	if (tptr == NULL) {
495 		isp->isp_osinfo.lun_hash[hfx] = new;
496 	} else {
497 		while (tptr->next)
498 			tptr = tptr->next;
499 		tptr->next = new;
500 	}
501 	*rslt = new;
502 	return (CAM_REQ_CMP);
503 }
504 
505 static __inline void
506 destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
507 {
508 	int hfx;
509 	tstate_t *lw, *pw;
510 
511 	hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun);
512 	if (tptr->hold) {
513 		return;
514 	}
515 	pw = isp->isp_osinfo.lun_hash[hfx];
516 	if (pw == NULL) {
517 		return;
518 	} else if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
519 		isp->isp_osinfo.lun_hash[hfx] = pw->next;
520 	} else {
521 		lw = pw;
522 		pw = lw->next;
523 		while (pw) {
524 			if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
525 				lw->next = pw->next;
526 				break;
527 			}
528 			lw = pw;
529 			pw = pw->next;
530 		}
531 		if (pw == NULL) {
532 			ISP_UNLOCK(isp);
533 			return;
534 		}
535 	}
536 	free(tptr, M_DEVBUF);
537 }
538 
539 /*
540  * we enter with our locks held.
541  */
542 static void
543 isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
544 {
545 	const char lfmt[] = "Lun now %sabled for target mode on channel %d";
546 	struct ccb_en_lun *cel = &ccb->cel;
547 	tstate_t *tptr;
548 	u_int16_t rstat;
549 	int bus, cmd, frozen = 0;
550 	lun_id_t lun;
551 	target_id_t tgt;
552 
553 
554 	bus = XS_CHANNEL(ccb) & 0x1;
555 	tgt = ccb->ccb_h.target_id;
556 	lun = ccb->ccb_h.target_lun;
557 
558 	/*
559 	 * Do some sanity checking first.
560 	 */
561 
562 	if ((lun != CAM_LUN_WILDCARD) &&
563 	    (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
564 		ccb->ccb_h.status = CAM_LUN_INVALID;
565 		return;
566 	}
567 	if (IS_SCSI(isp)) {
568 		sdparam *sdp = isp->isp_param;
569 		sdp += bus;
570 		if (tgt != CAM_TARGET_WILDCARD &&
571 		    tgt != sdp->isp_initiator_id) {
572 			ccb->ccb_h.status = CAM_TID_INVALID;
573 			return;
574 		}
575 	} else {
576 		if (tgt != CAM_TARGET_WILDCARD &&
577 		    tgt != FCPARAM(isp)->isp_iid) {
578 			ccb->ccb_h.status = CAM_TID_INVALID;
579 			return;
580 		}
581 	}
582 
583 	if (tgt == CAM_TARGET_WILDCARD) {
584 		if (lun != CAM_LUN_WILDCARD) {
585 			ccb->ccb_h.status = CAM_LUN_INVALID;
586 			return;
587 		}
588 	}
589 
590 	/*
591 	 * If Fibre Channel, stop and drain all activity to this bus.
592 	 */
593 #if	0
594 	if (IS_FC(isp)) {
595 		ISP_LOCK(isp);
596 		frozen = 1;
597 		xpt_freeze_simq(isp->isp_sim, 1);
598 		isp->isp_osinfo.drain = 1;
599 		while (isp->isp_osinfo.drain) {
600 			(void) msleep(&isp->isp_osinfo.drain, &isp->isp_lock,
601 			    PRIBIO, "ispdrain", 10 * hz);
602 		}
603 		ISP_UNLOCK(isp);
604 	}
605 #endif
606 
607 	/*
608 	 * Check to see if we're enabling on fibre channel and
609 	 * don't yet have a notion of who the heck we are (no
610 	 * loop yet).
611 	 */
612 	if (IS_FC(isp) && cel->enable &&
613 	    (isp->isp_osinfo.tmflags & TM_TMODE_ENABLED) == 0) {
614 		fcparam *fcp = isp->isp_param;
615 		int rv;
616 
617 		rv = isp_fc_runstate(isp, 2 * 1000000);
618 		if (fcp->isp_fwstate != FW_READY ||
619 		    fcp->isp_loopstate != LOOP_READY) {
620 			xpt_print_path(ccb->ccb_h.path);
621 			isp_prt(isp, ISP_LOGWARN,
622 			    "could not get a good port database read");
623 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
624 			if (frozen) {
625 				ISPLOCK_2_CAMLOCK(isp);
626 				xpt_release_simq(isp->isp_sim, 1);
627 				CAMLOCK_2_ISPLOCK(isp);
628 			}
629 			return;
630 		}
631 	}
632 
633 
634 	/*
635 	 * Next check to see whether this is a target/lun wildcard action.
636 	 *
637 	 * If so, we enable/disable target mode but don't do any lun enabling.
638 	 */
639 	if (lun == CAM_LUN_WILDCARD && tgt == CAM_TARGET_WILDCARD) {
640 		int av = bus << 31;
641 		tptr = &isp->isp_osinfo.tsdflt[bus];
642 		if (cel->enable) {
643 			if (isp->isp_osinfo.tmflags & (1 << bus)) {
644 				ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
645 				if (frozen) {
646 					ISPLOCK_2_CAMLOCK(isp);
647 					xpt_release_simq(isp->isp_sim, 1);
648 					CAMLOCK_2_ISPLOCK(isp);
649 				}
650 				return;
651 			}
652 			ccb->ccb_h.status =
653 			    xpt_create_path(&tptr->owner, NULL,
654 			    xpt_path_path_id(ccb->ccb_h.path),
655 			    xpt_path_target_id(ccb->ccb_h.path),
656 			    xpt_path_lun_id(ccb->ccb_h.path));
657 			if (ccb->ccb_h.status != CAM_REQ_CMP) {
658 				if (frozen) {
659 					ISPLOCK_2_CAMLOCK(isp);
660 					xpt_release_simq(isp->isp_sim, 1);
661 					CAMLOCK_2_ISPLOCK(isp);
662 				}
663 				return;
664 			}
665 			SLIST_INIT(&tptr->atios);
666 			SLIST_INIT(&tptr->inots);
667 			av |= ENABLE_TARGET_FLAG;
668 			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
669 			if (av) {
670 				ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
671 				xpt_free_path(tptr->owner);
672 				if (frozen) {
673 					ISPLOCK_2_CAMLOCK(isp);
674 					xpt_release_simq(isp->isp_sim, 1);
675 					CAMLOCK_2_ISPLOCK(isp);
676 				}
677 				return;
678 			}
679 			isp->isp_osinfo.tmflags |= (1 << bus);
680 		} else {
681 			if ((isp->isp_osinfo.tmflags & (1 << bus)) == 0) {
682 				ccb->ccb_h.status = CAM_LUN_INVALID;
683 				if (frozen) {
684 					ISPLOCK_2_CAMLOCK(isp);
685 					xpt_release_simq(isp->isp_sim, 1);
686 					CAMLOCK_2_ISPLOCK(isp);
687 				}
688 				return;
689 			}
690 			if (are_any_luns_enabled(isp, bus)) {
691 				ccb->ccb_h.status = CAM_SCSI_BUSY;
692 				if (frozen) {
693 					ISPLOCK_2_CAMLOCK(isp);
694 					xpt_release_simq(isp->isp_sim, 1);
695 					CAMLOCK_2_ISPLOCK(isp);
696 				}
697 				return;
698 			}
699 			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
700 			if (av) {
701 				ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
702 				if (frozen) {
703 					ISPLOCK_2_CAMLOCK(isp);
704 					xpt_release_simq(isp->isp_sim, 1);
705 					CAMLOCK_2_ISPLOCK(isp);
706 				}
707 				return;
708 			}
709 			isp->isp_osinfo.tmflags &= ~(1 << bus);
710 			ccb->ccb_h.status = CAM_REQ_CMP;
711 		}
712 		xpt_print_path(ccb->ccb_h.path);
713 		isp_prt(isp, ISP_LOGINFO, "Target Mode %sabled on channel %d",
714 		    (cel->enable) ? "en" : "dis", bus);
715 		if (frozen) {
716 			ISPLOCK_2_CAMLOCK(isp);
717 			xpt_release_simq(isp->isp_sim, 1);
718 			CAMLOCK_2_ISPLOCK(isp);
719 		}
720 		return;
721 	}
722 
723 	/*
724 	 * We can move along now...
725 	 */
726 
727 	if (frozen) {
728 		ISPLOCK_2_CAMLOCK(isp);
729 		xpt_release_simq(isp->isp_sim, 1);
730 		CAMLOCK_2_ISPLOCK(isp);
731 	}
732 
733 
734 	if (cel->enable) {
735 		ccb->ccb_h.status =
736 		    create_lun_state(isp, bus, ccb->ccb_h.path, &tptr);
737 		if (ccb->ccb_h.status != CAM_REQ_CMP) {
738 			return;
739 		}
740 	} else {
741 		tptr = get_lun_statep(isp, bus, lun);
742 		if (tptr == NULL) {
743 			ccb->ccb_h.status = CAM_LUN_INVALID;
744 			return;
745 		}
746 	}
747 
748 	if (isp_psema_sig_rqe(isp)) {
749 		rls_lun_statep(isp, tptr);
750 		if (cel->enable)
751 			destroy_lun_state(isp, tptr);
752 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
753 		return;
754 	}
755 
756 	if (cel->enable) {
757 		u_int32_t seq = isp->isp_osinfo.rollinfo++;
758 		int c, n, ulun = lun;
759 
760 		cmd = RQSTYPE_ENABLE_LUN;
761 		c = DFLT_CMND_CNT;
762 		n = DFLT_INOT_CNT;
763 		if (IS_FC(isp) && lun != 0) {
764 			cmd = RQSTYPE_MODIFY_LUN;
765 			n = 0;
766 			/*
767 		 	 * For SCC firmware, we only deal with setting
768 			 * (enabling or modifying) lun 0.
769 			 */
770 			ulun = 0;
771 		}
772 		rstat = LUN_ERR;
773 		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
774 			xpt_print_path(ccb->ccb_h.path);
775 			isp_prt(isp, ISP_LOGWARN, "isp_lun_cmd failed");
776 			goto out;
777 		}
778 		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
779 			xpt_print_path(ccb->ccb_h.path);
780 			isp_prt(isp, ISP_LOGERR,
781 			    "wait for ENABLE/MODIFY LUN timed out");
782 			goto out;
783 		}
784 		rstat = isp->isp_osinfo.rstatus;
785 		if (rstat != LUN_OK) {
786 			xpt_print_path(ccb->ccb_h.path);
787 			isp_prt(isp, ISP_LOGERR,
788 			    "ENABLE/MODIFY LUN returned 0x%x", rstat);
789 			goto out;
790 		}
791 	} else {
792 		int c, n, ulun = lun;
793 		u_int32_t seq;
794 
795 		rstat = LUN_ERR;
796 		seq = isp->isp_osinfo.rollinfo++;
797 		cmd = -RQSTYPE_MODIFY_LUN;
798 
799 		c = DFLT_CMND_CNT;
800 		n = DFLT_INOT_CNT;
801 		if (IS_FC(isp) && lun != 0) {
802 			n = 0;
803 			/*
804 		 	 * For SCC firmware, we only deal with setting
805 			 * (enabling or modifying) lun 0.
806 			 */
807 			ulun = 0;
808 		}
809 		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
810 			xpt_print_path(ccb->ccb_h.path);
811 			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
812 			goto out;
813 		}
814 		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
815 			xpt_print_path(ccb->ccb_h.path);
816 			isp_prt(isp, ISP_LOGERR,
817 			    "wait for MODIFY LUN timed out");
818 			goto out;
819 		}
820 		rstat = isp->isp_osinfo.rstatus;
821 		if (rstat != LUN_OK) {
822 			xpt_print_path(ccb->ccb_h.path);
823 			isp_prt(isp, ISP_LOGERR,
824 			    "MODIFY LUN returned 0x%x", rstat);
825 			goto out;
826 		}
827 		if (IS_FC(isp) && lun) {
828 			goto out;
829 		}
830 
831 		seq = isp->isp_osinfo.rollinfo++;
832 
833 		rstat = LUN_ERR;
834 		cmd = -RQSTYPE_ENABLE_LUN;
835 		if (isp_lun_cmd(isp, cmd, bus, tgt, lun, 0, 0, seq)) {
836 			xpt_print_path(ccb->ccb_h.path);
837 			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
838 			goto out;
839 		}
840 		if (isp_cv_wait_timed_rqe(isp, 30 * hz)) {
841 			xpt_print_path(ccb->ccb_h.path);
842 			isp_prt(isp, ISP_LOGERR,
843 			     "wait for DISABLE LUN timed out");
844 			goto out;
845 		}
846 		rstat = isp->isp_osinfo.rstatus;
847 		if (rstat != LUN_OK) {
848 			xpt_print_path(ccb->ccb_h.path);
849 			isp_prt(isp, ISP_LOGWARN,
850 			    "DISABLE LUN returned 0x%x", rstat);
851 			goto out;
852 		}
853 	}
854 out:
855 	isp_vsema_rqe(isp);
856 
857 	if (rstat != LUN_OK) {
858 		xpt_print_path(ccb->ccb_h.path);
859 		isp_prt(isp, ISP_LOGWARN,
860 		    "lun %sable failed", (cel->enable) ? "en" : "dis");
861 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
862 		rls_lun_statep(isp, tptr);
863 		if (cel->enable)
864 			destroy_lun_state(isp, tptr);
865 	} else {
866 		xpt_print_path(ccb->ccb_h.path);
867 		isp_prt(isp, ISP_LOGINFO, lfmt,
868 		    (cel->enable) ? "en" : "dis", bus);
869 		rls_lun_statep(isp, tptr);
870 		if (cel->enable == 0) {
871 			destroy_lun_state(isp, tptr);
872 		}
873 		ccb->ccb_h.status = CAM_REQ_CMP;
874 	}
875 }
876 
877 static cam_status
878 isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
879 {
880 	tstate_t *tptr;
881 	struct ccb_hdr_slist *lp;
882 	struct ccb_hdr *curelm;
883 	int found;
884 	union ccb *accb = ccb->cab.abort_ccb;
885 
886 	if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
887 		if (IS_FC(isp) && (accb->ccb_h.target_id !=
888 		    ((fcparam *) isp->isp_param)->isp_loopid)) {
889 			return (CAM_PATH_INVALID);
890 		} else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
891 		    ((sdparam *) isp->isp_param)->isp_initiator_id)) {
892 			return (CAM_PATH_INVALID);
893 		}
894 	}
895 	tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun);
896 	if (tptr == NULL) {
897 		return (CAM_PATH_INVALID);
898 	}
899 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
900 		lp = &tptr->atios;
901 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
902 		lp = &tptr->inots;
903 	} else {
904 		rls_lun_statep(isp, tptr);
905 		return (CAM_UA_ABORT);
906 	}
907 	curelm = SLIST_FIRST(lp);
908 	found = 0;
909 	if (curelm == &accb->ccb_h) {
910 		found = 1;
911 		SLIST_REMOVE_HEAD(lp, sim_links.sle);
912 	} else {
913 		while(curelm != NULL) {
914 			struct ccb_hdr *nextelm;
915 
916 			nextelm = SLIST_NEXT(curelm, sim_links.sle);
917 			if (nextelm == &accb->ccb_h) {
918 				found = 1;
919 				SLIST_NEXT(curelm, sim_links.sle) =
920 				    SLIST_NEXT(nextelm, sim_links.sle);
921 				break;
922 			}
923 			curelm = nextelm;
924 		}
925 	}
926 	rls_lun_statep(isp, tptr);
927 	if (found) {
928 		accb->ccb_h.status = CAM_REQ_ABORTED;
929 		return (CAM_REQ_CMP);
930 	}
931 	return(CAM_PATH_INVALID);
932 }
933 
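/*
 * Turn an XPT_CONT_TARGET_IO CCB into a CTIO (parallel SCSI) or CTIO2
 * (Fibre Channel) request queue entry: fill in the nexus, data direction
 * and any status/sense to be returned, register the CCB under a handle,
 * and then let the DMA setup routine queue the entry to the firmware.
 */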
934 static cam_status
935 isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
936 {
937 	void *qe;
938 	struct ccb_scsiio *cso = &ccb->csio;
939 	u_int16_t *hp, save_handle;
940 	u_int16_t iptr, optr;
941 
942 
943 	if (isp_getrqentry(isp, &iptr, &optr, &qe)) {
944 		xpt_print_path(ccb->ccb_h.path);
945 		printf("Request Queue Overflow in isp_target_start_ctio\n");
946 		return (CAM_RESRC_UNAVAIL);
947 	}
948 	bzero(qe, QENTRY_LEN);
949 
950 	/*
951 	 * We're either moving data or completing a command here.
952 	 */
953 
954 	if (IS_FC(isp)) {
955 		struct ccb_accept_tio *atiop;
956 		ct2_entry_t *cto = qe;
957 
958 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
959 		cto->ct_header.rqs_entry_count = 1;
960 		cto->ct_iid = cso->init_id;
961 		if (isp->isp_maxluns <= 16) {
962 			cto->ct_lun = ccb->ccb_h.target_lun;
963 		}
964 		/*
965 		 * Start with a residual based on what the original datalength
966 		 * was supposed to be. Basically, we ignore what CAM has set
967 		 * for residuals. The data transfer routines will knock off
968 		 * the residual for each byte actually moved- and also will
969 		 * be responsible for setting the underrun flag.
970 		 */
971 		/* HACK! HACK! */
972 		if ((atiop = ccb->ccb_h.periph_priv.entries[1].ptr) != NULL) {
973 			cto->ct_resid = atiop->ccb_h.spriv_field0;
974 		}
975 
976 		cto->ct_rxid = cso->tag_id;
977 		if (cso->dxfer_len == 0) {
978 			cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
979 			if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
980 				cto->ct_flags |= CT2_SENDSTATUS;
981 				cto->rsp.m1.ct_scsi_status = cso->scsi_status;
982 			}
983 			if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
984 				int m = min(cso->sense_len, MAXRESPLEN);
985 				bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
986 				cto->rsp.m1.ct_senselen = m;
987 				cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
988 			}
989 		} else {
990 			cto->ct_flags |= CT2_FLAG_MODE0;
991 			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
992 				cto->ct_flags |= CT2_DATA_IN;
993 			} else {
994 				cto->ct_flags |= CT2_DATA_OUT;
995 			}
996 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
997 				cto->ct_flags |= CT2_SENDSTATUS;
998 				cto->rsp.m0.ct_scsi_status = cso->scsi_status;
999 			}
1000 			/*
1001 			 * If we're sending data and status back together,
1002 			 * we can't also send back sense data as well.
1003 			 */
1004 			ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1005 		}
1006 		if (cto->ct_flags & CT2_SENDSTATUS) {
1007 			isp_prt(isp, ISP_LOGTDEBUG1,
1008 			    "CTIO2[%x] SCSI STATUS 0x%x datalength %u",
1009 			    cto->ct_rxid, cso->scsi_status, cto->ct_resid);
1010 		}
1011 		if  (cto->ct_flags & CT2_SENDSTATUS)
1012 			cto->ct_flags |= CT2_CCINCR;
1013 		cto->ct_timeout = 10;
1014 		hp = &cto->ct_syshandle;
1015 	} else {
1016 		ct_entry_t *cto = qe;
1017 
1018 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1019 		cto->ct_header.rqs_entry_count = 1;
1020 		cto->ct_iid = cso->init_id;
1021 		cto->ct_iid |= XS_CHANNEL(ccb) << 7;
1022 		cto->ct_tgt = ccb->ccb_h.target_id;
1023 		cto->ct_lun = ccb->ccb_h.target_lun;
1024 		cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id);
1025 		if (AT_HAS_TAG(cso->tag_id)) {
1026 			cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id);
1027 			cto->ct_flags |= CT_TQAE;
1028 		}
1029 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1030 			cto->ct_flags |= CT_NODISC;
1031 		}
1032 		if (cso->dxfer_len == 0) {
1033 			cto->ct_flags |= CT_NO_DATA;
1034 		} else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1035 			cto->ct_flags |= CT_DATA_IN;
1036 		} else {
1037 			cto->ct_flags |= CT_DATA_OUT;
1038 		}
1039 		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1040 			cto->ct_flags |= CT_SENDSTATUS;
1041 			cto->ct_scsi_status = cso->scsi_status;
1042 			cto->ct_resid = cso->resid;
1043 		}
1044 		if (cto->ct_flags & CT_SENDSTATUS) {
1045 			isp_prt(isp, ISP_LOGTDEBUG1,
1046 			    "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
1047 			    cto->ct_fwhandle, cso->scsi_status, cso->resid,
1048 			    cso->tag_id);
1049 		}
1050 		cto->ct_timeout = 10;
1051 		hp = &cto->ct_syshandle;
1052 		ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1053 		if (cto->ct_flags & CT_SENDSTATUS)
1054 			cto->ct_flags |= CT_CCINCR;
1055 	}
1056 
1057 	if (isp_save_xs(isp, (XS_T *)ccb, hp)) {
1058 		xpt_print_path(ccb->ccb_h.path);
1059 		printf("No XFLIST pointers for isp_target_start_ctio\n");
1060 		return (CAM_RESRC_UNAVAIL);
1061 	}
1062 
1063 
1064 	/*
1065 	 * Call the dma setup routines for this entry (and any subsequent
1066 	 * CTIOs) if there's data to move, and then tell the f/w it's got
1067 	 * new things to play with. As with isp_start's usage of DMA setup,
1068 	 * any swizzling is done in the machine dependent layer. Because
1069 	 * of this, we put the request onto the queue area first in native
1070 	 * format.
1071 	 */
1072 
1073 	save_handle = *hp;
1074 
1075 	switch (ISP_DMASETUP(isp, cso, qe, &iptr, optr)) {
1076 	case CMD_QUEUED:
1077 		ISP_ADD_REQUEST(isp, iptr);
1078 		return (CAM_REQ_INPROG);
1079 
1080 	case CMD_EAGAIN:
1081 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1082 		isp_destroy_handle(isp, save_handle);
1083 		return (CAM_RESRC_UNAVAIL);
1084 
1085 	default:
1086 		isp_destroy_handle(isp, save_handle);
1087 		return (XS_ERR(ccb));
1088 	}
1089 }
1090 
1091 static void
1092 isp_refire_putback_atio(void *arg)
1093 {
1094 	int s = splcam();
1095 	isp_target_putback_atio(arg);
1096 	splx(s);
1097 }
1098 
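/*
 * Hand an ATIO resource back to the firmware for the nexus that just
 * completed (rebuilding an ATIO or ATIO2 entry from the CCB), retrying
 * later via timeout if the request queue is currently full, and then
 * finish off the CTIO CCB itself.
 */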
1099 static void
1100 isp_target_putback_atio(union ccb *ccb)
1101 {
1102 	struct ispsoftc *isp;
1103 	struct ccb_scsiio *cso;
1104 	u_int16_t iptr, optr;
1105 	void *qe;
1106 
1107 	isp = XS_ISP(ccb);
1108 
1109 	if (isp_getrqentry(isp, &iptr, &optr, &qe)) {
1110 		(void) timeout(isp_refire_putback_atio, ccb, 10);
1111 		isp_prt(isp, ISP_LOGWARN,
1112 		    "isp_target_putback_atio: Request Queue Overflow");
1113 		return;
1114 	}
1115 	bzero(qe, QENTRY_LEN);
1116 	cso = &ccb->csio;
1117 	if (IS_FC(isp)) {
1118 		at2_entry_t *at = qe;
1119 		at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
1120 		at->at_header.rqs_entry_count = 1;
1121 		if (isp->isp_maxluns > 16) {
1122 			at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
1123 		} else {
1124 			at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
1125 		}
1126 		at->at_status = CT_OK;
1127 		at->at_rxid = cso->tag_id;
1128 		ISP_SWIZ_ATIO2(isp, qe, qe);
1129 	} else {
1130 		at_entry_t *at = qe;
1131 		at->at_header.rqs_entry_type = RQSTYPE_ATIO;
1132 		at->at_header.rqs_entry_count = 1;
1133 		at->at_iid = cso->init_id;
1134 		at->at_iid |= XS_CHANNEL(ccb) << 7;
1135 		at->at_tgt = cso->ccb_h.target_id;
1136 		at->at_lun = cso->ccb_h.target_lun;
1137 		at->at_status = CT_OK;
1138 		at->at_tag_val = AT_GET_TAG(cso->tag_id);
1139 		at->at_handle = AT_GET_HANDLE(cso->tag_id);
1140 		ISP_SWIZ_ATIO(isp, qe, qe);
1141 	}
1142 	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
1143 	ISP_ADD_REQUEST(isp, iptr);
1144 	isp_complete_ctio(ccb);
1145 }
1146 
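/*
 * Complete a target mode CCB back to CAM: mark it done if it is still in
 * progress, clear CAM_SIM_QUEUED, and if the SIMQ was frozen for resource
 * shortage and no other freeze reasons remain, piggyback a CAM_RELEASE_SIMQ
 * on the completion.
 */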
1147 static void
1148 isp_complete_ctio(union ccb *ccb)
1149 {
1150 	struct ispsoftc *isp = XS_ISP(ccb);
1151 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1152 		ccb->ccb_h.status |= CAM_REQ_CMP;
1153 	}
1154 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1155 	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) {
1156 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE;
1157 		if (isp->isp_osinfo.simqfrozen == 0) {
1158 			if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
1159 				isp_prt(isp, ISP_LOGDEBUG2, "ctio->relsimq");
1160 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1161 			} else {
1162 				isp_prt(isp, ISP_LOGWARN, "ctio->devqfrozen");
1163 			}
1164 		} else {
1165 			isp_prt(isp, ISP_LOGWARN,
1166 			    "ctio->simqfrozen(%x)", isp->isp_osinfo.simqfrozen);
1167 		}
1168 	}
1169 	xpt_done(ccb);
1170 }
1171 
1172 /*
1173  * Handle ATIO stuff that the generic code can't.
1174  * This means handling CDBs.
1175  */
1176 
1177 static int
1178 isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
1179 {
1180 	tstate_t *tptr;
1181 	int status, bus;
1182 	struct ccb_accept_tio *atiop;
1183 
1184 	/*
1185 	 * The firmware status (except for the QLTM_SVALID bit)
1186 	 * indicates why this ATIO was sent to us.
1187 	 *
1188 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1189 	 *
1190 	 * If the DISCONNECTS DISABLED bit is set in the flags field,
1191 	 * we're still connected on the SCSI bus.
1192 	 */
1193 	status = aep->at_status;
1194 	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
1195 		/*
1196 		 * Bus Phase Sequence error. We should have sense data
1197 		 * suggested by the f/w. I'm not quite sure yet what
1198 		 * to do about this for CAM.
1199 		 */
1200 		isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
1201 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1202 		return (0);
1203 	}
1204 	if ((status & ~QLTM_SVALID) != AT_CDB) {
1205 		isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform",
1206 		    status);
1207 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1208 		return (0);
1209 	}
1210 
1211 	bus = GET_BUS_VAL(aep->at_iid);
1212 	tptr = get_lun_statep(isp, bus, aep->at_lun);
1213 	if (tptr == NULL) {
1214 		tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
1215 	}
1216 
1217 	if (tptr == NULL) {
1218 		/*
1219 		 * Because we can't autofeed sense data back with
1220 		 * a command for parallel SCSI, we can't give back
1221 		 * a CHECK CONDITION. We'll give back a BUSY status
1222 		 * instead. This works out okay because the only
1223 		 * time we should, in fact, get this, is in the
1224 		 * case that somebody configured us without the
1225 		 * blackhole driver, so they get what they deserve.
1226 		 */
1227 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1228 		return (0);
1229 	}
1230 
1231 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1232 	if (atiop == NULL) {
1233 		/*
1234 		 * Because we can't autofeed sense data back with
1235 		 * a command for parallel SCSI, we can't give back
1236 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1237 		 * instead. This works out okay because the only time we
1238 		 * should, in fact, get this, is in the case that we've
1239 		 * run out of ATIOS.
1240 		 */
1241 		xpt_print_path(tptr->owner);
1242 		isp_prt(isp, ISP_LOGWARN,
1243 		    "no ATIOS for lun %d from initiator %d on channel %d",
1244 		    aep->at_lun, GET_IID_VAL(aep->at_iid), bus);
1245 		rls_lun_statep(isp, tptr);
1246 		if (aep->at_flags & AT_TQAE)
1247 			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1248 		else
1249 			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1250 		return (0);
1251 	}
1252 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1253 	if (tptr == &isp->isp_osinfo.tsdflt[bus]) {
1254 		atiop->ccb_h.target_id = aep->at_tgt;
1255 		atiop->ccb_h.target_lun = aep->at_lun;
1256 	}
1257 	if (aep->at_flags & AT_NODISC) {
1258 		atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
1259 	} else {
1260 		atiop->ccb_h.flags = 0;
1261 	}
1262 
1263 	if (status & QLTM_SVALID) {
1264 		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
1265 		atiop->sense_len = amt;
1266 		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
1267 	} else {
1268 		atiop->sense_len = 0;
1269 	}
1270 
1271 	atiop->init_id = GET_IID_VAL(aep->at_iid);
1272 	atiop->cdb_len = aep->at_cdblen;
1273 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
1274 	atiop->ccb_h.status = CAM_CDB_RECVD;
1275 	/*
1276 	 * Construct a tag 'id' based upon tag value (which may be 0..255)
1277 	 * and the handle (which we have to preserve).
1278 	 */
1279 	AT_MAKE_TAGID(atiop->tag_id, aep);
1280 	if (aep->at_flags & AT_TQAE) {
1281 		atiop->tag_action = aep->at_tag_type;
1282 		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
1283 	}
1284 	xpt_done((union ccb*)atiop);
1285 	isp_prt(isp, ISP_LOGTDEBUG1,
1286 	    "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
1287 	    aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
1288 	    GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
1289 	    aep->at_tag_type, (aep->at_flags & AT_NODISC)?
1290 	    "nondisc" : "disconnecting");
1291 	rls_lun_statep(isp, tptr);
1292 	return (0);
1293 }
1294 
1295 static int
1296 isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
1297 {
1298 	lun_id_t lun;
1299 	tstate_t *tptr;
1300 	struct ccb_accept_tio *atiop;
1301 
1302 	/*
1303 	 * The firmware status (except for the QLTM_SVALID bit)
1304 	 * indicates why this ATIO was sent to us.
1305 	 *
1306 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1307 	 */
1308 	if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
1309 		isp_prt(isp, ISP_LOGWARN,
1310 		    "bogus atio (0x%x) leaked to platform", aep->at_status);
1311 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1312 		return (0);
1313 	}
1314 
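	/*
	 * With SCC firmware (more than 16 luns supported) the lun arrives in
	 * the at_scclun field; otherwise it is carried in at_lun.
	 */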
1315 	if (isp->isp_maxluns > 16) {
1316 		lun = aep->at_scclun;
1317 	} else {
1318 		lun = aep->at_lun;
1319 	}
1320 	tptr = get_lun_statep(isp, 0, lun);
1321 	if (tptr == NULL) {
1322 		tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
1323 	}
1324 
1325 	if (tptr == NULL) {
1326 		/*
1327 		 * What we'd like to know is whether or not we have a listener
1328 		 * upstream that really hasn't configured yet. If we do, then
1329 		 * we can give a more sensible reply here. If not, then we can
1330 		 * reject this out of hand.
1331 		 *
1332 		 * Choices for what to send were
1333 		 *
1334 		 *	Not Ready, Unit Not Self-Configured Yet
1335 		 *	(0x2,0x3e,0x00)
1336 		 *
1337 		 * for the former and
1338 		 *
1339 		 *	Illegal Request, Logical Unit Not Supported
1340 		 *	(0x5,0x25,0x00)
1341 		 *
1342 		 * for the latter.
1343 		 *
1344 		 * We used to decide whether there was at least one listener
1345 		 * based upon whether the black hole driver was configured.
1346 		 * However, recent config(8) changes have made this hard to do
1347 		 * at this time.
1348 		 *
1349 		 */
1350 		u_int32_t ccode = SCSI_STATUS_BUSY;
1351 
1352 		/*
1353 		 * Because we can't autofeed sense data back with
1354 		 * a command for parallel SCSI, we can't give back
1355 		 * a CHECK CONDITION. We'll give back a BUSY status
1356 		 * instead. This works out okay because the only
1357 		 * time we should, in fact, get this, is in the
1358 		 * case that somebody configured us without the
1359 		 * blackhole driver, so they get what they deserve.
1360 		 */
1361 		isp_endcmd(isp, aep, ccode, 0);
1362 		return (0);
1363 	}
1364 
1365 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1366 	if (atiop == NULL) {
1367 		/*
1368 		 * Because we can't autofeed sense data back with
1369 		 * a command for parallel SCSI, we can't give back
1370 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1371 		 * instead. This works out okay because the only time we
1372 		 * should, in fact, get this, is in the case that we've
1373 		 * run out of ATIOS.
1374 		 */
1375 		xpt_print_path(tptr->owner);
1376 		isp_prt(isp, ISP_LOGWARN,
1377 		    "no ATIOS for lun %d from initiator %d", lun, aep->at_iid);
1378 		rls_lun_statep(isp, tptr);
1379 		if (aep->at_flags & AT_TQAE)
1380 			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1381 		else
1382 			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1383 		return (0);
1384 	}
1385 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1386 
1387 	if (tptr == &isp->isp_osinfo.tsdflt[0]) {
1388 		atiop->ccb_h.target_id =
1389 			((fcparam *)isp->isp_param)->isp_loopid;
1390 		atiop->ccb_h.target_lun = lun;
1391 	}
1392 	/*
1393 	 * We don't get 'suggested' sense data as we do with SCSI cards.
1394 	 */
1395 	atiop->sense_len = 0;
1396 
1397 	atiop->init_id = aep->at_iid;
1398 	atiop->cdb_len = ATIO2_CDBLEN;
1399 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
1400 	atiop->ccb_h.status = CAM_CDB_RECVD;
1401 	atiop->tag_id = aep->at_rxid;
1402 	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
1403 	case ATIO2_TC_ATTR_SIMPLEQ:
1404 		atiop->tag_action = MSG_SIMPLE_Q_TAG;
1405 		break;
1406 	case ATIO2_TC_ATTR_HEADOFQ:
1407 		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
1408 		break;
1409 	case ATIO2_TC_ATTR_ORDERED:
1410 		atiop->tag_action = MSG_ORDERED_Q_TAG;
1411 		break;
1412 	case ATIO2_TC_ATTR_ACAQ:		/* ?? */
1413 	case ATIO2_TC_ATTR_UNTAGGED:
1414 	default:
1415 		atiop->tag_action = 0;
1416 		break;
1417 	}
1418 	if (atiop->tag_action != 0) {
1419 		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
1420 	}
1421 
1422 	/*
1423 	 * Preserve overall command datalength in private field.
1424 	 */
1425 	atiop->ccb_h.spriv_field0 = aep->at_datalen;
1426 
1427 	xpt_done((union ccb*)atiop);
1428 	isp_prt(isp, ISP_LOGTDEBUG1,
1429 	    "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
1430 	    aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
1431 	    lun, aep->at_taskflags, aep->at_datalen);
1432 	rls_lun_statep(isp, tptr);
1433 	return (0);
1434 }
1435 
1436 static int
1437 isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
1438 {
1439 	union ccb *ccb;
1440 	int sentstatus, ok, notify_cam, resid = 0;
1441 
1442 	/*
1443 	 * CTIO and CTIO2 are close enough....
1444 	 */
1445 
1446 	ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_syshandle);
1447 	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
1448 	isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);
1449 
1450 	if (IS_FC(isp)) {
1451 		ct2_entry_t *ct = arg;
1452 		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
1453 		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1454 		if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
1455 			ccb->ccb_h.status |= CAM_SENT_SENSE;
1456 		}
1457 		isp_prt(isp, ISP_LOGTDEBUG1,
1458 		    "CTIO2[%x] sts 0x%x flg 0x%x sns %d %s",
1459 		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
1460 		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
1461 		    sentstatus? "FIN" : "MID");
1462 		notify_cam = ct->ct_header.rqs_seqno & 0x1;
1463 		if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
1464 			resid = ct->ct_resid;
1465 		}
1466 	} else {
1467 		ct_entry_t *ct = arg;
1468 		sentstatus = ct->ct_flags & CT_SENDSTATUS;
1469 		ok = (ct->ct_status  & ~QLTM_SVALID) == CT_OK;
1470 		isp_prt(isp, ISP_LOGTDEBUG1,
1471 		    "CTIO[%x] tag %x iid %x tgt %d lun %d sts 0x%x flg %x %s",
1472 		    ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_tgt,
1473 		    ct->ct_lun, ct->ct_status, ct->ct_flags,
1474 		    sentstatus? "FIN" : "MID");
1475 
1476 		/*
1477 		 * We *ought* to be able to get back to the original ATIO
1478 		 * here, but for some reason this gets lost. It's just as
1479 		 * well because it's squirrelled away as part of periph
1480 		 * private data.
1481 		 *
1482 		 * We can live without it as long as we continue to use
1483 		 * the auto-replenish feature for CTIOs.
1484 		 */
1485 		notify_cam = ct->ct_header.rqs_seqno & 0x1;
1486 		if (ct->ct_status & QLTM_SVALID) {
1487 			char *sp = (char *)ct;
1488 			sp += CTIO_SENSE_OFFSET;
1489 			ccb->csio.sense_len =
1490 			    min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
1491 			MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
1492 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1493 		}
1494 		if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
1495 			resid = ct->ct_resid;
1496 		}
1497 	}
1498 	ccb->csio.resid += resid;
1499 
1500 	/*
1501 	 * We're here either because intermediate data transfers are done
1502 	 * and/or the final status CTIO (which may have joined with a
1503 	 * Data Transfer) is done.
1504 	 *
1505 	 * In any case, for this platform, the upper layers figure out
1506 	 * what to do next, so all we do here is collect status and
1507 	 * pass information along. Any DMA handles have already been
1508 	 * freed.
1509 	 */
1510 	if (notify_cam == 0) {
1511 		isp_prt(isp, ISP_LOGTDEBUG0, "  INTER CTIO done");
1512 		return (0);
1513 	}
1514 
1515 	isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO done (resid %d)",
1516 	    (sentstatus)? "  FINAL " : "MIDTERM ", ccb->csio.resid);
1517 
1518 	if (!ok) {
1519 		isp_target_putback_atio(ccb);
1520 	} else {
1521 		isp_complete_ctio(ccb);
1522 
1523 	}
1524 	return (0);
1525 }
1526 #endif
1527 
1528 static void
1529 isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
1530 {
1531 	struct cam_sim *sim;
1532 	struct ispsoftc *isp;
1533 
1534 	sim = (struct cam_sim *)cbarg;
1535 	isp = (struct ispsoftc *) cam_sim_softc(sim);
1536 	switch (code) {
1537 	case AC_LOST_DEVICE:
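		/*
		 * A device went away. For parallel SCSI, drop the target's
		 * negotiation parameters back to defaults and push an update
		 * to the firmware so that any replacement device gets
		 * renegotiated from scratch, then restore the saved flags.
		 */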
1538 		if (IS_SCSI(isp)) {
1539 			u_int16_t oflags, nflags;
1540 			sdparam *sdp = isp->isp_param;
1541 			int tgt;
1542 
1543 			tgt = xpt_path_target_id(path);
1544 			ISP_LOCK(isp);
1545 			sdp += cam_sim_bus(sim);
1546 #ifndef	ISP_TARGET_MODE
1547 			if (tgt == sdp->isp_initiator_id) {
1548 				nflags = DPARM_DEFAULT;
1549 			} else {
1550 				nflags = DPARM_SAFE_DFLT;
1551 				if (isp->isp_loaded_fw) {
1552 					nflags |= DPARM_NARROW | DPARM_ASYNC;
1553 				}
1554 			}
1555 #else
1556 			nflags = DPARM_DEFAULT;
1557 #endif
1558 			oflags = sdp->isp_devparam[tgt].dev_flags;
1559 			sdp->isp_devparam[tgt].dev_flags = nflags;
1560 			sdp->isp_devparam[tgt].dev_update = 1;
1561 			isp->isp_update |= (1 << cam_sim_bus(sim));
1562 			(void) isp_control(isp, ISPCTL_UPDATE_PARAMS, NULL);
1563 			sdp->isp_devparam[tgt].dev_flags = oflags;
1564 			ISP_UNLOCK(isp);
1565 		}
1566 		break;
1567 	default:
1568 		isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
1569 		break;
1570 	}
1571 }
1572 
1573 static void
1574 isp_poll(struct cam_sim *sim)
1575 {
1576 	struct ispsoftc *isp = cam_sim_softc(sim);
1577 	ISP_LOCK(isp);
1578 	(void) isp_intr(isp);
1579 	ISP_UNLOCK(isp);
1580 }
1581 
1582 #if	0
1583 static void
1584 isp_relsim(void *arg)
1585 {
1586 	struct ispsoftc *isp = arg;
1587 	ISP_LOCK(isp);
1588 	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED) {
1589 		int wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_TIMED;
1590 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_TIMED;
1591 		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
1592 			xpt_release_simq(isp->isp_sim, 1);
1593 			isp_prt(isp, ISP_LOGDEBUG2, "timed relsimq");
1594 		}
1595 	}
1596 	ISP_UNLOCK(isp);
1597 }
1598 #endif
1599 
1600 static void
1601 isp_watchdog(void *arg)
1602 {
1603 	XS_T *xs = arg;
1604 	struct ispsoftc *isp = XS_ISP(xs);
1605 	u_int32_t handle;
1606 
1607 	/*
1608 	 * We've decided this command is dead. Make sure we're not trying
1609 	 * to kill a command that's already dead by getting its handle and
1610 	 * seeing whether it's still alive.
1611 	 */
1612 	ISP_LOCK(isp);
1613 	handle = isp_find_handle(isp, xs);
1614 	if (handle) {
1615 		u_int16_t r;
1616 
1617 		if (XS_CMD_DONE_P(xs)) {
1618 			isp_prt(isp, ISP_LOGDEBUG1,
1619 			    "watchdog found done cmd (handle 0x%x)", handle);
1620 			ISP_UNLOCK(isp);
1621 			return;
1622 		}
1623 
1624 		if (XS_CMD_WDOG_P(xs)) {
1625 			isp_prt(isp, ISP_LOGDEBUG2,
1626 			    "recursive watchdog (handle 0x%x)", handle);
1627 			ISP_UNLOCK(isp);
1628 			return;
1629 		}
1630 
1631 		XS_CMD_S_WDOG(xs);
1632 
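		/*
		 * Peek at the interrupt status register and, if an interrupt
		 * is pending, run isp_intr() by hand in case the command
		 * actually completed but its completion was never serviced.
		 */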
1633 		r = ISP_READ(isp, BIU_ISR);
1634 
1635 		if (INT_PENDING(isp, r) && isp_intr(isp) && XS_CMD_DONE_P(xs)) {
1636 			isp_prt(isp, ISP_LOGDEBUG2,
1637 			    "watchdog cleanup (%x, %x)", handle, r);
1638 			xpt_done((union ccb *) xs);
1639 		} else if (XS_CMD_GRACE_P(xs)) {
1640 			/*
1641 			 * Make sure the command is *really* dead before we
1642 			 * release the handle (and DMA resources) for reuse.
1643 			 */
1644 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
1645 
1646 			/*
1647 			 * After this point, the comamnd is really dead.
1648 			 * After this point, the command is really dead.
1649 			if (XS_XFRLEN(xs)) {
1650 				ISP_DMAFREE(isp, xs, handle);
1651                 	}
1652 			isp_destroy_handle(isp, handle);
1653 			xpt_print_path(xs->ccb_h.path);
1654 			isp_prt(isp, ISP_LOGWARN,
1655 			    "watchdog timeout (%x, %x)", handle, r);
1656 			XS_SETERR(xs, CAM_CMD_TIMEOUT);
1657 			XS_CMD_C_WDOG(xs);
1658 			isp_done(xs);
1659 		} else {
1660 			u_int16_t iptr, optr;
1661 			ispreq_t *mp;
1662 
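			/*
			 * Not done, but no grace period yet: rearm the
			 * watchdog, mark the command as being in its grace
			 * period, and push a SYNC_ALL marker through the
			 * request queue so the firmware flushes out anything
			 * still pending for this channel.
			 */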
1663 			XS_CMD_C_WDOG(xs);
1664 			xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
1665 			if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) {
1666 				ISP_UNLOCK(isp);
1667 				return;
1668 			}
1669 			XS_CMD_S_GRACE(xs);
1670 			MEMZERO((void *) mp, sizeof (*mp));
1671 			mp->req_header.rqs_entry_count = 1;
1672 			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
1673 			mp->req_modifier = SYNC_ALL;
1674 			mp->req_target = XS_CHANNEL(xs) << 7;
1675 			ISP_SWIZZLE_REQUEST(isp, mp);
1676 			ISP_ADD_REQUEST(isp, iptr);
1677 		}
1678 	} else {
1679 		isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
1680 	}
1681 	ISP_UNLOCK(isp);
1682 }
1683 
1684 static int isp_ktmature = 0;
1685 
1686 static void
1687 isp_kthread(void *arg)
1688 {
1689 	int wasfrozen;
1690 	struct ispsoftc *isp = arg;
1691 
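	/*
	 * Fibre Channel support thread: each pass polls isp_fc_runstate()
	 * until the loop is usable (giving up early if the loop has never
	 * been seen), releases any loop-down SIMQ freeze so queued commands
	 * can proceed or be failed, and then sleeps on kthread_cv until it
	 * is kicked again (e.g. from the CMD_RQLATER path in isp_action).
	 */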
1692 	mtx_lock(&isp->isp_lock);
1693 	for (;;) {
1694 		isp_prt(isp, ISP_LOGDEBUG0, "kthread checking FC state");
1695 		while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
1696 			if (FCPARAM(isp)->isp_fwstate != FW_READY ||
1697 			    FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) {
1698 				if (FCPARAM(isp)->loop_seen_once == 0 ||
1699 				    isp_ktmature == 0) {
1700 					break;
1701 				}
1702 			}
1703 			msleep(isp_kthread, &isp->isp_lock,
1704 			    PRIBIO, "isp_fcthrd", hz);
1705 		}
1706 		/*
1707 		 * Even if we didn't get good loop state we may be
1708 		 * unfreezing the SIMQ so that we can kill off
1709 		 * commands (if we've never seen loop before, e.g.)
1710 		 */
1711 		isp_ktmature = 1;
1712 		wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
1713 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
1714 		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
1715 			isp_prt(isp, ISP_LOGDEBUG0, "kthread up release simq");
1716 			ISPLOCK_2_CAMLOCK(isp);
1717 			xpt_release_simq(isp->isp_sim, 1);
1718 			CAMLOCK_2_ISPLOCK(isp);
1719 		}
1720 		cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock);
1721 	}
1722 }
1723 
1724 static void
1725 isp_action(struct cam_sim *sim, union ccb *ccb)
1726 {
1727 	int bus, tgt, error;
1728 	struct ispsoftc *isp;
1729 	struct ccb_trans_settings *cts;
1730 
1731 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
1732 
1733 	isp = (struct ispsoftc *)cam_sim_softc(sim);
1734 	ccb->ccb_h.sim_priv.entries[0].field = 0;
1735 	ccb->ccb_h.sim_priv.entries[1].ptr = isp;
1736 	if (isp->isp_state != ISP_RUNSTATE &&
1737 	    ccb->ccb_h.func_code == XPT_SCSI_IO) {
1738 		CAMLOCK_2_ISPLOCK(isp);
1739 		isp_init(isp);
1740 		if (isp->isp_state != ISP_INITSTATE) {
1741 			ISP_UNLOCK(isp);
1742 			/*
1743 			 * Lie. Say it was a selection timeout.
1744 			 */
1745 			ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
1746 			xpt_freeze_devq(ccb->ccb_h.path, 1);
1747 			xpt_done(ccb);
1748 			return;
1749 		}
1750 		isp->isp_state = ISP_RUNSTATE;
1751 		ISPLOCK_2_CAMLOCK(isp);
1752 	}
1753 	isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
1754 
1755 
1756 	switch (ccb->ccb_h.func_code) {
1757 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
1758 		/*
1759 		 * Do a couple of preliminary checks...
1760 		 */
1761 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
1762 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
1763 				ccb->ccb_h.status = CAM_REQ_INVALID;
1764 				xpt_done(ccb);
1765 				break;
1766 			}
1767 		}
1768 #ifdef	DIAGNOSTIC
1769 		if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
1770 			ccb->ccb_h.status = CAM_PATH_INVALID;
1771 		} else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
1772 			ccb->ccb_h.status = CAM_PATH_INVALID;
1773 		}
1774 		if (ccb->ccb_h.status == CAM_PATH_INVALID) {
1775 			isp_prt(isp, ISP_LOGERR,
1776 			    "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
1777 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
1778 			xpt_done(ccb);
1779 			break;
1780 		}
1781 #endif
1782 		((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
1783 		CAMLOCK_2_ISPLOCK(isp);
1784 		error = isp_start((XS_T *) ccb);
1785 		switch (error) {
1786 		case CMD_QUEUED:
1787 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
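			/*
			 * CAM timeouts are in milliseconds. Use 60 seconds
			 * for CAM_TIME_DEFAULT, convert to clock ticks
			 * (rounding up) and pad with two extra seconds of
			 * slack before arming the watchdog.
			 */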
1788 			if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1789 				u_int64_t ticks = (u_int64_t) hz;
1790 				if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
1791 					ticks = 60 * 1000 * ticks;
1792 				else
1793 					ticks = ccb->ccb_h.timeout * hz;
1794 				ticks = ((ticks + 999) / 1000) + hz + hz;
1795 				if (ticks >= 0x80000000) {
1796 					isp_prt(isp, ISP_LOGERR,
1797 					    "timeout overflow");
1798 					ticks = 0x80000000;
1799 				}
1800 				ccb->ccb_h.timeout_ch = timeout(isp_watchdog,
1801 				    (caddr_t)ccb, (int)ticks);
1802 			} else {
1803 				callout_handle_init(&ccb->ccb_h.timeout_ch);
1804 			}
1805 			ISPLOCK_2_CAMLOCK(isp);
1806 			break;
1807 		case CMD_RQLATER:
1808 			/*
1809 			 * This can only happen for Fibre Channel
1810 			 */
1811 			KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only"));
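			/*
			 * If the loop has never come up and the FC thread has
			 * already had its chance, fail the command as a
			 * selection timeout. Otherwise kick the FC thread,
			 * freeze the SIMQ for loop-down if it isn't already
			 * frozen, and requeue the command.
			 */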
1812 			if (FCPARAM(isp)->loop_seen_once == 0 && isp_ktmature) {
1813 				ISPLOCK_2_CAMLOCK(isp);
1814 				XS_SETERR(ccb, CAM_SEL_TIMEOUT);
1815 				xpt_done(ccb);
1816 				break;
1817 			}
1818 			cv_signal(&isp->isp_osinfo.kthread_cv);
1819 			if (isp->isp_osinfo.simqfrozen == 0) {
1820 				isp_prt(isp, ISP_LOGDEBUG2,
1821 				    "RQLATER freeze simq");
1822 				isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
1823 				ISPLOCK_2_CAMLOCK(isp);
1824 				xpt_freeze_simq(sim, 1);
1825 			} else {
1826 				ISPLOCK_2_CAMLOCK(isp);
1827 			}
1828 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
1829 			xpt_done(ccb);
1830 			break;
1831 		case CMD_EAGAIN:
1832 			if (isp->isp_osinfo.simqfrozen == 0) {
1833 				xpt_freeze_simq(sim, 1);
1834 				isp_prt(isp, ISP_LOGDEBUG2,
1835 				    "EAGAIN freeze simq");
1836 			}
1837 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE;
1838 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
1839 			ISPLOCK_2_CAMLOCK(isp);
1840 			xpt_done(ccb);
1841 			break;
1842 		case CMD_COMPLETE:
1843 			isp_done((struct ccb_scsiio *) ccb);
1844 			ISPLOCK_2_CAMLOCK(isp);
1845 			break;
1846 		default:
1847 			isp_prt(isp, ISP_LOGERR,
1848 			    "What's this? 0x%x at %d in file %s",
1849 			    error, __LINE__, __FILE__);
1850 			XS_SETERR(ccb, CAM_REQ_CMP_ERR);
1851 			xpt_done(ccb);
1852 			ISPLOCK_2_CAMLOCK(isp);
1853 		}
1854 		break;
1855 
1856 #ifdef	ISP_TARGET_MODE
1857 	case XPT_EN_LUN:		/* Enable LUN as a target */
1858 		CAMLOCK_2_ISPLOCK(isp);
1859 		isp_en_lun(isp, ccb);
1860 		ISPLOCK_2_CAMLOCK(isp);
1861 		xpt_done(ccb);
1862 		break;
1863 
1864 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
1865 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
1866 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
1867 	{
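		/*
		 * These CCBs provide resources for target mode: look up the
		 * per-lun state and park the CCB on the ATIO or (immediate)
		 * notify list until the firmware needs it.
		 */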
1868 		tstate_t *tptr =
1869 		    get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
1870 		if (tptr == NULL) {
1871 			ccb->ccb_h.status = CAM_LUN_INVALID;
1872 			xpt_done(ccb);
1873 			break;
1874 		}
1875 		ccb->ccb_h.sim_priv.entries[0].field = 0;
1876 		ccb->ccb_h.sim_priv.entries[1].ptr = isp;
1877 		CAMLOCK_2_ISPLOCK(isp);
1878 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
1879 			SLIST_INSERT_HEAD(&tptr->atios,
1880 			    &ccb->ccb_h, sim_links.sle);
1881 		} else {
1882 			SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
1883 			    sim_links.sle);
1884 		}
1885 		rls_lun_statep(isp, tptr);
1886 		ccb->ccb_h.status = CAM_REQ_INPROG;
1887 		ISPLOCK_2_CAMLOCK(isp);
1888 		break;
1889 	}
1890 	case XPT_CONT_TARGET_IO:
1891 	{
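		/*
		 * Hand the CTIO to the target mode code. If it could not be
		 * started, freeze the simq and have CAM requeue it;
		 * otherwise mark it queued.
		 */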
1892 		CAMLOCK_2_ISPLOCK(isp);
1893 		ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
1894 		if (ccb->ccb_h.status != CAM_REQ_INPROG) {
1895 			if (isp->isp_osinfo.simqfrozen == 0) {
1896 				xpt_freeze_simq(sim, 1);
1897 				xpt_print_path(ccb->ccb_h.path);
1898 				isp_prt(isp, ISP_LOGINFO,
1899 				    "XPT_CONT_TARGET_IO freeze simq");
1900 			}
1901 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_RESOURCE;
1902 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
1903 			ISPLOCK_2_CAMLOCK(isp);
1904 			xpt_done(ccb);
1905 		} else {
1906 			ISPLOCK_2_CAMLOCK(isp);
1907 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
1908 		}
1909 		break;
1910 	}
1911 #endif
1912 	case XPT_RESET_DEV:		/* BDR the specified SCSI device */
1913 
1914 		bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
1915 		tgt = ccb->ccb_h.target_id;
1916 		tgt |= (bus << 16);
1917 
1918 		CAMLOCK_2_ISPLOCK(isp);
1919 		error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
1920 		ISPLOCK_2_CAMLOCK(isp);
1921 		if (error) {
1922 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1923 		} else {
1924 			ccb->ccb_h.status = CAM_REQ_CMP;
1925 		}
1926 		xpt_done(ccb);
1927 		break;
1928 	case XPT_ABORT:			/* Abort the specified CCB */
1929 	{
1930 		union ccb *accb = ccb->cab.abort_ccb;
1931 		CAMLOCK_2_ISPLOCK(isp);
1932 		switch (accb->ccb_h.func_code) {
1933 #ifdef	ISP_TARGET_MODE
1934 		case XPT_ACCEPT_TARGET_IO:
1935 		case XPT_IMMED_NOTIFY:
1936 			ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
1937 			break;
1938 		case XPT_CONT_TARGET_IO:
1939 			isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
1940 			ccb->ccb_h.status = CAM_UA_ABORT;
1941 			break;
1942 #endif
1943 		case XPT_SCSI_IO:
1944 			error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
1945 			if (error) {
1946 				ccb->ccb_h.status = CAM_UA_ABORT;
1947 			} else {
1948 				ccb->ccb_h.status = CAM_REQ_CMP;
1949 			}
1950 			break;
1951 		default:
1952 			ccb->ccb_h.status = CAM_REQ_INVALID;
1953 			break;
1954 		}
1955 		ISPLOCK_2_CAMLOCK(isp);
1956 		xpt_done(ccb);
1957 		break;
1958 	}
1959 #ifdef	CAM_NEW_TRAN_CODE
1960 #define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
1961 #else
1962 #define	IS_CURRENT_SETTINGS(c)	(c->flags & CCB_TRANS_CURRENT_SETTINGS)
1963 #endif
1964 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
1965 		cts = &ccb->cts;
1966 		tgt = cts->ccb_h.target_id;
1967 		CAMLOCK_2_ISPLOCK(isp);
1968 		if (IS_SCSI(isp)) {
1969 #ifndef	CAM_NEW_TRAN_CODE
1970 			sdparam *sdp = isp->isp_param;
1971 			u_int16_t *dptr;
1972 
1973 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
1974 
1975 			sdp += bus;
1976 			/*
1977 			 * We always update (internally) from dev_flags
1978 			 * so any request to change settings just gets
1979 			 * vectored to that location.
1980 			 */
1981 			dptr = &sdp->isp_devparam[tgt].dev_flags;
1982 
1983 			/*
1984 			 * Note that these operations affect the goal
1985 			 * flags (dev_flags), not the current state
1986 			 * flags. Then we mark things so that the next
1987 			 * operation to this HBA will cause the update
1988 			 * to occur.
1989 			 */
1990 			if (cts->valid & CCB_TRANS_DISC_VALID) {
1991 				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
1992 					*dptr |= DPARM_DISC;
1993 				} else {
1994 					*dptr &= ~DPARM_DISC;
1995 				}
1996 			}
1997 			if (cts->valid & CCB_TRANS_TQ_VALID) {
1998 				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
1999 					*dptr |= DPARM_TQING;
2000 				} else {
2001 					*dptr &= ~DPARM_TQING;
2002 				}
2003 			}
2004 			if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
2005 				switch (cts->bus_width) {
2006 				case MSG_EXT_WDTR_BUS_16_BIT:
2007 					*dptr |= DPARM_WIDE;
2008 					break;
2009 				default:
2010 					*dptr &= ~DPARM_WIDE;
2011 				}
2012 			}
2013 			/*
2014 			 * Any SYNC RATE of nonzero and SYNC_OFFSET
2015 			 * of nonzero will cause us to go to the
2016 			 * selected (from NVRAM) maximum value for
2017 			 * this device. At a later point, we'll
2018 			 * allow finer control.
2019 			 */
2020 			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2021 			    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
2022 			    (cts->sync_offset > 0)) {
2023 				*dptr |= DPARM_SYNC;
2024 			} else {
2025 				*dptr &= ~DPARM_SYNC;
2026 			}
2027 			*dptr |= DPARM_SAFE_DFLT;
2028 #else
2029 			struct ccb_trans_settings_scsi *scsi =
2030 			    &cts->proto_specific.scsi;
2031 			struct ccb_trans_settings_spi *spi =
2032 			    &cts->xport_specific.spi;
2033 			sdparam *sdp = isp->isp_param;
2034 			u_int16_t *dptr;
2035 
2036 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2037 			sdp += bus;
2038 			/*
2039 			 * We always update (internally) from dev_flags
2040 			 * so any request to change settings just gets
2041 			 * vectored to that location.
2042 			 */
2043 			dptr = &sdp->isp_devparam[tgt].dev_flags;
2044 
2045 			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
2046 				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
2047 					*dptr |= DPARM_DISC;
2048 				else
2049 					*dptr &= ~DPARM_DISC;
2050 			}
2051 
2052 			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
2053 				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
2054 					*dptr |= DPARM_TQING;
2055 				else
2056 					*dptr &= ~DPARM_TQING;
2057 			}
2058 
2059 			if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
2060 				if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
2061 					*dptr |= DPARM_WIDE;
2062 				else
2063 					*dptr &= ~DPARM_WIDE;
2064 			}
2065 
2066 			/*
2067 			 * XXX: FIX ME
2068 			 */
2069 			if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
2070 			    (spi->valid & CTS_SPI_VALID_SYNC_RATE)) {
2071 				*dptr |= DPARM_SYNC;
2072 				isp_prt(isp, ISP_LOGDEBUG0,
2073 				   "enabling synchronous mode, but ignoring "
2074 				   "setting to period 0x%x offset 0x%x",
2075 				   spi->sync_period, spi->sync_offset);
2076 			} else if (spi->sync_period && spi->sync_offset) {
2077 				*dptr |= DPARM_SYNC;
2078 				isp_prt(isp, ISP_LOGDEBUG0,
2079 				   "enabling synchronous mode (1), but ignoring"
2080 				   " setting to period 0x%x offset 0x%x",
2081 				   spi->sync_period, spi->sync_offset);
2082 			} else {
2083 				*dptr &= ~DPARM_SYNC;
2084 			}
2085 #endif
2086 			isp_prt(isp, ISP_LOGDEBUG0,
2087 			    "%d.%d set %s period 0x%x offset 0x%x flags 0x%x",
2088 			    bus, tgt, IS_CURRENT_SETTINGS(cts)? "current" :
2089 			    "user", sdp->isp_devparam[tgt].sync_period,
2090 			    sdp->isp_devparam[tgt].sync_offset,
2091 			    sdp->isp_devparam[tgt].dev_flags);
2092 			sdp->isp_devparam[tgt].dev_update = 1;
2093 			isp->isp_update |= (1 << bus);
2094 		} else {
2095 			/*
2096 			 * What, if anything, are we supposed to do?
2097 			 */
2098 		}
2099 		ISPLOCK_2_CAMLOCK(isp);
2100 		ccb->ccb_h.status = CAM_REQ_CMP;
2101 		xpt_done(ccb);
2102 		break;
2103 	case XPT_GET_TRAN_SETTINGS:
2104 		cts = &ccb->cts;
2105 		tgt = cts->ccb_h.target_id;
2106 		CAMLOCK_2_ISPLOCK(isp);
2107 		if (IS_FC(isp)) {
2108 #ifndef	CAM_NEW_TRAN_CODE
2109 			/*
2110 			 * a lot of normal SCSI things don't make sense.
2111 			 */
2112 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2113 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2114 			/*
2115 			 * How do you measure the width of a high
2116 			 * speed serial bus? Well, in bytes.
2117 			 *
2118 			 * Offset and period make no sense, though, so we set
2119 			 * (above) a 'base' transfer speed to be gigabit.
2120 			 */
2121 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2122 #else
2123 			fcparam *fcp = isp->isp_param;
2124 			struct ccb_trans_settings_fc *fc =
2125 			    &cts->xport_specific.fc;
2126 
2127 			cts->protocol = PROTO_SCSI;
2128 			cts->protocol_version = SCSI_REV_2;
2129 			cts->transport = XPORT_FC;
2130 			cts->transport_version = 0;
2131 
2132 			fc->valid = CTS_FC_VALID_SPEED;
2133 			fc->bitrate = 100000;
2134 			if (tgt > 0 && tgt < MAX_FC_TARG) {
2135 				struct lportdb *lp = &fcp->portdb[tgt];
2136 				fc->wwnn = lp->node_wwn;
2137 				fc->wwpn = lp->port_wwn;
2138 				fc->port = lp->portid;
2139 				fc->valid |= CTS_FC_VALID_WWNN |
2140 				    CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT;
2141 			}
2142 #endif
2143 		} else {
2144 #ifdef	CAM_NEW_TRAN_CODE
2145 			struct ccb_trans_settings_scsi *scsi =
2146 			    &cts->proto_specific.scsi;
2147 			struct ccb_trans_settings_spi *spi =
2148 			    &cts->xport_specific.spi;
2149 #endif
2150 			sdparam *sdp = isp->isp_param;
2151 			int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2152 			u_int16_t dval, pval, oval;
2153 
2154 			sdp += bus;
2155 
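			/*
			 * For current settings, force a refresh of the
			 * device parameters from the card and report the
			 * cur_* values; for user (goal) settings, report
			 * the nominal dev_flags/sync values instead.
			 */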
2156 			if (IS_CURRENT_SETTINGS(cts)) {
2157 				sdp->isp_devparam[tgt].dev_refresh = 1;
2158 				isp->isp_update |= (1 << bus);
2159 				(void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
2160 				    NULL);
2161 				dval = sdp->isp_devparam[tgt].cur_dflags;
2162 				oval = sdp->isp_devparam[tgt].cur_offset;
2163 				pval = sdp->isp_devparam[tgt].cur_period;
2164 			} else {
2165 				dval = sdp->isp_devparam[tgt].dev_flags;
2166 				oval = sdp->isp_devparam[tgt].sync_offset;
2167 				pval = sdp->isp_devparam[tgt].sync_period;
2168 			}
2169 
2170 #ifndef	CAM_NEW_TRAN_CODE
2171 			cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
2172 
2173 			if (dval & DPARM_DISC) {
2174 				cts->flags |= CCB_TRANS_DISC_ENB;
2175 			}
2176 			if (dval & DPARM_TQING) {
2177 				cts->flags |= CCB_TRANS_TAG_ENB;
2178 			}
2179 			if (dval & DPARM_WIDE) {
2180 				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2181 			} else {
2182 				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2183 			}
2184 			cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
2185 			    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2186 
2187 			if ((dval & DPARM_SYNC) && oval != 0) {
2188 				cts->sync_period = pval;
2189 				cts->sync_offset = oval;
2190 				cts->valid |=
2191 				    CCB_TRANS_SYNC_RATE_VALID |
2192 				    CCB_TRANS_SYNC_OFFSET_VALID;
2193 			}
2194 #else
2195 			cts->protocol = PROTO_SCSI;
2196 			cts->protocol_version = SCSI_REV_2;
2197 			cts->transport = XPORT_SPI;
2198 			cts->transport_version = 2;
2199 
2200 			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
2201 			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
2202 			if (dval & DPARM_DISC) {
2203 				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2204 			}
2205 			if (dval & DPARM_TQING) {
2206 				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2207 			}
2208 			if ((dval & DPARM_SYNC) && oval != 0) {
2209 				spi->sync_offset = oval;
2210 				spi->sync_period = pval;
2211 				spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2212 				spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2213 			}
2214 			spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2215 			if (dval & DPARM_WIDE) {
2216 				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2217 			} else {
2218 				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2219 			}
2220 			if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
2221 				scsi->valid = CTS_SCSI_VALID_TQ;
2222 				spi->valid |= CTS_SPI_VALID_DISC;
2223 			} else {
2224 				scsi->valid = 0;
2225 			}
2226 #endif
2227 			isp_prt(isp, ISP_LOGDEBUG0,
2228 			    "%d.%d get %s period 0x%x offset 0x%x flags 0x%x",
2229 			    bus, tgt, IS_CURRENT_SETTINGS(cts)? "current" :
2230 			    "user", pval, oval, dval);
2231 		}
2232 		ISPLOCK_2_CAMLOCK(isp);
2233 		ccb->ccb_h.status = CAM_REQ_CMP;
2234 		xpt_done(ccb);
2235 		break;
2236 
2237 	case XPT_CALC_GEOMETRY:
2238 	{
2239 		struct ccb_calc_geometry *ccg;
2240 		u_int32_t secs_per_cylinder;
2241 		u_int32_t size_mb;
2242 
2243 		ccg = &ccb->ccg;
2244 		if (ccg->block_size == 0) {
2245 			isp_prt(isp, ISP_LOGERR,
2246 			    "%d.%d XPT_CALC_GEOMETRY block size 0?",
2247 			    ccg->ccb_h.target_id, ccg->ccb_h.target_lun);
2248 			ccb->ccb_h.status = CAM_REQ_INVALID;
2249 			xpt_done(ccb);
2250 			break;
2251 		}
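		/*
		 * Synthesize a logical geometry from the volume size using
		 * the usual CAM heuristic: volumes over 1GB get 255 heads
		 * and 63 sectors per track, smaller ones 64 heads and 32
		 * sectors. For example, 4194304 blocks of 512 bytes is
		 * 2048MB, giving 255 * 63 = 16065 sectors per cylinder and
		 * about 261 cylinders.
		 */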
2252 		size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size);
2253 		if (size_mb > 1024) {
2254 			ccg->heads = 255;
2255 			ccg->secs_per_track = 63;
2256 		} else {
2257 			ccg->heads = 64;
2258 			ccg->secs_per_track = 32;
2259 		}
2260 		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2261 		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2262 		ccb->ccb_h.status = CAM_REQ_CMP;
2263 		xpt_done(ccb);
2264 		break;
2265 	}
2266 	case XPT_RESET_BUS:		/* Reset the specified bus */
2267 		bus = cam_sim_bus(sim);
2268 		CAMLOCK_2_ISPLOCK(isp);
2269 		error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
2270 		ISPLOCK_2_CAMLOCK(isp);
2271 		if (error)
2272 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2273 		else {
2274 			if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
2275 				xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2276 			else if (isp->isp_path != NULL)
2277 				xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2278 			ccb->ccb_h.status = CAM_REQ_CMP;
2279 		}
2280 		xpt_done(ccb);
2281 		break;
2282 
2283 	case XPT_TERM_IO:		/* Terminate the I/O process */
2284 		ccb->ccb_h.status = CAM_REQ_INVALID;
2285 		xpt_done(ccb);
2286 		break;
2287 
2288 	case XPT_PATH_INQ:		/* Path routing inquiry */
2289 	{
2290 		struct ccb_pathinq *cpi = &ccb->cpi;
2291 
2292 		cpi->version_num = 1;
2293 #ifdef	ISP_TARGET_MODE
2294 		cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2295 #else
2296 		cpi->target_sprt = 0;
2297 #endif
2298 		cpi->hba_eng_cnt = 0;
2299 		cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2300 		cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
2301 		cpi->bus_id = cam_sim_bus(sim);
2302 		if (IS_FC(isp)) {
2303 			cpi->hba_misc = PIM_NOBUSRESET;
2304 			/*
2305 			 * Because our loop ID can shift from time to time,
2306 			 * make our initiator ID out of range of our bus.
2307 			 */
2308 			cpi->initiator_id = cpi->max_target + 1;
2309 
2310 			/*
2311 			 * Set base transfer capabilities for Fibre Channel.
2312 			 * Technically not correct because we don't know
2313 			 * what media we're running on top of, but we'll
2314 			 * look good if we always say 100MB/s.
2315 			 */
2316 			cpi->base_transfer_speed = 100000;
2317 			cpi->hba_inquiry = PI_TAG_ABLE;
2318 #ifdef	CAM_NEW_TRAN_CODE
2319 			cpi->transport = XPORT_FC;
2320 			cpi->transport_version = 0;	/* WHAT'S THIS FOR? */
2321 #endif
2322 		} else {
2323 			sdparam *sdp = isp->isp_param;
2324 			sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
2325 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2326 			cpi->hba_misc = 0;
2327 			cpi->initiator_id = sdp->isp_initiator_id;
2328 			cpi->base_transfer_speed = 3300;
2329 #ifdef	CAM_NEW_TRAN_CODE
2330 			cpi->transport = XPORT_SPI;
2331 			cpi->transport_version = 2;	/* WHAT'S THIS FOR? */
2332 #endif
2333 		}
2334 #ifdef	CAM_NEW_TRAN_CODE
2335 		cpi->protocol = PROTO_SCSI;
2336 		cpi->protocol_version = SCSI_REV_2;
2337 #endif
2338 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2339 		strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2340 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2341 		cpi->unit_number = cam_sim_unit(sim);
2342 		cpi->ccb_h.status = CAM_REQ_CMP;
2343 		xpt_done(ccb);
2344 		break;
2345 	}
2346 	default:
2347 		ccb->ccb_h.status = CAM_REQ_INVALID;
2348 		xpt_done(ccb);
2349 		break;
2350 	}
2351 }
2352 
2353 #define	ISPDDB	(CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
2354 void
2355 isp_done(struct ccb_scsiio *sccb)
2356 {
2357 	struct ispsoftc *isp = XS_ISP(sccb);
2358 
2359 	if (XS_NOERR(sccb))
2360 		XS_SETERR(sccb, CAM_REQ_CMP);
2361 
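	/*
	 * If the command nominally completed but the SCSI status is not
	 * good, fold the SCSI status into the CAM status: a CHECK CONDITION
	 * without valid autosense becomes CAM_AUTOSENSE_FAIL, anything else
	 * becomes CAM_SCSI_STATUS_ERROR.
	 */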
2362 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
2363 	    (sccb->scsi_status != SCSI_STATUS_OK)) {
2364 		sccb->ccb_h.status &= ~CAM_STATUS_MASK;
2365 		if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
2366 		    (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
2367 			sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2368 		} else {
2369 			sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2370 		}
2371 	}
2372 
2373 	sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2374 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2375 		if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2376 			sccb->ccb_h.status |= CAM_DEV_QFRZN;
2377 			xpt_freeze_devq(sccb->ccb_h.path, 1);
2378 			if (sccb->scsi_status != SCSI_STATUS_OK)
2379 				isp_prt(isp, ISP_LOGDEBUG2,
2380 				    "freeze devq %d.%d %x %x",
2381 				    sccb->ccb_h.target_id,
2382 				    sccb->ccb_h.target_lun, sccb->ccb_h.status,
2383 				    sccb->scsi_status);
2384 		}
2385 	}
2386 
2387 	/*
2388 	 * If the simq was frozen waiting for resources, clear the
2389 	 * resource-wait freeze. If no freeze reasons remain and the devq
2390 	 * isn't frozen, mark the completing CCB so that the XPT layer
2391 	 * releases the simq.
2392 	 */
2393 	if (isp->isp_osinfo.simqfrozen & SIMQFRZ_RESOURCE) {
2394 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_RESOURCE;
2395 		if (isp->isp_osinfo.simqfrozen == 0) {
2396 			if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2397 				isp_prt(isp, ISP_LOGDEBUG2,
2398 				    "isp_done->relsimq");
2399 				sccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2400 			} else {
2401 				isp_prt(isp, ISP_LOGDEBUG2,
2402 				    "isp_done->devq frozen");
2403 			}
2404 		} else {
2405 			isp_prt(isp, ISP_LOGDEBUG2,
2406 			    "isp_done -> simqfrozen = %x",
2407 			    isp->isp_osinfo.simqfrozen);
2408 		}
2409 	}
2410 	if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
2411 	    (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2412 		xpt_print_path(sccb->ccb_h.path);
2413 		isp_prt(isp, ISP_LOGINFO,
2414 		    "cam completion status 0x%x", sccb->ccb_h.status);
2415 	}
2416 
2417 	XS_CMD_S_DONE(sccb);
2418 	if (XS_CMD_WDOG_P(sccb) == 0) {
2419 		untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch);
2420 		if (XS_CMD_GRACE_P(sccb)) {
2421 			isp_prt(isp, ISP_LOGDEBUG2,
2422 			    "finished command on borrowed time");
2423 		}
2424 		XS_CMD_S_CLEAR(sccb);
2425 		ISPLOCK_2_CAMLOCK(isp);
2426 		xpt_done((union ccb *) sccb);
2427 		CAMLOCK_2_ISPLOCK(isp);
2428 	}
2429 }
2430 
2431 int
2432 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
2433 {
2434 	int bus, rv = 0;
2435 	switch (cmd) {
2436 	case ISPASYNC_NEW_TGT_PARAMS:
2437 	{
2438 #ifdef	CAM_NEW_TRAN_CODE
2439 		struct ccb_trans_settings_scsi *scsi;
2440 		struct ccb_trans_settings_spi *spi;
2441 #endif
2442 		int flags, tgt;
2443 		sdparam *sdp = isp->isp_param;
2444 		struct ccb_trans_settings cts;
2445 		struct cam_path *tmppath;
2446 
2447 		bzero(&cts, sizeof (struct ccb_trans_settings));
2448 
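		/*
		 * The argument packs the bus (channel) in the upper 16 bits
		 * and the target in the lower 16 bits, mirroring the
		 * bus << 16 encoding used for ISPCTL_RESET_DEV above.
		 */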
2449 		tgt = *((int *)arg);
2450 		bus = (tgt >> 16) & 0xffff;
2451 		tgt &= 0xffff;
2452 		sdp += bus;
2453 		ISPLOCK_2_CAMLOCK(isp);
2454 		if (xpt_create_path(&tmppath, NULL,
2455 		    cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
2456 		    tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2457 			CAMLOCK_2_ISPLOCK(isp);
2458 			isp_prt(isp, ISP_LOGWARN,
2459 			    "isp_async cannot make temp path for %d.%d",
2460 			    tgt, bus);
2461 			rv = -1;
2462 			break;
2463 		}
2464 		CAMLOCK_2_ISPLOCK(isp);
2465 		flags = sdp->isp_devparam[tgt].cur_dflags;
2466 #ifdef	CAM_NEW_TRAN_CODE
2467 		cts.type = CTS_TYPE_CURRENT_SETTINGS;
2468 		cts.protocol = PROTO_SCSI;
2469 		cts.transport = XPORT_SPI;
2470 
2471 		scsi = &cts.proto_specific.scsi;
2472 		spi = &cts.xport_specific.spi;
2473 
2474 		if (flags & DPARM_TQING) {
2475 			scsi->valid |= CTS_SCSI_VALID_TQ;
2476 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2477 			spi->flags |= CTS_SPI_FLAGS_TAG_ENB;
2478 		}
2479 
2480 		if (flags & DPARM_DISC) {
2481 			spi->valid |= CTS_SPI_VALID_DISC;
2482 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2483 		}
2484 		spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2485 		if (flags & DPARM_WIDE) {
2486 			spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2487 		} else {
2488 			spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2489 		}
2490 		if (flags & DPARM_SYNC) {
2491 			spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2492 			spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2493 			spi->sync_period = sdp->isp_devparam[tgt].cur_period;
2494 			spi->sync_offset = sdp->isp_devparam[tgt].cur_offset;
2495 		}
2496 #else
2497 		cts.flags = CCB_TRANS_CURRENT_SETTINGS;
2498 		cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2499 		if (flags & DPARM_DISC) {
2500 			cts.flags |= CCB_TRANS_DISC_ENB;
2501 		}
2502 		if (flags & DPARM_TQING) {
2503 			cts.flags |= CCB_TRANS_TAG_ENB;
2504 		}
2505 		cts.valid |= CCB_TRANS_BUS_WIDTH_VALID;
2506 		cts.bus_width = (flags & DPARM_WIDE)?
2507 		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
2508 		cts.sync_period = sdp->isp_devparam[tgt].cur_period;
2509 		cts.sync_offset = sdp->isp_devparam[tgt].cur_offset;
2510 		if (flags & DPARM_SYNC) {
2511 			cts.valid |=
2512 			    CCB_TRANS_SYNC_RATE_VALID |
2513 			    CCB_TRANS_SYNC_OFFSET_VALID;
2514 		}
2515 #endif
2516 		isp_prt(isp, ISP_LOGDEBUG2,
2517 		    "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2518 		    bus, tgt, sdp->isp_devparam[tgt].cur_period,
2519 		    sdp->isp_devparam[tgt].cur_offset, flags);
2520 		xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
2521 		ISPLOCK_2_CAMLOCK(isp);
2522 		xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
2523 		xpt_free_path(tmppath);
2524 		CAMLOCK_2_ISPLOCK(isp);
2525 		break;
2526 	}
2527 	case ISPASYNC_BUS_RESET:
2528 		bus = *((int *)arg);
2529 		isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
2530 		    bus);
2531 		if (bus > 0 && isp->isp_path2) {
2532 			ISPLOCK_2_CAMLOCK(isp);
2533 			xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2534 			CAMLOCK_2_ISPLOCK(isp);
2535 		} else if (isp->isp_path) {
2536 			ISPLOCK_2_CAMLOCK(isp);
2537 			xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2538 			CAMLOCK_2_ISPLOCK(isp);
2539 		}
2540 		break;
2541 	case ISPASYNC_LIP:
2542 		if (isp->isp_path) {
2543 			if (isp->isp_osinfo.simqfrozen == 0) {
2544 				isp_prt(isp, ISP_LOGDEBUG0, "LIP freeze simq");
2545 				ISPLOCK_2_CAMLOCK(isp);
2546 				xpt_freeze_simq(isp->isp_sim, 1);
2547 				CAMLOCK_2_ISPLOCK(isp);
2548 			}
2549 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
2550 		}
2551 		isp_prt(isp, ISP_LOGINFO, "LIP Received");
2552 		break;
2553 	case ISPASYNC_LOOP_RESET:
2554 		if (isp->isp_path) {
2555 			if (isp->isp_osinfo.simqfrozen == 0) {
2556 				isp_prt(isp, ISP_LOGDEBUG0,
2557 				    "Loop Reset freeze simq");
2558 				ISPLOCK_2_CAMLOCK(isp);
2559 				xpt_freeze_simq(isp->isp_sim, 1);
2560 				CAMLOCK_2_ISPLOCK(isp);
2561 			}
2562 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
2563 		}
2564 		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
2565 		break;
2566 	case ISPASYNC_LOOP_DOWN:
2567 		if (isp->isp_path) {
2568 			if (isp->isp_osinfo.simqfrozen == 0) {
2569 				isp_prt(isp, ISP_LOGDEBUG0,
2570 				    "loop down freeze simq");
2571 				ISPLOCK_2_CAMLOCK(isp);
2572 				xpt_freeze_simq(isp->isp_sim, 1);
2573 				CAMLOCK_2_ISPLOCK(isp);
2574 			}
2575 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
2576 		}
2577 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
2578 		break;
2579 	case ISPASYNC_LOOP_UP:
2580 		/*
2581 		 * Now we just note that Loop has come up. We don't
2582 		 * actually do anything because we're waiting for a
2583 		 * Change Notify before activating the FC cleanup
2584 		 * thread to look at the state of the loop again.
2585 		 */
2586 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
2587 		break;
2588 	case ISPASYNC_PROMENADE:
2589 	{
2590 		struct cam_path *tmppath;
2591 		const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
2592 		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
2593 		static const char *roles[4] = {
2594 		    "(none)", "Target", "Initiator", "Target/Initiator"
2595 		};
2596 		fcparam *fcp = isp->isp_param;
2597 		int tgt = *((int *) arg);
2598 		int is_tgt_mask = (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT);
2599 		struct lportdb *lp = &fcp->portdb[tgt];
2600 
2601 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
2602 		    roles[lp->roles & 0x3],
2603 		    (lp->valid)? "Arrived" : "Departed",
2604 		    (u_int32_t) (lp->port_wwn >> 32),
2605 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
2606 		    (u_int32_t) (lp->node_wwn >> 32),
2607 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
2608 
2609 		ISPLOCK_2_CAMLOCK(isp);
2610 		if (xpt_create_path(&tmppath, NULL, cam_sim_path(isp->isp_sim),
2611 		    (target_id_t)tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2612 			CAMLOCK_2_ISPLOCK(isp);
2613 			break;
2614 		}
2615 		/*
2616 		 * Policy: only announce targets.
2617 		 */
2618 		if (lp->roles & is_tgt_mask) {
2619 			if (lp->valid) {
2620 				xpt_async(AC_FOUND_DEVICE, tmppath, NULL);
2621 			} else {
2622 				xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2623 			}
2624 		}
2625 		xpt_free_path(tmppath);
2626 		CAMLOCK_2_ISPLOCK(isp);
2627 		break;
2628 	}
2629 	case ISPASYNC_CHANGE_NOTIFY:
2630 		if (arg == ISPASYNC_CHANGE_PDB) {
2631 			isp_prt(isp, ISP_LOGINFO,
2632 			    "Port Database Changed");
2633 		} else if (arg == ISPASYNC_CHANGE_SNS) {
2634 			isp_prt(isp, ISP_LOGINFO,
2635 			    "Name Server Database Changed");
2636 		}
2637 		cv_signal(&isp->isp_osinfo.kthread_cv);
2638 		break;
2639 	case ISPASYNC_FABRIC_DEV:
2640 	{
2641 		int target, lrange;
2642 		struct lportdb *lp = NULL;
2643 		char *pt;
2644 		sns_ganrsp_t *resp = (sns_ganrsp_t *) arg;
2645 		u_int32_t portid;
2646 		u_int64_t wwpn, wwnn;
2647 		fcparam *fcp = isp->isp_param;
2648 
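		/*
		 * Assemble the 24-bit FC port ID and the 64-bit port and
		 * node world wide names from the big-endian byte arrays in
		 * the name server (SNS) response.
		 */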
2649 		portid =
2650 		    (((u_int32_t) resp->snscb_port_id[0]) << 16) |
2651 		    (((u_int32_t) resp->snscb_port_id[1]) << 8) |
2652 		    (((u_int32_t) resp->snscb_port_id[2]));
2653 
2654 		wwpn =
2655 		    (((u_int64_t)resp->snscb_portname[0]) << 56) |
2656 		    (((u_int64_t)resp->snscb_portname[1]) << 48) |
2657 		    (((u_int64_t)resp->snscb_portname[2]) << 40) |
2658 		    (((u_int64_t)resp->snscb_portname[3]) << 32) |
2659 		    (((u_int64_t)resp->snscb_portname[4]) << 24) |
2660 		    (((u_int64_t)resp->snscb_portname[5]) << 16) |
2661 		    (((u_int64_t)resp->snscb_portname[6]) <<  8) |
2662 		    (((u_int64_t)resp->snscb_portname[7]));
2663 
2664 		wwnn =
2665 		    (((u_int64_t)resp->snscb_nodename[0]) << 56) |
2666 		    (((u_int64_t)resp->snscb_nodename[1]) << 48) |
2667 		    (((u_int64_t)resp->snscb_nodename[2]) << 40) |
2668 		    (((u_int64_t)resp->snscb_nodename[3]) << 32) |
2669 		    (((u_int64_t)resp->snscb_nodename[4]) << 24) |
2670 		    (((u_int64_t)resp->snscb_nodename[5]) << 16) |
2671 		    (((u_int64_t)resp->snscb_nodename[6]) <<  8) |
2672 		    (((u_int64_t)resp->snscb_nodename[7]));
2673 		if (portid == 0 || wwpn == 0) {
2674 			break;
2675 		}
2676 
2677 		switch (resp->snscb_port_type) {
2678 		case 1:
2679 			pt = "   N_Port";
2680 			break;
2681 		case 2:
2682 			pt = "  NL_Port";
2683 			break;
2684 		case 3:
2685 			pt = "F/NL_Port";
2686 			break;
2687 		case 0x7f:
2688 			pt = "  Nx_Port";
2689 			break;
2690 		case 0x81:
2691 			pt = "   F_Port";
2692 			break;
2693 		case 0x82:
2694 			pt = "  FL_Port";
2695 			break;
2696 		case 0x84:
2697 			pt = "   E_Port";
2698 			break;
2699 		default:
2700 			pt = "?";
2701 			break;
2702 		}
2703 		isp_prt(isp, ISP_LOGINFO,
2704 		    "%s @ 0x%x, Node 0x%08x%08x Port 0x%08x%08x",
2705 		    pt, portid, ((u_int32_t) (wwnn >> 32)), ((u_int32_t) wwnn),
2706 		    ((u_int32_t) (wwpn >> 32)), ((u_int32_t) wwpn));
2707 		/*
2708 		 * We're only interested in SCSI_FCP types (for now)
2709 		 */
2710 		if ((resp->snscb_fc4_types[2] & 1) == 0) {
2711 			break;
2712 		}
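		/*
		 * Pick where to start scanning the local port database:
		 * unless we are directly attached to an F_Port, IDs up to
		 * and including FC_SNS_ID are reserved for loop/fabric
		 * services, so start just above them.
		 */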
2713 		if (fcp->isp_topo != TOPO_F_PORT)
2714 			lrange = FC_SNS_ID+1;
2715 		else
2716 			lrange = 0;
2717 		/*
2718 		 * Is it already in our list?
2719 		 */
2720 		for (target = lrange; target < MAX_FC_TARG; target++) {
2721 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2722 				continue;
2723 			}
2724 			lp = &fcp->portdb[target];
2725 			if (lp->port_wwn == wwpn && lp->node_wwn == wwnn) {
2726 				lp->fabric_dev = 1;
2727 				break;
2728 			}
2729 		}
2730 		if (target < MAX_FC_TARG) {
2731 			break;
2732 		}
2733 		for (target = lrange; target < MAX_FC_TARG; target++) {
2734 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2735 				continue;
2736 			}
2737 			lp = &fcp->portdb[target];
2738 			if (lp->port_wwn == 0) {
2739 				break;
2740 			}
2741 		}
2742 		if (target == MAX_FC_TARG) {
2743 			isp_prt(isp, ISP_LOGWARN,
2744 			    "no more space for fabric devices");
2745 			break;
2746 		}
2747 		lp->node_wwn = wwnn;
2748 		lp->port_wwn = wwpn;
2749 		lp->portid = portid;
2750 		lp->fabric_dev = 1;
2751 		break;
2752 	}
2753 #ifdef	ISP_TARGET_MODE
2754 	case ISPASYNC_TARGET_MESSAGE:
2755 	{
2756 		tmd_msg_t *mp = arg;
2757 		isp_prt(isp, ISP_LOGDEBUG2,
2758 		    "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
2759 		    mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt,
2760 		    (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval,
2761 		    mp->nt_msg[0]);
2762 		break;
2763 	}
2764 	case ISPASYNC_TARGET_EVENT:
2765 	{
2766 		tmd_event_t *ep = arg;
2767 		isp_prt(isp, ISP_LOGDEBUG2,
2768 		    "bus %d event code 0x%x", ep->ev_bus, ep->ev_event);
2769 		break;
2770 	}
2771 	case ISPASYNC_TARGET_ACTION:
2772 		switch (((isphdr_t *)arg)->rqs_entry_type) {
2773 		default:
2774 			isp_prt(isp, ISP_LOGWARN,
2775 			   "event 0x%x for unhandled target action",
2776 			    ((isphdr_t *)arg)->rqs_entry_type);
2777 			break;
2778 		case RQSTYPE_ATIO:
2779 			rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
2780 			break;
2781 		case RQSTYPE_ATIO2:
2782 			rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
2783 			break;
2784 		case RQSTYPE_CTIO2:
2785 		case RQSTYPE_CTIO:
2786 			rv = isp_handle_platform_ctio(isp, arg);
2787 			break;
2788 		case RQSTYPE_ENABLE_LUN:
2789 		case RQSTYPE_MODIFY_LUN:
2790 			isp_cv_signal_rqe(isp, ((lun_entry_t *)arg)->le_status);
2791 			break;
2792 		}
2793 		break;
2794 #endif
2795 	case ISPASYNC_FW_CRASH:
2796 	{
2797 		u_int16_t mbox1, mbox6;
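		/*
		 * On a firmware crash, mailbox 1 holds the RISC address of
		 * the fault and, on dual-bus adapters, mailbox 6 identifies
		 * the bus. Log it and reinitialize the card.
		 */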
2798 		mbox1 = ISP_READ(isp, OUTMAILBOX1);
2799 		if (IS_DUALBUS(isp)) {
2800 			mbox6 = ISP_READ(isp, OUTMAILBOX6);
2801 		} else {
2802 			mbox6 = 0;
2803 		}
2804 		isp_prt(isp, ISP_LOGERR,
2805 		    "Internal Firmware Error on bus %d @ RISC Address 0x%x",
2806 		    mbox6, mbox1);
2807 		isp_reinit(isp);
2808 		break;
2809 	}
2810 	default:
2811 		isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
2812 		rv = -1;
2813 		break;
2814 	}
2815 	return (rv);
2816 }
2817 
2818 
2819 /*
2820  * Locks are held before coming here.
2821  */
2822 void
2823 isp_uninit(struct ispsoftc *isp)
2824 {
2825 	ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
2826 	DISABLE_INTS(isp);
2827 }
2828 
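/*
 * Print a driver message, gated on the per-instance debug level: the message
 * is emitted only if its level is ISP_LOGALL or overlaps isp_dblev.
 */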
2829 void
2830 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
2831 {
2832 	va_list ap;
2833 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
2834 		return;
2835 	}
2836 	printf("%s: ", device_get_nameunit(isp->isp_dev));
2837 	va_start(ap, fmt);
2838 	vprintf(fmt, ap);
2839 	va_end(ap);
2840 	printf("\n");
2841 }
2842