xref: /freebsd-12.1/sys/dev/advansys/advlib.c (revision 718cf2cc)
/*-
 * Low level routines for the Advanced Systems Inc. SCSI controller chips
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1996-1997, 1999-2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1996 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_da.h>
#include <cam/scsi/scsi_cd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/advansys/advansys.h>
#include <dev/advansys/advmcode.h>

struct adv_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS	0x01
#define ADV_QUIRK_FIX_ASYN_XFER		0x02
};

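/*
 * Entries are matched in table order by cam_quirkmatch(), so more
 * specific patterns must come before the catch-all entry.  advasync()
 * panics if nothing matches, which is why the table must always end
 * with a full wildcard entry.
 */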
static struct adv_quirk_entry adv_quirk_table[] =
{
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" },
		ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER
	},
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" },
		0
	},
	{
		{
		  T_SEQUENTIAL, SIP_MEDIA_REMOVABLE,
		  "TANDBERG", " TDC 36", "*"
		},
		0
	},
	{
		{ T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" },
		0
	},
	{
		{
		  T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  "*", "*", "*"
		},
		0
	},
	{
		{
		  T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  "*", "*", "*"
		},
		0
	},
	{
		/* Default quirk entry */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		ADV_QUIRK_FIX_ASYN_XFER,
	}
};

/*
 * Allowable periods in ns
 */
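/*
 * The index of the entry chosen from one of these tables forms the high
 * nibble of the SDTR data byte programmed into the chip; the low nibble
 * carries the REQ/ACK offset (see adv_period_offset_to_sdtr()).  For
 * example, on a non-Ultra board, SDTR data 0x32 selects
 * adv_sdtr_period_tbl[3] (40ns) with an offset of 2.
 */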
static u_int8_t adv_sdtr_period_tbl[] =
{
	25,
	30,
	35,
	40,
	50,
	60,
	70,
	85
};

static u_int8_t adv_sdtr_period_tbl_ultra[] =
{
	12,
	19,
	25,
	32,
	38,
	44,
	50,
	57,
	63,
	69,
	75,
	82,
	88,
	94,
	100,
	107
};

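/*
 * Wire format of an extended SCSI message (MSG_EXTENDED) as exchanged
 * with the microcode during SDTR/WDTR negotiation.  The trailing "res"
 * byte appears to exist to keep the structure an even number of bytes
 * long, since it is copied to and from LRAM a word at a time.
 */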
struct ext_msg {
	u_int8_t msg_type;
	u_int8_t msg_len;
	u_int8_t msg_req;
	union {
		struct {
			u_int8_t sdtr_xfer_period;
			u_int8_t sdtr_req_ack_offset;
		} sdtr;
		struct {
			u_int8_t wdtr_width;
		} wdtr;
		struct {
			u_int8_t mdp[4];
		} mdp;
	} u_ext_msg;
	u_int8_t res;
};

#define	xfer_period	u_ext_msg.sdtr.sdtr_xfer_period
#define	req_ack_offset	u_ext_msg.sdtr.sdtr_req_ack_offset
#define	wdtr_width	u_ext_msg.wdtr.wdtr_width
#define	mdp_b3		u_ext_msg.mdp.mdp[0]
#define	mdp_b2		u_ext_msg.mdp.mdp[1]
#define	mdp_b1		u_ext_msg.mdp.mdp[2]
#define	mdp_b0		u_ext_msg.mdp.mdp[3]

/*
 * Some of the early PCI adapters have problems with
 * async transfers.  Instead use an offset of 1.
 */
#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41

/* LRAM routines */
static void	 adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
					u_int16_t *buffer, int count);
static void	 adv_write_lram_16_multi(struct adv_softc *adv,
					 u_int16_t s_addr, u_int16_t *buffer,
					 int count);
static void	 adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
				  u_int16_t set_value, int count);
static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr,
				  int count);

static int	 adv_write_and_verify_lram_16(struct adv_softc *adv,
					      u_int16_t addr, u_int16_t value);
static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr);


static void	 adv_write_lram_32(struct adv_softc *adv, u_int16_t addr,
				   u_int32_t value);
static void	 adv_write_lram_32_multi(struct adv_softc *adv,
					 u_int16_t s_addr, u_int32_t *buffer,
					 int count);

/* EEPROM routines */
static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr);
static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr,
				     u_int16_t value);
static int	 adv_write_eeprom_cmd_reg(struct adv_softc *adv,
					  u_int8_t cmd_reg);
static int	 adv_set_eeprom_config_once(struct adv_softc *adv,
					    struct adv_eeprom_config *eeconfig);

/* Initialization */
static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
				    u_int16_t *mcode_buf, u_int16_t mcode_size);

static void	 adv_reinit_lram(struct adv_softc *adv);
static void	 adv_init_lram(struct adv_softc *adv);
static int	 adv_init_microcode_var(struct adv_softc *adv);
static void	 adv_init_qlink_var(struct adv_softc *adv);

/* Interrupts */
static void	 adv_disable_interrupt(struct adv_softc *adv);
static void	 adv_enable_interrupt(struct adv_softc *adv);
static void	 adv_toggle_irq_act(struct adv_softc *adv);

/* Chip Control */
static int	 adv_host_req_chip_halt(struct adv_softc *adv);
static void	 adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code);
#if 0
static u_int8_t  adv_get_chip_scsi_ctrl(struct adv_softc *adv);
#endif

/* Queue handling and execution */
static __inline int
		 adv_sgcount_to_qcount(int sgcount);

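/*
 * Convert an S/G element count into the number of queue blocks the
 * request consumes: one block for the command itself plus enough S/G
 * list queues for the remaining sgcount - 1 elements, holding
 * ADV_SG_LIST_PER_Q elements each.  For example, if ADV_SG_LIST_PER_Q
 * were 7, a 15 element S/G list would need ceil(14 / 7) = 2 list
 * queues plus the head queue, for 3 blocks total.
 */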
static __inline int
adv_sgcount_to_qcount(int sgcount)
{
	int	n_sg_list_qs;

	n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
	if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
		n_sg_list_qs++;
	return (n_sg_list_qs + 1);
}

#if BYTE_ORDER == BIG_ENDIAN
static void	 adv_adj_endian_qdone_info(struct adv_q_done_info *);
static void	 adv_adj_scsiq_endian(struct adv_scsi_q *);
#endif
static void	 adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
				u_int16_t *inbuf, int words);
static u_int	 adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs);
static u_int8_t  adv_alloc_free_queues(struct adv_softc *adv,
				       u_int8_t free_q_head, u_int8_t n_free_q);
static u_int8_t  adv_alloc_free_queue(struct adv_softc *adv,
				      u_int8_t free_q_head);
static int	 adv_send_scsi_queue(struct adv_softc *adv,
				     struct adv_scsi_q *scsiq,
				     u_int8_t n_q_required);
static void	 adv_put_ready_sg_list_queue(struct adv_softc *adv,
					     struct adv_scsi_q *scsiq,
					     u_int q_no);
static void	 adv_put_ready_queue(struct adv_softc *adv,
				     struct adv_scsi_q *scsiq, u_int q_no);
static void	 adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
			       u_int16_t *buffer, int words);

/* Messages */
static void	 adv_handle_extmsg_in(struct adv_softc *adv,
				      u_int16_t halt_q_addr, u_int8_t q_cntl,
				      target_bit_vector target_id,
				      int tid);
static void	 adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
				 u_int8_t sdtr_offset);
static void	 adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id,
					u_int8_t sdtr_data);


/* Exported functions first */

void
advasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct adv_softc *adv;

	adv = (struct adv_softc *)callback_arg;
	mtx_assert(&adv->lock, MA_OWNED);
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		target_bit_vector target_mask;
		int num_entries;
		caddr_t match;
		struct adv_quirk_entry *entry;
		struct adv_target_transinfo* tinfo;

		cgd = (struct ccb_getdev *)arg;

		target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id);

		num_entries = nitems(adv_quirk_table);
		match = cam_quirkmatch((caddr_t)&cgd->inq_data,
				       (caddr_t)adv_quirk_table,
				       num_entries, sizeof(*adv_quirk_table),
				       scsi_inquiry_match);

		if (match == NULL)
			panic("advasync: device didn't match wildcard entry!!");

		entry = (struct adv_quirk_entry *)match;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS)!=0)
				adv->fix_asyn_xfer_always |= target_mask;
			else
				adv->fix_asyn_xfer_always &= ~target_mask;
			/*
			 * We start out life with all bits set and clear them
			 * after we've determined that the fix isn't necessary.
			 * It may well be that we've already cleared a target
			 * before the full inquiry session completes, so don't
			 * gratuitously set a target bit even if it has this
			 * quirk.  But, if the quirk exonerates a device, clear
			 * the bit now.
			 */
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0)
				adv->fix_asyn_xfer &= ~target_mask;
		}
		/*
		 * Reset our sync settings now that we've determined
		 * what quirks are in effect for the device.
		 */
		tinfo = &adv->tinfo[cgd->ccb_h.target_id];
		adv_set_syncrate(adv, cgd->ccb_h.path,
				 cgd->ccb_h.target_id,
				 tinfo->current.period,
				 tinfo->current.offset,
				 ADV_TRANS_CUR);
		break;
	}
	case AC_LOST_DEVICE:
	{
		u_int target_mask;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			target_mask = 0x01 << xpt_path_target_id(path);
			adv->fix_asyn_xfer |= target_mask;
		}

		/*
		 * Revert to async transfers
		 * for the next device.
		 */
		adv_set_syncrate(adv, /*path*/NULL,
				 xpt_path_target_id(path),
				 /*period*/0,
				 /*offset*/0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}
	default:
		break;
	}
}

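/*
 * Select one of the chip's register banks via the chip control
 * register: bank 1 sets ADV_CC_BANK_ONE alone, while bank 2 is reached
 * by combining ADV_CC_DIAG with ADV_CC_BANK_ONE.
 */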
void
adv_set_bank(struct adv_softc *adv, u_int8_t bank)
{
	u_int8_t control;

	/*
	 * Start out with the bank reset to 0
	 */
	control = ADV_INB(adv, ADV_CHIP_CTRL)
		  &  (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
			| ADV_CC_DIAG | ADV_CC_SCSI_RESET
			| ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
	if (bank == 1) {
		control |= ADV_CC_BANK_ONE;
	} else if (bank == 2) {
		control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
	}
	ADV_OUTB(adv, ADV_CHIP_CTRL, control);
}

u_int8_t
adv_read_lram_8(struct adv_softc *adv, u_int16_t addr)
{
	u_int8_t   byte_data;
	u_int16_t  word_data;

	/*
	 * LRAM is accessed on 16bit boundaries.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
	word_data = ADV_INW(adv, ADV_LRAM_DATA);
	if (addr & 1) {
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)(word_data & 0xFF);
#else
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#endif
	} else {
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#else
		byte_data = (u_int8_t)(word_data & 0xFF);
#endif
	}
	return (byte_data);
}

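/*
 * LRAM is only word addressable, so byte writes are performed as a
 * read-modify-write of the containing 16-bit word.
 */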
void
adv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value)
{
	u_int16_t word_data;

	word_data = adv_read_lram_16(adv, addr & 0xFFFE);
	if (addr & 1) {
		word_data &= 0x00FF;
		word_data |= (((u_int8_t)value << 8) & 0xFF00);
	} else {
		word_data &= 0xFF00;
		word_data |= ((u_int8_t)value & 0x00FF);
	}
	adv_write_lram_16(adv, addr & 0xFFFE, word_data);
}


u_int16_t
adv_read_lram_16(struct adv_softc *adv, u_int16_t addr)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	return (ADV_INW(adv, ADV_LRAM_DATA));
}

void
adv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
}

/*
 * Determine if there is a board at "iobase" by looking
 * for the AdvanSys signatures.  Return 1 if a board is
 * found, 0 otherwise.
 */
int
adv_find_signature(struct resource *res)
{
	u_int16_t signature;

	if (bus_read_1(res, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) {
		signature = bus_read_2(res, ADV_SIGNATURE_WORD);
		if ((signature == ADV_1000_ID0W)
		 || (signature == ADV_1000_ID0W_FIX))
			return (1);
	}
	return (0);
}

void
adv_lib_init(struct adv_softc *adv)
{
	if ((adv->type & ADV_ULTRA) != 0) {
		adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra;
		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra);
	} else {
		adv->sdtr_period_tbl = adv_sdtr_period_tbl;
		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl);
	}
}

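/*
 * Read the EEPROM configuration into *eeprom_config and return the
 * 16-bit sum of the configuration words.  The stored checksum word is
 * read last into the final buffer slot without being added to the sum,
 * so the caller can compare the two.
 */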
u_int16_t
adv_get_eeprom_config(struct adv_softc *adv,
		      struct adv_eeprom_config *eeprom_config)
{
	u_int16_t	sum;
	u_int16_t	*wbuf;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;
	u_int8_t	s_addr;

	wbuf = (u_int16_t *)eeprom_config;
	sum = 0;

	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
	}

	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
#ifdef ADV_DEBUG_EEPROM
		printf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
#endif
	}
	*wbuf = adv_read_eeprom_16(adv, s_addr);
	return (sum);
}

int
adv_set_eeprom_config(struct adv_softc *adv,
		      struct adv_eeprom_config *eeprom_config)
{
	int	retry;

	retry = 0;
	while (1) {
		if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
			break;
		}
		if (++retry > ADV_EEPROM_MAX_RETRY) {
			break;
		}
	}
	return (retry > ADV_EEPROM_MAX_RETRY);
}

int
adv_reset_chip(struct adv_softc *adv, int reset_bus)
{
	adv_stop_chip(adv);
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT
				     | (reset_bus ? ADV_CC_SCSI_RESET : 0));
	DELAY(60);

	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	adv_set_chip_ih(adv, ADV_INS_HALT);

	if (reset_bus)
		ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);

	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
	if (reset_bus)
		DELAY(200 * 1000);

	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_CLR_SCSI_RESET_INT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
	return (adv_is_chip_halted(adv));
}

int
adv_test_external_lram(struct adv_softc *adv)
{
	u_int16_t	q_addr;
	u_int16_t	saved_value;
	int		success;

	success = 0;

	q_addr = ADV_QNO_TO_QADDR(241);
	saved_value = adv_read_lram_16(adv, q_addr);
	if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
		success = 1;
		adv_write_lram_16(adv, q_addr, saved_value);
	}
	return (success);
}


int
adv_init_lram_and_mcode(struct adv_softc *adv)
{
	u_int32_t	retval;

	adv_disable_interrupt(adv);

	adv_init_lram(adv);

	retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode,
				    adv_mcode_size);
	if (retval != adv_mcode_chksum) {
		device_printf(adv->dev,
		    "Microcode download failed checksum!\n");
		return (1);
	}

	if (adv_init_microcode_var(adv) != 0)
		return (1);

	adv_enable_interrupt(adv);
	return (0);
}

u_int8_t
adv_get_chip_irq(struct adv_softc *adv)
{
	u_int16_t	cfg_lsw;
	u_int8_t	chip_irq;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);

	if ((adv->type & ADV_VL) != 0) {
		chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07));
		if ((chip_irq == 0) ||
		    (chip_irq == 4) ||
		    (chip_irq == 7)) {
			return (0);
		}
		return (chip_irq + (ADV_MIN_IRQ_NO - 1));
	}
	chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03));
	if (chip_irq == 3)
		chip_irq += 2;
	return (chip_irq + ADV_MIN_IRQ_NO);
}

u_int8_t
adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no)
{
	u_int16_t	cfg_lsw;

	if ((adv->type & ADV_VL) != 0) {
		if (irq_no != 0) {
			if ((irq_no < ADV_MIN_IRQ_NO)
			 || (irq_no > ADV_MAX_IRQ_NO)) {
				irq_no = 0;
			} else {
				irq_no -= ADV_MIN_IRQ_NO - 1;
			}
		}
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;
		cfg_lsw |= 0x0010;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);

		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
		cfg_lsw |= (irq_no & 0x07) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);
	} else if ((adv->type & ADV_ISA) != 0) {
		if (irq_no == 15)
			irq_no -= 2;
		irq_no -= ADV_MIN_IRQ_NO;
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
		cfg_lsw |= (irq_no & 0x03) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
	}
	return (adv_get_chip_irq(adv));
}

void
adv_set_chip_scsiid(struct adv_softc *adv, int new_id)
{
	u_int16_t cfg_lsw;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
	if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id)
		return;
	cfg_lsw &= ~ADV_CFG_LSW_SCSIID;
	cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT;
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
}

int
adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		       u_int32_t datalen)
{
	struct		adv_target_transinfo* tinfo;
	u_int32_t	*p_data_addr;
	u_int32_t	*p_data_bcount;
	int		disable_syn_offset_one_fix;
	int		retval;
	u_int		n_q_required;
	u_int32_t	addr;
	u_int8_t	sg_entry_cnt;
	u_int8_t	target_ix;
	u_int8_t	sg_entry_cnt_minus_one;
	u_int8_t	tid_no;

	if (!dumping)
		mtx_assert(&adv->lock, MA_OWNED);
	scsiq->q1.q_no = 0;
	retval = 1;  /* Default to error case */
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	tinfo = &adv->tinfo[tid_no];

	if (scsiq->cdbptr[0] == REQUEST_SENSE) {
		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
		}
	}

	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
		sg_entry_cnt = scsiq->sg_head->entry_cnt;
		sg_entry_cnt_minus_one = sg_entry_cnt - 1;

#ifdef DIAGNOSTIC
		if (sg_entry_cnt <= 1)
			panic("adv_execute_scsi_queue: Queue "
			      "with QC_SG_HEAD set but %d segs.", sg_entry_cnt);

		if (sg_entry_cnt > ADV_MAX_SG_LIST)
			panic("adv_execute_scsi_queue: "
			      "Queue with too many segs.");

		if ((adv->type & (ADV_ISA | ADV_VL)) != 0) {
			int i;

			for (i = 0; i < sg_entry_cnt_minus_one; i++) {
				addr = scsiq->sg_head->sg_list[i].addr +
				       scsiq->sg_head->sg_list[i].bytes;

				if ((addr & 0x0003) != 0)
					panic("adv_execute_scsi_queue: SG "
					      "with odd address or byte count");
			}
		}
#endif
		p_data_addr =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
		p_data_bcount =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;

		n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
		scsiq->sg_head->queue_cnt = n_q_required - 1;
	} else {
		p_data_addr = &scsiq->q1.data_addr;
		p_data_bcount = &scsiq->q1.data_cnt;
		n_q_required = 1;
	}

	disable_syn_offset_one_fix = FALSE;

	if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0
	 && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) {

		if (datalen != 0) {
			if (datalen < 512) {
				disable_syn_offset_one_fix = TRUE;
			} else {
				if (scsiq->cdbptr[0] == INQUIRY
				 || scsiq->cdbptr[0] == REQUEST_SENSE
				 || scsiq->cdbptr[0] == READ_CAPACITY
				 || scsiq->cdbptr[0] == MODE_SELECT_6
				 || scsiq->cdbptr[0] == MODE_SENSE_6
				 || scsiq->cdbptr[0] == MODE_SENSE_10
				 || scsiq->cdbptr[0] == MODE_SELECT_10
				 || scsiq->cdbptr[0] == READ_TOC) {
					disable_syn_offset_one_fix = TRUE;
				}
			}
		}
	}

	if (disable_syn_offset_one_fix) {
		scsiq->q2.tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
		scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX
				     | ADV_TAG_FLAG_DISABLE_DISCONNECT);
	}

	if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0
	 && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) {
		u_int8_t extra_bytes;

		addr = *p_data_addr + *p_data_bcount;
		extra_bytes = addr & 0x0003;
		if (extra_bytes != 0
		 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0
		  || (scsiq->q1.data_cnt & 0x01FF) == 0)) {
			scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES;
			scsiq->q1.extra_bytes = extra_bytes;
			*p_data_bcount -= extra_bytes;
		}
	}

	if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
	 || ((scsiq->q1.cntl & QC_URGENT) != 0))
		retval = adv_send_scsi_queue(adv, scsiq, n_q_required);

	return (retval);
}


u_int8_t
adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr,
		    struct adv_q_done_info *scsiq, u_int32_t max_dma_count)
{
	u_int16_t val;
	u_int8_t  sg_queue_cnt;

	adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
		       (u_int16_t *)scsiq,
		       (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_endian_qdone_info(scsiq);
#endif

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
	scsiq->q_status = val & 0xFF;
	scsiq->q_no = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
	scsiq->cntl = val & 0xFF;
	sg_queue_cnt = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_SENSE_LEN);
	scsiq->sense_len = val & 0xFF;
	scsiq->extra_bytes = (val >> 8) & 0xFF;

	/*
	 * Due to a bug in accessing LRAM on the 940UA, the residual
	 * is split into separate high and low 16bit quantities.
	 */
	scsiq->remain_bytes =
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
	scsiq->remain_bytes |=
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_W_ALT_DC1) << 16;

	/*
	 * XXX Is this just a safeguard or will the counter really
	 * have bogus upper bits?
	 */
	scsiq->remain_bytes &= max_dma_count;

	return (sg_queue_cnt);
}

int
adv_start_chip(struct adv_softc *adv)
{
	ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
		return (0);
	return (1);
}

int
adv_stop_execution(struct adv_softc *adv)
{
	int count;

	count = 0;
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B,
				 ADV_STOP_REQ_RISC_STOP);
		do {
			if (adv_read_lram_8(adv, ADV_STOP_CODE_B) &
				ADV_STOP_ACK_RISC_STOP) {
				return (1);
			}
			DELAY(1000);
		} while (count++ < 20);
	}
	return (0);
}

int
adv_is_chip_halted(struct adv_softc *adv)
{
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
		if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
			return (1);
		}
	}
	return (0);
}

/*
 * XXX The numeric constants and the loops in this routine
 * need to be documented.
 */
void
adv_ack_interrupt(struct adv_softc *adv)
{
	u_int8_t	host_flag;
	u_int8_t	risc_flag;
	int		loop;

	loop = 0;
	do {
		risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
		if (loop++ > 0x7FFF) {
			break;
		}
	} while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);

	host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
			 host_flag | ADV_HOST_FLAG_ACK_INT);

	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
	loop = 0;
	while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
		ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
		if (loop++ > 3) {
			break;
		}
	}

	adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
}

932 
933 /*
934  * Handle all conditions that may halt the chip waiting
935  * for us to intervene.
936  */
937 void
adv_isr_chip_halted(struct adv_softc * adv)938 adv_isr_chip_halted(struct adv_softc *adv)
939 {
940 	u_int16_t	  int_halt_code;
941 	u_int16_t	  halt_q_addr;
942 	target_bit_vector target_mask;
943 	target_bit_vector scsi_busy;
944 	u_int8_t	  halt_qp;
945 	u_int8_t	  target_ix;
946 	u_int8_t	  q_cntl;
947 	u_int8_t	  tid_no;
948 
949 	if (!dumping)
950 		mtx_assert(&adv->lock, MA_OWNED);
951 	int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
952 	halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
953 	halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
954 	target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
955 	q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
956 	tid_no = ADV_TIX_TO_TID(target_ix);
957 	target_mask = ADV_TID_TO_TARGET_MASK(tid_no);
958 	if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) {
959 		/*
960 		 * Temporarily disable the async fix by removing
961 		 * this target from the list of affected targets,
962 		 * setting our async rate, and then putting us
963 		 * back into the mask.
964 		 */
965 		adv->fix_asyn_xfer &= ~target_mask;
966 		adv_set_syncrate(adv, /*struct cam_path */NULL,
967 				 tid_no, /*period*/0, /*offset*/0,
968 				 ADV_TRANS_ACTIVE);
969 		adv->fix_asyn_xfer |= target_mask;
970 	} else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) {
971 		adv_set_syncrate(adv, /*struct cam_path */NULL,
972 				 tid_no, /*period*/0, /*offset*/0,
973 				 ADV_TRANS_ACTIVE);
974 	} else if (int_halt_code == ADV_HALT_EXTMSG_IN) {
975 		adv_handle_extmsg_in(adv, halt_q_addr, q_cntl,
976 				     target_mask, tid_no);
977 	} else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
978 		struct	  adv_target_transinfo* tinfo;
979 		struct	  adv_ccb_info *cinfo;
980 		union	  ccb *ccb;
981 		u_int32_t cinfo_index;
982 		u_int8_t  tag_code;
983 		u_int8_t  q_status;
984 
985 		tinfo = &adv->tinfo[tid_no];
986 		q_cntl |= QC_REQ_SENSE;
987 
988 		/* Renegotiate if appropriate. */
989 		adv_set_syncrate(adv, /*struct cam_path */NULL,
990 				 tid_no, /*period*/0, /*offset*/0,
991 				 ADV_TRANS_CUR);
992 		if (tinfo->current.period != tinfo->goal.period) {
993 			adv_msgout_sdtr(adv, tinfo->goal.period,
994 					tinfo->goal.offset);
995 			q_cntl |= QC_MSG_OUT;
996 		}
997 		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
998 
999 		/* Don't tag request sense commands */
1000 		tag_code = adv_read_lram_8(adv,
1001 					   halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
1002 		tag_code &=
1003 		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
1004 
1005 		if ((adv->fix_asyn_xfer & target_mask) != 0
1006 		 && (adv->fix_asyn_xfer_always & target_mask) == 0) {
1007 			tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT
1008 				 | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
1009 		}
1010 		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE,
1011 				 tag_code);
1012 		q_status = adv_read_lram_8(adv,
1013 					   halt_q_addr + ADV_SCSIQ_B_STATUS);
1014 		q_status |= (QS_READY | QS_BUSY);
1015 		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS,
1016 				 q_status);
1017 		/*
1018 		 * Freeze the devq until we can handle the sense condition.
1019 		 */
1020 		cinfo_index =
1021 		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
1022 		cinfo = &adv->ccb_infos[cinfo_index];
1023 		ccb = adv->ccb_infos[cinfo_index].ccb;
1024 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
1025 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
1026 		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
1027 			      /*ccb*/NULL, CAM_REQUEUE_REQ,
1028 			      /*queued_only*/TRUE);
1029 		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
1030 		scsi_busy &= ~target_mask;
1031 		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
1032 		/*
1033 		 * Ensure we have enough time to actually
1034 		 * retrieve the sense.
1035 		 */
1036 		callout_reset(&cinfo->timer, 5 * hz, adv_timeout, ccb);
1037 	} else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
1038 		struct	ext_msg out_msg;
1039 
1040 		adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
1041 				       (u_int16_t *) &out_msg,
1042 				       sizeof(out_msg)/2);
1043 
1044 		if ((out_msg.msg_type == MSG_EXTENDED)
1045 		 && (out_msg.msg_len == MSG_EXT_SDTR_LEN)
1046 		 && (out_msg.msg_req == MSG_EXT_SDTR)) {
1047 
1048 			/* Revert to Async */
1049 			adv_set_syncrate(adv, /*struct cam_path */NULL,
1050 					 tid_no, /*period*/0, /*offset*/0,
1051 					 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
1052 		}
1053 		q_cntl &= ~QC_MSG_OUT;
1054 		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
1055 	} else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
1056 		union ccb *ccb;
1057 		u_int32_t cinfo_index;
1058 
1059 		adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_SCSI_STATUS);
1060 		cinfo_index =
1061 		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
1062 		ccb = adv->ccb_infos[cinfo_index].ccb;
1063 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
1064 		ccb->ccb_h.status |= CAM_DEV_QFRZN|CAM_SCSI_STATUS_ERROR;
1065 		ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL;
1066 		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
1067 			      /*ccb*/NULL, CAM_REQUEUE_REQ,
1068 			      /*queued_only*/TRUE);
1069 		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
1070 		scsi_busy &= ~target_mask;
1071 		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
1072 	} else {
1073 		printf("Unhandled Halt Code %x\n", int_halt_code);
1074 	}
1075 	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
1076 }
1077 
1078 void
adv_sdtr_to_period_offset(struct adv_softc * adv,u_int8_t sync_data,u_int8_t * period,u_int8_t * offset,int tid)1079 adv_sdtr_to_period_offset(struct adv_softc *adv,
1080 			  u_int8_t sync_data, u_int8_t *period,
1081 			  u_int8_t *offset, int tid)
1082 {
1083 	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)
1084 	 && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) {
1085 		*period = *offset = 0;
1086 	} else {
1087 		*period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)];
1088 		*offset = sync_data & 0xF;
1089 	}
1090 }
1091 
1092 void
adv_set_syncrate(struct adv_softc * adv,struct cam_path * path,u_int tid,u_int period,u_int offset,u_int type)1093 adv_set_syncrate(struct adv_softc *adv, struct cam_path *path,
1094 		 u_int tid, u_int period, u_int offset, u_int type)
1095 {
1096 	struct adv_target_transinfo* tinfo;
1097 	u_int old_period;
1098 	u_int old_offset;
1099 	u_int8_t sdtr_data;
1100 
1101 	mtx_assert(&adv->lock, MA_OWNED);
1102 	tinfo = &adv->tinfo[tid];
1103 
1104 	/* Filter our input */
1105 	sdtr_data = adv_period_offset_to_sdtr(adv, &period,
1106 					      &offset, tid);
1107 
1108 	old_period = tinfo->current.period;
1109 	old_offset = tinfo->current.offset;
1110 
1111 	if ((type & ADV_TRANS_CUR) != 0
1112 	 && ((old_period != period || old_offset != offset)
1113 	  || period == 0 || offset == 0) /*Changes in asyn fix settings*/) {
1114 		int halted;
1115 
1116 		halted = adv_is_chip_halted(adv);
1117 		if (halted == 0)
1118 			/* Must halt the chip first */
1119 			adv_host_req_chip_halt(adv);
1120 
1121 		/* Update current hardware settings */
1122 		adv_set_sdtr_reg_at_id(adv, tid, sdtr_data);
1123 
1124 		/*
1125 		 * If a target can run in sync mode, we don't need
1126 		 * to check it for sync problems.
1127 		 */
1128 		if (offset != 0)
1129 			adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid);
1130 
1131 		if (halted == 0)
1132 			/* Start the chip again */
1133 			adv_start_chip(adv);
1134 
1135 		tinfo->current.period = period;
1136 		tinfo->current.offset = offset;
1137 
1138 		if (path != NULL) {
1139 			/*
1140 			 * Tell the SCSI layer about the
1141 			 * new transfer parameters.
1142 			 */
1143 			struct	ccb_trans_settings neg;
1144 			memset(&neg, 0, sizeof (neg));
1145 			struct ccb_trans_settings_spi *spi =
1146 			    &neg.xport_specific.spi;
1147 
1148 			neg.protocol = PROTO_SCSI;
1149 			neg.protocol_version = SCSI_REV_2;
1150 			neg.transport = XPORT_SPI;
1151 			neg.transport_version = 2;
1152 
1153 			spi->sync_offset = offset;
1154 			spi->sync_period = period;
1155 			spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
1156 			spi->valid |= CTS_SPI_VALID_SYNC_RATE;
1157 			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
1158 			xpt_async(AC_TRANSFER_NEG, path, &neg);
1159 		}
1160 	}
1161 
1162 	if ((type & ADV_TRANS_GOAL) != 0) {
1163 		tinfo->goal.period = period;
1164 		tinfo->goal.offset = offset;
1165 	}
1166 
1167 	if ((type & ADV_TRANS_USER) != 0) {
1168 		tinfo->user.period = period;
1169 		tinfo->user.offset = offset;
1170 	}
1171 }
1172 
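/*
 * Convert a period/offset pair into the chip's SDTR data byte: the high
 * nibble is the index of the closest usable entry in the period table
 * and the low nibble is the REQ/ACK offset.  The period and offset are
 * filtered in place to the values actually achievable; if no table
 * entry is slow enough, the transfer falls back to async.
 */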
u_int8_t
adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period,
			  u_int *offset, int tid)
{
	u_int i;
	u_int dummy_offset;
	u_int dummy_period;

	if (offset == NULL) {
		dummy_offset = 0;
		offset = &dummy_offset;
	}

	if (period == NULL) {
		dummy_period = 0;
		period = &dummy_period;
	}

	*offset = MIN(ADV_SYN_MAX_OFFSET, *offset);
	if (*period != 0 && *offset != 0) {
		for (i = 0; i < adv->sdtr_period_tbl_size; i++) {
			if (*period <= adv->sdtr_period_tbl[i]) {
				/*
				 * When responding to a target that requests
				 * sync, the requested rate may fall between
				 * two rates that we can output, but still be
				 * a rate that we can receive.  Because of this,
				 * we want to respond to the target with
				 * the same rate that it sent to us even
				 * if the period we use to send data to it
				 * is lower.  Only lower the response period
				 * if we must.
				 */
				if (i == 0 /* Our maximum rate */)
					*period = adv->sdtr_period_tbl[0];
				return ((i << 4) | *offset);
			}
		}
	}

	/* Must go async */
	*period = 0;
	*offset = 0;
	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid))
		return (ASYN_SDTR_DATA_FIX_PCI_REV_AB);
	return (0);
}

/* Internal Routines */

static void
adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
		       u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
}

static void
adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
}

static void
adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
		 u_int16_t set_value, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	bus_set_multi_2(adv->res, adv->reg_off + ADV_LRAM_DATA,
	    set_value, count);
}

static u_int32_t
adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count)
{
	u_int32_t	sum;
	int		i;

	sum = 0;
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < count; i++)
		sum += ADV_INW(adv, ADV_LRAM_DATA);
	return (sum);
}

static int
adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr,
			     u_int16_t value)
{
	int	retval;

	retval = 0;
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
	DELAY(10000);
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	if (value != ADV_INW(adv, ADV_LRAM_DATA))
		retval = 1;
	return (retval);
}

static u_int32_t
adv_read_lram_32(struct adv_softc *adv, u_int16_t addr)
{
	u_int16_t           val_low, val_high;

	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
#else
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
#endif

	return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
}

static void
adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
#else
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
#endif
}

static void
adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int32_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2);
}

static u_int16_t
adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr)
{
	u_int16_t read_wval;
	u_int8_t  cmd_reg;

	adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
	DELAY(1000);
	cmd_reg = addr | ADV_EEPROM_CMD_READ;
	adv_write_eeprom_cmd_reg(adv, cmd_reg);
	DELAY(1000);
	read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
	DELAY(1000);
	return (read_wval);
}

static u_int16_t
adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value)
{
	u_int16_t	read_value;

	read_value = adv_read_eeprom_16(adv, addr);
	if (read_value != value) {
		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);
		DELAY(1000);

		ADV_OUTW(adv, ADV_EEPROM_DATA, value);
		DELAY(1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);
		DELAY(20 * 1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
		DELAY(1000);
		read_value = adv_read_eeprom_16(adv, addr);
	}
	return (read_value);
}

static int
adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg)
{
	u_int8_t read_back;
	int	 retry;

	retry = 0;
	while (1) {
		ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
		DELAY(1000);
		read_back = ADV_INB(adv, ADV_EEPROM_CMD);
		if (read_back == cmd_reg) {
			return (1);
		}
		if (retry++ > ADV_EEPROM_MAX_RETRY) {
			return (0);
		}
	}
}

static int
adv_set_eeprom_config_once(struct adv_softc *adv,
			   struct adv_eeprom_config *eeprom_config)
{
	int		n_error;
	u_int16_t	*wbuf;
	u_int16_t	sum;
	u_int8_t	s_addr;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;

	wbuf = (u_int16_t *)eeprom_config;
	n_error = 0;
	sum = 0;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	*wbuf = sum;
	if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
		n_error++;
	}
	wbuf = (u_int16_t *)eeprom_config;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	return (n_error);
}

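/*
 * Copy the microcode image into LRAM and return the 32-bit sum of the
 * words just loaded; adv_init_lram_and_mcode() compares this value
 * against the precomputed image checksum.  A separate 16-bit checksum
 * over the code section is stored at ADVV_MCODE_CHKSUM_W along with
 * the code size.
 */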
static u_int32_t
adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
		   u_int16_t *mcode_buf, u_int16_t mcode_size)
{
	u_int32_t chksum;
	u_int16_t mcode_lram_size;
	u_int16_t mcode_chksum;

	mcode_lram_size = mcode_size >> 1;
	/* XXX Why zero the memory just before you write the whole thing?? */
	adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);
	adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);

	chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
	mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
						   ((mcode_size - s_addr
						     - ADV_CODE_SEC_BEG) >> 1));
	adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
	adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
	return (chksum);
}

static void
adv_reinit_lram(struct adv_softc *adv)
{
	adv_init_lram(adv);
	adv_init_qlink_var(adv);
}

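/*
 * Lay out the queue blocks in LRAM: clear the region, then chain the
 * active blocks together through their forward/backward pointers.  The
 * extra blocks past max_openings (used by the microcode as busy and
 * disconnect queue heads) are simply linked to themselves.
 */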
static void
adv_init_lram(struct adv_softc *adv)
{
	u_int8_t  i;
	u_int16_t s_addr;

	adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
			 (((adv->max_openings + 2 + 1) * 64) >> 1));

	i = ADV_MIN_ACTIVE_QNO;
	s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;

	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	i++;
	s_addr += ADV_QBLK_SIZE;
	for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}

	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
	i++;
	s_addr += ADV_QBLK_SIZE;

	for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}
}

static int
adv_init_microcode_var(struct adv_softc *adv)
{
	int	 i;

	for (i = 0; i <= ADV_MAX_TID; i++) {

		/* Start out async all around */
		adv_set_syncrate(adv, /*path*/NULL,
				 i, 0, 0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}

	adv_init_qlink_var(adv);

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);

	adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase);

	adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE);

	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		device_printf(adv->dev,
		    "Unable to set program counter. Aborting.\n");
		return (1);
	}
	return (0);
}

static void
adv_init_qlink_var(struct adv_softc *adv)
{
	int	  i;
	u_int16_t lram_addr;

	adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
	adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);

	adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 1));
	adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 2));

	adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
	adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
	adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
	adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0);

	lram_addr = ADV_QADR_BEG;
	for (i = 0; i < 32; i++, lram_addr += 2)
		adv_write_lram_16(adv, lram_addr, 0);
}

static void
adv_disable_interrupt(struct adv_softc *adv)
{
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
}

static void
adv_enable_interrupt(struct adv_softc *adv)
{
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
}

static void
adv_toggle_irq_act(struct adv_softc *adv)
{
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
}

void
adv_start_execution(struct adv_softc *adv)
{
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) != 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B, 0);
	}
}

int
adv_stop_chip(struct adv_softc *adv)
{
	u_int8_t cc_val;

	cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
		 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
	ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
	adv_set_chip_ih(adv, ADV_INS_HALT);
	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
		return (0);
	}
	return (1);
}

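/*
 * Ask the RISC to halt by posting a stop request in LRAM and spinning
 * until the chip reports halted.  Returns non-zero on success, zero if
 * the 2000-iteration poll times out.
 */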
static int
adv_host_req_chip_halt(struct adv_softc *adv)
{
	int	 count;
	u_int8_t saved_stop_code;

	if (adv_is_chip_halted(adv))
		return (1);

	count = 0;
	saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B,
			 ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP);
	while (adv_is_chip_halted(adv) == 0
	    && count++ < 2000)
		;

	adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code);
	return (count < 2000);
}

static void
adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code)
{
	adv_set_bank(adv, 1);
	ADV_OUTW(adv, ADV_REG_IH, ins_code);
	adv_set_bank(adv, 0);
}

#if 0
static u_int8_t
adv_get_chip_scsi_ctrl(struct adv_softc *adv)
{
	u_int8_t scsi_ctrl;

	adv_set_bank(adv, 1);
	scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
	adv_set_bank(adv, 0);
	return (scsi_ctrl);
}
#endif

/*
 * XXX Looks like more padding issues in this routine as well.
 *     There has to be a way to turn this into an insw.
 */
static void
adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
	       u_int16_t *inbuf, int words)
{
	int	i;

	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, inbuf++) {
		if (i == 5) {
			continue;
		}
		*inbuf = ADV_INW(adv, ADV_LRAM_DATA);
	}
}

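/*
 * Return the number of free queue blocks available, or 0 if n_qs cannot
 * be satisfied while preserving the ADV_MIN_FREE_Q reserve; in that
 * case the requested count is recorded in openings_needed for later
 * retry.
 */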
1664 static u_int
adv_get_num_free_queues(struct adv_softc * adv,u_int8_t n_qs)1665 adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs)
1666 {
1667 	u_int	  cur_used_qs;
1668 	u_int	  cur_free_qs;
1669 
1670 	cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q;
1671 
1672 	if ((cur_used_qs + n_qs) <= adv->max_openings) {
1673 		cur_free_qs = adv->max_openings - cur_used_qs;
1674 		return (cur_free_qs);
1675 	}
1676 	adv->openings_needed = n_qs;
1677 	return (0);
1678 }
1679 
1680 static u_int8_t
adv_alloc_free_queues(struct adv_softc * adv,u_int8_t free_q_head,u_int8_t n_free_q)1681 adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head,
1682 		      u_int8_t n_free_q)
1683 {
1684 	int i;
1685 
1686 	for (i = 0; i < n_free_q; i++) {
1687 		free_q_head = adv_alloc_free_queue(adv, free_q_head);
1688 		if (free_q_head == ADV_QLINK_END)
1689 			break;
1690 	}
1691 	return (free_q_head);
1692 }
1693 
1694 static u_int8_t
adv_alloc_free_queue(struct adv_softc * adv,u_int8_t free_q_head)1695 adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head)
1696 {
1697 	u_int16_t	q_addr;
1698 	u_int8_t	next_qp;
1699 	u_int8_t	q_status;
1700 
1701 	next_qp = ADV_QLINK_END;
1702 	q_addr = ADV_QNO_TO_QADDR(free_q_head);
1703 	q_status = adv_read_lram_8(adv,	q_addr + ADV_SCSIQ_B_STATUS);
1704 
1705 	if ((q_status & QS_READY) == 0)
1706 		next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
1707 
1708 	return (next_qp);
1709 }
1710 
1711 static int
adv_send_scsi_queue(struct adv_softc * adv,struct adv_scsi_q * scsiq,u_int8_t n_q_required)1712 adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
1713 		    u_int8_t n_q_required)
1714 {
1715 	u_int8_t	free_q_head;
1716 	u_int8_t	next_qp;
1717 	int		retval;
1718 
1719 	retval = 1;
1720 	free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
1721 	if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
1722 	    != ADV_QLINK_END) {
1723 		scsiq->q1.q_no = free_q_head;
1724 
1725 		/*
1726 		 * Now that we know our Q number, point our sense
1727 		 * buffer pointer to a bus dma mapped area where
1728 		 * we can dma the data to.
1729 		 */
1730 		scsiq->q1.sense_addr = adv->sense_physbase
1731 		    + ((free_q_head - 1) * sizeof(struct scsi_sense_data));
1732 		adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
1733 		adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
1734 		adv->cur_active += n_q_required;
1735 		retval = 0;
1736 	}
1737 	return (retval);
1738 }
1739 
1740 
1741 static void
adv_put_ready_sg_list_queue(struct adv_softc * adv,struct adv_scsi_q * scsiq,u_int q_no)1742 adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
1743 			    u_int q_no)
1744 {
1745 	u_int8_t	sg_list_dwords;
1746 	u_int8_t	sg_index, i;
1747 	u_int8_t	sg_entry_cnt;
1748 	u_int8_t	next_qp;
1749 	u_int16_t	q_addr;
1750 	struct		adv_sg_head *sg_head;
1751 	struct		adv_sg_list_q scsi_sg_q;
1752 
1753 	sg_head = scsiq->sg_head;
1754 
1755 	if (sg_head) {
1756 		sg_entry_cnt = sg_head->entry_cnt - 1;
1757 #ifdef DIAGNOSTIC
1758 		if (sg_entry_cnt == 0)
1759 			panic("adv_put_ready_sg_list_queue: ScsiQ with "
1760 			      "a SG list but only one element");
1761 		if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
1762 			panic("adv_put_ready_sg_list_queue: ScsiQ with "
1763 			      "a SG list but QC_SG_HEAD not set");
1764 #endif
1765 		q_addr = ADV_QNO_TO_QADDR(q_no);
1766 		sg_index = 1;
1767 		scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
1768 		scsi_sg_q.sg_head_qp = q_no;
1769 		scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
1770 		for (i = 0; i < sg_head->queue_cnt; i++) {
1771 			u_int8_t segs_this_q;
1772 
1773 			if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
1774 				segs_this_q = ADV_SG_LIST_PER_Q;
1775 			else {
1776 				/* In that case, this is the last segment. */
1777 				segs_this_q = sg_entry_cnt;
1778 				scsi_sg_q.cntl |= QCSG_SG_XFER_END;
1779 			}
1780 			scsi_sg_q.seq_no = i + 1;
1781 			sg_list_dwords = segs_this_q << 1;
1782 			if (i == 0) {
1783 				scsi_sg_q.sg_list_cnt = segs_this_q;
1784 				scsi_sg_q.sg_cur_list_cnt = segs_this_q;
1785 			} else {
1786 				scsi_sg_q.sg_list_cnt = segs_this_q - 1;
1787 				scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
1788 			}
1789 			next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
1790 			scsi_sg_q.q_no = next_qp;
1791 			q_addr = ADV_QNO_TO_QADDR(next_qp);
1792 
1793 			adv_write_lram_16_multi(adv,
1794 						q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
1795 						(u_int16_t *)&scsi_sg_q,
1796 						sizeof(scsi_sg_q) >> 1);
1797 			adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG,
1798 						(u_int32_t *)&sg_head->sg_list[sg_index],
1799 						sg_list_dwords);
1800 			sg_entry_cnt -= segs_this_q;
1801 			sg_index += ADV_SG_LIST_PER_Q;
1802 		}
1803 	}
1804 	adv_put_ready_queue(adv, scsiq, q_no);
1805 }
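/*
 * Annotation (editor's note), assuming ADV_SG_LIST_PER_Q is 7: a
 * request with 16 S/G entries keeps entry 0 in the head queue and
 * spreads the remaining 15 across queue_cnt = 3 continuation queues of
 * 7, 7 and 1 entries, with QCSG_SG_XFER_END set only on the last.
 * sg_list_dwords is segs_this_q * 2 because every S/G element is an
 * address/length pair of 32-bit words.
 */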
1806 
1807 static void
1808 adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
1809 		    u_int q_no)
1810 {
1811 	struct		adv_target_transinfo* tinfo;
1812 	u_int		q_addr;
1813 	u_int		tid_no;
1814 
1815 	tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);
1816 	tinfo = &adv->tinfo[tid_no];
1817 	if ((tinfo->current.period != tinfo->goal.period)
1818 	 || (tinfo->current.offset != tinfo->goal.offset)) {
1819 
1820 		adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset);
1821 		scsiq->q1.cntl |= QC_MSG_OUT;
1822 	}
1823 	q_addr = ADV_QNO_TO_QADDR(q_no);
1824 
1825 	scsiq->q1.status = QS_FREE;
1826 
1827 	adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
1828 				(u_int16_t *)scsiq->cdbptr,
1829 				scsiq->q2.cdb_len >> 1);
1830 
1831 #if BYTE_ORDER == BIG_ENDIAN
1832 	adv_adj_scsiq_endian(scsiq);
1833 #endif
1834 
1835 	adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
1836 		      (u_int16_t *) &scsiq->q1.cntl,
1837 		      ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);
1838 
1839 #ifdef CC_WRITE_IO_COUNT
1840 	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
1841 			  adv->req_count);
1842 #endif
1843 
1844 #ifdef CC_CLEAR_DMA_REMAIN
1845 
1846 	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
1847 	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
1848 #endif
1849 
1850 	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
1851 			  (scsiq->q1.q_no << 8) | QS_READY);
1852 }
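/*
 * Annotation (editor's note): adv_put_ready_queue() is the common tail
 * for S/G and non-S/G requests.  When the negotiation goal differs
 * from the current sync settings, an SDTR message is staged in LRAM
 * and QC_MSG_OUT asks the microcode to send it at the next selection.
 * The final 16-bit write stores QS_READY into the status byte, which
 * publishes the queue to the microcode only after the CDB and control
 * blocks have been copied in.
 */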
1853 
1854 static void
1855 adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
1856 	      u_int16_t *buffer, int words)
1857 {
1858 	int	i;
1859 
1860 	/*
1861 	 * XXX This routine makes *gross* assumptions
1862 	 * about padding in the data structures.
1863 	 * Either the data structures should have explicit
1864 	 * padding members added, or they should have padding
1865 	 * turned off via compiler attributes depending on
1866 	 * which yields better overall performance.  My hunch
1867 	 * would be that turning off padding would be the
1868 	 * faster approach as an outsw is much faster than
1869 	 * this crude loop and accessing un-aligned data
1870 	 * members isn't *that* expensive.  The other choice
1871 		 * would be to modify the ASC script so that
1872 		 * the adv_scsiq_1 structure can be re-arranged so
1873 	 * padding isn't required.
1874 	 */
1875 	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1876 	for (i = 0; i < words; i++, buffer++) {
1877 		if (i == 2 || i == 10) {
1878 			continue;
1879 		}
1880 		ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
1881 	}
1882 }
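/*
 * Editor's sketch (hypothetical, not part of the driver): if q1/q2
 * were declared to match the LRAM image exactly, per the XXX comment
 * above, the copy could lose the skip test:
 */
#if 0
static void
adv_put_scsiq_packed(struct adv_softc *adv, u_int16_t s_addr,
		     u_int16_t *buffer, int words)
{
	int	i;

	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, buffer++)
		ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
}
#endif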
1883 
1884 #if BYTE_ORDER == BIG_ENDIAN
1885 void
1886 adv_adj_endian_qdone_info(struct adv_q_done_info *scsiq)
1887 {
1888 
1889 	panic("adv(4) not supported on big-endian machines.\n");
1890 }
1891 
1892 void
1893 adv_adj_scsiq_endian(struct adv_scsi_q *scsiq)
1894 {
1895 
1896 	panic("adv(4) not supported on big-endian machines.\n");
1897 }
1898 #endif
1899 
1900 static void
1901 adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr,
1902 		     u_int8_t q_cntl, target_bit_vector target_mask,
1903 		     int tid_no)
1904 {
1905 	struct	ext_msg ext_msg;
1906 
1907 	adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg,
1908 			       sizeof(ext_msg) >> 1);
1909 	if ((ext_msg.msg_type == MSG_EXTENDED)
1910 	 && (ext_msg.msg_req == MSG_EXT_SDTR)
1911 	 && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) {
1912 		union	  ccb *ccb;
1913 		struct	  adv_target_transinfo* tinfo;
1914 		u_int32_t cinfo_index;
1915 		u_int	 period;
1916 		u_int	 offset;
1917 		int	 sdtr_accept;
1918 		u_int8_t orig_offset;
1919 
1920 		cinfo_index =
1921 		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
1922 		ccb = adv->ccb_infos[cinfo_index].ccb;
1923 		tinfo = &adv->tinfo[tid_no];
1924 		sdtr_accept = TRUE;
1925 
1926 		orig_offset = ext_msg.req_ack_offset;
1927 		if (ext_msg.xfer_period < tinfo->goal.period) {
1928 			sdtr_accept = FALSE;
1929 			ext_msg.xfer_period = tinfo->goal.period;
1930 		}
1931 
1932 		/* Perform range checking */
1933 		period = ext_msg.xfer_period;
1934 		offset = ext_msg.req_ack_offset;
1935 		adv_period_offset_to_sdtr(adv, &period, &offset, tid_no);
1936 		ext_msg.xfer_period = period;
1937 		ext_msg.req_ack_offset = offset;
1938 
1939 		/* Record our current sync settings */
1940 		adv_set_syncrate(adv, ccb->ccb_h.path,
1941 				 tid_no, ext_msg.xfer_period,
1942 				 ext_msg.req_ack_offset,
1943 				 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
1944 
1945 		/* Offset too high or large period forced async */
1946 		if (orig_offset != ext_msg.req_ack_offset)
1947 			sdtr_accept = FALSE;
1948 
1949 		if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
1950 			/* Valid response to our requested negotiation */
1951 			q_cntl &= ~QC_MSG_OUT;
1952 		} else {
1953 			/* Must Respond */
1954 			q_cntl |= QC_MSG_OUT;
1955 			adv_msgout_sdtr(adv, ext_msg.xfer_period,
1956 					ext_msg.req_ack_offset);
1957 		}
1958 
1959 	} else if (ext_msg.msg_type == MSG_EXTENDED
1960 		&& ext_msg.msg_req == MSG_EXT_WDTR
1961 		&& ext_msg.msg_len == MSG_EXT_WDTR_LEN) {
1962 
1963 		ext_msg.wdtr_width = 0;
1964 		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
1965 					(u_int16_t *)&ext_msg,
1966 					sizeof(ext_msg) >> 1);
1967 		q_cntl |= QC_MSG_OUT;
1968 	} else {
1969 
1970 		ext_msg.msg_type = MSG_MESSAGE_REJECT;
1971 		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
1972 					(u_int16_t *)&ext_msg,
1973 					sizeof(ext_msg) >> 1);
1974 		q_cntl |= QC_MSG_OUT;
1975 	}
1976 	adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
1977 }
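/*
 * Annotation (editor's note): an inbound SDTR is accepted silently
 * only when our own negotiation was outstanding (QC_MSG_OUT set) and
 * the target's period/offset survived clamping unchanged; otherwise a
 * counter-proposal must be sent.  WDTR is answered with a width of 0
 * (8-bit) since this hardware is narrow-only, and any other extended
 * message is rejected with MSG_MESSAGE_REJECT.
 */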
1978 
1979 static void
1980 adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
1981 		u_int8_t sdtr_offset)
1982 {
1983 	struct	 ext_msg sdtr_buf;
1984 
1985 	sdtr_buf.msg_type = MSG_EXTENDED;
1986 	sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
1987 	sdtr_buf.msg_req = MSG_EXT_SDTR;
1988 	sdtr_buf.xfer_period = sdtr_period;
1989 	sdtr_offset &= ADV_SYN_MAX_OFFSET;
1990 	sdtr_buf.req_ack_offset = sdtr_offset;
1991 	adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
1992 				(u_int16_t *) &sdtr_buf,
1993 				sizeof(sdtr_buf) / 2);
1994 }
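/*
 * Annotation (editor's note): the staged message is the standard
 * five-byte SDTR sequence (MSG_EXTENDED, MSG_EXT_SDTR_LEN, MSG_EXT_SDTR,
 * period, offset); the offset is clamped to ADV_SYN_MAX_OFFSET before
 * the message is placed in the LRAM message-out buffer for the
 * microcode to transmit.
 */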
1995 
1996 int
1997 adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb,
1998 	      u_int32_t status, int queued_only)
1999 {
2000 	u_int16_t q_addr;
2001 	u_int8_t  q_no;
2002 	struct adv_q_done_info scsiq_buf;
2003 	struct adv_q_done_info *scsiq;
2004 	u_int8_t  target_ix;
2005 	int	  count;
2006 
2007 	if (!dumping)
2008 		mtx_assert(&adv->lock, MA_OWNED);
2009 	scsiq = &scsiq_buf;
2010 	target_ix = ADV_TIDLUN_TO_IX(target, lun);
2011 	count = 0;
2012 	for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) {
2013 		struct adv_ccb_info *ccb_info;
2014 		q_addr = ADV_QNO_TO_QADDR(q_no);
2015 
2016 		adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count);
2017 		ccb_info = &adv->ccb_infos[scsiq->d2.ccb_index];
2018 		if (((scsiq->q_status & QS_READY) != 0)
2019 		 && ((scsiq->q_status & QS_ABORTED) == 0)
2020 		 && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)
2021 		 && (scsiq->d2.target_ix == target_ix)
2022 		 && (queued_only == 0
2023 		  || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE)))
2024 		 && (ccb == NULL || (ccb == ccb_info->ccb))) {
2025 			union ccb *aborted_ccb;
2026 			struct adv_ccb_info *cinfo;
2027 
2028 			scsiq->q_status |= QS_ABORTED;
2029 			adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS,
2030 					 scsiq->q_status);
2031 			aborted_ccb = ccb_info->ccb;
2032 			/* Don't clobber earlier error codes */
2033 			if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK)
2034 			  == CAM_REQ_INPROG)
2035 				aborted_ccb->ccb_h.status |= status;
2036 			cinfo = (struct adv_ccb_info *)
2037 			    aborted_ccb->ccb_h.ccb_cinfo_ptr;
2038 			cinfo->state |= ACCB_ABORT_QUEUED;
2039 			count++;
2040 		}
2041 	}
2042 	return (count);
2043 }
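/*
 * Annotation (editor's note): aborts are implemented by sweeping every
 * active queue slot in LRAM rather than by walking per-target lists.
 * A slot is marked QS_ABORTED only if it is ready, not already
 * aborted, not an S/G continuation queue, matches the target/lun
 * index, optionally has not yet been started on the bus, and matches
 * the specific CCB when one is given.
 */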
2044 
2045 int
2046 adv_reset_bus(struct adv_softc *adv, int initiate_bus_reset)
2047 {
2048 	int count;
2049 	int i;
2050 	union ccb *ccb;
2051 
2052 	if (!dumping)
2053 		mtx_assert(&adv->lock, MA_OWNED);
2054 	i = 200;
2055 	while ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_SCSI_RESET_ACTIVE) != 0
2056 	    && i--)
2057 		DELAY(1000);
2058 	adv_reset_chip(adv, initiate_bus_reset);
2059 	adv_reinit_lram(adv);
2060 	for (i = 0; i <= ADV_MAX_TID; i++)
2061 		adv_set_syncrate(adv, NULL, i, /*period*/0,
2062 				 /*offset*/0, ADV_TRANS_CUR);
2063 	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
2064 
2065 	/* Tell the XPT layer that a bus reset occurred */
2066 	if (adv->path != NULL)
2067 		xpt_async(AC_BUS_RESET, adv->path, NULL);
2068 
2069 	count = 0;
2070 	while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) {
2071 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
2072 			ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
2073 		adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0);
2074 		count++;
2075 	}
2076 
2077 	adv_start_chip(adv);
2078 	return (count);
2079 }
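/*
 * Annotation (editor's note): the reset sequence is ordered so nothing
 * slips through: wait up to roughly 200ms for a prior reset to clear,
 * reset the chip (optionally asserting SCSI RST), rebuild LRAM state,
 * force every target back to async, notify CAM via AC_BUS_RESET, and
 * only then fail all pending CCBs before restarting the microcode.
 */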
2080 
2081 static void
2082 adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data)
2083 {
2084 	int orig_id;
2085 
2086 	adv_set_bank(adv, 1);
2087 	orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1;
2088 	ADV_OUTB(adv, ADV_HOST_SCSIID, tid);
2089 	if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) {
2090 		adv_set_bank(adv, 0);
2091 		ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
2092 	}
2093 	adv_set_bank(adv, 1);
2094 	ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id);
2095 	adv_set_bank(adv, 0);
2096 }
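/*
 * Annotation (editor's note): the synchronous transfer register lives
 * in register bank 0 while the host SCSI ID select lives in bank 1,
 * hence the bank flipping above.  The read-back compare against
 * (0x01 << tid) verifies that the per-target window was really
 * selected before the offset/period byte is written; the original ID
 * is restored afterwards.
 */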
2097