/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1998 - 2008 Søren Schmidt <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/ata.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/ctype.h>
#include <sys/bus.h>
#include <sys/sema.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <dev/ata/ata-all.h>
#include <dev/ata/ata-pci.h>
#include <ata_if.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>

/* prototypes */
static int ata_generic_status(device_t dev);
static int ata_wait(struct ata_channel *ch, int unit, u_int8_t);
static void ata_pio_read(struct ata_request *, int);
static void ata_pio_write(struct ata_request *, int);
static void ata_tf_read(struct ata_request *);
static void ata_tf_write(struct ata_request *);

/*
 * low level ATA functions
 */
void
ata_generic_hw(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    ch->hw.begin_transaction = ata_begin_transaction;
    ch->hw.end_transaction = ata_end_transaction;
    ch->hw.status = ata_generic_status;
    ch->hw.softreset = NULL;
    ch->hw.command = ata_generic_command;
    ch->hw.tf_read = ata_tf_read;
    ch->hw.tf_write = ata_tf_write;
    ch->hw.pm_read = NULL;
    ch->hw.pm_write = NULL;
}
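
/*
 * Chipset front-ends normally install this generic vector first and then
 * override the entries their hardware handles differently.  A minimal
 * sketch (the my_chip_* handler names are purely illustrative):
 *
 *     ata_generic_hw(dev);
 *     ch->hw.command = my_chip_command;
 *     ch->hw.softreset = my_chip_softreset;
 */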

/* must be called with ATA channel locked and state_mtx held */
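/*
 * Start the given request on the channel.  The request is dispatched on
 * its ATA_R_ATAPI/ATA_R_DMA flags; ATA_OP_FINISHED is returned when the
 * hardware is already done with the request (including the error paths,
 * which set request->result), while ATA_OP_CONTINUES means an interrupt
 * is expected and the timeout callout has been armed.
 */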
int
ata_begin_transaction(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(request->parent);
    int dummy, error;

    ATA_DEBUG_RQ(request, "begin transaction");

    /* disable ATAPI DMA writes if HW doesn't support it */
    if ((ch->flags & ATA_NO_ATAPI_DMA) &&
        (request->flags & ATA_R_ATAPI) == ATA_R_ATAPI)
        request->flags &= ~ATA_R_DMA;
    if ((ch->flags & ATA_ATAPI_DMA_RO) &&
        ((request->flags & (ATA_R_ATAPI | ATA_R_DMA | ATA_R_WRITE)) ==
         (ATA_R_ATAPI | ATA_R_DMA | ATA_R_WRITE)))
        request->flags &= ~ATA_R_DMA;

    switch (request->flags & (ATA_R_ATAPI | ATA_R_DMA)) {
    /* ATA PIO data transfer and control commands */
    default:
    {
        /* record command direction here as our request might be gone later */
        int write = (request->flags & ATA_R_WRITE);

        /* issue command */
        if (ch->hw.command(request)) {
            device_printf(request->parent, "error issuing %s command\n",
                ata_cmd2str(request));
            request->result = EIO;
            goto begin_finished;
        }

        /* device reset doesn't interrupt */
        if (request->u.ata.command == ATA_DEVICE_RESET) {
            int timeout = 1000000;
            do {
                DELAY(10);
                request->status = ATA_IDX_INB(ch, ATA_STATUS);
            } while (request->status & ATA_S_BUSY && timeout--);
            if (request->status & ATA_S_ERROR)
                request->error = ATA_IDX_INB(ch, ATA_ERROR);
            ch->hw.tf_read(request);
            goto begin_finished;
        }

        /* if write command output the data */
        if (write) {
            if (ata_wait(ch, request->unit, (ATA_S_READY | ATA_S_DRQ)) < 0) {
                device_printf(request->parent,
                    "timeout waiting for write DRQ\n");
                request->result = EIO;
                goto begin_finished;
            }
            ata_pio_write(request, request->transfersize);
        }
    }
        goto begin_continue;

    /* ATA DMA data transfer commands */
    case ATA_R_DMA:
        /* check sanity, setup SG list and DMA engine */
        if ((error = ch->dma.load(request, NULL, &dummy))) {
            device_printf(request->parent, "setting up DMA failed\n");
            request->result = error;
            goto begin_finished;
        }

        /* start DMA engine if necessary */
        if ((ch->flags & ATA_DMA_BEFORE_CMD) &&
            ch->dma.start && ch->dma.start(request)) {
            device_printf(request->parent, "error starting DMA\n");
            request->result = EIO;
            goto begin_finished;
        }

        /* issue command */
        if (ch->hw.command(request)) {
            device_printf(request->parent, "error issuing %s command\n",
                ata_cmd2str(request));
            request->result = EIO;
            goto begin_finished;
        }

        /* start DMA engine */
        if (!(ch->flags & ATA_DMA_BEFORE_CMD) &&
            ch->dma.start && ch->dma.start(request)) {
            device_printf(request->parent, "error starting DMA\n");
            request->result = EIO;
            goto begin_finished;
        }
        goto begin_continue;

    /* ATAPI PIO commands */
    case ATA_R_ATAPI:
        /* is this just a POLL DSC command ? */
        if (request->u.atapi.ccb[0] == ATAPI_POLL_DSC) {
            ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_DEV(request->unit));
            DELAY(10);
            if (!(ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_DSC))
                request->result = EBUSY;
            goto begin_finished;
        }

        /* start ATAPI operation */
        if (ch->hw.command(request)) {
            device_printf(request->parent, "error issuing ATA PACKET command\n");
            request->result = EIO;
            goto begin_finished;
        }
        goto begin_continue;

    /* ATAPI DMA commands */
    case ATA_R_ATAPI|ATA_R_DMA:
        /* is this just a POLL DSC command ? */
        if (request->u.atapi.ccb[0] == ATAPI_POLL_DSC) {
            ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_DEV(request->unit));
            DELAY(10);
            if (!(ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_DSC))
                request->result = EBUSY;
            goto begin_finished;
        }

        /* check sanity, setup SG list and DMA engine */
        if ((error = ch->dma.load(request, NULL, &dummy))) {
            device_printf(request->parent, "setting up DMA failed\n");
            request->result = error;
            goto begin_finished;
        }

        /* start ATAPI operation */
        if (ch->hw.command(request)) {
            device_printf(request->parent, "error issuing ATA PACKET command\n");
            request->result = EIO;
            goto begin_finished;
        }

        /* start DMA engine */
        if (ch->dma.start && ch->dma.start(request)) {
            request->result = EIO;
            goto begin_finished;
        }
        goto begin_continue;
    }
    /* NOT REACHED */
    printf("ata_begin_transaction OOPS!!!\n");

begin_finished:
    if (ch->dma.unload) {
        ch->dma.unload(request);
    }
    return ATA_OP_FINISHED;

begin_continue:
    callout_reset(&request->callout, request->timeout * hz,
        ata_timeout, request);
    return ATA_OP_CONTINUES;
}

/* must be called with ATA channel locked and state_mtx held */
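/*
 * Finish (or continue) the transaction that just interrupted.  Reading the
 * status register clears the interrupt; the request is then completed,
 * another chunk of PIO data is moved, or the ATAPI command block is sent,
 * depending on the transfer type and phase.  Returns ATA_OP_FINISHED (and
 * stops the timeout callout) or ATA_OP_CONTINUES accordingly.
 */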
int
ata_end_transaction(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(request->parent);
    int length;

    ATA_DEBUG_RQ(request, "end transaction");

    /* clear interrupt and get status */
    request->status = ATA_IDX_INB(ch, ATA_STATUS);

    switch (request->flags & (ATA_R_ATAPI | ATA_R_DMA | ATA_R_CONTROL)) {
    /* ATA PIO data transfer and control commands */
    default:

        /* on timeouts we have no data or anything so just return */
        if (request->flags & ATA_R_TIMEOUT)
            goto end_finished;

        /* Read back registers to the request struct. */
        if ((request->status & ATA_S_ERROR) ||
            (request->flags & (ATA_R_CONTROL | ATA_R_NEEDRESULT))) {
            ch->hw.tf_read(request);
        }

        /* if we got an error we are done with the HW */
        if (request->status & ATA_S_ERROR) {
            request->error = ATA_IDX_INB(ch, ATA_ERROR);
            goto end_finished;
        }

        /* are we moving data ? */
        if (request->flags & (ATA_R_READ | ATA_R_WRITE)) {
            /* if read data get it */
            if (request->flags & ATA_R_READ) {
                int flags = ATA_S_DRQ;

                if (request->u.ata.command != ATA_ATAPI_IDENTIFY)
                    flags |= ATA_S_READY;
                if (ata_wait(ch, request->unit, flags) < 0) {
                    device_printf(request->parent,
                        "timeout waiting for read DRQ\n");
                    request->result = EIO;
                    goto end_finished;
                }
                ata_pio_read(request, request->transfersize);
            }

            /* update how far we've gotten */
            request->donecount += request->transfersize;

            /* do we need a scoop more ? */
            if (request->bytecount > request->donecount) {
                /* set this transfer size according to HW capabilities */
                request->transfersize =
                    min((request->bytecount - request->donecount),
                        request->transfersize);

                /* if data write command, output the data */
                if (request->flags & ATA_R_WRITE) {
                    /* if we get an error here we are done with the HW */
                    if (ata_wait(ch, request->unit, (ATA_S_READY | ATA_S_DRQ)) < 0) {
                        device_printf(request->parent,
                            "timeout waiting for write DRQ\n");
                        request->status = ATA_IDX_INB(ch, ATA_STATUS);
                        goto end_finished;
                    }

                    /* output data and return waiting for new interrupt */
                    ata_pio_write(request, request->transfersize);
                    goto end_continue;
                }

                /* if data read command, return & wait for interrupt */
                if (request->flags & ATA_R_READ)
                    goto end_continue;
            }
        }
        /* done with HW */
        goto end_finished;

    /* ATA DMA data transfer commands */
    case ATA_R_DMA:

        /* stop DMA engine and get status */
        if (ch->dma.stop)
            request->dma->status = ch->dma.stop(request);

        /* did we get error or data */
        if (request->status & ATA_S_ERROR)
            request->error = ATA_IDX_INB(ch, ATA_ERROR);
        else if (request->dma->status & ATA_BMSTAT_ERROR)
            request->status |= ATA_S_ERROR;
        else if (!(request->flags & ATA_R_TIMEOUT))
            request->donecount = request->bytecount;

        /* Read back registers to the request struct. */
        if ((request->status & ATA_S_ERROR) ||
            (request->flags & (ATA_R_CONTROL | ATA_R_NEEDRESULT))) {
            ch->hw.tf_read(request);
        }

        /* release SG list etc */
        ch->dma.unload(request);

        /* done with HW */
        goto end_finished;

    /* ATAPI PIO commands */
    case ATA_R_ATAPI:
        length = ATA_IDX_INB(ch, ATA_CYL_LSB)|(ATA_IDX_INB(ch, ATA_CYL_MSB)<<8);

        /* on timeouts we have no data or anything so just return */
        if (request->flags & ATA_R_TIMEOUT)
            goto end_finished;

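        /*
         * Classify the current ATAPI transfer phase from the interrupt
         * reason register bits (ATA_I_CMD/ATA_I_IN) combined with the DRQ
         * bit of the status read above; the ATAPI_P_* values below encode
         * exactly these three bits.
         */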
        switch ((ATA_IDX_INB(ch, ATA_IREASON) & (ATA_I_CMD | ATA_I_IN)) |
            (request->status & ATA_S_DRQ)) {
        case ATAPI_P_CMDOUT:
            /* this seems to be needed for some (slow) devices */
            DELAY(10);

            if (!(request->status & ATA_S_DRQ)) {
                device_printf(request->parent, "command interrupt without DRQ\n");
                request->status = ATA_S_ERROR;
                goto end_finished;
            }
            ATA_IDX_OUTSW_STRM(ch, ATA_DATA, (int16_t *)request->u.atapi.ccb,
                (request->flags & ATA_R_ATAPI16) ? 8 : 6);
            /* return wait for interrupt */
            goto end_continue;

        case ATAPI_P_WRITE:
            if (request->flags & ATA_R_READ) {
                request->status = ATA_S_ERROR;
                device_printf(request->parent,
                    "%s trying to write on read buffer\n",
                    ata_cmd2str(request));
                goto end_finished;
            }
            ata_pio_write(request, length);
            request->donecount += length;

            /* set next transfer size according to HW capabilities */
            request->transfersize = min((request->bytecount-request->donecount),
                request->transfersize);
            /* return wait for interrupt */
            goto end_continue;

        case ATAPI_P_READ:
            if (request->flags & ATA_R_WRITE) {
                request->status = ATA_S_ERROR;
                device_printf(request->parent,
                    "%s trying to read on write buffer\n",
                    ata_cmd2str(request));
                goto end_finished;
            }
            ata_pio_read(request, length);
            request->donecount += length;

            /* set next transfer size according to HW capabilities */
            request->transfersize = min((request->bytecount-request->donecount),
                request->transfersize);
            /* return wait for interrupt */
            goto end_continue;

        case ATAPI_P_DONEDRQ:
            device_printf(request->parent,
                "WARNING - %s DONEDRQ non conformant device\n",
                ata_cmd2str(request));
            if (request->flags & ATA_R_READ) {
                ata_pio_read(request, length);
                request->donecount += length;
            }
            else if (request->flags & ATA_R_WRITE) {
                ata_pio_write(request, length);
                request->donecount += length;
            }
            else
                request->status = ATA_S_ERROR;
            /* FALLTHROUGH */

        case ATAPI_P_ABORT:
        case ATAPI_P_DONE:
            if (request->status & (ATA_S_ERROR | ATA_S_DWF))
                request->error = ATA_IDX_INB(ch, ATA_ERROR);
            goto end_finished;

        default:
            device_printf(request->parent, "unknown transfer phase\n");
            request->status = ATA_S_ERROR;
        }

        /* done with HW */
        goto end_finished;

    /* ATAPI DMA commands */
    case ATA_R_ATAPI|ATA_R_DMA:

        /* stop DMA engine and get status */
        if (ch->dma.stop)
            request->dma->status = ch->dma.stop(request);

        /* did we get error or data */
        if (request->status & (ATA_S_ERROR | ATA_S_DWF))
            request->error = ATA_IDX_INB(ch, ATA_ERROR);
        else if (request->dma->status & ATA_BMSTAT_ERROR)
            request->status |= ATA_S_ERROR;
        else if (!(request->flags & ATA_R_TIMEOUT))
            request->donecount = request->bytecount;

        /* release SG list etc */
        ch->dma.unload(request);

        /* done with HW */
        goto end_finished;
    }
    /* NOT REACHED */
    printf("ata_end_transaction OOPS!!\n");

end_finished:
    callout_stop(&request->callout);
    return ATA_OP_FINISHED;

end_continue:
    return ATA_OP_CONTINUES;
}

/* must be called with ATA channel locked and state_mtx held */
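/*
 * Probe and reset the devices on a legacy ATA channel: look at the status
 * register signatures to see whether a master and/or slave might be
 * present, pulse SRST through the control register, then poll BUSY for up
 * to roughly 31 seconds while classifying each device as ATA or ATAPI from
 * the signature bytes (ATAPI_MAGIC_LSB/MSB) left in the cylinder registers
 * after the reset.  The result is accumulated in ch->devices.
 */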
void
ata_generic_reset(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    u_int8_t ostat0 = 0, stat0 = 0, ostat1 = 0, stat1 = 0;
    u_int8_t err = 0, lsb = 0, msb = 0;
    int mask = 0, timeout;

    /* do we have any signs of ATA/ATAPI HW being present ? */
    ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_D_LBA | ATA_DEV(ATA_MASTER));
    DELAY(10);
    ostat0 = ATA_IDX_INB(ch, ATA_STATUS);
    if (((ostat0 & 0xf8) != 0xf8 || (ch->flags & ATA_KNOWN_PRESENCE)) &&
        ostat0 != 0xa5) {
        stat0 = ATA_S_BUSY;
        mask |= 0x01;
    }

    /* in some setups we don't want to test for a slave */
    if (!(ch->flags & ATA_NO_SLAVE)) {
        ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_D_LBA | ATA_DEV(ATA_SLAVE));
        DELAY(10);
        ostat1 = ATA_IDX_INB(ch, ATA_STATUS);
        if (((ostat1 & 0xf8) != 0xf8 || (ch->flags & ATA_KNOWN_PRESENCE)) &&
            ostat1 != 0xa5) {
            stat1 = ATA_S_BUSY;
            mask |= 0x02;
        }
    }

    if (bootverbose)
        device_printf(dev, "reset tp1 mask=%02x ostat0=%02x ostat1=%02x\n",
            mask, ostat0, ostat1);

    /* if nothing showed up there is no need to get any further */
    /* XXX SOS is that too strong?, we just might lose devices here */
    ch->devices = 0;
    if (!mask)
        return;

    /* reset (both) devices on this channel */
    ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_D_LBA | ATA_DEV(ATA_MASTER));
    DELAY(10);
    ATA_IDX_OUTB(ch, ATA_CONTROL, ATA_A_IDS | ATA_A_RESET);
    ata_udelay(10000);
    ATA_IDX_OUTB(ch, ATA_CONTROL, ATA_A_IDS);
    ata_udelay(100000);
    ATA_IDX_INB(ch, ATA_ERROR);

    /* wait for BUSY to go inactive */
    for (timeout = 0; timeout < 310; timeout++) {
        if ((mask & 0x01) && (stat0 & ATA_S_BUSY)) {
            ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_DEV(ATA_MASTER));
            DELAY(10);
            if (ch->flags & ATA_STATUS_IS_LONG)
                stat0 = ATA_IDX_INL(ch, ATA_STATUS) & 0xff;
            else
                stat0 = ATA_IDX_INB(ch, ATA_STATUS);
            err = ATA_IDX_INB(ch, ATA_ERROR);
            lsb = ATA_IDX_INB(ch, ATA_CYL_LSB);
            msb = ATA_IDX_INB(ch, ATA_CYL_MSB);
            if (bootverbose)
                device_printf(dev,
                    "stat0=0x%02x err=0x%02x lsb=0x%02x msb=0x%02x\n",
                    stat0, err, lsb, msb);
            if (stat0 == err && lsb == err && msb == err &&
                timeout > (stat0 & ATA_S_BUSY ? 100 : 10))
                mask &= ~0x01;
            if (!(stat0 & ATA_S_BUSY)) {
                if ((err & 0x7f) == ATA_E_ILI) {
                    if (lsb == ATAPI_MAGIC_LSB && msb == ATAPI_MAGIC_MSB) {
                        ch->devices |= ATA_ATAPI_MASTER;
                    }
                    else if (lsb == 0 && msb == 0 && (stat0 & ATA_S_READY)) {
                        ch->devices |= ATA_ATA_MASTER;
                    }
                }
                else if ((stat0 & 0x0f) && err == lsb && err == msb) {
                    stat0 |= ATA_S_BUSY;
                }
            }
        }

        if ((mask & 0x02) && (stat1 & ATA_S_BUSY) &&
            !((mask & 0x01) && (stat0 & ATA_S_BUSY))) {
            ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_DEV(ATA_SLAVE));
            DELAY(10);
            if (ch->flags & ATA_STATUS_IS_LONG)
                stat1 = ATA_IDX_INL(ch, ATA_STATUS) & 0xff;
            else
                stat1 = ATA_IDX_INB(ch, ATA_STATUS);
            err = ATA_IDX_INB(ch, ATA_ERROR);
            lsb = ATA_IDX_INB(ch, ATA_CYL_LSB);
            msb = ATA_IDX_INB(ch, ATA_CYL_MSB);
            if (bootverbose)
                device_printf(dev,
                    "stat1=0x%02x err=0x%02x lsb=0x%02x msb=0x%02x\n",
                    stat1, err, lsb, msb);
            if (stat1 == err && lsb == err && msb == err &&
                timeout > (stat1 & ATA_S_BUSY ? 100 : 10))
                mask &= ~0x02;
            if (!(stat1 & ATA_S_BUSY)) {
                if ((err & 0x7f) == ATA_E_ILI) {
                    if (lsb == ATAPI_MAGIC_LSB && msb == ATAPI_MAGIC_MSB) {
                        ch->devices |= ATA_ATAPI_SLAVE;
                    }
                    else if (lsb == 0 && msb == 0 && (stat1 & ATA_S_READY)) {
                        ch->devices |= ATA_ATA_SLAVE;
                    }
                }
                else if ((stat1 & 0x0f) && err == lsb && err == msb) {
                    stat1 |= ATA_S_BUSY;
                }
            }
        }

        if ((ch->flags & ATA_KNOWN_PRESENCE) == 0 &&
            timeout > ((mask == 0x03) ? 20 : 10)) {
            if ((mask & 0x01) && stat0 == 0xff)
                mask &= ~0x01;
            if ((mask & 0x02) && stat1 == 0xff)
                mask &= ~0x02;
        }
        if (((mask & 0x01) == 0 || !(stat0 & ATA_S_BUSY)) &&
            ((mask & 0x02) == 0 || !(stat1 & ATA_S_BUSY)))
            break;
        ata_udelay(100000);
    }

    if (bootverbose)
        device_printf(dev, "reset tp2 stat0=%02x stat1=%02x devices=0x%x\n",
            stat0, stat1, ch->devices);
}

/* must be called with ATA channel locked and state_mtx held */
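/*
 * Channel status check used at interrupt time: returns 0 (do not service
 * the interrupt yet) if the device still reports BUSY after a short
 * settling delay, 1 if the transaction can be ended.
 */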
static int
ata_generic_status(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    if (ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_BUSY) {
        DELAY(100);
        if (ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_BUSY)
            return 0;
    }
    return 1;
}

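/*
 * Wait for the selected device to clear BUSY (at most ~1 second) and then,
 * if a status mask was given, for all bits in that mask to assert (at most
 * another 50 msec).  Returns the ATA_S_ERROR bit of the status on success,
 * -2 on a BUSY timeout and -3 if the requested bits never showed up.
 */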
static int
ata_wait(struct ata_channel *ch, int unit, u_int8_t mask)
{
    u_int8_t status;
    int timeout = 0;

    DELAY(1);

    /* wait at max 1 second for device to get !BUSY */
    while (timeout < 1000000) {
        status = ATA_IDX_INB(ch, ATA_ALTSTAT);

        /* if drive fails status, reselect the drive and try again */
        if (status == 0xff) {
            ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_DEV(unit));
            timeout += 1000;
            DELAY(1000);
            continue;
        }

        /* are we done ? */
        if (!(status & ATA_S_BUSY))
            break;

        if (timeout > 1000) {
            timeout += 1000;
            DELAY(1000);
        }
        else {
            timeout += 10;
            DELAY(10);
        }
    }
    if (timeout >= 1000000)
        return -2;
    if (!mask)
        return (status & ATA_S_ERROR);

    DELAY(1);

    /* wait 50 msec for bits wanted */
    timeout = 5000;
    while (timeout--) {
        status = ATA_IDX_INB(ch, ATA_ALTSTAT);
        if ((status & mask) == mask)
            return (status & ATA_S_ERROR);
        DELAY(10);
    }
    return -3;
}

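/*
 * Issue a request to the device: select it, wait for it to become ready,
 * and either write the taskfile and command register (plain ATA) or issue
 * an ATA PACKET command followed by the 12/16 byte ATAPI command block.
 * Returns 0 on success and -1 if the device never became ready.
 */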
int
ata_generic_command(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(request->parent);

    /* select device */
    ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_D_LBA | ATA_DEV(request->unit));

    /* ready to issue command ? */
    if (ata_wait(ch, request->unit, 0) < 0) {
        device_printf(request->parent, "timeout waiting to issue command\n");
        request->flags |= ATA_R_TIMEOUT;
        return (-1);
    }

    /* enable interrupt */
    ATA_IDX_OUTB(ch, ATA_CONTROL, ATA_A_4BIT);

    if (request->flags & ATA_R_ATAPI) {
        int timeout = 5000;
        int res;

        /* issue packet command to controller */
        if (request->flags & ATA_R_DMA) {
            ATA_IDX_OUTB(ch, ATA_FEATURE, ATA_F_DMA);
            ATA_IDX_OUTB(ch, ATA_CYL_LSB, 0);
            ATA_IDX_OUTB(ch, ATA_CYL_MSB, 0);
        }
        else {
            ATA_IDX_OUTB(ch, ATA_FEATURE, 0);
            ATA_IDX_OUTB(ch, ATA_CYL_LSB, request->transfersize);
            ATA_IDX_OUTB(ch, ATA_CYL_MSB, request->transfersize >> 8);
        }
        ATA_IDX_OUTB(ch, ATA_COMMAND, ATA_PACKET_CMD);

        /* command interrupt device ? just return and wait for interrupt */
        if (request->flags & ATA_R_ATAPI_INTR)
            return (0);

        /* command processed ? */
        res = ata_wait(ch, request->unit, 0);
        if (res != 0) {
            if (res < 0) {
                device_printf(request->parent,
                    "timeout waiting for PACKET command\n");
                request->flags |= ATA_R_TIMEOUT;
            }
            return (-1);
        }
        /* wait for ready to write ATAPI command block */
        while (timeout--) {
            int reason = ATA_IDX_INB(ch, ATA_IREASON);
            int status = ATA_IDX_INB(ch, ATA_STATUS);

            if (((reason & (ATA_I_CMD | ATA_I_IN)) |
                 (status & (ATA_S_DRQ | ATA_S_BUSY))) == ATAPI_P_CMDOUT)
                break;
            DELAY(20);
        }
        if (timeout <= 0) {
            device_printf(request->parent,
                "timeout waiting for ATAPI ready\n");
            request->flags |= ATA_R_TIMEOUT;
            return (-1);
        }

        /* this seems to be needed for some (slow) devices */
        DELAY(10);

        /* output command block */
        ATA_IDX_OUTSW_STRM(ch, ATA_DATA, (int16_t *)request->u.atapi.ccb,
            (request->flags & ATA_R_ATAPI16) ? 8 : 6);
    }
    else {
        ch->hw.tf_write(request);

        /* issue command to controller */
        ATA_IDX_OUTB(ch, ATA_COMMAND, request->u.ata.command);
    }
    return (0);
}

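/*
 * Read the result taskfile back into the request.  For 48-bit commands the
 * high-order bytes are read first with the HOB bit set in the control
 * register, then the low-order bytes with HOB cleared again.
 */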
static void
ata_tf_read(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(request->parent);

    if (request->flags & ATA_R_48BIT) {
        ATA_IDX_OUTB(ch, ATA_CONTROL, ATA_A_4BIT | ATA_A_HOB);
        request->u.ata.count = (ATA_IDX_INB(ch, ATA_COUNT) << 8);
        request->u.ata.lba =
            ((u_int64_t)(ATA_IDX_INB(ch, ATA_SECTOR)) << 24) |
            ((u_int64_t)(ATA_IDX_INB(ch, ATA_CYL_LSB)) << 32) |
            ((u_int64_t)(ATA_IDX_INB(ch, ATA_CYL_MSB)) << 40);

        ATA_IDX_OUTB(ch, ATA_CONTROL, ATA_A_4BIT);
        request->u.ata.count |= ATA_IDX_INB(ch, ATA_COUNT);
        request->u.ata.lba |=
            (ATA_IDX_INB(ch, ATA_SECTOR) |
             (ATA_IDX_INB(ch, ATA_CYL_LSB) << 8) |
             (ATA_IDX_INB(ch, ATA_CYL_MSB) << 16));
    }
    else {
        request->u.ata.count = ATA_IDX_INB(ch, ATA_COUNT);
        request->u.ata.lba = ATA_IDX_INB(ch, ATA_SECTOR) |
            (ATA_IDX_INB(ch, ATA_CYL_LSB) << 8) |
            (ATA_IDX_INB(ch, ATA_CYL_MSB) << 16) |
            ((ATA_IDX_INB(ch, ATA_DRIVE) & 0xf) << 24);
    }
}

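/*
 * Write the taskfile to the device.  48-bit commands write each register
 * twice, high-order byte first and low-order byte second, since LBA48
 * capable devices treat these registers as two-deep FIFOs; 28-bit commands
 * carry LBA bits 24-27 in the low nibble of the drive/head register instead.
 */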
static void
ata_tf_write(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(request->parent);

    if (request->flags & ATA_R_48BIT) {
        ATA_IDX_OUTB(ch, ATA_FEATURE, request->u.ata.feature >> 8);
        ATA_IDX_OUTB(ch, ATA_FEATURE, request->u.ata.feature);
        ATA_IDX_OUTB(ch, ATA_COUNT, request->u.ata.count >> 8);
        ATA_IDX_OUTB(ch, ATA_COUNT, request->u.ata.count);
        ATA_IDX_OUTB(ch, ATA_SECTOR, request->u.ata.lba >> 24);
        ATA_IDX_OUTB(ch, ATA_SECTOR, request->u.ata.lba);
        ATA_IDX_OUTB(ch, ATA_CYL_LSB, request->u.ata.lba >> 32);
        ATA_IDX_OUTB(ch, ATA_CYL_LSB, request->u.ata.lba >> 8);
        ATA_IDX_OUTB(ch, ATA_CYL_MSB, request->u.ata.lba >> 40);
        ATA_IDX_OUTB(ch, ATA_CYL_MSB, request->u.ata.lba >> 16);
        ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_LBA | ATA_DEV(request->unit));
    }
    else {
        ATA_IDX_OUTB(ch, ATA_FEATURE, request->u.ata.feature);
        ATA_IDX_OUTB(ch, ATA_COUNT, request->u.ata.count);
        ATA_IDX_OUTB(ch, ATA_SECTOR, request->u.ata.lba);
        ATA_IDX_OUTB(ch, ATA_CYL_LSB, request->u.ata.lba >> 8);
        ATA_IDX_OUTB(ch, ATA_CYL_MSB, request->u.ata.lba >> 16);
        ATA_IDX_OUTB(ch, ATA_DRIVE,
            ATA_D_IBM | ATA_D_LBA | ATA_DEV(request->unit) |
            ((request->u.ata.lba >> 24) & 0x0f));
    }
}

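/*
 * PIO data-in.  The destination may be a plain kernel virtual buffer, a
 * mapped struct bio, or an unmapped bio whose pages are temporarily mapped
 * one at a time with pmap_quick_enter_page().  Data is moved 32 bits at a
 * time when size and alignment allow, otherwise 16 bits at a time; an odd
 * trailing byte is carried over in buf[] to the next chunk, and any data
 * the device sends beyond the request is drained and discarded.
 */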
static void
ata_pio_read(struct ata_request *request, int length)
{
    struct ata_channel *ch = device_get_softc(request->parent);
    struct bio *bio;
    uint8_t *addr;
    vm_offset_t page;
    int todo, done, off, moff, resid, size, i;
    uint8_t buf[2] __aligned(2);

    todo = min(request->transfersize, length);
    page = done = resid = 0;
    while (done < todo) {
        size = todo - done;

        /* Prepare data address and limit size (if not sequential). */
        off = request->donecount + done;
        if ((request->flags & ATA_R_DATA_IN_CCB) == 0 ||
            (request->ccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
            addr = (uint8_t *)request->data + off;
        } else if ((request->ccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO) {
            bio = (struct bio *)request->data;
            if ((bio->bio_flags & BIO_UNMAPPED) == 0) {
                addr = (uint8_t *)bio->bio_data + off;
            } else {
                moff = bio->bio_ma_offset + off;
                page = pmap_quick_enter_page(
                    bio->bio_ma[moff / PAGE_SIZE]);
                moff %= PAGE_SIZE;
                size = min(size, PAGE_SIZE - moff);
                addr = (void *)(page + moff);
            }
        } else
            panic("ata_pio_read: Unsupported CAM data type %x\n",
                (request->ccb->ccb_h.flags & CAM_DATA_MASK));

        /* We may have extra byte already read but not stored. */
        if (resid) {
            addr[0] = buf[1];
            addr++;
            done++;
            size--;
        }

        /* Process main part of data. */
        resid = size % 2;
        if (__predict_false((ch->flags & ATA_USE_16BIT) ||
            (size % 4) != 0 || ((uintptr_t)addr % 4) != 0)) {
#ifndef __NO_STRICT_ALIGNMENT
            if (__predict_false((uintptr_t)addr % 2)) {
                for (i = 0; i + 1 < size; i += 2) {
                    *(uint16_t *)&buf =
                        ATA_IDX_INW_STRM(ch, ATA_DATA);
                    addr[i] = buf[0];
                    addr[i + 1] = buf[1];
                }
            } else
#endif
                ATA_IDX_INSW_STRM(ch, ATA_DATA, (void*)addr,
                    size / 2);

            /* If we have extra byte of data, leave it for later. */
            if (resid) {
                *(uint16_t *)&buf =
                    ATA_IDX_INW_STRM(ch, ATA_DATA);
                addr[size - 1] = buf[0];
            }
        } else
            ATA_IDX_INSL_STRM(ch, ATA_DATA, (void*)addr, size / 4);

        if (page) {
            pmap_quick_remove_page(page);
            page = 0;
        }
        done += size;
    }

    if (length > done) {
        device_printf(request->parent,
            "WARNING - %s read data overrun %d > %d\n",
            ata_cmd2str(request), length, done);
        for (i = done + resid; i < length; i += 2)
            ATA_IDX_INW(ch, ATA_DATA);
    }
}

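/*
 * PIO data-out, the mirror image of ata_pio_read() above: same buffer
 * handling and 16/32-bit path selection, except that a leftover odd byte
 * at the very end is padded with zero to fill the final 16-bit word, and
 * an underrun is padded out with zero words.
 */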
static void
ata_pio_write(struct ata_request *request, int length)
{
    struct ata_channel *ch = device_get_softc(request->parent);
    struct bio *bio;
    uint8_t *addr;
    vm_offset_t page;
    int todo, done, off, moff, resid, size, i;
    uint8_t buf[2] __aligned(2);

    todo = min(request->transfersize, length);
    page = done = resid = 0;
    while (done < todo) {
        size = todo - done;

        /* Prepare data address and limit size (if not sequential). */
        off = request->donecount + done;
        if ((request->flags & ATA_R_DATA_IN_CCB) == 0 ||
            (request->ccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
            addr = (uint8_t *)request->data + off;
        } else if ((request->ccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO) {
            bio = (struct bio *)request->data;
            if ((bio->bio_flags & BIO_UNMAPPED) == 0) {
                addr = (uint8_t *)bio->bio_data + off;
            } else {
                moff = bio->bio_ma_offset + off;
                page = pmap_quick_enter_page(
                    bio->bio_ma[moff / PAGE_SIZE]);
                moff %= PAGE_SIZE;
                size = min(size, PAGE_SIZE - moff);
                addr = (void *)(page + moff);
            }
        } else
            panic("ata_pio_write: Unsupported CAM data type %x\n",
                (request->ccb->ccb_h.flags & CAM_DATA_MASK));

        /* We may have extra byte to be written first. */
        if (resid) {
            buf[1] = addr[0];
            ATA_IDX_OUTW_STRM(ch, ATA_DATA, *(uint16_t *)&buf);
            addr++;
            done++;
            size--;
        }

        /* Process main part of data. */
        resid = size % 2;
        if (__predict_false((ch->flags & ATA_USE_16BIT) ||
            (size % 4) != 0 || ((uintptr_t)addr % 4) != 0)) {
#ifndef __NO_STRICT_ALIGNMENT
            if (__predict_false((uintptr_t)addr % 2)) {
                for (i = 0; i + 1 < size; i += 2) {
                    buf[0] = addr[i];
                    buf[1] = addr[i + 1];
                    ATA_IDX_OUTW_STRM(ch, ATA_DATA,
                        *(uint16_t *)&buf);
                }
            } else
#endif
                ATA_IDX_OUTSW_STRM(ch, ATA_DATA, (void*)addr,
                    size / 2);

            /* If we have extra byte of data, save it for later. */
            if (resid)
                buf[0] = addr[size - 1];
        } else
            ATA_IDX_OUTSL_STRM(ch, ATA_DATA,
                (void*)addr, size / sizeof(int32_t));

        if (page) {
            pmap_quick_remove_page(page);
            page = 0;
        }
        done += size;
    }

    /* We may have extra byte of data to be written. Pad it with zero. */
    if (resid) {
        buf[1] = 0;
        ATA_IDX_OUTW_STRM(ch, ATA_DATA, *(uint16_t *)&buf);
    }

    if (length > done) {
        device_printf(request->parent,
            "WARNING - %s write data underrun %d > %d\n",
            ata_cmd2str(request), length, done);
        for (i = done + resid; i < length; i += 2)
            ATA_IDX_OUTW(ch, ATA_DATA, 0);
    }
}