/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998 - 2008 Søren Schmidt <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/ata.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/ctype.h>
#include <sys/bus.h>
#include <sys/sema.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <dev/ata/ata-all.h>
#include <dev/ata/ata-pci.h>
#include <ata_if.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>

/* prototypes */
static int ata_generic_status(device_t dev);
static int ata_wait(struct ata_channel *ch, int unit, u_int8_t);
static void ata_pio_read(struct ata_request *, int);
static void ata_pio_write(struct ata_request *, int);
static void ata_tf_read(struct ata_request *);
static void ata_tf_write(struct ata_request *);

/*
 * low level ATA functions
 */
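/*
 * Install the generic PIO/DMA transaction handlers into the channel's
 * hardware vector.  Chipset drivers may override individual hooks;
 * softreset and the port multiplier accessors are left NULL here.
 */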
void
ata_generic_hw(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    ch->hw.begin_transaction = ata_begin_transaction;
    ch->hw.end_transaction = ata_end_transaction;
    ch->hw.status = ata_generic_status;
    ch->hw.softreset = NULL;
    ch->hw.command = ata_generic_command;
    ch->hw.tf_read = ata_tf_read;
    ch->hw.tf_write = ata_tf_write;
    ch->hw.pm_read = NULL;
    ch->hw.pm_write = NULL;
}

/* must be called with ATA channel locked and state_mtx held */
int
ata_begin_transaction(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(request->parent);
    int dummy, error;

    ATA_DEBUG_RQ(request, "begin transaction");

    /* disable ATAPI DMA writes if HW doesn't support it */
    if ((ch->flags & ATA_NO_ATAPI_DMA) &&
        (request->flags & ATA_R_ATAPI) == ATA_R_ATAPI)
        request->flags &= ~ATA_R_DMA;
    if ((ch->flags & ATA_ATAPI_DMA_RO) &&
        ((request->flags & (ATA_R_ATAPI | ATA_R_DMA | ATA_R_WRITE)) ==
         (ATA_R_ATAPI | ATA_R_DMA | ATA_R_WRITE)))
        request->flags &= ~ATA_R_DMA;

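    /*
     * Dispatch on the transfer type encoded in the request flags:
     * plain ATA PIO and control commands take the default branch,
     * the remaining cases handle ATA DMA, ATAPI PIO and ATAPI DMA.
     */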
    switch (request->flags & (ATA_R_ATAPI | ATA_R_DMA)) {

    /* ATA PIO data transfer and control commands */
    default:
        {
            /* record command direction here as our request might be gone later */
            int write = (request->flags & ATA_R_WRITE);

            /* issue command */
            if (ch->hw.command(request)) {
                device_printf(request->parent, "error issuing %s command\n",
                    ata_cmd2str(request));
                request->result = EIO;
                goto begin_finished;
            }

            /* device reset doesn't interrupt */
            if (request->u.ata.command == ATA_DEVICE_RESET) {
                int timeout = 1000000;

                do {
                    DELAY(10);
                    request->status = ATA_IDX_INB(ch, ATA_STATUS);
                } while (request->status & ATA_S_BUSY && timeout--);
                if (request->status & ATA_S_ERROR)
                    request->error = ATA_IDX_INB(ch, ATA_ERROR);
                ch->hw.tf_read(request);
                goto begin_finished;
            }

            /* if write command output the data */
            if (write) {
                if (ata_wait(ch, request->unit,
                    (ATA_S_READY | ATA_S_DRQ)) < 0) {
                    device_printf(request->parent,
                        "timeout waiting for write DRQ\n");
                    request->result = EIO;
                    goto begin_finished;
                }
                ata_pio_write(request, request->transfersize);
            }
        }
        goto begin_continue;

    /* ATA DMA data transfer commands */
    case ATA_R_DMA:
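        /*
         * Some controllers want the DMA engine started before the
         * command is written (ATA_DMA_BEFORE_CMD), others after;
         * both orderings are handled below.
         */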
        /* check sanity, setup SG list and DMA engine */
        if ((error = ch->dma.load(request, NULL, &dummy))) {
            device_printf(request->parent, "setting up DMA failed\n");
            request->result = error;
            goto begin_finished;
        }

        /* start DMA engine if necessary */
        if ((ch->flags & ATA_DMA_BEFORE_CMD) &&
            ch->dma.start && ch->dma.start(request)) {
            device_printf(request->parent, "error starting DMA\n");
            request->result = EIO;
            goto begin_finished;
        }

        /* issue command */
        if (ch->hw.command(request)) {
            device_printf(request->parent, "error issuing %s command\n",
                ata_cmd2str(request));
            request->result = EIO;
            goto begin_finished;
        }

        /* start DMA engine */
        if (!(ch->flags & ATA_DMA_BEFORE_CMD) &&
            ch->dma.start && ch->dma.start(request)) {
            device_printf(request->parent, "error starting DMA\n");
            request->result = EIO;
            goto begin_finished;
        }
        goto begin_continue;

    /* ATAPI PIO commands */
    case ATA_R_ATAPI:
        /* is this just a POLL DSC command ? */
        if (request->u.atapi.ccb[0] == ATAPI_POLL_DSC) {
            ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_DEV(request->unit));
            DELAY(10);
            if (!(ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_DSC))
                request->result = EBUSY;
            goto begin_finished;
        }

        /* start ATAPI operation */
        if (ch->hw.command(request)) {
            device_printf(request->parent, "error issuing ATA PACKET command\n");
            request->result = EIO;
            goto begin_finished;
        }
        goto begin_continue;

    /* ATAPI DMA commands */
    case ATA_R_ATAPI|ATA_R_DMA:
        /* is this just a POLL DSC command ? */
        if (request->u.atapi.ccb[0] == ATAPI_POLL_DSC) {
            ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_DEV(request->unit));
            DELAY(10);
            if (!(ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_DSC))
                request->result = EBUSY;
            goto begin_finished;
        }

        /* check sanity, setup SG list and DMA engine */
        if ((error = ch->dma.load(request, NULL, &dummy))) {
            device_printf(request->parent, "setting up DMA failed\n");
            request->result = error;
            goto begin_finished;
        }

        /* start ATAPI operation */
        if (ch->hw.command(request)) {
            device_printf(request->parent, "error issuing ATA PACKET command\n");
            request->result = EIO;
            goto begin_finished;
        }

        /* start DMA engine */
        if (ch->dma.start && ch->dma.start(request)) {
            request->result = EIO;
            goto begin_finished;
        }
        goto begin_continue;
    }
    /* NOT REACHED */
    printf("ata_begin_transaction OOPS!!!\n");

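/*
 * begin_finished: the request completed (or failed) synchronously; drop any
 * DMA SG mapping and report ATA_OP_FINISHED to the state machine.
 * begin_continue: the command is in flight; arm the per-request timeout
 * and wait for the completion interrupt.
 */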
begin_finished:
    if (ch->dma.unload) {
        ch->dma.unload(request);
    }
    return ATA_OP_FINISHED;

begin_continue:
    callout_reset(&request->callout, request->timeout * hz,
        (timeout_t*)ata_timeout, request);
    return ATA_OP_CONTINUES;
}

/* must be called with ATA channel locked and state_mtx held */
int
ata_end_transaction(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(request->parent);
    int length;

    ATA_DEBUG_RQ(request, "end transaction");

    /* clear interrupt and get status */
    request->status = ATA_IDX_INB(ch, ATA_STATUS);

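    /*
     * The completion path is selected by the same flags used at begin time,
     * with ATA_R_CONTROL added so that control commands are finished
     * through the default (PIO) branch.
     */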
    switch (request->flags & (ATA_R_ATAPI | ATA_R_DMA | ATA_R_CONTROL)) {

    /* ATA PIO data transfer and control commands */
    default:

        /* on timeouts we have no data or anything so just return */
        if (request->flags & ATA_R_TIMEOUT)
            goto end_finished;

        /* Read back registers to the request struct. */
        if ((request->status & ATA_S_ERROR) ||
            (request->flags & (ATA_R_CONTROL | ATA_R_NEEDRESULT))) {
            ch->hw.tf_read(request);
        }

        /* if we got an error we are done with the HW */
        if (request->status & ATA_S_ERROR) {
            request->error = ATA_IDX_INB(ch, ATA_ERROR);
            goto end_finished;
        }

        /* are we moving data ? */
        if (request->flags & (ATA_R_READ | ATA_R_WRITE)) {

            /* if read data get it */
            if (request->flags & ATA_R_READ) {
                int flags = ATA_S_DRQ;

                if (request->u.ata.command != ATA_ATAPI_IDENTIFY)
                    flags |= ATA_S_READY;
                if (ata_wait(ch, request->unit, flags) < 0) {
                    device_printf(request->parent,
                        "timeout waiting for read DRQ\n");
                    request->result = EIO;
                    goto end_finished;
                }
                ata_pio_read(request, request->transfersize);
            }

            /* update how far we've gotten */
            request->donecount += request->transfersize;

            /* do we need a scoop more ? */
            if (request->bytecount > request->donecount) {

                /* set this transfer size according to HW capabilities */
                request->transfersize =
                    min((request->bytecount - request->donecount),
                        request->transfersize);

                /* if data write command, output the data */
                if (request->flags & ATA_R_WRITE) {

                    /* if we get an error here we are done with the HW */
                    if (ata_wait(ch, request->unit,
                        (ATA_S_READY | ATA_S_DRQ)) < 0) {
                        device_printf(request->parent,
                            "timeout waiting for write DRQ\n");
                        request->status = ATA_IDX_INB(ch, ATA_STATUS);
                        goto end_finished;
                    }

                    /* output data and return waiting for new interrupt */
                    ata_pio_write(request, request->transfersize);
                    goto end_continue;
                }

                /* if data read command, return & wait for interrupt */
                if (request->flags & ATA_R_READ)
                    goto end_continue;
            }
        }
        /* done with HW */
        goto end_finished;

    /* ATA DMA data transfer commands */
    case ATA_R_DMA:

        /* stop DMA engine and get status */
        if (ch->dma.stop)
            request->dma->status = ch->dma.stop(request);

        /* did we get error or data */
        if (request->status & ATA_S_ERROR)
            request->error = ATA_IDX_INB(ch, ATA_ERROR);
        else if (request->dma->status & ATA_BMSTAT_ERROR)
            request->status |= ATA_S_ERROR;
        else if (!(request->flags & ATA_R_TIMEOUT))
            request->donecount = request->bytecount;

        /* Read back registers to the request struct. */
        if ((request->status & ATA_S_ERROR) ||
            (request->flags & (ATA_R_CONTROL | ATA_R_NEEDRESULT))) {
            ch->hw.tf_read(request);
        }

        /* release SG list etc */
        ch->dma.unload(request);

        /* done with HW */
        goto end_finished;

    /* ATAPI PIO commands */
    case ATA_R_ATAPI:
        length = ATA_IDX_INB(ch, ATA_CYL_LSB) |
            (ATA_IDX_INB(ch, ATA_CYL_MSB) << 8);

        /* on timeouts we have no data or anything so just return */
        if (request->flags & ATA_R_TIMEOUT)
            goto end_finished;

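        /*
         * The interrupt reason register combined with the DRQ status bit
         * identifies the current ATAPI transfer phase: CMDOUT (device is
         * ready for the command packet), READ/WRITE data phases with the
         * byte count latched in the cylinder registers, or DONE/ABORT.
         */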
        switch ((ATA_IDX_INB(ch, ATA_IREASON) & (ATA_I_CMD | ATA_I_IN)) |
            (request->status & ATA_S_DRQ)) {

        case ATAPI_P_CMDOUT:
            /* this seems to be needed for some (slow) devices */
            DELAY(10);

            if (!(request->status & ATA_S_DRQ)) {
                device_printf(request->parent, "command interrupt without DRQ\n");
                request->status = ATA_S_ERROR;
                goto end_finished;
            }
            ATA_IDX_OUTSW_STRM(ch, ATA_DATA, (int16_t *)request->u.atapi.ccb,
                (request->flags & ATA_R_ATAPI16) ? 8 : 6);
            /* return wait for interrupt */
            goto end_continue;

        case ATAPI_P_WRITE:
            if (request->flags & ATA_R_READ) {
                request->status = ATA_S_ERROR;
                device_printf(request->parent,
                    "%s trying to write on read buffer\n",
                    ata_cmd2str(request));
                goto end_finished;
            }
            ata_pio_write(request, length);
            request->donecount += length;

            /* set next transfer size according to HW capabilities */
            request->transfersize = min((request->bytecount - request->donecount),
                request->transfersize);
            /* return wait for interrupt */
            goto end_continue;

        case ATAPI_P_READ:
            if (request->flags & ATA_R_WRITE) {
                request->status = ATA_S_ERROR;
                device_printf(request->parent,
                    "%s trying to read on write buffer\n",
                    ata_cmd2str(request));
                goto end_finished;
            }
            ata_pio_read(request, length);
            request->donecount += length;

            /* set next transfer size according to HW capabilities */
            request->transfersize = min((request->bytecount - request->donecount),
                request->transfersize);
            /* return wait for interrupt */
            goto end_continue;

        case ATAPI_P_DONEDRQ:
            device_printf(request->parent,
                "WARNING - %s DONEDRQ non conformant device\n",
                ata_cmd2str(request));
            if (request->flags & ATA_R_READ) {
                ata_pio_read(request, length);
                request->donecount += length;
            }
            else if (request->flags & ATA_R_WRITE) {
                ata_pio_write(request, length);
                request->donecount += length;
            }
            else
                request->status = ATA_S_ERROR;
            /* FALLTHROUGH */

        case ATAPI_P_ABORT:
        case ATAPI_P_DONE:
            if (request->status & (ATA_S_ERROR | ATA_S_DWF))
                request->error = ATA_IDX_INB(ch, ATA_ERROR);
            goto end_finished;

        default:
            device_printf(request->parent, "unknown transfer phase\n");
            request->status = ATA_S_ERROR;
        }

        /* done with HW */
        goto end_finished;

    /* ATAPI DMA commands */
    case ATA_R_ATAPI|ATA_R_DMA:

        /* stop DMA engine and get status */
        if (ch->dma.stop)
            request->dma->status = ch->dma.stop(request);

        /* did we get error or data */
        if (request->status & (ATA_S_ERROR | ATA_S_DWF))
            request->error = ATA_IDX_INB(ch, ATA_ERROR);
        else if (request->dma->status & ATA_BMSTAT_ERROR)
            request->status |= ATA_S_ERROR;
        else if (!(request->flags & ATA_R_TIMEOUT))
            request->donecount = request->bytecount;

        /* release SG list etc */
        ch->dma.unload(request);

        /* done with HW */
        goto end_finished;
    }
    /* NOT REACHED */
    printf("ata_end_transaction OOPS!!\n");

end_finished:
    callout_stop(&request->callout);
    return ATA_OP_FINISHED;

end_continue:
    return ATA_OP_CONTINUES;
}

/* must be called with ATA channel locked and state_mtx held */
void
ata_generic_reset(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    u_int8_t ostat0 = 0, stat0 = 0, ostat1 = 0, stat1 = 0;
    u_int8_t err = 0, lsb = 0, msb = 0;
    int mask = 0, timeout;

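    /*
     * Probe for device presence first: a status register that reads back
     * as 0xa5 or with all of bits 7:3 set normally indicates a floating
     * bus with nothing attached, unless the channel driver has flagged
     * ATA_KNOWN_PRESENCE.
     */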
    /* do we have any signs of ATA/ATAPI HW being present ? */
    ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_D_LBA | ATA_DEV(ATA_MASTER));
    DELAY(10);
    ostat0 = ATA_IDX_INB(ch, ATA_STATUS);
    if (((ostat0 & 0xf8) != 0xf8 || (ch->flags & ATA_KNOWN_PRESENCE)) &&
        ostat0 != 0xa5) {
        stat0 = ATA_S_BUSY;
        mask |= 0x01;
    }

    /* in some setups we don't want to test for a slave */
    if (!(ch->flags & ATA_NO_SLAVE)) {
        ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_D_LBA | ATA_DEV(ATA_SLAVE));
        DELAY(10);
        ostat1 = ATA_IDX_INB(ch, ATA_STATUS);
        if (((ostat1 & 0xf8) != 0xf8 || (ch->flags & ATA_KNOWN_PRESENCE)) &&
            ostat1 != 0xa5) {
            stat1 = ATA_S_BUSY;
            mask |= 0x02;
        }
    }

    if (bootverbose)
        device_printf(dev, "reset tp1 mask=%02x ostat0=%02x ostat1=%02x\n",
            mask, ostat0, ostat1);

    /* if nothing showed up there is no need to get any further */
    /* XXX SOS is that too strong?, we just might lose devices here */
    ch->devices = 0;
    if (!mask)
        return;

    /* reset (both) devices on this channel */
    ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_D_LBA | ATA_DEV(ATA_MASTER));
    DELAY(10);
    ATA_IDX_OUTB(ch, ATA_CONTROL, ATA_A_IDS | ATA_A_RESET);
    ata_udelay(10000);
    ATA_IDX_OUTB(ch, ATA_CONTROL, ATA_A_IDS);
    ata_udelay(100000);
    ATA_IDX_INB(ch, ATA_ERROR);

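    /*
     * Poll each candidate device for up to ~31 seconds (310 iterations of
     * 100 ms).  Once BUSY clears, the device type is inferred from the
     * reset signature: ATAPI devices report the ATAPI magic values in the
     * cylinder registers, ATA devices leave them zero and assert READY.
     */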
    /* wait for BUSY to go inactive */
    for (timeout = 0; timeout < 310; timeout++) {
        if ((mask & 0x01) && (stat0 & ATA_S_BUSY)) {
            ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_DEV(ATA_MASTER));
            DELAY(10);
            if (ch->flags & ATA_STATUS_IS_LONG)
                stat0 = ATA_IDX_INL(ch, ATA_STATUS) & 0xff;
            else
                stat0 = ATA_IDX_INB(ch, ATA_STATUS);
            err = ATA_IDX_INB(ch, ATA_ERROR);
            lsb = ATA_IDX_INB(ch, ATA_CYL_LSB);
            msb = ATA_IDX_INB(ch, ATA_CYL_MSB);
            if (bootverbose)
                device_printf(dev,
                    "stat0=0x%02x err=0x%02x lsb=0x%02x msb=0x%02x\n",
                    stat0, err, lsb, msb);
            if (stat0 == err && lsb == err && msb == err &&
                timeout > (stat0 & ATA_S_BUSY ? 100 : 10))
                mask &= ~0x01;
            if (!(stat0 & ATA_S_BUSY)) {
                if ((err & 0x7f) == ATA_E_ILI) {
                    if (lsb == ATAPI_MAGIC_LSB && msb == ATAPI_MAGIC_MSB) {
                        ch->devices |= ATA_ATAPI_MASTER;
                    }
                    else if (lsb == 0 && msb == 0 && (stat0 & ATA_S_READY)) {
                        ch->devices |= ATA_ATA_MASTER;
                    }
                }
                else if ((stat0 & 0x0f) && err == lsb && err == msb) {
                    stat0 |= ATA_S_BUSY;
                }
            }
        }

        if ((mask & 0x02) && (stat1 & ATA_S_BUSY) &&
            !((mask & 0x01) && (stat0 & ATA_S_BUSY))) {
            ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_DEV(ATA_SLAVE));
            DELAY(10);
            if (ch->flags & ATA_STATUS_IS_LONG)
                stat1 = ATA_IDX_INL(ch, ATA_STATUS) & 0xff;
            else
                stat1 = ATA_IDX_INB(ch, ATA_STATUS);
            err = ATA_IDX_INB(ch, ATA_ERROR);
            lsb = ATA_IDX_INB(ch, ATA_CYL_LSB);
            msb = ATA_IDX_INB(ch, ATA_CYL_MSB);
            if (bootverbose)
                device_printf(dev,
                    "stat1=0x%02x err=0x%02x lsb=0x%02x msb=0x%02x\n",
                    stat1, err, lsb, msb);
            if (stat1 == err && lsb == err && msb == err &&
                timeout > (stat1 & ATA_S_BUSY ? 100 : 10))
                mask &= ~0x02;
            if (!(stat1 & ATA_S_BUSY)) {
                if ((err & 0x7f) == ATA_E_ILI) {
                    if (lsb == ATAPI_MAGIC_LSB && msb == ATAPI_MAGIC_MSB) {
                        ch->devices |= ATA_ATAPI_SLAVE;
                    }
                    else if (lsb == 0 && msb == 0 && (stat1 & ATA_S_READY)) {
                        ch->devices |= ATA_ATA_SLAVE;
                    }
                }
                else if ((stat1 & 0x0f) && err == lsb && err == msb) {
                    stat1 |= ATA_S_BUSY;
                }
            }
        }

        if ((ch->flags & ATA_KNOWN_PRESENCE) == 0 &&
            timeout > ((mask == 0x03) ? 20 : 10)) {
            if ((mask & 0x01) && stat0 == 0xff)
                mask &= ~0x01;
            if ((mask & 0x02) && stat1 == 0xff)
                mask &= ~0x02;
        }
        if (((mask & 0x01) == 0 || !(stat0 & ATA_S_BUSY)) &&
            ((mask & 0x02) == 0 || !(stat1 & ATA_S_BUSY)))
            break;
        ata_udelay(100000);
    }

    if (bootverbose)
        device_printf(dev, "reset tp2 stat0=%02x stat1=%02x devices=0x%x\n",
            stat0, stat1, ch->devices);
}

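/*
 * Check whether the channel is ready to be serviced: returns zero if the
 * selected device is still BUSY (after a short 100 us grace period),
 * nonzero otherwise.  Typically consulted from the interrupt path before
 * a request is completed.
 */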
/* must be called with ATA channel locked and state_mtx held */
static int
ata_generic_status(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    if (ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_BUSY) {
        DELAY(100);
        if (ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_BUSY)
            return 0;
    }
    return 1;
}

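/*
 * Wait for the selected device to become non-BUSY (up to 1 second), then,
 * if a status mask is given, wait up to 50 ms for those bits to be set.
 * Returns the ATA_S_ERROR bit of the status on success, -2 if BUSY never
 * cleared and -3 if the requested bits never showed up.
 */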
static int
ata_wait(struct ata_channel *ch, int unit, u_int8_t mask)
{
    u_int8_t status;
    int timeout = 0;

    DELAY(1);

    /* wait at max 1 second for device to get !BUSY */
    while (timeout < 1000000) {
        status = ATA_IDX_INB(ch, ATA_ALTSTAT);

        /* if drive fails status, reselect the drive and try again */
        if (status == 0xff) {
            ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_DEV(unit));
            timeout += 1000;
            DELAY(1000);
            continue;
        }

        /* are we done ? */
        if (!(status & ATA_S_BUSY))
            break;

        if (timeout > 1000) {
            timeout += 1000;
            DELAY(1000);
        }
        else {
            timeout += 10;
            DELAY(10);
        }
    }
    if (timeout >= 1000000)
        return -2;
    if (!mask)
        return (status & ATA_S_ERROR);

    DELAY(1);

    /* wait 50 msec for bits wanted */
    timeout = 5000;
    while (timeout--) {
        status = ATA_IDX_INB(ch, ATA_ALTSTAT);
        if ((status & mask) == mask)
            return (status & ATA_S_ERROR);
        DELAY(10);
    }
    return -3;
}

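/*
 * Issue a command to the device: select it, wait for it to become ready,
 * enable interrupts and then either write the ATA taskfile and command
 * byte, or for ATAPI requests write the PACKET command followed by the
 * command descriptor block once the device signals the CMDOUT phase.
 */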
int
ata_generic_command(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(request->parent);

    /* select device */
    ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_D_LBA | ATA_DEV(request->unit));

    /* ready to issue command ? */
    if (ata_wait(ch, request->unit, 0) < 0) {
        device_printf(request->parent, "timeout waiting to issue command\n");
        request->flags |= ATA_R_TIMEOUT;
        return (-1);
    }

    /* enable interrupt */
    ATA_IDX_OUTB(ch, ATA_CONTROL, ATA_A_4BIT);

    if (request->flags & ATA_R_ATAPI) {
        int timeout = 5000;
        int res;

        /* issue packet command to controller */
        if (request->flags & ATA_R_DMA) {
            ATA_IDX_OUTB(ch, ATA_FEATURE, ATA_F_DMA);
            ATA_IDX_OUTB(ch, ATA_CYL_LSB, 0);
            ATA_IDX_OUTB(ch, ATA_CYL_MSB, 0);
        }
        else {
            ATA_IDX_OUTB(ch, ATA_FEATURE, 0);
            ATA_IDX_OUTB(ch, ATA_CYL_LSB, request->transfersize);
            ATA_IDX_OUTB(ch, ATA_CYL_MSB, request->transfersize >> 8);
        }
        ATA_IDX_OUTB(ch, ATA_COMMAND, ATA_PACKET_CMD);

        /* command interrupt device ? just return and wait for interrupt */
        if (request->flags & ATA_R_ATAPI_INTR)
            return (0);

        /* command processed ? */
        res = ata_wait(ch, request->unit, 0);
        if (res != 0) {
            if (res < 0) {
                device_printf(request->parent,
                    "timeout waiting for PACKET command\n");
                request->flags |= ATA_R_TIMEOUT;
            }
            return (-1);
        }
        /* wait for ready to write ATAPI command block */
        while (timeout--) {
            int reason = ATA_IDX_INB(ch, ATA_IREASON);
            int status = ATA_IDX_INB(ch, ATA_STATUS);

            if (((reason & (ATA_I_CMD | ATA_I_IN)) |
                 (status & (ATA_S_DRQ | ATA_S_BUSY))) == ATAPI_P_CMDOUT)
                break;
            DELAY(20);
        }
        if (timeout <= 0) {
            device_printf(request->parent,
                "timeout waiting for ATAPI ready\n");
            request->flags |= ATA_R_TIMEOUT;
            return (-1);
        }

        /* this seems to be needed for some (slow) devices */
        DELAY(10);

        /* output command block */
        ATA_IDX_OUTSW_STRM(ch, ATA_DATA, (int16_t *)request->u.atapi.ccb,
            (request->flags & ATA_R_ATAPI16) ? 8 : 6);
    }
    else {
        ch->hw.tf_write(request);

        /* issue command to controller */
        ATA_IDX_OUTB(ch, ATA_COMMAND, request->u.ata.command);
    }
    return (0);
}

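/*
 * Read the taskfile registers back into the request.  For 48-bit (LBA48)
 * commands, setting ATA_A_HOB in the control register latches the
 * high-order bytes, which are read first; HOB is then cleared to read the
 * current (low-order) bytes.
 */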
static void
ata_tf_read(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(request->parent);

    if (request->flags & ATA_R_48BIT) {
        ATA_IDX_OUTB(ch, ATA_CONTROL, ATA_A_4BIT | ATA_A_HOB);
        request->u.ata.count = (ATA_IDX_INB(ch, ATA_COUNT) << 8);
        request->u.ata.lba =
            ((u_int64_t)(ATA_IDX_INB(ch, ATA_SECTOR)) << 24) |
            ((u_int64_t)(ATA_IDX_INB(ch, ATA_CYL_LSB)) << 32) |
            ((u_int64_t)(ATA_IDX_INB(ch, ATA_CYL_MSB)) << 40);

        ATA_IDX_OUTB(ch, ATA_CONTROL, ATA_A_4BIT);
        request->u.ata.count |= ATA_IDX_INB(ch, ATA_COUNT);
        request->u.ata.lba |=
            (ATA_IDX_INB(ch, ATA_SECTOR) |
             (ATA_IDX_INB(ch, ATA_CYL_LSB) << 8) |
             (ATA_IDX_INB(ch, ATA_CYL_MSB) << 16));
    }
    else {
        request->u.ata.count = ATA_IDX_INB(ch, ATA_COUNT);
        request->u.ata.lba = ATA_IDX_INB(ch, ATA_SECTOR) |
            (ATA_IDX_INB(ch, ATA_CYL_LSB) << 8) |
            (ATA_IDX_INB(ch, ATA_CYL_MSB) << 16) |
            ((ATA_IDX_INB(ch, ATA_DRIVE) & 0xf) << 24);
    }
}

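/*
 * Write the taskfile registers for a command.  For 48-bit commands each
 * register is written twice, high-order byte first, with the device
 * keeping the previously written value in its HOB latch.  For 28-bit
 * commands LBA bits 27:24 go into the low nibble of the drive/head
 * register.
 */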
static void
ata_tf_write(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(request->parent);

    if (request->flags & ATA_R_48BIT) {
        ATA_IDX_OUTB(ch, ATA_FEATURE, request->u.ata.feature >> 8);
        ATA_IDX_OUTB(ch, ATA_FEATURE, request->u.ata.feature);
        ATA_IDX_OUTB(ch, ATA_COUNT, request->u.ata.count >> 8);
        ATA_IDX_OUTB(ch, ATA_COUNT, request->u.ata.count);
        ATA_IDX_OUTB(ch, ATA_SECTOR, request->u.ata.lba >> 24);
        ATA_IDX_OUTB(ch, ATA_SECTOR, request->u.ata.lba);
        ATA_IDX_OUTB(ch, ATA_CYL_LSB, request->u.ata.lba >> 32);
        ATA_IDX_OUTB(ch, ATA_CYL_LSB, request->u.ata.lba >> 8);
        ATA_IDX_OUTB(ch, ATA_CYL_MSB, request->u.ata.lba >> 40);
        ATA_IDX_OUTB(ch, ATA_CYL_MSB, request->u.ata.lba >> 16);
        ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_LBA | ATA_DEV(request->unit));
    }
    else {
        ATA_IDX_OUTB(ch, ATA_FEATURE, request->u.ata.feature);
        ATA_IDX_OUTB(ch, ATA_COUNT, request->u.ata.count);
        ATA_IDX_OUTB(ch, ATA_SECTOR, request->u.ata.lba);
        ATA_IDX_OUTB(ch, ATA_CYL_LSB, request->u.ata.lba >> 8);
        ATA_IDX_OUTB(ch, ATA_CYL_MSB, request->u.ata.lba >> 16);
        ATA_IDX_OUTB(ch, ATA_DRIVE,
            ATA_D_IBM | ATA_D_LBA | ATA_DEV(request->unit) |
            ((request->u.ata.lba >> 24) & 0x0f));
    }
}

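/*
 * Read one PIO data chunk from the device into the request's buffer.
 * Supports both plain kernel virtual buffers and (possibly unmapped) bios
 * handed down through CAM; unmapped pages are mapped one at a time with
 * pmap_quick_enter_page().  32-bit string reads are used when size and
 * alignment allow, 16-bit reads otherwise, and an odd trailing byte is
 * carried over in buf[] across chunk boundaries.  Any data beyond what we
 * can store is drained so the device does not remain in the DRQ state.
 */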
static void
ata_pio_read(struct ata_request *request, int length)
{
    struct ata_channel *ch = device_get_softc(request->parent);
    struct bio *bio;
    uint8_t *addr;
    vm_offset_t page;
    int todo, done, off, moff, resid, size, i;
    uint8_t buf[2] __aligned(2);

    todo = min(request->transfersize, length);
    page = done = resid = 0;
    while (done < todo) {
        size = todo - done;

        /* Prepare data address and limit size (if not sequential). */
        off = request->donecount + done;
        if ((request->flags & ATA_R_DATA_IN_CCB) == 0 ||
            (request->ccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
            addr = (uint8_t *)request->data + off;
        } else if ((request->ccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO) {
            bio = (struct bio *)request->data;
            if ((bio->bio_flags & BIO_UNMAPPED) == 0) {
                addr = (uint8_t *)bio->bio_data + off;
            } else {
                moff = bio->bio_ma_offset + off;
                page = pmap_quick_enter_page(
                    bio->bio_ma[moff / PAGE_SIZE]);
                moff %= PAGE_SIZE;
                size = min(size, PAGE_SIZE - moff);
                addr = (void *)(page + moff);
            }
        } else
            panic("ata_pio_read: Unsupported CAM data type %x\n",
                (request->ccb->ccb_h.flags & CAM_DATA_MASK));

        /* We may have extra byte already read but not stored. */
        if (resid) {
            addr[0] = buf[1];
            addr++;
            done++;
            size--;
        }

        /* Process main part of data. */
        resid = size % 2;
        if (__predict_false((ch->flags & ATA_USE_16BIT) ||
            (size % 4) != 0 || ((uintptr_t)addr % 4) != 0)) {
#ifndef __NO_STRICT_ALIGNMENT
            if (__predict_false((uintptr_t)addr % 2)) {
                for (i = 0; i + 1 < size; i += 2) {
                    *(uint16_t *)&buf =
                        ATA_IDX_INW_STRM(ch, ATA_DATA);
                    addr[i] = buf[0];
                    addr[i + 1] = buf[1];
                }
            } else
#endif
                ATA_IDX_INSW_STRM(ch, ATA_DATA, (void*)addr,
                    size / 2);

            /* If we have extra byte of data, leave it for later. */
            if (resid) {
                *(uint16_t *)&buf =
                    ATA_IDX_INW_STRM(ch, ATA_DATA);
                addr[size - 1] = buf[0];
            }
        } else
            ATA_IDX_INSL_STRM(ch, ATA_DATA, (void*)addr, size / 4);

        if (page) {
            pmap_quick_remove_page(page);
            page = 0;
        }
        done += size;
    }

    if (length > done) {
        device_printf(request->parent,
            "WARNING - %s read data overrun %d > %d\n",
            ata_cmd2str(request), length, done);
        for (i = done + resid; i < length; i += 2)
            ATA_IDX_INW(ch, ATA_DATA);
    }
}

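/*
 * Write one PIO data chunk from the request's buffer to the device.
 * Mirrors ata_pio_read(): it handles mapped and unmapped CAM data, carries
 * an odd byte across chunk boundaries, pads a final odd byte with zero,
 * and pads out with zero words if the device expects more data than the
 * request supplies.
 */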
static void
ata_pio_write(struct ata_request *request, int length)
{
    struct ata_channel *ch = device_get_softc(request->parent);
    struct bio *bio;
    uint8_t *addr;
    vm_offset_t page;
    int todo, done, off, moff, resid, size, i;
    uint8_t buf[2] __aligned(2);

    todo = min(request->transfersize, length);
    page = done = resid = 0;
    while (done < todo) {
        size = todo - done;

        /* Prepare data address and limit size (if not sequential). */
        off = request->donecount + done;
        if ((request->flags & ATA_R_DATA_IN_CCB) == 0 ||
            (request->ccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
            addr = (uint8_t *)request->data + off;
        } else if ((request->ccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO) {
            bio = (struct bio *)request->data;
            if ((bio->bio_flags & BIO_UNMAPPED) == 0) {
                addr = (uint8_t *)bio->bio_data + off;
            } else {
                moff = bio->bio_ma_offset + off;
                page = pmap_quick_enter_page(
                    bio->bio_ma[moff / PAGE_SIZE]);
                moff %= PAGE_SIZE;
                size = min(size, PAGE_SIZE - moff);
                addr = (void *)(page + moff);
            }
        } else
            panic("ata_pio_write: Unsupported CAM data type %x\n",
                (request->ccb->ccb_h.flags & CAM_DATA_MASK));

        /* We may have extra byte to be written first. */
        if (resid) {
            buf[1] = addr[0];
            ATA_IDX_OUTW_STRM(ch, ATA_DATA, *(uint16_t *)&buf);
            addr++;
            done++;
            size--;
        }

        /* Process main part of data. */
        resid = size % 2;
        if (__predict_false((ch->flags & ATA_USE_16BIT) ||
            (size % 4) != 0 || ((uintptr_t)addr % 4) != 0)) {
#ifndef __NO_STRICT_ALIGNMENT
            if (__predict_false((uintptr_t)addr % 2)) {
                for (i = 0; i + 1 < size; i += 2) {
                    buf[0] = addr[i];
                    buf[1] = addr[i + 1];
                    ATA_IDX_OUTW_STRM(ch, ATA_DATA,
                        *(uint16_t *)&buf);
                }
            } else
#endif
                ATA_IDX_OUTSW_STRM(ch, ATA_DATA, (void*)addr,
                    size / 2);

            /* If we have extra byte of data, save it for later. */
            if (resid)
                buf[0] = addr[size - 1];
        } else
            ATA_IDX_OUTSL_STRM(ch, ATA_DATA,
                (void*)addr, size / sizeof(int32_t));

        if (page) {
            pmap_quick_remove_page(page);
            page = 0;
        }
        done += size;
    }

    /* We may have extra byte of data to be written. Pad it with zero. */
    if (resid) {
        buf[1] = 0;
        ATA_IDX_OUTW_STRM(ch, ATA_DATA, *(uint16_t *)&buf);
    }

    if (length > done) {
        device_printf(request->parent,
            "WARNING - %s write data underrun %d > %d\n",
            ata_cmd2str(request), length, done);
        for (i = done + resid; i < length; i += 2)
            ATA_IDX_OUTW(ch, ATA_DATA, 0);
    }
}