/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1998 - 2008 Søren Schmidt <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ata.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/endian.h>
#include <sys/ctype.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/sema.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <dev/ata/ata-all.h>
#include <dev/pci/pcivar.h>
#include <ata_if.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>

/* prototypes */
static void ataaction(struct cam_sim *sim, union ccb *ccb);
static void atapoll(struct cam_sim *sim);
static void ata_cam_begin_transaction(device_t dev, union ccb *ccb);
static void ata_cam_end_transaction(device_t dev, struct ata_request *request);
static void ata_cam_request_sense(device_t dev, struct ata_request *request);
static int ata_check_ids(device_t dev, union ccb *ccb);
static void ata_conn_event(void *context, int dummy);
static void ata_interrupt_locked(void *data);
static int ata_module_event_handler(module_t mod, int what, void *arg);
static void ata_periodic_poll(void *data);
static int ata_str2mode(const char *str);

/* global vars */
MALLOC_DEFINE(M_ATA, "ata_generic", "ATA driver generic layer");
int (*ata_raid_ioctl_func)(u_long cmd, caddr_t data) = NULL;
devclass_t ata_devclass;
int ata_dma_check_80pin = 1;

/* sysctl vars */
static SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "ATA driver parameters");
SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma_check_80pin,
    CTLFLAG_RWTUN, &ata_dma_check_80pin, 0,
    "Check for 80pin cable before setting ATA DMA mode");
FEATURE(ata_cam, "ATA devices are accessed through the cam(4) driver");

/*
 * newbus device interface related functions
 */
int
ata_probe(device_t dev)
{
        return (BUS_PROBE_LOW_PRIORITY);
}

int
ata_attach(device_t dev)
{
        struct ata_channel *ch = device_get_softc(dev);
        int error, rid;
        struct cam_devq *devq;
        const char *res;
        char buf[64];
        int i, mode;

        /* check that we have a virgin channel to attach */
        if (ch->r_irq)
                return EEXIST;

        /* initialize the softc basics */
        ch->dev = dev;
        ch->state = ATA_IDLE;
        bzero(&ch->state_mtx, sizeof(struct mtx));
        mtx_init(&ch->state_mtx, "ATA state lock", NULL, MTX_DEF);
        TASK_INIT(&ch->conntask, 0, ata_conn_event, dev);
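        /*
         * Seed per-device transfer settings from loader hints: devN.sata_rev
         * and devN.mode for each possible target, falling back to the
         * channel-wide sata_rev and mode hints.
         */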
        for (i = 0; i < 16; i++) {
                ch->user[i].revision = 0;
                snprintf(buf, sizeof(buf), "dev%d.sata_rev", i);
                if (resource_int_value(device_get_name(dev),
                    device_get_unit(dev), buf, &mode) != 0 &&
                    resource_int_value(device_get_name(dev),
                    device_get_unit(dev), "sata_rev", &mode) != 0)
                        mode = -1;
                if (mode >= 0)
                        ch->user[i].revision = mode;
                ch->user[i].mode = 0;
                snprintf(buf, sizeof(buf), "dev%d.mode", i);
                if (resource_string_value(device_get_name(dev),
                    device_get_unit(dev), buf, &res) == 0)
                        mode = ata_str2mode(res);
                else if (resource_string_value(device_get_name(dev),
                    device_get_unit(dev), "mode", &res) == 0)
                        mode = ata_str2mode(res);
                else
                        mode = -1;
                if (mode >= 0)
                        ch->user[i].mode = mode;
                if (ch->flags & ATA_SATA)
                        ch->user[i].bytecount = 8192;
                else
                        ch->user[i].bytecount = 65536;
                ch->user[i].caps = 0;
                ch->curr[i] = ch->user[i];
                if (ch->flags & ATA_SATA) {
                        if (ch->pm_level > 0)
                                ch->user[i].caps |= CTS_SATA_CAPS_H_PMREQ;
                        if (ch->pm_level > 1)
                                ch->user[i].caps |= CTS_SATA_CAPS_D_PMREQ;
                } else {
                        if (!(ch->flags & ATA_NO_48BIT_DMA))
                                ch->user[i].caps |= CTS_ATA_CAPS_H_DMA48;
                }
        }
        callout_init(&ch->poll_callout, 1);

        /* allocate DMA resources if DMA HW present */
        if (ch->dma.alloc)
                ch->dma.alloc(dev);

        /* setup interrupt delivery */
        rid = ATA_IRQ_RID;
        ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            RF_SHAREABLE | RF_ACTIVE);
        if (!ch->r_irq) {
                device_printf(dev, "unable to allocate interrupt\n");
                return ENXIO;
        }
        if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL,
            ata_interrupt, ch, &ch->ih))) {
                bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq);
                device_printf(dev, "unable to setup interrupt\n");
                return error;
        }

        if (ch->flags & ATA_PERIODIC_POLL)
                callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
        mtx_lock(&ch->state_mtx);
        /* Create the device queue for our SIM. */
        devq = cam_simq_alloc(1);
        if (devq == NULL) {
                device_printf(dev, "Unable to allocate simq\n");
                error = ENOMEM;
                goto err1;
        }
        /* Construct SIM entry */
        ch->sim = cam_sim_alloc(ataaction, atapoll, "ata", ch,
            device_get_unit(dev), &ch->state_mtx, 1, 0, devq);
        if (ch->sim == NULL) {
                device_printf(dev, "unable to allocate sim\n");
                cam_simq_free(devq);
                error = ENOMEM;
                goto err1;
        }
        if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) {
                device_printf(dev, "unable to register xpt bus\n");
                error = ENXIO;
                goto err2;
        }
        if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim),
            CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
                device_printf(dev, "unable to create path\n");
                error = ENXIO;
                goto err3;
        }
        mtx_unlock(&ch->state_mtx);
        return (0);

err3:
        xpt_bus_deregister(cam_sim_path(ch->sim));
err2:
        cam_sim_free(ch->sim, /*free_devq*/TRUE);
        ch->sim = NULL;
err1:
        bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq);
        mtx_unlock(&ch->state_mtx);
        if (ch->flags & ATA_PERIODIC_POLL)
                callout_drain(&ch->poll_callout);
        return (error);
}

int
ata_detach(device_t dev)
{
        struct ata_channel *ch = device_get_softc(dev);

        /* check that we have a valid channel to detach */
        if (!ch->r_irq)
                return ENXIO;

        /* grab the channel lock so no new requests get launched */
        mtx_lock(&ch->state_mtx);
        ch->state |= ATA_STALL_QUEUE;
        mtx_unlock(&ch->state_mtx);
        if (ch->flags & ATA_PERIODIC_POLL)
                callout_drain(&ch->poll_callout);

        taskqueue_drain(taskqueue_thread, &ch->conntask);

        mtx_lock(&ch->state_mtx);
        xpt_async(AC_LOST_DEVICE, ch->path, NULL);
        xpt_free_path(ch->path);
        xpt_bus_deregister(cam_sim_path(ch->sim));
        cam_sim_free(ch->sim, /*free_devq*/TRUE);
        ch->sim = NULL;
        mtx_unlock(&ch->state_mtx);

        /* release resources */
        bus_teardown_intr(dev, ch->r_irq, ch->ih);
        bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
        ch->r_irq = NULL;

        /* free DMA resources if DMA HW present */
        if (ch->dma.free)
                ch->dma.free(dev);

        mtx_destroy(&ch->state_mtx);
        return 0;
}

static void
ata_conn_event(void *context, int dummy)
{
        device_t dev = (device_t)context;
        struct ata_channel *ch = device_get_softc(dev);
        union ccb *ccb;

        mtx_lock(&ch->state_mtx);
        if (ch->sim == NULL) {
                mtx_unlock(&ch->state_mtx);
                return;
        }
        ata_reinit(dev);
        if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
                return;
        if (xpt_create_path(&ccb->ccb_h.path, NULL,
            cam_sim_path(ch->sim),
            CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
                xpt_free_ccb(ccb);
                return;
        }
        xpt_rescan(ccb);
        mtx_unlock(&ch->state_mtx);
}

int
ata_reinit(device_t dev)
{
        struct ata_channel *ch = device_get_softc(dev);
        struct ata_request *request;

        xpt_freeze_simq(ch->sim, 1);
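        /*
         * A request caught in flight is completed with ERESTART so that
         * ata_cam_end_transaction() marks its CCB CAM_REQUEUE_REQ and CAM
         * resubmits it after the reset.
         */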
        if ((request = ch->running)) {
                ch->running = NULL;
                if (ch->state == ATA_ACTIVE)
                        ch->state = ATA_IDLE;
                callout_stop(&request->callout);
                if (ch->dma.unload)
                        ch->dma.unload(request);
                request->result = ERESTART;
                ata_cam_end_transaction(dev, request);
        }
        /* reset the controller HW, the channel and device(s) */
        ATA_RESET(dev);
        /* Tell the XPT about the event */
        xpt_async(AC_BUS_RESET, ch->path, NULL);
        xpt_release_simq(ch->sim, TRUE);
        return(0);
}

int
ata_suspend(device_t dev)
{
        struct ata_channel *ch;

        /* check for valid device */
        if (!dev || !(ch = device_get_softc(dev)))
                return ENXIO;

        if (ch->flags & ATA_PERIODIC_POLL)
                callout_drain(&ch->poll_callout);
        mtx_lock(&ch->state_mtx);
        xpt_freeze_simq(ch->sim, 1);
        while (ch->state != ATA_IDLE)
                msleep(ch, &ch->state_mtx, PRIBIO, "atasusp", hz/100);
        mtx_unlock(&ch->state_mtx);
        return(0);
}

int
ata_resume(device_t dev)
{
        struct ata_channel *ch;
        int error;

        /* check for valid device */
        if (!dev || !(ch = device_get_softc(dev)))
                return ENXIO;

        mtx_lock(&ch->state_mtx);
        error = ata_reinit(dev);
        xpt_release_simq(ch->sim, TRUE);
        mtx_unlock(&ch->state_mtx);
        if (ch->flags & ATA_PERIODIC_POLL)
                callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
        return error;
}

void
ata_interrupt(void *data)
{
        struct ata_channel *ch = (struct ata_channel *)data;

        mtx_lock(&ch->state_mtx);
        ata_interrupt_locked(data);
        mtx_unlock(&ch->state_mtx);
}

static void
ata_interrupt_locked(void *data)
{
        struct ata_channel *ch = (struct ata_channel *)data;
        struct ata_request *request;

        /* ignore interrupt if it's not for us */
        if (ch->hw.status && !ch->hw.status(ch->dev))
                return;

        /* do we have a running request */
        if (!(request = ch->running))
                return;

        ATA_DEBUG_RQ(request, "interrupt");

        /* safety check for the right state */
        if (ch->state == ATA_IDLE) {
                device_printf(request->dev, "interrupt on idle channel ignored\n");
                return;
        }

        /*
         * We have the HW locks, so end the transaction for this request
         * if it finished immediately, otherwise wait for the next interrupt.
         */
        if (ch->hw.end_transaction(request) == ATA_OP_FINISHED) {
                ch->running = NULL;
                if (ch->state == ATA_ACTIVE)
                        ch->state = ATA_IDLE;
                ata_cam_end_transaction(ch->dev, request);
                return;
        }
}

static void
ata_periodic_poll(void *data)
{
        struct ata_channel *ch = (struct ata_channel *)data;

        callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
        ata_interrupt(ch);
}

void
ata_print_cable(device_t dev, u_int8_t *who)
{
        device_printf(dev,
            "DMA limited to UDMA33, %s found non-ATA66 cable\n", who);
}

/*
 * misc support functions
 */
void
ata_default_registers(device_t dev)
{
        struct ata_channel *ch = device_get_softc(dev);

        /* fill in the defaults from what is set up already */
        ch->r_io[ATA_ERROR].res = ch->r_io[ATA_FEATURE].res;
        ch->r_io[ATA_ERROR].offset = ch->r_io[ATA_FEATURE].offset;
        ch->r_io[ATA_IREASON].res = ch->r_io[ATA_COUNT].res;
        ch->r_io[ATA_IREASON].offset = ch->r_io[ATA_COUNT].offset;
        ch->r_io[ATA_STATUS].res = ch->r_io[ATA_COMMAND].res;
        ch->r_io[ATA_STATUS].offset = ch->r_io[ATA_COMMAND].offset;
        ch->r_io[ATA_ALTSTAT].res = ch->r_io[ATA_CONTROL].res;
        ch->r_io[ATA_ALTSTAT].offset = ch->r_io[ATA_CONTROL].offset;
}

void
ata_udelay(int interval)
{
        /* for now just use DELAY, the timer/sleep subsystems are not there yet */
        if (1 || interval < (1000000/hz) || ata_delayed_attach)
                DELAY(interval);
        else
                pause("ataslp", interval/(1000000/hz));
}

const char *
ata_cmd2str(struct ata_request *request)
{
        static char buffer[20];

        if (request->flags & ATA_R_ATAPI) {
                switch (request->u.atapi.sense.key ?
                    request->u.atapi.saved_cmd : request->u.atapi.ccb[0]) {
                case 0x00: return ("TEST_UNIT_READY");
                case 0x01: return ("REZERO");
                case 0x03: return ("REQUEST_SENSE");
                case 0x04: return ("FORMAT");
                case 0x08: return ("READ");
                case 0x0a: return ("WRITE");
                case 0x10: return ("WEOF");
                case 0x11: return ("SPACE");
                case 0x12: return ("INQUIRY");
                case 0x15: return ("MODE_SELECT");
                case 0x19: return ("ERASE");
                case 0x1a: return ("MODE_SENSE");
                case 0x1b: return ("START_STOP");
                case 0x1e: return ("PREVENT_ALLOW");
                case 0x23: return ("ATAPI_READ_FORMAT_CAPACITIES");
                case 0x25: return ("READ_CAPACITY");
                case 0x28: return ("READ_BIG");
                case 0x2a: return ("WRITE_BIG");
                case 0x2b: return ("LOCATE");
                case 0x34: return ("READ_POSITION");
                case 0x35: return ("SYNCHRONIZE_CACHE");
                case 0x3b: return ("WRITE_BUFFER");
                case 0x3c: return ("READ_BUFFER");
                case 0x42: return ("READ_SUBCHANNEL");
                case 0x43: return ("READ_TOC");
                case 0x45: return ("PLAY_10");
                case 0x47: return ("PLAY_MSF");
                case 0x48: return ("PLAY_TRACK");
                case 0x4b: return ("PAUSE");
                case 0x51: return ("READ_DISK_INFO");
                case 0x52: return ("READ_TRACK_INFO");
                case 0x53: return ("RESERVE_TRACK");
                case 0x54: return ("SEND_OPC_INFO");
                case 0x55: return ("MODE_SELECT_BIG");
                case 0x58: return ("REPAIR_TRACK");
                case 0x59: return ("READ_MASTER_CUE");
                case 0x5a: return ("MODE_SENSE_BIG");
                case 0x5b: return ("CLOSE_TRACK/SESSION");
                case 0x5c: return ("READ_BUFFER_CAPACITY");
                case 0x5d: return ("SEND_CUE_SHEET");
                case 0x96: return ("SERVICE_ACTION_IN");
                case 0xa1: return ("BLANK_CMD");
                case 0xa3: return ("SEND_KEY");
                case 0xa4: return ("REPORT_KEY");
                case 0xa5: return ("PLAY_12");
                case 0xa6: return ("LOAD_UNLOAD");
                case 0xad: return ("READ_DVD_STRUCTURE");
                case 0xb4: return ("PLAY_CD");
                case 0xbb: return ("SET_SPEED");
                case 0xbd: return ("MECH_STATUS");
                case 0xbe: return ("READ_CD");
                case 0xff: return ("POLL_DSC");
                }
        } else {
                switch (request->u.ata.command) {
                case 0x00:
                        switch (request->u.ata.feature) {
                        case 0x00: return ("NOP FLUSHQUEUE");
                        case 0x01: return ("NOP AUTOPOLL");
                        }
                        return ("NOP");
                case 0x03: return ("CFA_REQUEST_EXTENDED_ERROR");
                case 0x06:
                        switch (request->u.ata.feature) {
                        case 0x01: return ("DSM TRIM");
                        }
                        return "DSM";
                case 0x08: return ("DEVICE_RESET");
                case 0x20: return ("READ");
                case 0x24: return ("READ48");
                case 0x25: return ("READ_DMA48");
                case 0x26: return ("READ_DMA_QUEUED48");
                case 0x27: return ("READ_NATIVE_MAX_ADDRESS48");
                case 0x29: return ("READ_MUL48");
                case 0x2a: return ("READ_STREAM_DMA48");
                case 0x2b: return ("READ_STREAM48");
                case 0x2f: return ("READ_LOG_EXT");
                case 0x30: return ("WRITE");
                case 0x34: return ("WRITE48");
                case 0x35: return ("WRITE_DMA48");
                case 0x36: return ("WRITE_DMA_QUEUED48");
                case 0x37: return ("SET_MAX_ADDRESS48");
                case 0x39: return ("WRITE_MUL48");
                case 0x3a: return ("WRITE_STREAM_DMA48");
                case 0x3b: return ("WRITE_STREAM48");
                case 0x3d: return ("WRITE_DMA_FUA48");
                case 0x3e: return ("WRITE_DMA_QUEUED_FUA48");
                case 0x3f: return ("WRITE_LOG_EXT");
                case 0x40: return ("READ_VERIFY");
                case 0x42: return ("READ_VERIFY48");
                case 0x45:
                        switch (request->u.ata.feature) {
                        case 0x55: return ("WRITE_UNCORRECTABLE48 PSEUDO");
                        case 0xaa: return ("WRITE_UNCORRECTABLE48 FLAGGED");
                        }
                        return "WRITE_UNCORRECTABLE48";
                case 0x51: return ("CONFIGURE_STREAM");
                case 0x60: return ("READ_FPDMA_QUEUED");
                case 0x61: return ("WRITE_FPDMA_QUEUED");
                case 0x63: return ("NCQ_NON_DATA");
                case 0x64: return ("SEND_FPDMA_QUEUED");
                case 0x65: return ("RECEIVE_FPDMA_QUEUED");
                case 0x67:
                        if (request->u.ata.feature == 0xec)
                                return ("SEP_ATTN IDENTIFY");
                        switch (request->u.ata.lba) {
                        case 0x00: return ("SEP_ATTN READ BUFFER");
                        case 0x02: return ("SEP_ATTN RECEIVE DIAGNOSTIC RESULTS");
                        case 0x80: return ("SEP_ATTN WRITE BUFFER");
                        case 0x82: return ("SEP_ATTN SEND DIAGNOSTIC");
                        }
                        return ("SEP_ATTN");
                case 0x70: return ("SEEK");
                case 0x87: return ("CFA_TRANSLATE_SECTOR");
                case 0x90: return ("EXECUTE_DEVICE_DIAGNOSTIC");
                case 0x92: return ("DOWNLOAD_MICROCODE");
                case 0xa0: return ("PACKET");
                case 0xa1: return ("ATAPI_IDENTIFY");
                case 0xa2: return ("SERVICE");
                case 0xb0:
                        switch(request->u.ata.feature) {
                        case 0xd0: return ("SMART READ ATTR VALUES");
                        case 0xd1: return ("SMART READ ATTR THRESHOLDS");
                        case 0xd3: return ("SMART SAVE ATTR VALUES");
                        case 0xd4: return ("SMART EXECUTE OFFLINE IMMEDIATE");
                        case 0xd5: return ("SMART READ LOG DATA");
                        case 0xd8: return ("SMART ENABLE OPERATION");
                        case 0xd9: return ("SMART DISABLE OPERATION");
                        case 0xda: return ("SMART RETURN STATUS");
                        }
                        return ("SMART");
                case 0xb1: return ("DEVICE CONFIGURATION");
                case 0xc0: return ("CFA_ERASE");
                case 0xc4: return ("READ_MUL");
                case 0xc5: return ("WRITE_MUL");
                case 0xc6: return ("SET_MULTI");
                case 0xc7: return ("READ_DMA_QUEUED");
                case 0xc8: return ("READ_DMA");
                case 0xca: return ("WRITE_DMA");
                case 0xcc: return ("WRITE_DMA_QUEUED");
                case 0xcd: return ("CFA_WRITE_MULTIPLE_WITHOUT_ERASE");
                case 0xce: return ("WRITE_MUL_FUA48");
                case 0xd1: return ("CHECK_MEDIA_CARD_TYPE");
                case 0xda: return ("GET_MEDIA_STATUS");
                case 0xde: return ("MEDIA_LOCK");
                case 0xdf: return ("MEDIA_UNLOCK");
                case 0xe0: return ("STANDBY_IMMEDIATE");
                case 0xe1: return ("IDLE_IMMEDIATE");
                case 0xe2: return ("STANDBY");
                case 0xe3: return ("IDLE");
                case 0xe4: return ("READ_BUFFER/PM");
                case 0xe5: return ("CHECK_POWER_MODE");
                case 0xe6: return ("SLEEP");
                case 0xe7: return ("FLUSHCACHE");
                case 0xe8: return ("WRITE_PM");
                case 0xea: return ("FLUSHCACHE48");
                case 0xec: return ("ATA_IDENTIFY");
                case 0xed: return ("MEDIA_EJECT");
                case 0xef:
                        switch (request->u.ata.feature) {
                        case 0x03: return ("SETFEATURES SET TRANSFER MODE");
                        case 0x02: return ("SETFEATURES ENABLE WCACHE");
                        case 0x82: return ("SETFEATURES DISABLE WCACHE");
                        case 0x06: return ("SETFEATURES ENABLE PUIS");
                        case 0x86: return ("SETFEATURES DISABLE PUIS");
                        case 0x07: return ("SETFEATURES SPIN-UP");
                        case 0x10: return ("SETFEATURES ENABLE SATA FEATURE");
                        case 0x90: return ("SETFEATURES DISABLE SATA FEATURE");
                        case 0xaa: return ("SETFEATURES ENABLE RCACHE");
                        case 0x55: return ("SETFEATURES DISABLE RCACHE");
                        case 0x5d: return ("SETFEATURES ENABLE RELIRQ");
                        case 0xdd: return ("SETFEATURES DISABLE RELIRQ");
                        case 0x5e: return ("SETFEATURES ENABLE SRVIRQ");
                        case 0xde: return ("SETFEATURES DISABLE SRVIRQ");
                        }
                        return "SETFEATURES";
                case 0xf1: return ("SECURITY_SET_PASSWORD");
                case 0xf2: return ("SECURITY_UNLOCK");
                case 0xf3: return ("SECURITY_ERASE_PREPARE");
                case 0xf4: return ("SECURITY_ERASE_UNIT");
                case 0xf5: return ("SECURITY_FREEZE_LOCK");
                case 0xf6: return ("SECURITY_DISABLE_PASSWORD");
                case 0xf8: return ("READ_NATIVE_MAX_ADDRESS");
                case 0xf9: return ("SET_MAX_ADDRESS");
                }
        }
        sprintf(buffer, "unknown CMD (0x%02x)", request->u.ata.command);
        return (buffer);
}

const char *
ata_mode2str(int mode)
{
        switch (mode) {
        case -1: return "UNSUPPORTED";
        case ATA_PIO0: return "PIO0";
        case ATA_PIO1: return "PIO1";
        case ATA_PIO2: return "PIO2";
        case ATA_PIO3: return "PIO3";
        case ATA_PIO4: return "PIO4";
        case ATA_WDMA0: return "WDMA0";
        case ATA_WDMA1: return "WDMA1";
        case ATA_WDMA2: return "WDMA2";
        case ATA_UDMA0: return "UDMA16";
        case ATA_UDMA1: return "UDMA25";
        case ATA_UDMA2: return "UDMA33";
        case ATA_UDMA3: return "UDMA40";
        case ATA_UDMA4: return "UDMA66";
        case ATA_UDMA5: return "UDMA100";
        case ATA_UDMA6: return "UDMA133";
        case ATA_SA150: return "SATA150";
        case ATA_SA300: return "SATA300";
        case ATA_SA600: return "SATA600";
        default:
                if (mode & ATA_DMA_MASK)
                        return "BIOSDMA";
                else
                        return "BIOSPIO";
        }
}

static int
ata_str2mode(const char *str)
{

        if (!strcasecmp(str, "PIO0")) return (ATA_PIO0);
        if (!strcasecmp(str, "PIO1")) return (ATA_PIO1);
        if (!strcasecmp(str, "PIO2")) return (ATA_PIO2);
        if (!strcasecmp(str, "PIO3")) return (ATA_PIO3);
        if (!strcasecmp(str, "PIO4")) return (ATA_PIO4);
        if (!strcasecmp(str, "WDMA0")) return (ATA_WDMA0);
        if (!strcasecmp(str, "WDMA1")) return (ATA_WDMA1);
        if (!strcasecmp(str, "WDMA2")) return (ATA_WDMA2);
        if (!strcasecmp(str, "UDMA0")) return (ATA_UDMA0);
        if (!strcasecmp(str, "UDMA16")) return (ATA_UDMA0);
        if (!strcasecmp(str, "UDMA1")) return (ATA_UDMA1);
        if (!strcasecmp(str, "UDMA25")) return (ATA_UDMA1);
        if (!strcasecmp(str, "UDMA2")) return (ATA_UDMA2);
        if (!strcasecmp(str, "UDMA33")) return (ATA_UDMA2);
        if (!strcasecmp(str, "UDMA3")) return (ATA_UDMA3);
        if (!strcasecmp(str, "UDMA44")) return (ATA_UDMA3);
        if (!strcasecmp(str, "UDMA4")) return (ATA_UDMA4);
        if (!strcasecmp(str, "UDMA66")) return (ATA_UDMA4);
        if (!strcasecmp(str, "UDMA5")) return (ATA_UDMA5);
        if (!strcasecmp(str, "UDMA100")) return (ATA_UDMA5);
        if (!strcasecmp(str, "UDMA6")) return (ATA_UDMA6);
        if (!strcasecmp(str, "UDMA133")) return (ATA_UDMA6);
        return (-1);
}

int
ata_atapi(device_t dev, int target)
{
        struct ata_channel *ch = device_get_softc(dev);

        return (ch->devices & (ATA_ATAPI_MASTER << target));
}

void
ata_timeout(void *arg)
{
        struct ata_request *request;
        struct ata_channel *ch;

        request = arg;
        ch = device_get_softc(request->parent);
        //request->flags |= ATA_R_DEBUG;
        ATA_DEBUG_RQ(request, "timeout");

        /*
         * If we have an ATA_ACTIVE request running, we flag the request
         * ATA_R_TIMEOUT so ata_cam_end_transaction() will handle it correctly.
         * Also, NULL out the running request so we won't lose the race with
         * an eventual interrupt arriving late.
         */
        if (ch->state == ATA_ACTIVE) {
                request->flags |= ATA_R_TIMEOUT;
                if (ch->dma.unload)
                        ch->dma.unload(request);
                ch->running = NULL;
                ch->state = ATA_IDLE;
                ata_cam_end_transaction(ch->dev, request);
        }
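        /*
         * The request callout was created with CALLOUT_RETURNUNLOCKED, so
         * this handler is responsible for dropping the channel state mutex.
         */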
        mtx_unlock(&ch->state_mtx);
}

static void
ata_cam_begin_transaction(device_t dev, union ccb *ccb)
{
        struct ata_channel *ch = device_get_softc(dev);
        struct ata_request *request;

        request = &ch->request;
        bzero(request, sizeof(*request));

        /* setup request */
        request->dev = NULL;
        request->parent = dev;
        request->unit = ccb->ccb_h.target_id;
        if (ccb->ccb_h.func_code == XPT_ATA_IO) {
                request->data = ccb->ataio.data_ptr;
                request->bytecount = ccb->ataio.dxfer_len;
                request->u.ata.command = ccb->ataio.cmd.command;
                request->u.ata.feature = ((uint16_t)ccb->ataio.cmd.features_exp << 8) |
                    (uint16_t)ccb->ataio.cmd.features;
                request->u.ata.count = ((uint16_t)ccb->ataio.cmd.sector_count_exp << 8) |
                    (uint16_t)ccb->ataio.cmd.sector_count;
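                /*
                 * Assemble the LBA: for 48-bit commands the *_exp registers
                 * supply LBA bits 24-47, otherwise bits 24-27 come from the
                 * low nibble of the device register.
                 */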
                if (ccb->ataio.cmd.flags & CAM_ATAIO_48BIT) {
                        request->flags |= ATA_R_48BIT;
                        request->u.ata.lba =
                            ((uint64_t)ccb->ataio.cmd.lba_high_exp << 40) |
                            ((uint64_t)ccb->ataio.cmd.lba_mid_exp << 32) |
                            ((uint64_t)ccb->ataio.cmd.lba_low_exp << 24);
                } else {
                        request->u.ata.lba =
                            ((uint64_t)(ccb->ataio.cmd.device & 0x0f) << 24);
                }
                request->u.ata.lba |= ((uint64_t)ccb->ataio.cmd.lba_high << 16) |
                    ((uint64_t)ccb->ataio.cmd.lba_mid << 8) |
                    (uint64_t)ccb->ataio.cmd.lba_low;
                if (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT)
                        request->flags |= ATA_R_NEEDRESULT;
                if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
                    ccb->ataio.cmd.flags & CAM_ATAIO_DMA)
                        request->flags |= ATA_R_DMA;
                if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
                        request->flags |= ATA_R_READ;
                if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
                        request->flags |= ATA_R_WRITE;
                if (ccb->ataio.cmd.command == ATA_READ_MUL ||
                    ccb->ataio.cmd.command == ATA_READ_MUL48 ||
                    ccb->ataio.cmd.command == ATA_WRITE_MUL ||
                    ccb->ataio.cmd.command == ATA_WRITE_MUL48) {
                        request->transfersize = min(request->bytecount,
                            ch->curr[ccb->ccb_h.target_id].bytecount);
                } else
                        request->transfersize = min(request->bytecount, 512);
        } else {
                request->data = ccb->csio.data_ptr;
                request->bytecount = ccb->csio.dxfer_len;
                bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
                    ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes,
                    request->u.atapi.ccb, ccb->csio.cdb_len);
                request->flags |= ATA_R_ATAPI;
                if (ch->curr[ccb->ccb_h.target_id].atapi == 16)
                        request->flags |= ATA_R_ATAPI16;
                if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
                    ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
                        request->flags |= ATA_R_DMA;
                if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
                        request->flags |= ATA_R_READ;
                if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
                        request->flags |= ATA_R_WRITE;
                request->transfersize = min(request->bytecount,
                    ch->curr[ccb->ccb_h.target_id].bytecount);
        }
        request->retries = 0;
        request->timeout = (ccb->ccb_h.timeout + 999) / 1000;
        callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED);
        request->ccb = ccb;
        request->flags |= ATA_R_DATA_IN_CCB;

        ch->running = request;
        ch->state = ATA_ACTIVE;
        if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
                ch->running = NULL;
                ch->state = ATA_IDLE;
                ata_cam_end_transaction(dev, request);
                return;
        }
}

static void
ata_cam_request_sense(device_t dev, struct ata_request *request)
{
        struct ata_channel *ch = device_get_softc(dev);
        union ccb *ccb = request->ccb;

        ch->requestsense = 1;

        bzero(request, sizeof(*request));
        request->dev = NULL;
        request->parent = dev;
        request->unit = ccb->ccb_h.target_id;
        request->data = (void *)&ccb->csio.sense_data;
        request->bytecount = ccb->csio.sense_len;
        request->u.atapi.ccb[0] = ATAPI_REQUEST_SENSE;
        request->u.atapi.ccb[4] = ccb->csio.sense_len;
        request->flags |= ATA_R_ATAPI;
        if (ch->curr[ccb->ccb_h.target_id].atapi == 16)
                request->flags |= ATA_R_ATAPI16;
        if (ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
                request->flags |= ATA_R_DMA;
        request->flags |= ATA_R_READ;
        request->transfersize = min(request->bytecount,
            ch->curr[ccb->ccb_h.target_id].bytecount);
        request->retries = 0;
        request->timeout = (ccb->ccb_h.timeout + 999) / 1000;
        callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED);
        request->ccb = ccb;

        ch->running = request;
        ch->state = ATA_ACTIVE;
        if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
                ch->running = NULL;
                ch->state = ATA_IDLE;
                ata_cam_end_transaction(dev, request);
                return;
        }
}

static void
ata_cam_process_sense(device_t dev, struct ata_request *request)
{
        struct ata_channel *ch = device_get_softc(dev);
        union ccb *ccb = request->ccb;
        int fatalerr = 0;

        ch->requestsense = 0;

        if (request->flags & ATA_R_TIMEOUT)
                fatalerr = 1;
        if ((request->flags & ATA_R_TIMEOUT) == 0 &&
            (request->status & ATA_S_ERROR) == 0 &&
            request->result == 0) {
                ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
        } else {
                ccb->ccb_h.status &= ~CAM_STATUS_MASK;
                ccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
        }

        xpt_done(ccb);
        /* Do error recovery if needed. */
        if (fatalerr)
                ata_reinit(dev);
}

static void
ata_cam_end_transaction(device_t dev, struct ata_request *request)
{
        struct ata_channel *ch = device_get_softc(dev);
        union ccb *ccb = request->ccb;
        int fatalerr = 0;

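        /*
         * If this completion belongs to the REQUEST SENSE issued by
         * ata_cam_request_sense(), finish the original CCB through the
         * sense post-processing path instead of the normal one.
         */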
        if (ch->requestsense) {
                ata_cam_process_sense(dev, request);
                return;
        }

        ccb->ccb_h.status &= ~CAM_STATUS_MASK;
        if (request->flags & ATA_R_TIMEOUT) {
                xpt_freeze_simq(ch->sim, 1);
                ccb->ccb_h.status &= ~CAM_STATUS_MASK;
                ccb->ccb_h.status |= CAM_CMD_TIMEOUT | CAM_RELEASE_SIMQ;
                fatalerr = 1;
        } else if (request->status & ATA_S_ERROR) {
                if (ccb->ccb_h.func_code == XPT_ATA_IO) {
                        ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR;
                } else {
                        ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
                        ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
                }
        } else if (request->result == ERESTART)
                ccb->ccb_h.status |= CAM_REQUEUE_REQ;
        else if (request->result != 0)
                ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
        else
                ccb->ccb_h.status |= CAM_REQ_CMP;
        if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP &&
            !(ccb->ccb_h.status & CAM_DEV_QFRZN)) {
                xpt_freeze_devq(ccb->ccb_h.path, 1);
                ccb->ccb_h.status |= CAM_DEV_QFRZN;
        }
        if (ccb->ccb_h.func_code == XPT_ATA_IO &&
            ((request->status & ATA_S_ERROR) ||
            (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT))) {
                struct ata_res *res = &ccb->ataio.res;
                res->status = request->status;
                res->error = request->error;
                res->lba_low = request->u.ata.lba;
                res->lba_mid = request->u.ata.lba >> 8;
                res->lba_high = request->u.ata.lba >> 16;
                res->device = request->u.ata.lba >> 24;
                res->lba_low_exp = request->u.ata.lba >> 24;
                res->lba_mid_exp = request->u.ata.lba >> 32;
                res->lba_high_exp = request->u.ata.lba >> 40;
                res->sector_count = request->u.ata.count;
                res->sector_count_exp = request->u.ata.count >> 8;
        }
        if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
                if (ccb->ccb_h.func_code == XPT_ATA_IO) {
                        ccb->ataio.resid =
                            ccb->ataio.dxfer_len - request->donecount;
                } else {
                        ccb->csio.resid =
                            ccb->csio.dxfer_len - request->donecount;
                }
        }
        if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR &&
            (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)
                ata_cam_request_sense(dev, request);
        else
                xpt_done(ccb);
        /* Do error recovery if needed. */
        if (fatalerr)
                ata_reinit(dev);
}

static int
ata_check_ids(device_t dev, union ccb *ccb)
{
        struct ata_channel *ch = device_get_softc(dev);

        if (ccb->ccb_h.target_id > ((ch->flags & ATA_NO_SLAVE) ? 0 : 1)) {
                ccb->ccb_h.status = CAM_TID_INVALID;
                xpt_done(ccb);
                return (-1);
        }
        if (ccb->ccb_h.target_lun != 0) {
                ccb->ccb_h.status = CAM_LUN_INVALID;
                xpt_done(ccb);
                return (-1);
        }
        /*
         * It's a programming error to see AUXILIARY register requests.
         */
        KASSERT(ccb->ccb_h.func_code != XPT_ATA_IO ||
            ((ccb->ataio.ata_flags & ATA_FLAG_AUX) == 0),
            ("AUX register unsupported"));
        return (0);
}

static void
ataaction(struct cam_sim *sim, union ccb *ccb)
{
        device_t dev, parent;
        struct ata_channel *ch;

        CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ataaction func_code=%x\n",
            ccb->ccb_h.func_code));

        ch = (struct ata_channel *)cam_sim_softc(sim);
        dev = ch->dev;
        switch (ccb->ccb_h.func_code) {
        /* Common cases first */
        case XPT_ATA_IO:        /* Execute the requested I/O operation */
        case XPT_SCSI_IO:
                if (ata_check_ids(dev, ccb))
                        return;
                if ((ch->devices & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER)
                    << ccb->ccb_h.target_id)) == 0) {
                        ccb->ccb_h.status = CAM_SEL_TIMEOUT;
                        break;
                }
                if (ch->running)
                        device_printf(dev, "already running!\n");
                if (ccb->ccb_h.func_code == XPT_ATA_IO &&
                    (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
                    (ccb->ataio.cmd.control & ATA_A_RESET)) {
                        struct ata_res *res = &ccb->ataio.res;

                        bzero(res, sizeof(*res));
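                        /*
                         * Report the device signature in the result
                         * registers so the XPT probe code can distinguish
                         * ATA (0x00/0x00) from ATAPI (0x14/0xEB) devices.
                         */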
                        if (ch->devices & (ATA_ATA_MASTER << ccb->ccb_h.target_id)) {
                                res->lba_high = 0;
                                res->lba_mid = 0;
                        } else {
                                res->lba_high = 0xeb;
                                res->lba_mid = 0x14;
                        }
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        break;
                }
                ata_cam_begin_transaction(dev, ccb);
                return;
        case XPT_ABORT:         /* Abort the specified CCB */
                /* XXX Implement */
                ccb->ccb_h.status = CAM_REQ_INVALID;
                break;
        case XPT_SET_TRAN_SETTINGS:
        {
                struct ccb_trans_settings *cts = &ccb->cts;
                struct ata_cam_device *d;

                if (ata_check_ids(dev, ccb))
                        return;
                if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
                        d = &ch->curr[ccb->ccb_h.target_id];
                else
                        d = &ch->user[ccb->ccb_h.target_id];
                if (ch->flags & ATA_SATA) {
                        if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION)
                                d->revision = cts->xport_specific.sata.revision;
                        if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) {
                                if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
                                        d->mode = ATA_SETMODE(ch->dev,
                                            ccb->ccb_h.target_id,
                                            cts->xport_specific.sata.mode);
                                } else
                                        d->mode = cts->xport_specific.sata.mode;
                        }
                        if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT)
                                d->bytecount = min(8192, cts->xport_specific.sata.bytecount);
                        if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI)
                                d->atapi = cts->xport_specific.sata.atapi;
                        if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS)
                                d->caps = cts->xport_specific.sata.caps;
                } else {
                        if (cts->xport_specific.ata.valid & CTS_ATA_VALID_MODE) {
                                if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
                                        d->mode = ATA_SETMODE(ch->dev,
                                            ccb->ccb_h.target_id,
                                            cts->xport_specific.ata.mode);
                                } else
                                        d->mode = cts->xport_specific.ata.mode;
                        }
                        if (cts->xport_specific.ata.valid & CTS_ATA_VALID_BYTECOUNT)
                                d->bytecount = cts->xport_specific.ata.bytecount;
                        if (cts->xport_specific.ata.valid & CTS_ATA_VALID_ATAPI)
                                d->atapi = cts->xport_specific.ata.atapi;
                        if (cts->xport_specific.ata.valid & CTS_ATA_VALID_CAPS)
                                d->caps = cts->xport_specific.ata.caps;
                }
                ccb->ccb_h.status = CAM_REQ_CMP;
                break;
        }
        case XPT_GET_TRAN_SETTINGS:
        {
                struct ccb_trans_settings *cts = &ccb->cts;
                struct ata_cam_device *d;

                if (ata_check_ids(dev, ccb))
                        return;
                if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
                        d = &ch->curr[ccb->ccb_h.target_id];
                else
                        d = &ch->user[ccb->ccb_h.target_id];
                cts->protocol = PROTO_UNSPECIFIED;
                cts->protocol_version = PROTO_VERSION_UNSPECIFIED;
                if (ch->flags & ATA_SATA) {
                        cts->transport = XPORT_SATA;
                        cts->transport_version = XPORT_VERSION_UNSPECIFIED;
                        cts->xport_specific.sata.valid = 0;
                        cts->xport_specific.sata.mode = d->mode;
                        cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE;
                        cts->xport_specific.sata.bytecount = d->bytecount;
                        cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT;
                        if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
                                cts->xport_specific.sata.revision =
                                    ATA_GETREV(dev, ccb->ccb_h.target_id);
                                if (cts->xport_specific.sata.revision != 0xff) {
                                        cts->xport_specific.sata.valid |=
                                            CTS_SATA_VALID_REVISION;
                                }
                                cts->xport_specific.sata.caps =
                                    d->caps & CTS_SATA_CAPS_D;
                                if (ch->pm_level) {
                                        cts->xport_specific.sata.caps |=
                                            CTS_SATA_CAPS_H_PMREQ;
                                }
                                cts->xport_specific.sata.caps &=
                                    ch->user[ccb->ccb_h.target_id].caps;
                        } else {
                                cts->xport_specific.sata.revision = d->revision;
                                cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION;
                                cts->xport_specific.sata.caps = d->caps;
                        }
                        cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS;
                        cts->xport_specific.sata.atapi = d->atapi;
                        cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI;
                } else {
                        cts->transport = XPORT_ATA;
                        cts->transport_version = XPORT_VERSION_UNSPECIFIED;
                        cts->xport_specific.ata.valid = 0;
                        cts->xport_specific.ata.mode = d->mode;
                        cts->xport_specific.ata.valid |= CTS_ATA_VALID_MODE;
                        cts->xport_specific.ata.bytecount = d->bytecount;
                        cts->xport_specific.ata.valid |= CTS_ATA_VALID_BYTECOUNT;
                        if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
                                cts->xport_specific.ata.caps =
                                    d->caps & CTS_ATA_CAPS_D;
                                if (!(ch->flags & ATA_NO_48BIT_DMA))
                                        cts->xport_specific.ata.caps |=
                                            CTS_ATA_CAPS_H_DMA48;
                                cts->xport_specific.ata.caps &=
                                    ch->user[ccb->ccb_h.target_id].caps;
                        } else
                                cts->xport_specific.ata.caps = d->caps;
                        cts->xport_specific.ata.valid |= CTS_ATA_VALID_CAPS;
                        cts->xport_specific.ata.atapi = d->atapi;
                        cts->xport_specific.ata.valid |= CTS_ATA_VALID_ATAPI;
                }
                ccb->ccb_h.status = CAM_REQ_CMP;
                break;
        }
        case XPT_RESET_BUS:     /* Reset the specified SCSI bus */
        case XPT_RESET_DEV:     /* Bus Device Reset the specified SCSI device */
                ata_reinit(dev);
                ccb->ccb_h.status = CAM_REQ_CMP;
                break;
        case XPT_TERM_IO:       /* Terminate the I/O process */
                /* XXX Implement */
                ccb->ccb_h.status = CAM_REQ_INVALID;
                break;
        case XPT_PATH_INQ:      /* Path routing inquiry */
        {
                struct ccb_pathinq *cpi = &ccb->cpi;

                parent = device_get_parent(dev);
                cpi->version_num = 1; /* XXX??? */
                cpi->hba_inquiry = PI_SDTR_ABLE;
                cpi->target_sprt = 0;
                cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED;
                cpi->hba_eng_cnt = 0;
                if (ch->flags & ATA_NO_SLAVE)
                        cpi->max_target = 0;
                else
                        cpi->max_target = 1;
                cpi->max_lun = 0;
                cpi->initiator_id = 0;
                cpi->bus_id = cam_sim_bus(sim);
                if (ch->flags & ATA_SATA)
                        cpi->base_transfer_speed = 150000;
                else
                        cpi->base_transfer_speed = 3300;
                strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
                strlcpy(cpi->hba_vid, "ATA", HBA_IDLEN);
                strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
                cpi->unit_number = cam_sim_unit(sim);
                if (ch->flags & ATA_SATA)
                        cpi->transport = XPORT_SATA;
                else
                        cpi->transport = XPORT_ATA;
                cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
                cpi->protocol = PROTO_ATA;
                cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
                cpi->maxio = ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS;
                if (device_get_devclass(device_get_parent(parent)) ==
                    devclass_find("pci")) {
                        cpi->hba_vendor = pci_get_vendor(parent);
                        cpi->hba_device = pci_get_device(parent);
                        cpi->hba_subvendor = pci_get_subvendor(parent);
                        cpi->hba_subdevice = pci_get_subdevice(parent);
                }
                cpi->ccb_h.status = CAM_REQ_CMP;
                break;
        }
        default:
                ccb->ccb_h.status = CAM_REQ_INVALID;
                break;
        }
        xpt_done(ccb);
}

static void
atapoll(struct cam_sim *sim)
{
        struct ata_channel *ch = (struct ata_channel *)cam_sim_softc(sim);

        ata_interrupt_locked(ch);
}

/*
 * module handling
 */
static int
ata_module_event_handler(module_t mod, int what, void *arg)
{

        switch (what) {
        case MOD_LOAD:
                ata_devclass = devclass_find("ata");
                return 0;

        case MOD_UNLOAD:
                return 0;

        default:
                return EOPNOTSUPP;
        }
}

static moduledata_t ata_moduledata = { "ata", ata_module_event_handler, NULL };
DECLARE_MODULE(ata, ata_moduledata, SI_SUB_DRIVERS, SI_ORDER_ANY);
MODULE_VERSION(ata, 1);
MODULE_DEPEND(ata, cam, 1, 1, 1);