xref: /linux-6.15/include/linux/libata.h (revision 5e8d780d)
1 /*
2  *  Copyright 2003-2005 Red Hat, Inc.  All rights reserved.
3  *  Copyright 2003-2005 Jeff Garzik
4  *
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2, or (at your option)
9  *  any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; see the file COPYING.  If not, write to
18  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
19  *
20  *
21  *  libata documentation is available via 'make {ps|pdf}docs',
22  *  as Documentation/DocBook/libata.*
23  *
24  */
25 
26 #ifndef __LINUX_LIBATA_H__
27 #define __LINUX_LIBATA_H__
28 
29 #include <linux/delay.h>
30 #include <linux/interrupt.h>
31 #include <linux/pci.h>
32 #include <linux/dma-mapping.h>
33 #include <asm/scatterlist.h>
34 #include <asm/io.h>
35 #include <linux/ata.h>
36 #include <linux/workqueue.h>
37 #include <scsi/scsi_host.h>
38 
39 /*
40  * compile-time options: to be removed as soon as all the drivers are
41  * converted to the new debugging mechanism
42  */
43 #undef ATA_DEBUG		/* debugging output */
44 #undef ATA_VERBOSE_DEBUG	/* yet more debugging output */
45 #undef ATA_IRQ_TRAP		/* define to ack screaming irqs */
46 #undef ATA_NDEBUG		/* define to disable quick runtime checks */
47 #undef ATA_ENABLE_PATA		/* define to enable PATA support in some
48 				 * low-level drivers */
49 
50 
51 /* note: prints function name for you */
52 #ifdef ATA_DEBUG
53 #define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
54 #ifdef ATA_VERBOSE_DEBUG
55 #define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
56 #else
57 #define VPRINTK(fmt, args...)
58 #endif	/* ATA_VERBOSE_DEBUG */
59 #else
60 #define DPRINTK(fmt, args...)
61 #define VPRINTK(fmt, args...)
62 #endif	/* ATA_DEBUG */
63 
/* Conditional debug printk, gated on the port's ATA_FLAG_DEBUGMSG flag.
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement: the original bare-"if" form broke `if (x) BPRINTK(...); else`
 * call sites (the `else` would silently bind to the macro's `if`).
 * NOTE: relies on a variable named `ap` being in scope at the call site.
 */
#define BPRINTK(fmt, args...) do { \
	if (ap->flags & ATA_FLAG_DEBUGMSG) \
		printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args); \
} while (0)
65 
66 /* NEW: debug levels */
67 #define HAVE_LIBATA_MSG 1
68 
/* msg_enable bit classes, tested via the ata_msg_*() macros below and
 * initialized from a debug level by ata_msg_init()
 */
enum {
	ATA_MSG_DRV	= 0x0001,
	ATA_MSG_INFO	= 0x0002,
	ATA_MSG_PROBE	= 0x0004,
	ATA_MSG_WARN	= 0x0008,
	ATA_MSG_MALLOC	= 0x0010,
	ATA_MSG_CTL	= 0x0020,
	ATA_MSG_INTR	= 0x0040,
	ATA_MSG_ERR	= 0x0080,
};
79 
/* non-zero when the given message class is enabled in (p)->msg_enable */
#define ata_msg_drv(p)    ((p)->msg_enable & ATA_MSG_DRV)
#define ata_msg_info(p)   ((p)->msg_enable & ATA_MSG_INFO)
#define ata_msg_probe(p)  ((p)->msg_enable & ATA_MSG_PROBE)
#define ata_msg_warn(p)   ((p)->msg_enable & ATA_MSG_WARN)
#define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC)
#define ata_msg_ctl(p)    ((p)->msg_enable & ATA_MSG_CTL)
#define ata_msg_intr(p)   ((p)->msg_enable & ATA_MSG_INTR)
#define ata_msg_err(p)    ((p)->msg_enable & ATA_MSG_ERR)
88 
89 static inline u32 ata_msg_init(int dval, int default_msg_enable_bits)
90 {
91 	if (dval < 0 || dval >= (sizeof(u32) * 8))
92 		return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */
93 	if (!dval)
94 		return 0;
95 	return (1 << dval) - 1;
96 }
97 
98 /* defines only for the constants which don't work well as enums */
99 #define ATA_TAG_POISON		0xfafbfcfdU
100 
/* move to PCI layer? */
static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
{
	/* Return the generic struct device embedded in a PCI device. */
	return &pdev->dev;
}
106 
enum {
	/* various global constants */
	LIBATA_MAX_PRD		= ATA_MAX_PRD / 2,
	ATA_MAX_PORTS		= 8,
	ATA_DEF_QUEUE		= 1,
	/* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
	ATA_MAX_QUEUE		= 32,
	ATA_TAG_INTERNAL	= ATA_MAX_QUEUE - 1,
	ATA_MAX_SECTORS		= 200,	/* FIXME */
	ATA_MAX_SECTORS_LBA48	= 65535,
	ATA_MAX_BUS		= 2,
	ATA_DEF_BUSY_WAIT	= 10000,
	ATA_SHORT_PAUSE		= (HZ >> 6) + 1,

	/* values fed into the scsi_host_template fields of the same name */
	ATA_SHT_EMULATED	= 1,
	ATA_SHT_CMD_PER_LUN	= 1,
	ATA_SHT_THIS_ID		= -1,
	ATA_SHT_USE_CLUSTERING	= 1,

	/* struct ata_device stuff */
	ATA_DFLAG_LBA		= (1 << 0), /* device supports LBA */
	ATA_DFLAG_LBA48		= (1 << 1), /* device supports LBA48 */
	ATA_DFLAG_CDB_INTR	= (1 << 2), /* device asserts INTRQ when ready for CDB */
	ATA_DFLAG_NCQ		= (1 << 3), /* device supports NCQ */
	ATA_DFLAG_CFG_MASK	= (1 << 8) - 1,

	ATA_DFLAG_PIO		= (1 << 8), /* device currently in PIO mode */
	ATA_DFLAG_INIT_MASK	= (1 << 16) - 1,

	ATA_DFLAG_DETACH	= (1 << 16),
	ATA_DFLAG_DETACHED	= (1 << 17),

	/* device classes, as returned by ata_dev_classify() */
	ATA_DEV_UNKNOWN		= 0,	/* unknown device */
	ATA_DEV_ATA		= 1,	/* ATA device */
	ATA_DEV_ATA_UNSUP	= 2,	/* ATA device (unsupported) */
	ATA_DEV_ATAPI		= 3,	/* ATAPI device */
	ATA_DEV_ATAPI_UNSUP	= 4,	/* ATAPI device (unsupported) */
	ATA_DEV_NONE		= 5,	/* no device */

	/* struct ata_port flags */
	ATA_FLAG_SLAVE_POSS	= (1 << 0), /* host supports slave dev */
					    /* (doesn't imply presence) */
	ATA_FLAG_SATA		= (1 << 1),
	ATA_FLAG_NO_LEGACY	= (1 << 2), /* no legacy mode check */
	ATA_FLAG_MMIO		= (1 << 3), /* use MMIO, not PIO */
	ATA_FLAG_SRST		= (1 << 4), /* (obsolete) use ATA SRST, not E.D.D. */
	ATA_FLAG_SATA_RESET	= (1 << 5), /* (obsolete) use COMRESET */
	ATA_FLAG_NO_ATAPI	= (1 << 6), /* No ATAPI support */
	ATA_FLAG_PIO_DMA	= (1 << 7), /* PIO cmds via DMA */
	ATA_FLAG_PIO_LBA48	= (1 << 8), /* Host DMA engine is LBA28 only */
	ATA_FLAG_PIO_POLLING	= (1 << 9), /* use polling PIO if LLD
					     * doesn't handle PIO interrupts */
	ATA_FLAG_NCQ		= (1 << 10), /* host supports NCQ */
	ATA_FLAG_HRST_TO_RESUME	= (1 << 11), /* hardreset to resume phy */
	ATA_FLAG_SKIP_D2H_BSY	= (1 << 12), /* can't wait for the first D2H
					      * Register FIS clearing BSY */

	ATA_FLAG_DEBUGMSG	= (1 << 13), /* enable BPRINTK debug output */
	ATA_FLAG_FLUSH_PORT_TASK = (1 << 14), /* flush port task */

	ATA_FLAG_EH_PENDING	= (1 << 15), /* EH pending */
	ATA_FLAG_EH_IN_PROGRESS	= (1 << 16), /* EH in progress */
	ATA_FLAG_FROZEN		= (1 << 17), /* port is frozen */
	ATA_FLAG_RECOVERED	= (1 << 18), /* recovery action performed */
	ATA_FLAG_LOADING	= (1 << 19), /* boot/loading probe */
	ATA_FLAG_UNLOADING	= (1 << 20), /* module is unloading */
	ATA_FLAG_SCSI_HOTPLUG	= (1 << 21), /* SCSI hotplug scheduled */

	ATA_FLAG_DISABLED	= (1 << 22), /* port is disabled, ignore it */
	ATA_FLAG_SUSPENDED	= (1 << 23), /* port is suspended (power) */

	/* bits 24:31 of ap->flags are reserved for LLDD specific flags */

	/* struct ata_queued_cmd flags */
	ATA_QCFLAG_ACTIVE	= (1 << 0), /* cmd not yet ack'd to scsi layer */
	ATA_QCFLAG_SG		= (1 << 1), /* have s/g table? */
	ATA_QCFLAG_SINGLE	= (1 << 2), /* no s/g, just a single buffer */
	ATA_QCFLAG_DMAMAP	= ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
	ATA_QCFLAG_IO		= (1 << 3), /* standard IO command */
	ATA_QCFLAG_RESULT_TF	= (1 << 4), /* result TF requested */

	ATA_QCFLAG_FAILED	= (1 << 16), /* cmd failed and is owned by EH */
	ATA_QCFLAG_SENSE_VALID	= (1 << 17), /* sense data valid */
	ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */

	/* host set flags */
	ATA_HOST_SIMPLEX	= (1 << 0),	/* Host is simplex, one DMA channel per host_set only */

	/* various lengths of time */
	ATA_TMOUT_BOOT		= 30 * HZ,	/* heuristic */
	ATA_TMOUT_BOOT_QUICK	= 7 * HZ,	/* heuristic */
	ATA_TMOUT_INTERNAL	= 30 * HZ,
	ATA_TMOUT_INTERNAL_QUICK = 5 * HZ,

	/* ATA bus states */
	BUS_UNKNOWN		= 0,
	BUS_DMA			= 1,
	BUS_IDLE		= 2,
	BUS_NOINTR		= 3,
	BUS_NODATA		= 4,
	BUS_TIMER		= 5,
	BUS_PIO			= 6,
	BUS_EDD			= 7,
	BUS_IDENTIFY		= 8,
	BUS_PACKET		= 9,

	/* SATA port states */
	PORT_UNKNOWN		= 0,
	PORT_ENABLED		= 1,
	PORT_DISABLED		= 2,

	/* encoding various smaller bitmaps into a single
	 * unsigned int bitmap
	 */
	ATA_BITS_PIO		= 5,
	ATA_BITS_MWDMA		= 3,
	ATA_BITS_UDMA		= 8,

	ATA_SHIFT_PIO		= 0,
	ATA_SHIFT_MWDMA		= ATA_SHIFT_PIO + ATA_BITS_PIO,
	ATA_SHIFT_UDMA		= ATA_SHIFT_MWDMA + ATA_BITS_MWDMA,

	ATA_MASK_PIO		= ((1 << ATA_BITS_PIO) - 1) << ATA_SHIFT_PIO,
	ATA_MASK_MWDMA		= ((1 << ATA_BITS_MWDMA) - 1) << ATA_SHIFT_MWDMA,
	ATA_MASK_UDMA		= ((1 << ATA_BITS_UDMA) - 1) << ATA_SHIFT_UDMA,

	/* size of buffer to pad xfers ending on unaligned boundaries */
	ATA_DMA_PAD_SZ		= 4,
	ATA_DMA_PAD_BUF_SZ	= ATA_DMA_PAD_SZ * ATA_MAX_QUEUE,

	/* masks for port functions */
	ATA_PORT_PRIMARY	= (1 << 0),
	ATA_PORT_SECONDARY	= (1 << 1),

	/* ering size */
	ATA_ERING_SIZE		= 32,

	/* desc_len for ata_eh_info and context */
	ATA_EH_DESC_LEN		= 80,

	/* reset / recovery action types */
	ATA_EH_REVALIDATE	= (1 << 0),
	ATA_EH_SOFTRESET	= (1 << 1),
	ATA_EH_HARDRESET	= (1 << 2),

	ATA_EH_RESET_MASK	= ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
	ATA_EH_PERDEV_MASK	= ATA_EH_REVALIDATE,

	/* ata_eh_info->flags */
	ATA_EHI_HOTPLUGGED	= (1 << 0),  /* could have been hotplugged */

	ATA_EHI_DID_RESET	= (1 << 16), /* already reset this port */

	/* max repeat if error condition is still set after ->error_handler */
	ATA_EH_MAX_REPEAT	= 5,

	/* how hard are we gonna try to probe/recover devices */
	ATA_PROBE_MAX_TRIES	= 3,
	ATA_EH_RESET_TRIES	= 3,
	ATA_EH_DEV_TRIES	= 3,

	/* Drive spinup time (time from power-on to the first D2H FIS)
	 * in msecs - 8s currently.  Failing to get ready in this time
	 * isn't critical.  It will result in reset failure for
	 * controllers which can't wait for the first D2H FIS.  libata
	 * will retry, so it just has to be long enough to spin up
	 * most devices.
	 */
	ATA_SPINUP_WAIT		= 8000,
};
277 
/* PIO host state machine states (see ata_hsm_move() and ap->hsm_task_state) */
enum hsm_task_states {
	HSM_ST_UNKNOWN,		/* state unknown */
	HSM_ST_IDLE,		/* no command in progress */
	HSM_ST,			/* (waiting the device to) transfer data */
	HSM_ST_LAST,		/* (waiting the device to) complete command */
	HSM_ST_ERR,		/* error */
	HSM_ST_FIRST,		/* (waiting the device to)
				   write CDB or first data block */
};
287 
/* bits for qc->err_mask / ehi->err_mask: why a command failed */
enum ata_completion_errors {
	AC_ERR_DEV		= (1 << 0), /* device reported error */
	AC_ERR_HSM		= (1 << 1), /* host state machine violation */
	AC_ERR_TIMEOUT		= (1 << 2), /* timeout */
	AC_ERR_MEDIA		= (1 << 3), /* media error */
	AC_ERR_ATA_BUS		= (1 << 4), /* ATA bus error */
	AC_ERR_HOST_BUS		= (1 << 5), /* host bus error */
	AC_ERR_SYSTEM		= (1 << 6), /* system error */
	AC_ERR_INVALID		= (1 << 7), /* invalid argument */
	AC_ERR_OTHER		= (1 << 8), /* unknown */
};
299 
300 /* forward declarations */
301 struct scsi_device;
302 struct ata_port_operations;
303 struct ata_port;
304 struct ata_queued_cmd;
305 
306 /* typedefs */
307 typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc);
308 typedef int (*ata_prereset_fn_t)(struct ata_port *ap);
309 typedef int (*ata_reset_fn_t)(struct ata_port *ap, unsigned int *classes);
310 typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *classes);
311 
/*
 * Register addresses for one ATA port, filled in by the low-level
 * driver (see ata_std_ports() and struct ata_probe_ent.port[]).
 */
struct ata_ioports {
	unsigned long		cmd_addr;
	unsigned long		data_addr;
	unsigned long		error_addr;
	unsigned long		feature_addr;
	unsigned long		nsect_addr;
	unsigned long		lbal_addr;
	unsigned long		lbam_addr;
	unsigned long		lbah_addr;
	unsigned long		device_addr;
	unsigned long		status_addr;
	unsigned long		command_addr;
	unsigned long		altstatus_addr;
	unsigned long		ctl_addr;
	unsigned long		bmdma_addr;
	unsigned long		scr_addr;
};
329 
/*
 * Template describing a host controller and its ports; built by the
 * low-level driver and consumed by ata_device_add().
 */
struct ata_probe_ent {
	struct list_head	node;
	struct device 		*dev;
	const struct ata_port_operations *port_ops;
	struct scsi_host_template *sht;
	struct ata_ioports	port[ATA_MAX_PORTS];
	unsigned int		n_ports;
	unsigned int		hard_port_no;
	unsigned int		pio_mask;
	unsigned int		mwdma_mask;
	unsigned int		udma_mask;
	unsigned int		legacy_mode;
	unsigned long		irq;
	unsigned int		irq_flags;
	unsigned long		host_flags;
	unsigned long		host_set_flags;
	void __iomem		*mmio_base;
	void			*private_data;
};
349 
/*
 * One registered host controller instance; the ports[] trailing array
 * holds n_ports entries (see ata_device_add()/ata_host_set_remove()).
 */
struct ata_host_set {
	spinlock_t		lock;
	struct device 		*dev;
	unsigned long		irq;
	void __iomem		*mmio_base;
	unsigned int		n_ports;
	void			*private_data;
	const struct ata_port_operations *ops;
	unsigned long		flags;
	int			simplex_claimed;	/* Keep separate in case we
							   ever need to do this locked */
	struct ata_host_set	*next;		/* for legacy mode */
	struct ata_port		*ports[0];	/* n_ports entries, co-allocated */
};
364 
/*
 * State for one in-flight command: taskfile/CDB, scatter/gather
 * bookkeeping and completion info.  qc's live in ata_port.qcmd[] and
 * are identified by ->tag.
 */
struct ata_queued_cmd {
	struct ata_port		*ap;
	struct ata_device	*dev;

	struct scsi_cmnd	*scsicmd;
	void			(*scsidone)(struct scsi_cmnd *);

	struct ata_taskfile	tf;
	u8			cdb[ATAPI_CDB_LEN];

	unsigned long		flags;		/* ATA_QCFLAG_xxx */
	unsigned int		tag;
	unsigned int		n_elem;
	unsigned int		orig_n_elem;

	int			dma_dir;

	unsigned int		pad_len;	/* non-zero: pad_sgent is in use */

	unsigned int		nsect;
	unsigned int		cursect;

	unsigned int		nbytes;
	unsigned int		curbytes;

	unsigned int		cursg;
	unsigned int		cursg_ofs;

	struct scatterlist	sgent;
	struct scatterlist	pad_sgent;
	void			*buf_virt;

	/* DO NOT iterate over __sg manually, use ata_for_each_sg() */
	struct scatterlist	*__sg;

	unsigned int		err_mask;	/* AC_ERR_* bits */
	struct ata_taskfile	result_tf;
	ata_qc_cb_t		complete_fn;

	void			*private_data;
};
406 
/* per-port interrupt/request counters (kept in struct ata_port.stats) */
struct ata_host_stats {
	unsigned long		unhandled_irq;
	unsigned long		idle_irq;
	unsigned long		rw_reqbuf;
};
412 
/* one entry of a device's error-history ring */
struct ata_ering_entry {
	int			is_io;
	unsigned int		err_mask;	/* AC_ERR_* bits */
	u64			timestamp;
};
418 
/* fixed-size error-history ring buffer (see struct ata_device.ering) */
struct ata_ering {
	int			cursor;		/* index of most recent entry */
	struct ata_ering_entry	ring[ATA_ERING_SIZE];
};
423 
/*
 * Per-device state.  Fields above n_sectors are maintained across
 * device init; everything from n_sectors onwards is zeroed — see
 * ATA_DEVICE_CLEAR_OFFSET.
 */
struct ata_device {
	struct ata_port		*ap;
	unsigned int		devno;		/* 0 or 1 */
	unsigned long		flags;		/* ATA_DFLAG_xxx */
	struct scsi_device	*sdev;		/* attached SCSI device */
	/* n_sectors marks ATA_DEVICE_CLEAR_OFFSET; fields from here on are zeroed */
	u64			n_sectors;	/* size of device, if ATA */
	unsigned int		class;		/* ATA_DEV_xxx */
	u16			id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
	u8			pio_mode;
	u8			dma_mode;
	u8			xfer_mode;
	unsigned int		xfer_shift;	/* ATA_SHIFT_xxx */

	unsigned int		multi_count;	/* sectors count for
						   READ/WRITE MULTIPLE */
	unsigned int		max_sectors;	/* per-device max sectors */
	unsigned int		cdb_len;

	/* per-dev xfer mask */
	unsigned int		pio_mask;
	unsigned int		mwdma_mask;
	unsigned int		udma_mask;

	/* for CHS addressing */
	u16			cylinders;	/* Number of cylinders */
	u16			heads;		/* Number of heads */
	u16			sectors;	/* Number of sectors per track */

	/* error history */
	struct ata_ering	ering;
};
456 
/* Offset into struct ata_device.  Fields above it are maintained
 * across device init.  Fields below are zeroed.
 */
460 #define ATA_DEVICE_CLEAR_OFFSET		offsetof(struct ata_device, n_sectors)
461 
/*
 * Error information handed to EH; ata_port.eh_info is protected by the
 * host_set lock (see the comment in struct ata_port).
 */
struct ata_eh_info {
	struct ata_device	*dev;		/* offending device */
	u32			serror;		/* SError from LLDD */
	unsigned int		err_mask;	/* port-wide err_mask */
	unsigned int		action;		/* ATA_EH_* action mask */
	unsigned int		dev_action[ATA_MAX_DEVICES]; /* dev EH action */
	unsigned int		flags;		/* ATA_EHI_* flags */

	unsigned long		hotplug_timestamp;	/* jiffies, set by ata_ehi_hotplugged() */
	unsigned int		probe_mask;

	char			desc[ATA_EH_DESC_LEN];	/* see ata_ehi_push_desc() */
	int			desc_len;
};
476 
/* EH-private copy of eh_info plus per-device recovery state; owned by EH
 * (see ata_port.eh_context)
 */
struct ata_eh_context {
	struct ata_eh_info	i;
	int			tries[ATA_MAX_DEVICES];
	unsigned int		classes[ATA_MAX_DEVICES];	/* ATA_DEV_* */
	unsigned int		did_probe_mask;
};
483 
/*
 * Everything needed to drive one ATA port; co-allocated with its
 * struct Scsi_Host (see ->host).
 */
struct ata_port {
	struct Scsi_Host	*host;	/* our co-allocated scsi host */
	const struct ata_port_operations *ops;
	spinlock_t		*lock;
	unsigned long		flags;	/* ATA_FLAG_xxx */
	unsigned int		id;	/* unique id req'd by scsi midlyr */
	unsigned int		port_no; /* unique port #; from zero */
	unsigned int		hard_port_no;	/* hardware port #; from zero */

	struct ata_prd		*prd;	 /* our SG list */
	dma_addr_t		prd_dma; /* and its DMA mapping */

	void			*pad;	/* array of DMA pad buffers */
	dma_addr_t		pad_dma;

	struct ata_ioports	ioaddr;	/* ATA cmd/ctl/dma register blocks */

	u8			ctl;	/* cache of ATA control register */
	u8			last_ctl;	/* Cache last written value */
	unsigned int		pio_mask;
	unsigned int		mwdma_mask;
	unsigned int		udma_mask;
	unsigned int		cbl;	/* cable type; ATA_CBL_xxx */
	unsigned int		hw_sata_spd_limit;
	unsigned int		sata_spd_limit;	/* SATA PHY speed limit */

	/* record runtime error info, protected by host_set lock */
	struct ata_eh_info	eh_info;
	/* EH context owned by EH */
	struct ata_eh_context	eh_context;

	struct ata_device	device[ATA_MAX_DEVICES];

	struct ata_queued_cmd	qcmd[ATA_MAX_QUEUE];	/* indexed by qc->tag */
	unsigned long		qc_allocated;
	unsigned int		qc_active;

	unsigned int		active_tag;
	u32			sactive;

	struct ata_host_stats	stats;
	struct ata_host_set	*host_set;
	struct device 		*dev;

	struct work_struct	port_task;
	struct work_struct	hotplug_task;
	struct work_struct	scsi_rescan_task;

	unsigned int		hsm_task_state;	/* enum hsm_task_states */

	u32			msg_enable;	/* ATA_MSG_* bitmap */
	struct list_head	eh_done_q;
	wait_queue_head_t	eh_wait_q;

	void			*private_data;

	u8			sector_buf[ATA_SECT_SIZE]; /* owned by EH */
};
542 
/*
 * Hooks implemented by the low-level driver.  New-style EH drivers
 * provide ->freeze/->thaw/->error_handler; ->eng_timeout and
 * ->phy_reset are the obsolete equivalents (see comments below).
 */
struct ata_port_operations {
	void (*port_disable) (struct ata_port *);

	void (*dev_config) (struct ata_port *, struct ata_device *);

	void (*set_piomode) (struct ata_port *, struct ata_device *);
	void (*set_dmamode) (struct ata_port *, struct ata_device *);
	unsigned long (*mode_filter) (const struct ata_port *, struct ata_device *, unsigned long);

	void (*tf_load) (struct ata_port *ap, const struct ata_taskfile *tf);
	void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf);

	void (*exec_command)(struct ata_port *ap, const struct ata_taskfile *tf);
	u8   (*check_status)(struct ata_port *ap);
	u8   (*check_altstatus)(struct ata_port *ap);
	void (*dev_select)(struct ata_port *ap, unsigned int device);

	void (*phy_reset) (struct ata_port *ap); /* obsolete */
	void (*set_mode) (struct ata_port *ap);

	void (*post_set_mode) (struct ata_port *ap);

	int (*check_atapi_dma) (struct ata_queued_cmd *qc);

	void (*bmdma_setup) (struct ata_queued_cmd *qc);
	void (*bmdma_start) (struct ata_queued_cmd *qc);

	void (*data_xfer) (struct ata_device *, unsigned char *, unsigned int, int);

	void (*qc_prep) (struct ata_queued_cmd *qc);
	unsigned int (*qc_issue) (struct ata_queued_cmd *qc);

	/* Error handlers.  ->error_handler overrides ->eng_timeout and
	 * indicates that new-style EH is in place.
	 */
	void (*eng_timeout) (struct ata_port *ap); /* obsolete */

	void (*freeze) (struct ata_port *ap);
	void (*thaw) (struct ata_port *ap);
	void (*error_handler) (struct ata_port *ap);
	void (*post_internal_cmd) (struct ata_queued_cmd *qc);

	irqreturn_t (*irq_handler)(int, void *, struct pt_regs *);
	void (*irq_clear) (struct ata_port *);

	u32 (*scr_read) (struct ata_port *ap, unsigned int sc_reg);
	void (*scr_write) (struct ata_port *ap, unsigned int sc_reg,
			   u32 val);

	int (*port_start) (struct ata_port *ap);
	void (*port_stop) (struct ata_port *ap);

	void (*host_stop) (struct ata_host_set *host_set);

	void (*bmdma_stop) (struct ata_queued_cmd *qc);
	u8   (*bmdma_status) (struct ata_port *ap);
};
600 
/*
 * Static per-controller description supplied by the LLDD, e.g. to
 * ata_pci_init_one()/ata_pci_init_native_mode().
 */
struct ata_port_info {
	struct scsi_host_template	*sht;
	unsigned long		host_flags;	/* ATA_FLAG_xxx */
	unsigned long		pio_mask;
	unsigned long		mwdma_mask;
	unsigned long		udma_mask;
	const struct ata_port_operations *port_ops;
	void 			*private_data;
};
610 
/*
 * PIO/MWDMA/UDMA cycle timing parameters; computed and combined with
 * ata_timing_compute()/ata_timing_merge() using the ATA_TIMING_* masks.
 */
struct ata_timing {
	unsigned short mode;		/* ATA mode */
	unsigned short setup;		/* t1 */
	unsigned short act8b;		/* t2 for 8-bit I/O */
	unsigned short rec8b;		/* t2i for 8-bit I/O */
	unsigned short cyc8b;		/* t0 for 8-bit I/O */
	unsigned short active;		/* t2 or tD */
	unsigned short recover;		/* t2i or tK */
	unsigned short cycle;		/* t0 */
	unsigned short udma;		/* t2CYCTYP/2 */
};
622 
/* clamp v into [vmin, vmax], evaluated as short */
#define FIT(v,vmin,vmax)	max_t(short,min_t(short,v,vmax),vmin)
624 
625 extern const unsigned long sata_deb_timing_boot[];
626 extern const unsigned long sata_deb_timing_eh[];
627 extern const unsigned long sata_deb_timing_before_fsrst[];
628 
629 extern void ata_port_probe(struct ata_port *);
630 extern void __sata_phy_reset(struct ata_port *ap);
631 extern void sata_phy_reset(struct ata_port *ap);
632 extern void ata_bus_reset(struct ata_port *ap);
633 extern int sata_set_spd(struct ata_port *ap);
634 extern int sata_phy_debounce(struct ata_port *ap, const unsigned long *param);
635 extern int sata_phy_resume(struct ata_port *ap, const unsigned long *param);
636 extern int ata_std_prereset(struct ata_port *ap);
637 extern int ata_std_softreset(struct ata_port *ap, unsigned int *classes);
638 extern int sata_std_hardreset(struct ata_port *ap, unsigned int *class);
639 extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes);
640 extern int ata_dev_revalidate(struct ata_device *dev, int post_reset);
641 extern void ata_port_disable(struct ata_port *);
642 extern void ata_std_ports(struct ata_ioports *ioaddr);
643 #ifdef CONFIG_PCI
644 extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
645 			     unsigned int n_ports);
646 extern void ata_pci_remove_one (struct pci_dev *pdev);
647 extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state);
648 extern int ata_pci_device_resume(struct pci_dev *pdev);
649 extern int ata_pci_clear_simplex(struct pci_dev *pdev);
650 #endif /* CONFIG_PCI */
651 extern int ata_device_add(const struct ata_probe_ent *ent);
652 extern void ata_port_detach(struct ata_port *ap);
653 extern void ata_host_set_remove(struct ata_host_set *host_set);
654 extern int ata_scsi_detect(struct scsi_host_template *sht);
655 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
656 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
657 extern int ata_scsi_release(struct Scsi_Host *host);
658 extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
659 extern int sata_scr_valid(struct ata_port *ap);
660 extern int sata_scr_read(struct ata_port *ap, int reg, u32 *val);
661 extern int sata_scr_write(struct ata_port *ap, int reg, u32 val);
662 extern int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val);
663 extern int ata_port_online(struct ata_port *ap);
664 extern int ata_port_offline(struct ata_port *ap);
665 extern int ata_scsi_device_resume(struct scsi_device *);
666 extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t state);
667 extern int ata_device_resume(struct ata_device *);
668 extern int ata_device_suspend(struct ata_device *, pm_message_t state);
669 extern int ata_ratelimit(void);
670 extern unsigned int ata_busy_sleep(struct ata_port *ap,
671 				   unsigned long timeout_pat,
672 				   unsigned long timeout);
673 extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *),
674 				void *data, unsigned long delay);
675 extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
676 			     unsigned long interval_msec,
677 			     unsigned long timeout_msec);
678 
679 /*
680  * Default driver ops implementations
681  */
682 extern void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
683 extern void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
684 extern void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp);
685 extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
686 extern void ata_noop_dev_select (struct ata_port *ap, unsigned int device);
687 extern void ata_std_dev_select (struct ata_port *ap, unsigned int device);
688 extern u8 ata_check_status(struct ata_port *ap);
689 extern u8 ata_altstatus(struct ata_port *ap);
690 extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
691 extern int ata_port_start (struct ata_port *ap);
692 extern void ata_port_stop (struct ata_port *ap);
693 extern void ata_host_stop (struct ata_host_set *host_set);
694 extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
695 extern void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
696 			       unsigned int buflen, int write_data);
697 extern void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
698 			      unsigned int buflen, int write_data);
699 extern void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
700 			      unsigned int buflen, int write_data);
701 extern void ata_qc_prep(struct ata_queued_cmd *qc);
702 extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
703 extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
704 extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf,
705 		unsigned int buflen);
706 extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
707 		 unsigned int n_elem);
708 extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
709 extern void ata_id_string(const u16 *id, unsigned char *s,
710 			  unsigned int ofs, unsigned int len);
711 extern void ata_id_c_string(const u16 *id, unsigned char *s,
712 			    unsigned int ofs, unsigned int len);
713 extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
714 extern void ata_bmdma_start (struct ata_queued_cmd *qc);
715 extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
716 extern u8   ata_bmdma_status(struct ata_port *ap);
717 extern void ata_bmdma_irq_clear(struct ata_port *ap);
718 extern void ata_bmdma_freeze(struct ata_port *ap);
719 extern void ata_bmdma_thaw(struct ata_port *ap);
720 extern void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
721 			       ata_reset_fn_t softreset,
722 			       ata_reset_fn_t hardreset,
723 			       ata_postreset_fn_t postreset);
724 extern void ata_bmdma_error_handler(struct ata_port *ap);
725 extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc);
726 extern int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
727 			u8 status, int in_wq);
728 extern void ata_qc_complete(struct ata_queued_cmd *qc);
729 extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
730 				    void (*finish_qc)(struct ata_queued_cmd *));
731 extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
732 			      void (*done)(struct scsi_cmnd *));
733 extern int ata_std_bios_param(struct scsi_device *sdev,
734 			      struct block_device *bdev,
735 			      sector_t capacity, int geom[]);
736 extern int ata_scsi_slave_config(struct scsi_device *sdev);
737 extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
738 extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
739 				       int queue_depth);
740 extern struct ata_device *ata_dev_pair(struct ata_device *adev);
741 
742 /*
743  * Timing helpers
744  */
745 
746 extern unsigned int ata_pio_need_iordy(const struct ata_device *);
747 extern int ata_timing_compute(struct ata_device *, unsigned short,
748 			      struct ata_timing *, int, int);
749 extern void ata_timing_merge(const struct ata_timing *,
750 			     const struct ata_timing *, struct ata_timing *,
751 			     unsigned int);
752 
/* bits selecting which struct ata_timing fields ata_timing_merge() combines */
enum {
	ATA_TIMING_SETUP	= (1 << 0),
	ATA_TIMING_ACT8B	= (1 << 1),
	ATA_TIMING_REC8B	= (1 << 2),
	ATA_TIMING_CYC8B	= (1 << 3),
	ATA_TIMING_8BIT		= ATA_TIMING_ACT8B | ATA_TIMING_REC8B |
				  ATA_TIMING_CYC8B,
	ATA_TIMING_ACTIVE	= (1 << 4),
	ATA_TIMING_RECOVER	= (1 << 5),
	ATA_TIMING_CYCLE	= (1 << 6),
	ATA_TIMING_UDMA		= (1 << 7),
	ATA_TIMING_ALL		= ATA_TIMING_SETUP | ATA_TIMING_ACT8B |
				  ATA_TIMING_REC8B | ATA_TIMING_CYC8B |
				  ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER |
				  ATA_TIMING_CYCLE | ATA_TIMING_UDMA,
};
769 
770 
771 #ifdef CONFIG_PCI
772 struct pci_bits {
773 	unsigned int		reg;	/* PCI config register to read */
774 	unsigned int		width;	/* 1 (8 bit), 2 (16 bit), 4 (32 bit) */
775 	unsigned long		mask;
776 	unsigned long		val;
777 };
778 
779 extern void ata_pci_host_stop (struct ata_host_set *host_set);
780 extern struct ata_probe_ent *
781 ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int portmask);
782 extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
783 extern unsigned long ata_pci_default_filter(const struct ata_port *, struct ata_device *, unsigned long);
784 #endif /* CONFIG_PCI */
785 
786 /*
787  * EH
788  */
789 extern void ata_eng_timeout(struct ata_port *ap);
790 
791 extern void ata_port_schedule_eh(struct ata_port *ap);
792 extern int ata_port_abort(struct ata_port *ap);
793 extern int ata_port_freeze(struct ata_port *ap);
794 
795 extern void ata_eh_freeze_port(struct ata_port *ap);
796 extern void ata_eh_thaw_port(struct ata_port *ap);
797 
798 extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
799 extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
800 
801 extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
802 		      ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
803 		      ata_postreset_fn_t postreset);
804 
805 /*
806  * printk helpers
807  */
/* printk prefixed with "ataN: " (port id); lv is a KERN_* level string */
#define ata_port_printk(ap, lv, fmt, args...) \
	printk(lv"ata%u: "fmt, (ap)->id , ##args)

/* printk prefixed with "ataN.DD: " (port id + device number) */
#define ata_dev_printk(dev, lv, fmt, args...) \
	printk(lv"ata%u.%02u: "fmt, (dev)->ap->id, (dev)->devno , ##args)
813 
814 /*
815  * ata_eh_info helpers
816  */
/* append a formatted fragment to ehi->desc, truncating at ATA_EH_DESC_LEN
 * (scnprintf returns the number of characters actually stored)
 */
#define ata_ehi_push_desc(ehi, fmt, args...) do { \
	(ehi)->desc_len += scnprintf((ehi)->desc + (ehi)->desc_len, \
				     ATA_EH_DESC_LEN - (ehi)->desc_len, \
				     fmt , ##args); \
} while (0)

/* reset ehi->desc to the empty string */
#define ata_ehi_clear_desc(ehi) do { \
	(ehi)->desc[0] = '\0'; \
	(ehi)->desc_len = 0; \
} while (0)
827 
828 static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi)
829 {
830 	if (ehi->flags & ATA_EHI_HOTPLUGGED)
831 		return;
832 
833 	ehi->flags |= ATA_EHI_HOTPLUGGED;
834 	ehi->hotplug_timestamp = jiffies;
835 
836 	ehi->err_mask |= AC_ERR_ATA_BUS;
837 	ehi->action |= ATA_EH_SOFTRESET;
838 	ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
839 }
840 
841 /*
842  * qc helpers
843  */
844 static inline int
845 ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
846 {
847 	if (sg == &qc->pad_sgent)
848 		return 1;
849 	if (qc->pad_len)
850 		return 0;
851 	if (((sg - qc->__sg) + 1) == qc->n_elem)
852 		return 1;
853 	return 0;
854 }
855 
856 static inline struct scatterlist *
857 ata_qc_first_sg(struct ata_queued_cmd *qc)
858 {
859 	if (qc->n_elem)
860 		return qc->__sg;
861 	if (qc->pad_len)
862 		return &qc->pad_sgent;
863 	return NULL;
864 }
865 
866 static inline struct scatterlist *
867 ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc)
868 {
869 	if (sg == &qc->pad_sgent)
870 		return NULL;
871 	if (++sg - qc->__sg < qc->n_elem)
872 		return sg;
873 	if (qc->pad_len)
874 		return &qc->pad_sgent;
875 	return NULL;
876 }
877 
/* Iterate @sg over every scatterlist entry of @qc, pad entry included. */
#define ata_for_each_sg(sg, qc) \
	for (sg = ata_qc_first_sg(qc); sg; sg = ata_qc_next_sg(sg, qc))
880 
881 static inline unsigned int ata_tag_valid(unsigned int tag)
882 {
883 	return (tag < ATA_MAX_QUEUE) ? 1 : 0;
884 }
885 
886 static inline unsigned int ata_tag_internal(unsigned int tag)
887 {
888 	return tag == ATA_MAX_QUEUE - 1;
889 }
890 
891 /*
892  * device helpers
893  */
894 static inline unsigned int ata_class_enabled(unsigned int class)
895 {
896 	return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI;
897 }
898 
899 static inline unsigned int ata_class_disabled(unsigned int class)
900 {
901 	return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP;
902 }
903 
/* A class is absent when it is neither enabled nor disabled. */
static inline unsigned int ata_class_absent(unsigned int class)
{
	return !(ata_class_enabled(class) || ata_class_disabled(class));
}
908 
/* True if @dev's class is ATA_DEV_ATA or ATA_DEV_ATAPI. */
static inline unsigned int ata_dev_enabled(const struct ata_device *dev)
{
	return ata_class_enabled(dev->class);
}
913 
/* True if @dev's class is ATA_DEV_ATA_UNSUP or ATA_DEV_ATAPI_UNSUP. */
static inline unsigned int ata_dev_disabled(const struct ata_device *dev)
{
	return ata_class_disabled(dev->class);
}
918 
/* True if @dev's class is neither an enabled nor a disabled class. */
static inline unsigned int ata_dev_absent(const struct ata_device *dev)
{
	return ata_class_absent(dev->class);
}
923 
924 /*
925  * port helpers
926  */
927 static inline int ata_port_max_devices(const struct ata_port *ap)
928 {
929 	if (ap->flags & ATA_FLAG_SLAVE_POSS)
930 		return 2;
931 	return 1;
932 }
933 
934 
/* Read the port's status register via the driver's check_status hook. */
static inline u8 ata_chk_status(struct ata_port *ap)
{
	return ap->ops->check_status(ap);
}
939 
940 
941 /**
942  *	ata_pause - Flush writes and pause 400 nanoseconds.
943  *	@ap: Port to wait for.
944  *
945  *	LOCKING:
946  *	Inherited from caller.
947  */
948 
949 static inline void ata_pause(struct ata_port *ap)
950 {
951 	ata_altstatus(ap);
952 	ndelay(400);
953 }
954 
955 
956 /**
957  *	ata_busy_wait - Wait for a port status register
958  *	@ap: Port to wait for.
959  *
960  *	Waits up to max*10 microseconds for the selected bits in the port's
961  *	status register to be cleared.
962  *	Returns final value of status register.
963  *
964  *	LOCKING:
965  *	Inherited from caller.
966  */
967 
968 static inline u8 ata_busy_wait(struct ata_port *ap, unsigned int bits,
969 			       unsigned int max)
970 {
971 	u8 status;
972 
973 	do {
974 		udelay(10);
975 		status = ata_chk_status(ap);
976 		max--;
977 	} while ((status & bits) && (max > 0));
978 
979 	return status;
980 }
981 
982 
983 /**
984  *	ata_wait_idle - Wait for a port to be idle.
985  *	@ap: Port to wait for.
986  *
987  *	Waits up to 10ms for port's BUSY and DRQ signals to clear.
988  *	Returns final value of status register.
989  *
990  *	LOCKING:
991  *	Inherited from caller.
992  */
993 
994 static inline u8 ata_wait_idle(struct ata_port *ap)
995 {
996 	u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
997 
998 	if (status & (ATA_BUSY | ATA_DRQ)) {
999 		unsigned long l = ap->ioaddr.status_addr;
1000 		if (ata_msg_warn(ap))
1001 			printk(KERN_WARNING "ATA: abnormal status 0x%X on port 0x%lX\n",
1002 				status, l);
1003 	}
1004 
1005 	return status;
1006 }
1007 
/* Mark @qc for polled operation by setting nIEN in its taskfile
 * control register, masking device interrupt delivery. */
static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
{
	qc->tf.ctl |= ATA_NIEN;
}
1012 
1013 static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
1014 						       unsigned int tag)
1015 {
1016 	if (likely(ata_tag_valid(tag)))
1017 		return &ap->qcmd[tag];
1018 	return NULL;
1019 }
1020 
1021 static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
1022 						     unsigned int tag)
1023 {
1024 	struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1025 
1026 	if (unlikely(!qc) || !ap->ops->error_handler)
1027 		return qc;
1028 
1029 	if ((qc->flags & (ATA_QCFLAG_ACTIVE |
1030 			  ATA_QCFLAG_FAILED)) == ATA_QCFLAG_ACTIVE)
1031 		return qc;
1032 
1033 	return NULL;
1034 }
1035 
1036 static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf)
1037 {
1038 	memset(tf, 0, sizeof(*tf));
1039 
1040 	tf->ctl = dev->ap->ctl;
1041 	if (dev->devno == 0)
1042 		tf->device = ATA_DEVICE_OBS;
1043 	else
1044 		tf->device = ATA_DEVICE_OBS | ATA_DEV1;
1045 }
1046 
1047 static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
1048 {
1049 	qc->__sg = NULL;
1050 	qc->flags = 0;
1051 	qc->cursect = qc->cursg = qc->cursg_ofs = 0;
1052 	qc->nsect = 0;
1053 	qc->nbytes = qc->curbytes = 0;
1054 	qc->err_mask = 0;
1055 
1056 	ata_tf_init(qc->dev, &qc->tf);
1057 
1058 	/* init result_tf such that it indicates normal completion */
1059 	qc->result_tf.command = ATA_DRDY;
1060 	qc->result_tf.feature = 0;
1061 }
1062 
1063 /**
1064  *	ata_irq_on - Enable interrupts on a port.
1065  *	@ap: Port on which interrupts are enabled.
1066  *
1067  *	Enable interrupts on a legacy IDE device using MMIO or PIO,
1068  *	wait for idle, clear any pending interrupts.
1069  *
1070  *	LOCKING:
1071  *	Inherited from caller.
1072  */
1073 
1074 static inline u8 ata_irq_on(struct ata_port *ap)
1075 {
1076 	struct ata_ioports *ioaddr = &ap->ioaddr;
1077 	u8 tmp;
1078 
1079 	ap->ctl &= ~ATA_NIEN;
1080 	ap->last_ctl = ap->ctl;
1081 
1082 	if (ap->flags & ATA_FLAG_MMIO)
1083 		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1084 	else
1085 		outb(ap->ctl, ioaddr->ctl_addr);
1086 	tmp = ata_wait_idle(ap);
1087 
1088 	ap->ops->irq_clear(ap);
1089 
1090 	return tmp;
1091 }
1092 
1093 
1094 /**
1095  *	ata_irq_ack - Acknowledge a device interrupt.
1096  *	@ap: Port on which interrupts are enabled.
1097  *
1098  *	Wait up to 10 ms for legacy IDE device to become idle (BUSY
1099  *	or BUSY+DRQ clear).  Obtain dma status and port status from
1100  *	device.  Clear the interrupt.  Return port status.
1101  *
1102  *	LOCKING:
1103  */
1104 
1105 static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
1106 {
1107 	unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
1108 	u8 host_stat, post_stat, status;
1109 
1110 	status = ata_busy_wait(ap, bits, 1000);
1111 	if (status & bits)
1112 		if (ata_msg_err(ap))
1113 			printk(KERN_ERR "abnormal status 0x%X\n", status);
1114 
1115 	/* get controller status; clear intr, err bits */
1116 	if (ap->flags & ATA_FLAG_MMIO) {
1117 		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
1118 		host_stat = readb(mmio + ATA_DMA_STATUS);
1119 		writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
1120 		       mmio + ATA_DMA_STATUS);
1121 
1122 		post_stat = readb(mmio + ATA_DMA_STATUS);
1123 	} else {
1124 		host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
1125 		outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
1126 		     ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
1127 
1128 		post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
1129 	}
1130 
1131 	if (ata_msg_intr(ap))
1132 		printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
1133 			__FUNCTION__,
1134 			host_stat, post_stat, status);
1135 
1136 	return status;
1137 }
1138 
1139 static inline int ata_try_flush_cache(const struct ata_device *dev)
1140 {
1141 	return ata_id_wcache_enabled(dev->id) ||
1142 	       ata_id_has_flush(dev->id) ||
1143 	       ata_id_has_flush_ext(dev->id);
1144 }
1145 
1146 static inline unsigned int ac_err_mask(u8 status)
1147 {
1148 	if (status & (ATA_BUSY | ATA_DRQ))
1149 		return AC_ERR_HSM;
1150 	if (status & (ATA_ERR | ATA_DF))
1151 		return AC_ERR_DEV;
1152 	return 0;
1153 }
1154 
1155 static inline unsigned int __ac_err_mask(u8 status)
1156 {
1157 	unsigned int mask = ac_err_mask(status);
1158 	if (mask == 0)
1159 		return AC_ERR_OTHER;
1160 	return mask;
1161 }
1162 
1163 static inline int ata_pad_alloc(struct ata_port *ap, struct device *dev)
1164 {
1165 	ap->pad_dma = 0;
1166 	ap->pad = dma_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ,
1167 				     &ap->pad_dma, GFP_KERNEL);
1168 	return (ap->pad == NULL) ? -ENOMEM : 0;
1169 }
1170 
/* Free the DMA pad buffer allocated by ata_pad_alloc(). */
static inline void ata_pad_free(struct ata_port *ap, struct device *dev)
{
	dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
}
1175 
1176 static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host)
1177 {
1178 	return (struct ata_port *) &host->hostdata[0];
1179 }
1180 
1181 #endif /* __LINUX_LIBATA_H__ */
1182