/*
   libata-core.c - helper library for ATA

   Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
   Copyright 2003-2004 Jeff Garzik

   The contents of this file are subject to the Open
   Software License version 1.1 that can be found at
   http://www.opensource.org/licenses/osl-1.1.txt and is included herein
   by reference.

   Alternatively, the contents of this file may be used under the terms
   of the GNU General Public License version 2 (the "GPL") as distributed
   in the kernel source COPYING file, in which case the provisions of
   the GPL are applicable instead of the above.  If you wish to allow
   the use of your version of this file only under the terms of the
   GPL and not to allow others to use your version of this file under
   the OSL, indicate your decision by deleting the provisions above and
   replace them with the notice and other provisions required by the GPL.
   If you do not delete the provisions above, a recipient may use your
   version of this file under either the OSL or the GPL.

 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <scsi/scsi.h>
#include "scsi.h"
#include "scsi_priv.h"
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"

static unsigned int ata_busy_sleep (struct ata_port *ap,
				    unsigned long tmout_pat,
				    unsigned long tmout);
static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev);
static void ata_set_mode(struct ata_port *ap);
static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift);
static int fgb(u32 bitmap);
static int ata_choose_xfer_mode(struct ata_port *ap,
				u8 *xfer_mode_out,
				unsigned int *xfer_shift_out);
static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);
static void __ata_qc_complete(struct ata_queued_cmd *qc);

static unsigned int ata_unique_id = 1;
static struct workqueue_struct *ata_wq;

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * ata_tf_load_pio - send taskfile registers to host controller
 * @ap: Port to which output is sent
 * @tf: ATA taskfile register set
 *
 * Outputs ATA taskfile to standard ATA host controller using PIO.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		outb(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		outb(tf->hob_feature, ioaddr->feature_addr);
		outb(tf->hob_nsect, ioaddr->nsect_addr);
		outb(tf->hob_lbal, ioaddr->lbal_addr);
		outb(tf->hob_lbam, ioaddr->lbam_addr);
		outb(tf->hob_lbah, ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		outb(tf->feature, ioaddr->feature_addr);
		outb(tf->nsect, ioaddr->nsect_addr);
		outb(tf->lbal, ioaddr->lbal_addr);
		outb(tf->lbam, ioaddr->lbam_addr);
		outb(tf->lbah, ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		outb(tf->device, ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}

/**
 * ata_tf_load_mmio - send taskfile registers to host controller
 * @ap: Port to which output is sent
 * @tf: ATA taskfile register set
 *
 * Outputs ATA taskfile to standard ATA host controller using MMIO.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
		writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
		writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
		writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
		writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
		writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
		writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
		writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
		writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		writeb(tf->device, (void __iomem *) ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}

void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
{
	if (ap->flags & ATA_FLAG_MMIO)
		ata_tf_load_mmio(ap, tf);
	else
		ata_tf_load_pio(ap, tf);
}

/**
 * ata_exec_command_pio - issue ATA command to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues PIO write to ATA command register, with proper
 * synchronization with interrupt handler / other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */

static void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);

	outb(tf->command, ap->ioaddr.command_addr);
	ata_pause(ap);
}


/**
 * ata_exec_command_mmio - issue ATA command to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues MMIO write to ATA command register, with proper
 * synchronization with interrupt handler / other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */

static void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);

	writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
	ata_pause(ap);
}

void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf)
{
	if (ap->flags & ATA_FLAG_MMIO)
		ata_exec_command_mmio(ap, tf);
	else
		ata_exec_command_pio(ap, tf);
}

/**
 * ata_exec - issue ATA command to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues PIO/MMIO write to ATA command register, with proper
 * synchronization with interrupt handler / other threads.
 *
 * LOCKING:
 * Obtains host_set lock.
 */

static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf)
{
	unsigned long flags;

	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->ops->exec_command(ap, tf);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);
}

/**
 * ata_tf_to_host - issue ATA taskfile to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues ATA taskfile register set to ATA host controller,
 * with proper synchronization with interrupt handler and
 * other threads.
 *
 * LOCKING:
 * Obtains host_set lock.
 */

static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);

	ata_exec(ap, tf);
}

/**
 * ata_tf_to_host_nolock - issue ATA taskfile to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues ATA taskfile register set to ATA host controller,
 * with proper synchronization with interrupt handler and
 * other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */

void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
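
/*
 * Example (illustrative sketch, not a new code path): a caller builds
 * a taskfile and hands it to the port through these helpers, which
 * load the shadow registers and then write the command register.
 * ata_bus_edd() later in this file follows exactly this pattern:
 *
 *	struct ata_taskfile tf;
 *
 *	ata_tf_init(ap, &tf, 0);
 *	tf.command = ATA_CMD_EDD;
 *	tf.protocol = ATA_PROT_NODATA;
 *	ata_tf_to_host(ap, &tf);	(takes the host_set lock itself)
 */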

/**
 * ata_tf_read_pio - input device's ATA taskfile shadow registers
 * @ap: Port from which input is read
 * @tf: ATA taskfile register set for storing input
 *
 * Reads ATA taskfile registers for currently-selected device
 * into @tf.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->nsect = inb(ioaddr->nsect_addr);
	tf->lbal = inb(ioaddr->lbal_addr);
	tf->lbam = inb(ioaddr->lbam_addr);
	tf->lbah = inb(ioaddr->lbah_addr);
	tf->device = inb(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
		tf->hob_feature = inb(ioaddr->error_addr);
		tf->hob_nsect = inb(ioaddr->nsect_addr);
		tf->hob_lbal = inb(ioaddr->lbal_addr);
		tf->hob_lbam = inb(ioaddr->lbam_addr);
		tf->hob_lbah = inb(ioaddr->lbah_addr);
	}
}

/**
 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
 * @ap: Port from which input is read
 * @tf: ATA taskfile register set for storing input
 *
 * Reads ATA taskfile registers for currently-selected device
 * into @tf via MMIO.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
	tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
	tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
	tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
	tf->device = readb((void __iomem *)ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
		tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
		tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
		tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
		tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
		tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
	}
}

void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	if (ap->flags & ATA_FLAG_MMIO)
		ata_tf_read_mmio(ap, tf);
	else
		ata_tf_read_pio(ap, tf);
}

/**
 * ata_check_status_pio - Read device status reg & clear interrupt
 * @ap: port where the device is
 *
 * Reads the ATA taskfile status register for the currently-selected
 * device and returns its value.  This also clears pending interrupts
 * from this device.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 ata_check_status_pio(struct ata_port *ap)
{
	return inb(ap->ioaddr.status_addr);
}

/**
 * ata_check_status_mmio - Read device status reg & clear interrupt
 * @ap: port where the device is
 *
 * Reads the ATA taskfile status register for the currently-selected
 * device via MMIO and returns its value.  This also clears pending
 * interrupts from this device.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 ata_check_status_mmio(struct ata_port *ap)
{
	return readb((void __iomem *) ap->ioaddr.status_addr);
}

u8 ata_check_status(struct ata_port *ap)
{
	if (ap->flags & ATA_FLAG_MMIO)
		return ata_check_status_mmio(ap);
	return ata_check_status_pio(ap);
}
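
/*
 * Example (sketch): because reading the Status register also clears a
 * pending device interrupt, pollers typically loop on the BSY bit via
 * this helper:
 *
 *	u8 status = ata_check_status(ap);
 *	while (status & ATA_BUSY) {
 *		msleep(50);
 *		status = ata_check_status(ap);
 *	}
 *
 * ata_busy_sleep() later in this file implements a bounded version of
 * this loop with "impatience" and overall timeouts.
 */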

u8 ata_altstatus(struct ata_port *ap)
{
	if (ap->ops->check_altstatus)
		return ap->ops->check_altstatus(ap);

	if (ap->flags & ATA_FLAG_MMIO)
		return readb((void __iomem *)ap->ioaddr.altstatus_addr);
	return inb(ap->ioaddr.altstatus_addr);
}

u8 ata_chk_err(struct ata_port *ap)
{
	if (ap->ops->check_err)
		return ap->ops->check_err(ap);

	if (ap->flags & ATA_FLAG_MMIO)
		return readb((void __iomem *) ap->ioaddr.error_addr);
	return inb(ap->ioaddr.error_addr);
}

/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @fis: Buffer into which data will be output
 * @pmp: Port multiplier port
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
					    bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
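
/*
 * Example (illustrative sketch): the 20-byte buffer filled above is a
 * Register - Host to Device FIS.  For a taskfile carrying a READ DMA
 * EXT command on port multiplier port 0:
 *
 *	u8 fis[20];
 *
 *	tf.command = ATA_CMD_READ_EXT;
 *	ata_tf_to_fis(&tf, fis, 0);
 *	fis[0] is 0x27 (FIS type), fis[1] is 0x80 (C bit set, PMP 0),
 *	fis[2] is the command opcode and fis[3] the features field.
 */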

/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a Serial ATA FIS structure (Register - Device to
 * Host) to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf)
{
	tf->command = fis[2];	/* status */
	tf->feature = fis[3];	/* error */

	tf->lbal = fis[4];
	tf->lbam = fis[5];
	tf->lbah = fis[6];
	tf->device = fis[7];

	tf->hob_lbal = fis[8];
	tf->hob_lbam = fis[9];
	tf->hob_lbah = fis[10];

	tf->nsect = fis[12];
	tf->hob_nsect = fis[13];
}

/**
 * ata_prot_to_cmd - determine which read/write opcodes to use
 * @protocol: ATA_PROT_xxx taskfile protocol
 * @lba48: true if lba48 is present
 *
 * Given necessary input, determine which read/write commands
 * to use to transfer data.
 *
 * LOCKING:
 * None.
 */
static int ata_prot_to_cmd(int protocol, int lba48)
{
	int rcmd = 0, wcmd = 0;

	switch (protocol) {
	case ATA_PROT_PIO:
		if (lba48) {
			rcmd = ATA_CMD_PIO_READ_EXT;
			wcmd = ATA_CMD_PIO_WRITE_EXT;
		} else {
			rcmd = ATA_CMD_PIO_READ;
			wcmd = ATA_CMD_PIO_WRITE;
		}
		break;

	case ATA_PROT_DMA:
		if (lba48) {
			rcmd = ATA_CMD_READ_EXT;
			wcmd = ATA_CMD_WRITE_EXT;
		} else {
			rcmd = ATA_CMD_READ;
			wcmd = ATA_CMD_WRITE;
		}
		break;

	default:
		return -1;
	}

	return rcmd | (wcmd << 8);
}
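
/*
 * The return value packs the read opcode in the low byte and the
 * write opcode in the high byte; a caller unpacks it the way
 * ata_dev_set_protocol() does below (sketch):
 *
 *	int cmd = ata_prot_to_cmd(ATA_PROT_DMA, 1);
 *	u8 read_cmd  = cmd & 0xff;		(ATA_CMD_READ_EXT)
 *	u8 write_cmd = (cmd >> 8) & 0xff;	(ATA_CMD_WRITE_EXT)
 */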

/**
 * ata_dev_set_protocol - set taskfile protocol and r/w commands
 * @dev: device to examine and configure
 *
 * Examine the device configuration, after we have
 * read the identify-device page and configured the
 * data transfer mode.  Set internal state related to
 * the ATA taskfile protocol (pio, pio mult, dma, etc.)
 * and calculate the proper read/write commands to use.
 *
 * LOCKING:
 * caller.
 */
static void ata_dev_set_protocol(struct ata_device *dev)
{
	int pio = (dev->flags & ATA_DFLAG_PIO);
	int lba48 = (dev->flags & ATA_DFLAG_LBA48);
	int proto, cmd;

	if (pio)
		proto = dev->xfer_protocol = ATA_PROT_PIO;
	else
		proto = dev->xfer_protocol = ATA_PROT_DMA;

	cmd = ata_prot_to_cmd(proto, lba48);
	if (cmd < 0)
		BUG();

	dev->read_cmd = cmd & 0xff;
	dev->write_cmd = (cmd >> 8) & 0xff;
}

static const char * xfer_mode_str[] = {
	"UDMA/16",
	"UDMA/25",
	"UDMA/33",
	"UDMA/44",
	"UDMA/66",
	"UDMA/100",
	"UDMA/133",
	"UDMA7",
	"MWDMA0",
	"MWDMA1",
	"MWDMA2",
	"PIO0",
	"PIO1",
	"PIO2",
	"PIO3",
	"PIO4",
};

/**
 * ata_mode_string - convert xfer mode bit offset to string
 * @mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @mask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @mask, or the constant C string "<n/a>".
 */

static const char *ata_mode_string(unsigned int mask)
{
	int i;

	for (i = 7; i >= 0; i--)
		if (mask & (1 << i))
			goto out;
	for (i = ATA_SHIFT_MWDMA + 2; i >= ATA_SHIFT_MWDMA; i--)
		if (mask & (1 << i))
			goto out;
	for (i = ATA_SHIFT_PIO + 4; i >= ATA_SHIFT_PIO; i--)
		if (mask & (1 << i))
			goto out;

	return "<n/a>";

out:
	return xfer_mode_str[i];
}
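
/*
 * Example (sketch): the UDMA bits live at shifts 0-7, which is why
 * the first loop above scans bits 7..0 directly into xfer_mode_str[].
 * A mask whose highest UDMA bit is bit 4 maps to "UDMA/66":
 *
 *	const char *s = ata_mode_string(1 << 4);	("UDMA/66")
 */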

/**
 * ata_pio_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * LOCKING:
 * caller.
 */

static unsigned int ata_pio_devchk(struct ata_port *ap,
				   unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	outb(0xaa, ioaddr->nsect_addr);
	outb(0x55, ioaddr->lbal_addr);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	nsect = inb(ioaddr->nsect_addr);
	lbal = inb(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 * ata_mmio_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * LOCKING:
 * caller.
 */

static unsigned int ata_mmio_devchk(struct ata_port *ap,
				    unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
	writeb(0x55, (void __iomem *) ioaddr->lbal_addr);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	nsect = readb((void __iomem *) ioaddr->nsect_addr);
	lbal = readb((void __iomem *) ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 * ata_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * Dispatch ATA device presence detection, depending
 * on whether we are using PIO or MMIO to talk to the
 * ATA shadow registers.
 *
 * LOCKING:
 * caller.
 */

static unsigned int ata_devchk(struct ata_port *ap,
			       unsigned int device)
{
	if (ap->flags & ATA_FLAG_MMIO)
		return ata_mmio_devchk(ap, device);
	return ata_pio_devchk(ap, device);
}
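
/*
 * Example (sketch): bus reset paths use ata_devchk() to build a
 * device-present mask before issuing SRST, much as ata_bus_reset()
 * does later in this file:
 *
 *	unsigned int devmask = 0;
 *
 *	if (ata_devchk(ap, 0))
 *		devmask |= (1 << 0);
 *	if (slave_possible && ata_devchk(ap, 1))
 *		devmask |= (1 << 1);
 */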

/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 * in the event of failure.
 */

unsigned int ata_dev_classify(struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we check only those.  It's sufficient for uniqueness.
	 */

	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
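
/*
 * Example (sketch): after a reset, the signature left in the shadow
 * registers identifies the device type:
 *
 *	struct ata_taskfile tf;
 *
 *	ap->ops->tf_read(ap, &tf);
 *	if (ata_dev_classify(&tf) == ATA_DEV_ATAPI)
 *		the LBA mid/high registers held 0x14/0xeb
 *		(or 0x69/0x96 for the SATA variant).
 */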

/**
 * ata_dev_try_classify - Parse returned ATA device signature
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 * an ATA/ATAPI-defined set of values is placed in the ATA
 * shadow registers, indicating the results of device detection
 * and diagnostics.
 *
 * Select the ATA device, and read the values from the ATA shadow
 * registers.  Then parse according to the Error register value,
 * and the spec-defined values examined by ata_dev_classify().
 *
 * LOCKING:
 * caller.
 */

static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
{
	struct ata_device *dev = &ap->device[device];
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	err = ata_chk_err(ap);
	ap->ops->tf_read(ap, &tf);

	dev->class = ATA_DEV_NONE;

	/* see if device passed diags */
	if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return err;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);
	if (class == ATA_DEV_UNKNOWN)
		return err;
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return err;

	dev->class = class;

	return err;
}

/**
 * ata_dev_id_string - Convert IDENTIFY DEVICE page into string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return.  Must be an even number.
 *
 * The strings in the IDENTIFY DEVICE page are broken up into
 * 16-bit chunks.  Run through the string, and output each
 * 8-bit chunk linearly, regardless of platform.
 *
 * LOCKING:
 * caller.
 */

void ata_dev_id_string(u16 *id, unsigned char *s,
		       unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}
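
/*
 * Example (sketch): extracting the model string from an IDENTIFY
 * page.  The result is space-padded per the spec and not
 * NUL-terminated by this helper, so callers terminate and trim it
 * themselves (see ata_dma_blacklisted() below):
 *
 *	unsigned char model[41];
 *
 *	ata_dev_id_string(dev->id, model, ATA_ID_PROD_OFS, 40);
 *	model[40] = '\0';
 */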

void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}

/**
 * ata_std_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * LOCKING:
 * caller.
 */

void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
	} else {
		outb(tmp, ap->ioaddr.device_addr);
	}
	ata_pause(ap);	/* needed; also flushes, for mmio */
}

/**
 * ata_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 * @wait: non-zero to wait for Status register BSY bit to clear
 * @can_sleep: non-zero if context allows sleeping
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * This is a high-level version of ata_std_dev_select(),
 * which additionally provides the services of inserting
 * the proper pauses and status polling, where needed.
 *
 * LOCKING:
 * caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	VPRINTK("ENTER, ata%u: device %u, wait %u\n",
		ap->id, device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}

/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @dev: Device whose IDENTIFY DEVICE page we will dump
 *
 * Dump selected 16-bit words from a detected device's
 * IDENTIFY DEVICE page.
 *
 * LOCKING:
 * caller.
 */

static inline void ata_dump_id(struct ata_device *dev)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x\n",
		dev->id[49],
		dev->id[53],
		dev->id[63],
		dev->id[64],
		dev->id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x\n",
		dev->id[80],
		dev->id[81],
		dev->id[82],
		dev->id[83],
		dev->id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		dev->id[88],
		dev->id[93]);
}

/**
 * ata_dev_identify - obtain IDENTIFY [PACKET] DEVICE page
 * @ap: port on which the device we wish to probe resides
 * @device: device bus address, starting at zero
 *
 * Following bus reset, we issue the IDENTIFY [PACKET] DEVICE
 * command, and read back the 512-byte device information page.
 * The device information page is fed to us via the standard
 * PIO-IN protocol, but we hand-code it here.  (TODO: investigate
 * using standard PIO-IN paths)
 *
 * After reading the device information page, we use several
 * bits of information from it to initialize data structures
 * that will be used during the lifetime of the ata_device.
 * Other data from the info page is used to disqualify certain
 * older ATA devices we do not wish to support.
 *
 * LOCKING:
 * Inherited from caller.  Some functions called by this function
 * obtain the host_set lock.
 */

static void ata_dev_identify(struct ata_port *ap, unsigned int device)
{
	struct ata_device *dev = &ap->device[device];
	unsigned int major_version;
	u16 tmp;
	unsigned long xfer_modes;
	u8 status;
	unsigned int using_edd;
	DECLARE_COMPLETION(wait);
	struct ata_queued_cmd *qc;
	unsigned long flags;
	int rc;

	if (!ata_dev_present(dev)) {
		DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
			ap->id, device);
		return;
	}

	if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
		using_edd = 0;
	else
		using_edd = 1;

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, device);

	assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
		dev->class == ATA_DEV_NONE);

	ata_dev_select(ap, device, 1, 1); /* select device 0/1 */

	qc = ata_qc_new_init(ap, dev);
	BUG_ON(qc == NULL);

	ata_sg_init_one(qc, dev->id, sizeof(dev->id));
	qc->dma_dir = DMA_FROM_DEVICE;
	qc->tf.protocol = ATA_PROT_PIO;
	qc->nsect = 1;

retry:
	if (dev->class == ATA_DEV_ATA) {
		qc->tf.command = ATA_CMD_ID_ATA;
		DPRINTK("do ATA identify\n");
	} else {
		qc->tf.command = ATA_CMD_ID_ATAPI;
		DPRINTK("do ATAPI identify\n");
	}

	qc->waiting = &wait;
	qc->complete_fn = ata_qc_complete_noop;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	rc = ata_qc_issue(qc);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	if (rc)
		goto err_out;
	else
		wait_for_completion(&wait);

	status = ata_chk_status(ap);
	if (status & ATA_ERR) {
		/*
		 * arg!  EDD works for all test cases, but seems to return
		 * the ATA signature for some ATAPI devices.  Until the
		 * reason for this is found and fixed, we fix up the mess
		 * here.  If IDENTIFY DEVICE returns command aborted
		 * (as ATAPI devices do), then we issue an
		 * IDENTIFY PACKET DEVICE.
		 *
		 * ATA software reset (SRST, the default) does not appear
		 * to have this problem.
		 */
		if ((using_edd) && (qc->tf.command == ATA_CMD_ID_ATA)) {
			u8 err = ata_chk_err(ap);
			if (err & ATA_ABORTED) {
				dev->class = ATA_DEV_ATAPI;
				qc->cursg = 0;
				qc->cursg_ofs = 0;
				qc->cursect = 0;
				qc->nsect = 1;
				goto retry;
			}
		}
		goto err_out;
	}

	swap_buf_le16(dev->id, ATA_ID_WORDS);

	/* print device capabilities */
	printk(KERN_DEBUG "ata%u: dev %u cfg "
	       "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
	       ap->id, device, dev->id[49],
	       dev->id[82], dev->id[83], dev->id[84],
	       dev->id[85], dev->id[86], dev->id[87],
	       dev->id[88]);

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* we require DMA support (bit 8 of word 49) */
	if (!ata_id_has_dma(dev->id)) {
		printk(KERN_DEBUG "ata%u: no dma\n", ap->id);
		goto err_out_nosup;
	}

	/* quick-n-dirty find max transfer mode; for printk only */
	xfer_modes = dev->id[ATA_ID_UDMA_MODES];
	if (!xfer_modes)
		xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA;
	if (!xfer_modes) {
		xfer_modes = (dev->id[ATA_ID_PIO_MODES]) << (ATA_SHIFT_PIO + 3);
		xfer_modes |= (0x7 << ATA_SHIFT_PIO);
	}

	ata_dump_id(dev);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(dev->id))	/* sanity check */
			goto err_out_nosup;

		/* get major version */
		tmp = dev->id[ATA_ID_MAJOR_VER];
		for (major_version = 14; major_version >= 1; major_version--)
			if (tmp & (1 << major_version))
				break;

		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (major_version < 4 || (!ata_id_has_lba(dev->id)))
			ata_dev_init_params(ap, dev);

		if (ata_id_has_lba(dev->id)) {
			dev->flags |= ATA_DFLAG_LBA;

			if (ata_id_has_lba48(dev->id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				dev->n_sectors = ata_id_u64(dev->id, 100);
			} else {
				dev->n_sectors = ata_id_u32(dev->id, 60);
			}

			/* print device info to dmesg */
			printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n",
			       ap->id, device,
			       major_version,
			       ata_mode_string(xfer_modes),
			       (unsigned long long)dev->n_sectors,
			       dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA");
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= dev->id[1];
			dev->heads	= dev->id[3];
			dev->sectors	= dev->id[6];
			dev->n_sectors	= dev->cylinders * dev->heads * dev->sectors;

			if (ata_id_current_chs_valid(dev->id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = dev->id[54];
				dev->heads     = dev->id[55];
				dev->sectors   = dev->id[56];

				dev->n_sectors = ata_id_u32(dev->id, 57);
			}

			/* print device info to dmesg */
			printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n",
			       ap->id, device,
			       major_version,
			       ata_mode_string(xfer_modes),
			       (unsigned long long)dev->n_sectors,
			       (int)dev->cylinders, (int)dev->heads, (int)dev->sectors);
		}

		ap->host->max_cmd_len = 16;
	}

	/* ATAPI-specific feature tests */
	else {
		if (ata_id_is_ata(dev->id))	/* sanity check */
			goto err_out_nosup;

		rc = atapi_cdb_len(dev->id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
			goto err_out_nosup;
		}
		ap->cdb_len = (unsigned int) rc;
		ap->host->max_cmd_len = (unsigned char) ap->cdb_len;

		/* print device info to dmesg */
		printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
		       ap->id, device,
		       ata_mode_string(xfer_modes));
	}

	DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
	return;

err_out_nosup:
	printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
	       ap->id, device);
err_out:
	dev->class++;	/* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
	DPRINTK("EXIT, err\n");
}
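
/*
 * Note (sketch): the capacity fields read above come straight from
 * IDENTIFY words -- words 100-103 form the 64-bit LBA48 sector count,
 * words 60-61 the 28-bit LBA count, and words 54-56 the current CHS
 * translation:
 *
 *	u64 lba48_sectors = ata_id_u64(dev->id, 100);
 *	u32 lba_sectors   = ata_id_u32(dev->id, 60);
 *	u32 chs_sectors   = dev->id[54] * dev->id[55] * dev->id[56];
 */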

/**
 * ata_bus_probe - Reset and probe ATA bus
 * @ap: Bus to probe
 *
 * LOCKING:
 *
 * RETURNS:
 * Zero on success, non-zero on error.
 */

static int ata_bus_probe(struct ata_port *ap)
{
	unsigned int i, found = 0;

	ap->ops->phy_reset(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		goto err_out;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		ata_dev_identify(ap, i);
		if (ata_dev_present(&ap->device[i])) {
			found = 1;
			if (ap->ops->dev_config)
				ap->ops->dev_config(ap, &ap->device[i]);
		}
	}

	if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED))
		goto err_out_disable;

	ata_set_mode(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		goto err_out_disable;

	return 0;

err_out_disable:
	ap->ops->port_disable(ap);
err_out:
	return -1;
}

/**
 * ata_port_probe - Mark port as enabled for device probing
 * @ap: Port for which we indicate enablement
 *
 * LOCKING:
 */

void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_PORT_DISABLED;
}

/**
 * __sata_phy_reset - Wake/reset a low-level SATA PHY
 * @ap: SATA port associated with target SATA PHY
 *
 * LOCKING:
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		scr_write(ap, SCR_CONTROL, 0x301); /* issue phy wake/reset */
		scr_read(ap, SCR_STATUS);	/* dummy read; flush */
		udelay(400);			/* FIXME: a guess */
	}
	scr_write(ap, SCR_CONTROL, 0x300); /* issue phy wake/clear reset */

	/* wait for phy to become ready, if necessary */
	do {
		msleep(200);
		sstatus = scr_read(ap, SCR_STATUS);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* TODO: phy layer with polling, timeouts, etc. */
	if (sata_dev_present(ap))
		ata_port_probe(ap);
	else {
		sstatus = scr_read(ap, SCR_STATUS);
		printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
		       ap->id, sstatus);
		ata_port_disable(ap);
	}

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
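
/*
 * Note (sketch, per the SATA spec): the low nibble of SStatus is the
 * DET field -- 1 means a device was detected but phy communication is
 * not yet established (hence the "!= 1" wait loop above), while 3
 * means device present and phy ready, which is what
 * sata_dev_present() tests:
 *
 *	if ((scr_read(ap, SCR_STATUS) & 0xf) == 0x3)
 *		the phy is up and a device is attached.
 */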

/**
 * sata_phy_reset - Reset SATA bus, then probe it
 * @ap: SATA port associated with target SATA PHY
 *
 * LOCKING:
 *
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;
	ata_bus_reset(ap);
}

/**
 * ata_port_disable - Disable port
 * @ap: Port to be disabled
 *
 * LOCKING:
 */

void ata_port_disable(struct ata_port *ap)
{
	ap->device[0].class = ATA_DEV_NONE;
	ap->device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_PORT_DISABLED;
}

static struct {
	unsigned int shift;
	u8 base;
} xfer_mode_classes[] = {
	{ ATA_SHIFT_UDMA,	XFER_UDMA_0 },
	{ ATA_SHIFT_MWDMA,	XFER_MW_DMA_0 },
	{ ATA_SHIFT_PIO,	XFER_PIO_0 },
};

static inline u8 base_from_shift(unsigned int shift)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++)
		if (xfer_mode_classes[i].shift == shift)
			return xfer_mode_classes[i].base;

	return 0xff;
}

static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
{
	int ofs, idx;
	u8 base;

	if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
		return;

	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	ata_dev_set_xfermode(ap, dev);

	base = base_from_shift(dev->xfer_shift);
	ofs = dev->xfer_mode - base;
	idx = ofs + dev->xfer_shift;
	WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str));

	DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n",
		idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs);

	printk(KERN_INFO "ata%u: dev %u configured for %s\n",
	       ap->id, dev->devno, xfer_mode_str[idx]);
}

static int ata_host_set_pio(struct ata_port *ap)
{
	unsigned int mask;
	int x, i;
	u8 base, xfer_mode;

	mask = ata_get_mode_mask(ap, ATA_SHIFT_PIO);
	x = fgb(mask);
	if (x < 0) {
		printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);
		return -1;
	}

	base = base_from_shift(ATA_SHIFT_PIO);
	xfer_mode = base + x;

	DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n",
		(int)base, (int)xfer_mode, mask, x);

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		if (ata_dev_present(dev)) {
			dev->pio_mode = xfer_mode;
			dev->xfer_mode = xfer_mode;
			dev->xfer_shift = ATA_SHIFT_PIO;
			if (ap->ops->set_piomode)
				ap->ops->set_piomode(ap, dev);
		}
	}

	return 0;
}

static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
			     unsigned int xfer_shift)
{
	int i;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		if (ata_dev_present(dev)) {
			dev->dma_mode = xfer_mode;
			dev->xfer_mode = xfer_mode;
			dev->xfer_shift = xfer_shift;
			if (ap->ops->set_dmamode)
				ap->ops->set_dmamode(ap, dev);
		}
	}
}

/**
 * ata_set_mode - Program timings and issue SET FEATURES - XFER
 * @ap: port on which timings will be programmed
 *
 * LOCKING:
 *
 */
static void ata_set_mode(struct ata_port *ap)
{
	unsigned int i, xfer_shift;
	u8 xfer_mode;
	int rc;

	/* step 1: always set host PIO timings */
	rc = ata_host_set_pio(ap);
	if (rc)
		goto err_out;

	/* step 2: choose the best data xfer mode */
	xfer_mode = xfer_shift = 0;
	rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift);
	if (rc)
		goto err_out;

	/* step 3: if that xfer mode isn't PIO, set host DMA timings */
	if (xfer_shift != ATA_SHIFT_PIO)
		ata_host_set_dma(ap, xfer_mode, xfer_shift);

	/* step 4: update devices' xfer mode */
	ata_dev_set_mode(ap, &ap->device[0]);
	ata_dev_set_mode(ap, &ap->device[1]);

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

	for (i = 0; i < 2; i++) {
		struct ata_device *dev = &ap->device[i];
		ata_dev_set_protocol(dev);
	}

	return;

err_out:
	ata_port_disable(ap);
}

/**
 * ata_busy_sleep - sleep until BSY clears, or timeout
 * @ap: port containing status register to be polled
 * @tmout_pat: impatience timeout
 * @tmout: overall timeout
 *
 * LOCKING:
 *
 */

static unsigned int ata_busy_sleep (struct ata_port *ap,
				    unsigned long tmout_pat,
				    unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status & ATA_BUSY)
		printk(KERN_WARNING "ata%u is slow to respond, "
		       "please be patient\n", ap->id);

	timeout = timer_start + tmout;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status & ATA_BUSY) {
		printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
		       ap->id, tmout / HZ);
		return 1;
	}

	return 0;
}
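
/*
 * Example (sketch): reset paths call this with a short "impatience"
 * timeout, after which the warning above is printed, and a longer
 * hard timeout, exactly as __sata_phy_reset() does:
 *
 *	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT))
 *		ata_port_disable(ap);	(device never cleared BSY)
 */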

static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		if (ap->flags & ATA_FLAG_MMIO) {
			nsect = readb((void __iomem *) ioaddr->nsect_addr);
			lbal = readb((void __iomem *) ioaddr->lbal_addr);
		} else {
			nsect = inb(ioaddr->nsect_addr);
			lbal = inb(ioaddr->lbal_addr);
		}
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			dev1 = 0;
			break;
		}
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}

/**
 * ata_bus_edd - Reset bus via EXECUTE DEVICE DIAGNOSTIC
 * @ap: port to reset
 *
 * LOCKING:
 *
 */

static unsigned int ata_bus_edd(struct ata_port *ap)
{
	struct ata_taskfile tf;

	/* set up execute-device-diag (bus reset) taskfile */
	/* also, take interrupts to a known state (disabled) */
	DPRINTK("execute-device-diag\n");
	ata_tf_init(ap, &tf, 0);
	tf.ctl |= ATA_NIEN;
	tf.command = ATA_CMD_EDD;
	tf.protocol = ATA_PROT_NODATA;

	/* do bus reset */
	ata_tf_to_host(ap, &tf);

	/* spec says at least 2ms.  but who knows with those
	 * crazy ATAPI devices...
	 */
	msleep(150);

	return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
}

static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->id);

	/* software reset.  causes dev0 to be selected */
	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
	} else {
		outb(ap->ctl, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl, ioaddr->ctl_addr);
	}

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 */
	msleep(150);

	ata_bus_post_reset(ap, devmask);

	return 0;
}

/**
 * ata_bus_reset - reset host port and associated ATA channel
 * @ap: port to reset
 *
 * This is typically the first time we actually start issuing
 * commands to the ATA channel.  We wait for BSY to clear, then
 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 * result.  Determine what devices, if any, are on the channel
 * by looking at the device 0/1 error register.  Look at the signature
 * stored in each device's taskfile registers, to determine if
 * the device is ATA or ATAPI.
 *
 * LOCKING:
 * Inherited from caller.  Some functions called by this function
 * obtain the host_set lock.
 *
 * SIDE EFFECTS:
 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST)
		rc = ata_bus_softreset(ap, devmask);
	else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
		/* set up device control */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
		rc = ata_bus_edd(ap);
	}

	if (rc)
		goto err_out;

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	err = ata_dev_try_classify(ap, 0);
	if ((slave_possible) && (err != 0x81))
		ata_dev_try_classify(ap, 1);

	/* re-enable interrupts */
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
		ata_irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	printk(KERN_ERR "ata%u: disabling port\n", ap->id);
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}

static void ata_pr_blacklisted(struct ata_port *ap, struct ata_device *dev)
{
	printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n",
	       ap->id, dev->devno);
}

static const char * ata_dma_blacklist [] = {
	"WDC AC11000H",
	"WDC AC22100H",
	"WDC AC32500H",
	"WDC AC33100H",
	"WDC AC31600H",
	"WDC AC32100H",
	"WDC AC23200L",
	"Compaq CRD-8241B",
	"CRD-8400B",
	"CRD-8480B",
	"CRD-8482B",
	"CRD-84",
	"SanDisk SDP3B",
	"SanDisk SDP3B-64",
	"SANYO CD-ROM CRD",
	"HITACHI CDR-8",
	"HITACHI CDR-8335",
	"HITACHI CDR-8435",
	"Toshiba CD-ROM XM-6202B",
	"CD-532E-A",
	"E-IDE CD-ROM CR-840",
	"CD-ROM Drive/F5A",
	"WPI CDD-820",
	"SAMSUNG CD-ROM SC-148C",
	"SAMSUNG CD-ROM SC",
	"SanDisk SDP3B-64",
	"SAMSUNG CD-ROM SN-124",
	"ATAPI CD-ROM DRIVE 40X MAXIMUM",
	"_NEC DV5800A",
};

static int ata_dma_blacklisted(struct ata_port *ap, struct ata_device *dev)
{
	unsigned char model_num[40];
	char *s;
	unsigned int len;
	int i;

	ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
			  sizeof(model_num));
	s = &model_num[0];
	len = strnlen(s, sizeof(model_num));

	/* ATAPI specifies that empty space is blank-filled; remove blanks */
	while ((len > 0) && (s[len - 1] == ' ')) {
		len--;
		s[len] = 0;
	}

	for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++)
		if (!strncmp(ata_dma_blacklist[i], s, len))
			return 1;

	return 0;
}

static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift)
{
	struct ata_device *master, *slave;
	unsigned int mask;

	master = &ap->device[0];
	slave = &ap->device[1];

	assert (ata_dev_present(master) || ata_dev_present(slave));

	if (shift == ATA_SHIFT_UDMA) {
		mask = ap->udma_mask;
		if (ata_dev_present(master)) {
			mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
			if (ata_dma_blacklisted(ap, master)) {
				mask = 0;
				ata_pr_blacklisted(ap, master);
			}
		}
		if (ata_dev_present(slave)) {
			mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
			if (ata_dma_blacklisted(ap, slave)) {
				mask = 0;
				ata_pr_blacklisted(ap, slave);
			}
		}
	}
	else if (shift == ATA_SHIFT_MWDMA) {
		mask = ap->mwdma_mask;
		if (ata_dev_present(master)) {
			mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
			if (ata_dma_blacklisted(ap, master)) {
				mask = 0;
				ata_pr_blacklisted(ap, master);
			}
		}
		if (ata_dev_present(slave)) {
			mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
			if (ata_dma_blacklisted(ap, slave)) {
				mask = 0;
				ata_pr_blacklisted(ap, slave);
			}
		}
	}
	else if (shift == ATA_SHIFT_PIO) {
		mask = ap->pio_mask;
		if (ata_dev_present(master)) {
			/* spec doesn't return explicit support for
			 * PIO0-2, so we fake it
			 */
			u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03;
			tmp_mode <<= 3;
			tmp_mode |= 0x7;
			mask &= tmp_mode;
		}
		if (ata_dev_present(slave)) {
			/* spec doesn't return explicit support for
			 * PIO0-2, so we fake it
			 */
			u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03;
			tmp_mode <<= 3;
			tmp_mode |= 0x7;
			mask &= tmp_mode;
		}
	}
	else {
		mask = 0xffffffff;	/* shut up compiler warning */
		BUG();
	}

	return mask;
}

/* find greatest bit */
static int fgb(u32 bitmap)
{
	unsigned int i;
	int x = -1;

	for (i = 0; i < 32; i++)
		if (bitmap & (1 << i))
			x = i;

	return x;
}
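
/*
 * Example: fgb(0x14) returns 4, since bit 4 is the highest bit set in
 * the bitmap, and fgb(0) returns -1.  ata_choose_xfer_mode() below
 * uses this to pick the fastest mode a mask allows.
 */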

/**
 * ata_choose_xfer_mode - attempt to find best transfer mode
 * @ap: Port for which an xfer mode will be selected
 * @xfer_mode_out: (output) SET FEATURES - XFER MODE code
 * @xfer_shift_out: (output) bit shift that selects this mode
 *
 * LOCKING:
 *
 * RETURNS:
 * Zero on success, negative on error.
 */

static int ata_choose_xfer_mode(struct ata_port *ap,
				u8 *xfer_mode_out,
				unsigned int *xfer_shift_out)
{
	unsigned int mask, shift;
	int x, i;

	for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) {
		shift = xfer_mode_classes[i].shift;
		mask = ata_get_mode_mask(ap, shift);

		x = fgb(mask);
		if (x >= 0) {
			*xfer_mode_out = xfer_mode_classes[i].base + x;
			*xfer_shift_out = shift;
			return 0;
		}
	}

	return -1;
}

/**
 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 * @ap: Port associated with device @dev
 * @dev: Device to which command will be sent
 *
 * LOCKING:
 */

static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
{
	DECLARE_COMPLETION(wait);
	struct ata_queued_cmd *qc;
	int rc;
	unsigned long flags;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	qc = ata_qc_new_init(ap, dev);
	BUG_ON(qc == NULL);

	qc->tf.command = ATA_CMD_SET_FEATURES;
	qc->tf.feature = SETFEATURES_XFER;
	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	qc->tf.protocol = ATA_PROT_NODATA;
	qc->tf.nsect = dev->xfer_mode;

	qc->waiting = &wait;
	qc->complete_fn = ata_qc_complete_noop;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	rc = ata_qc_issue(qc);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	if (rc)
		ata_port_disable(ap);
	else
		wait_for_completion(&wait);

	DPRINTK("EXIT\n");
}

/**
 * ata_dev_init_params - Issue INIT DEV PARAMS command
 * @ap: Port associated with device @dev
 * @dev: Device to which command will be sent
 *
 * LOCKING:
 */

static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev)
{
	DECLARE_COMPLETION(wait);
	struct ata_queued_cmd *qc;
	int rc;
	unsigned long flags;
	u16 sectors = dev->id[6];
	u16 heads = dev->id[3];

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return;

	/* set up init dev params taskfile */
	DPRINTK("init dev params\n");

	qc = ata_qc_new_init(ap, dev);
	BUG_ON(qc == NULL);

	qc->tf.command = ATA_CMD_INIT_DEV_PARAMS;
	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	qc->tf.protocol = ATA_PROT_NODATA;
	qc->tf.nsect = sectors;
	qc->tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	qc->waiting = &wait;
	qc->complete_fn = ata_qc_complete_noop;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	rc = ata_qc_issue(qc);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	if (rc)
		ata_port_disable(ap);
	else
		wait_for_completion(&wait);

	DPRINTK("EXIT\n");
}

/**
 * ata_sg_clean - Unmap DMA memory associated with command
 * @qc: Command containing DMA memory to be released
 *
 * LOCKING:
 */

static void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->sg;
	int dir = qc->dma_dir;

	assert(qc->flags & ATA_QCFLAG_DMAMAP);
	assert(sg != NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		assert(qc->n_elem == 1);

	DPRINTK("unmapping %u sg elements\n", qc->n_elem);

	if (qc->flags & ATA_QCFLAG_SG)
		dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
	else
		dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]),
				 sg_dma_len(&sg[0]), dir);

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->sg = NULL;
}
2065
2066/**
2067 * ata_fill_sg - Fill PCI IDE PRD table
2068 * @qc: Metadata associated with taskfile to be transferred
2069 *
2070 * LOCKING:
2071 * spin_lock_irqsave(host_set lock)
2072 */
2073static void ata_fill_sg(struct ata_queued_cmd *qc)
2074{
2075 struct scatterlist *sg = qc->sg;
2076 struct ata_port *ap = qc->ap;
2077 unsigned int idx, nelem;
2078
2079 assert(sg != NULL);
2080 assert(qc->n_elem > 0);
2081
2082 idx = 0;
2083 for (nelem = qc->n_elem; nelem; nelem--,sg++) {
2084 u32 addr, offset;
2085 u32 sg_len, len;
2086
2087 /* determine if physical DMA addr spans 64K boundary.
2088 * Note h/w doesn't support 64-bit, so we unconditionally
2089 * truncate dma_addr_t to u32.
2090 */
2091 addr = (u32) sg_dma_address(sg);
2092 sg_len = sg_dma_len(sg);
2093
2094 while (sg_len) {
2095 offset = addr & 0xffff;
2096 len = sg_len;
2097 if ((offset + sg_len) > 0x10000)
2098 len = 0x10000 - offset;
2099
2100 ap->prd[idx].addr = cpu_to_le32(addr);
2101 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2102 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
2103
2104 idx++;
2105 sg_len -= len;
2106 addr += len;
2107 }
2108 }
2109
2110 if (idx)
2111 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2112}
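
/*
 * Illustrative sketch (not part of the driver): how the loop in
 * ata_fill_sg() splits one DMA segment that crosses a 64KiB boundary
 * into two PRD entries.  The address and length are hypothetical.
 */
static void prd_split_example(void)
{
	u32 addr = 0xf000, sg_len = 0x2000;	/* crosses 0x10000 */

	while (sg_len) {
		u32 offset = addr & 0xffff;
		u32 len = sg_len;

		if ((offset + sg_len) > 0x10000)
			len = 0x10000 - offset;
		/* first pass emits (0xf000, 0x1000);
		 * second pass emits (0x10000, 0x1000) */
		sg_len -= len;
		addr += len;
	}
}
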
2113/**
2114 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2115 * @qc: Metadata associated with taskfile to check
2116 *
2117 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 *
2118 * RETURNS: 0 when ATAPI DMA can be used,
2119 * nonzero otherwise
2120 */
2121int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2122{
2123 struct ata_port *ap = qc->ap;
2124 int rc = 0; /* Assume ATAPI DMA is OK by default */
2125
2126 if (ap->ops->check_atapi_dma)
2127 rc = ap->ops->check_atapi_dma(qc);
2128
2129 return rc;
2130}
2131/**
2132 * ata_qc_prep - Prepare taskfile for submission
2133 * @qc: Metadata associated with taskfile to be prepared
2134 *
2135 * LOCKING:
2136 * spin_lock_irqsave(host_set lock)
2137 */
2138void ata_qc_prep(struct ata_queued_cmd *qc)
2139{
2140 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2141 return;
2142
2143 ata_fill_sg(qc);
2144}
2145
2146void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2147{
2148 struct scatterlist *sg;
2149
2150 qc->flags |= ATA_QCFLAG_SINGLE;
2151
2152 memset(&qc->sgent, 0, sizeof(qc->sgent));
2153 qc->sg = &qc->sgent;
2154 qc->n_elem = 1;
2155 qc->buf_virt = buf;
2156
2157 sg = qc->sg;
2158 sg->page = virt_to_page(buf);
2159 sg->offset = (unsigned long) buf & ~PAGE_MASK;
2160 sg_dma_len(sg) = buflen;
2161}
2162
2163void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2164 unsigned int n_elem)
2165{
2166 qc->flags |= ATA_QCFLAG_SG;
2167 qc->sg = sg;
2168 qc->n_elem = n_elem;
2169}
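
/*
 * Illustrative sketch (not part of the driver): preparing a command for
 * a single kernel buffer, much as atapi_request_sense() below does for
 * the sense buffer.  @qc and @buf are assumed to come from the caller.
 */
static void sg_init_one_example(struct ata_queued_cmd *qc, void *buf)
{
	ata_sg_init_one(qc, buf, 512);	/* one synthesized sg entry */
	qc->dma_dir = DMA_FROM_DEVICE;	/* device-to-host transfer */
}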
2170
2171/**
2172 * ata_sg_setup_one - DMA-map the buffer associated with a command
2173 * @qc: Command with a single memory buffer to be mapped
2174 *
2175 * LOCKING:
2176 * spin_lock_irqsave(host_set lock)
2177 *
2178 * RETURNS:
2179 * Zero on success, negative on error.
2180 */
2181
2182static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2183{
2184 struct ata_port *ap = qc->ap;
2185 int dir = qc->dma_dir;
2186 struct scatterlist *sg = qc->sg;
2187 dma_addr_t dma_address;
2188
2189 dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
2190 sg_dma_len(sg), dir);
2191 if (dma_mapping_error(dma_address))
2192 return -1;
2193
2194 sg_dma_address(sg) = dma_address;
2195
2196 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
2197 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2198
2199 return 0;
2200}
2201
2202/**
2203 * ata_sg_setup - DMA-map the scatter/gather table of a command
2204 * @qc: Command with scatter/gather table to be mapped
2205 *
2206 * LOCKING:
2207 * spin_lock_irqsave(host_set lock)
2208 *
2209 * RETURNS:
2210 * Zero on success, negative on error.
2211 */
2212
2213static int ata_sg_setup(struct ata_queued_cmd *qc)
2214{
2215 struct ata_port *ap = qc->ap;
2216 struct scatterlist *sg = qc->sg;
2217 int n_elem, dir;
2218
2219 VPRINTK("ENTER, ata%u\n", ap->id);
2220 assert(qc->flags & ATA_QCFLAG_SG);
2221
2222 dir = qc->dma_dir;
2223 n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir);
2224 if (n_elem < 1)
2225 return -1;
2226
2227 DPRINTK("%d sg elements mapped\n", n_elem);
2228
2229 qc->n_elem = n_elem;
2230
2231 return 0;
2232}
2233
2234/**
2235 * ata_pio_poll - poll device status, advancing the PIO state machine
2236 * @ap: Port on which a polled PIO command is in progress
2237 *
2238 * LOCKING:
2239 * None (executing in kernel thread context)
2240 *
2241 * RETURNS:
 * Delay in jiffies before the next poll, or zero when the wait
 * is complete or has timed out.
2242 */
2243
2244static unsigned long ata_pio_poll(struct ata_port *ap)
2245{
2246 u8 status;
2247 unsigned int poll_state = PIO_ST_UNKNOWN;
2248 unsigned int reg_state = PIO_ST_UNKNOWN;
2249 const unsigned int tmout_state = PIO_ST_TMOUT;
2250
2251 switch (ap->pio_task_state) {
2252 case PIO_ST:
2253 case PIO_ST_POLL:
2254 poll_state = PIO_ST_POLL;
2255 reg_state = PIO_ST;
2256 break;
2257 case PIO_ST_LAST:
2258 case PIO_ST_LAST_POLL:
2259 poll_state = PIO_ST_LAST_POLL;
2260 reg_state = PIO_ST_LAST;
2261 break;
2262 default:
2263 BUG();
2264 break;
2265 }
2266
2267 status = ata_chk_status(ap);
2268 if (status & ATA_BUSY) {
2269 if (time_after(jiffies, ap->pio_task_timeout)) {
2270 ap->pio_task_state = tmout_state;
2271 return 0;
2272 }
2273 ap->pio_task_state = poll_state;
2274 return ATA_SHORT_PAUSE;
2275 }
2276
2277 ap->pio_task_state = reg_state;
2278 return 0;
2279}
2280
2281/**
2282 * ata_pio_complete - complete a polled PIO command after its last data block
2283 * @ap: Port on which the command is completing
2284 *
2285 * LOCKING:
 * None (executing in kernel thread context)
2286 */
2287
2288static void ata_pio_complete (struct ata_port *ap)
2289{
2290 struct ata_queued_cmd *qc;
2291 u8 drv_stat;
2292
2293 /*
2294 * This is purely heuristic.  This is a fast path.
2295 * Sometimes when we enter, BSY will be cleared in
2296 * a chk-status or two.  If not, the drive is probably seeking
2297 * or something.  Snooze for a couple msecs, then
2298 * chk-status again.  If still busy, fall back to
2299 * PIO_ST_LAST_POLL state.
2300 */
2301 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2302 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2303 msleep(2);
2304 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2305 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2306 ap->pio_task_state = PIO_ST_LAST_POLL;
2307 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2308 return;
2309 }
2310 }
2311
2312 drv_stat = ata_wait_idle(ap);
2313 if (!ata_ok(drv_stat)) {
2314 ap->pio_task_state = PIO_ST_ERR;
2315 return;
2316 }
2317
2318 qc = ata_qc_from_tag(ap, ap->active_tag);
2319 assert(qc != NULL);
2320
2321 ap->pio_task_state = PIO_ST_IDLE;
2322
2323 ata_irq_on(ap);
2324
2325 ata_qc_complete(qc, drv_stat);
2326}
2327
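/**
 * swap_buf_le16 - Byte-swap a buffer of 16-bit words on big-endian hosts
 * @buf: Buffer to convert
 * @buf_words: Number of 16-bit words in @buf
 *
 * ATA data is little-endian on the wire; this helper converts @buf
 * in place to CPU byte order and is a no-op on little-endian hosts.
 *
 * LOCKING:
 * Inherited from caller.
 */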
2328void swap_buf_le16(u16 *buf, unsigned int buf_words)
2329{
2330#ifdef __BIG_ENDIAN
2331 unsigned int i;
2332
2333 for (i = 0; i < buf_words; i++)
2334 buf[i] = le16_to_cpu(buf[i]);
2335#endif /* __BIG_ENDIAN */
2336}
2337
2338static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
2339 unsigned int buflen, int write_data)
2340{
2341 unsigned int i;
2342 unsigned int words = buflen >> 1;
2343 u16 *buf16 = (u16 *) buf;
2344 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
2345
2346 if (write_data) {
2347 for (i = 0; i < words; i++)
2348 writew(le16_to_cpu(buf16[i]), mmio);
2349 } else {
2350 for (i = 0; i < words; i++)
2351 buf16[i] = cpu_to_le16(readw(mmio));
2352 }
2353}
2354
2355static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
2356 unsigned int buflen, int write_data)
2357{
2358 unsigned int words = buflen >> 1; /* outsw/insw move 16-bit words */
2359
2360 if (write_data)
2361 outsw(ap->ioaddr.data_addr, buf, words);
2362 else
2363 insw(ap->ioaddr.data_addr, buf, words);
2364}
2365
2366static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
2367 unsigned int buflen, int do_write)
2368{
2369 if (ap->flags & ATA_FLAG_MMIO)
2370 ata_mmio_data_xfer(ap, buf, buflen, do_write);
2371 else
2372 ata_pio_data_xfer(ap, buf, buflen, do_write);
2373}
2374
2375static void ata_pio_sector(struct ata_queued_cmd *qc)
2376{
2377 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2378 struct scatterlist *sg = qc->sg;
2379 struct ata_port *ap = qc->ap;
2380 struct page *page;
2381 unsigned int offset;
2382 unsigned char *buf;
2383
2384 if (qc->cursect == (qc->nsect - 1))
2385 ap->pio_task_state = PIO_ST_LAST;
2386
2387 page = sg[qc->cursg].page;
2388 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
2389
2390 /* get the current page and offset */
2391 page = nth_page(page, (offset >> PAGE_SHIFT));
2392 offset %= PAGE_SIZE;
2393
2394 buf = kmap(page) + offset;
2395
2396 qc->cursect++;
2397 qc->cursg_ofs++;
2398
2399 if ((qc->cursg_ofs * ATA_SECT_SIZE) == sg_dma_len(&sg[qc->cursg])) {
2400 qc->cursg++;
2401 qc->cursg_ofs = 0;
2402 }
2403
2404 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2405
2406 /* do the actual data transfer */
2407 do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2408 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
2409
2410 kunmap(page);
2411}
2412
2413static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
2414{
2415 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2416 struct scatterlist *sg = qc->sg;
2417 struct ata_port *ap = qc->ap;
2418 struct page *page;
2419 unsigned char *buf;
2420 unsigned int offset, count;
2421
2422 if (qc->curbytes == qc->nbytes - bytes)
2423 ap->pio_task_state = PIO_ST_LAST;
2424
2425next_sg:
2426 sg = &qc->sg[qc->cursg];
2427
2428next_page:
2429 page = sg->page;
2430 offset = sg->offset + qc->cursg_ofs;
2431
2432 /* get the current page and offset */
2433 page = nth_page(page, (offset >> PAGE_SHIFT));
2434 offset %= PAGE_SIZE;
2435
2436 count = min(sg_dma_len(sg) - qc->cursg_ofs, bytes);
2437
2438 /* don't cross page boundaries */
2439 count = min(count, (unsigned int)PAGE_SIZE - offset);
2440
2441 buf = kmap(page) + offset;
2442
2443 bytes -= count;
2444 qc->curbytes += count;
2445 qc->cursg_ofs += count;
2446
2447 if (qc->cursg_ofs == sg_dma_len(sg)) {
2448 qc->cursg++;
2449 qc->cursg_ofs = 0;
2450 }
2451
2452 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2453
2454 /* do the actual data transfer */
2455 ata_data_xfer(ap, buf, count, do_write);
2456
2457 kunmap(page);
2458
2459 if (bytes) {
2460 if (qc->cursg_ofs < sg_dma_len(sg))
2461 goto next_page;
2462 goto next_sg;
2463 }
2464}
2465
2466static void atapi_pio_bytes(struct ata_queued_cmd *qc)
2467{
2468 struct ata_port *ap = qc->ap;
2469 struct ata_device *dev = qc->dev;
2470 unsigned int ireason, bc_lo, bc_hi, bytes;
2471 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
2472
2473 ap->ops->tf_read(ap, &qc->tf);
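 /* interrupt reason: bit 0 = CoD (1 = command packet, 0 = user data),
 * bit 1 = I/O (1 = transfer to host) */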
2474 ireason = qc->tf.nsect;
2475 bc_lo = qc->tf.lbam;
2476 bc_hi = qc->tf.lbah;
2477 bytes = (bc_hi << 8) | bc_lo;
2478
2479 /* shall be cleared to zero, indicating xfer of data */
2480 if (ireason & (1 << 0))
2481 goto err_out;
2482
2483 /* make sure transfer direction matches expected */
2484 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
2485 if (do_write != i_write)
2486 goto err_out;
2487
2488 __atapi_pio_bytes(qc, bytes);
2489
2490 return;
2491
2492err_out:
2493 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
2494 ap->id, dev->devno);
2495 ap->pio_task_state = PIO_ST_ERR;
2496}
2497
2498/**
2499 * ata_pio_block - start the next PIO data block transfer
2500 * @ap: Port on which a polled PIO command is in progress
2501 *
2502 * LOCKING:
 * None (executing in kernel thread context)
2503 */
2504
2505static void ata_pio_block(struct ata_port *ap)
2506{
2507 struct ata_queued_cmd *qc;
2508 u8 status;
2509
2510 /*
2511 * This is purely heuristic. This is a fast path.
2512 * Sometimes when we enter, BSY will be cleared in
2513 * a chk-status or two. If not, the drive is probably seeking
2514 * or something. Snooze for a couple msecs, then
2515 * chk-status again. If still busy, fall back to
2516 * PIO_ST_POLL state.
2517 */
2518 status = ata_busy_wait(ap, ATA_BUSY, 5);
2519 if (status & ATA_BUSY) {
2520 msleep(2);
2521 status = ata_busy_wait(ap, ATA_BUSY, 10);
2522 if (status & ATA_BUSY) {
2523 ap->pio_task_state = PIO_ST_POLL;
2524 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2525 return;
2526 }
2527 }
2528
2529 qc = ata_qc_from_tag(ap, ap->active_tag);
2530 assert(qc != NULL);
2531
2532 if (is_atapi_taskfile(&qc->tf)) {
2533 /* no more data to transfer or unsupported ATAPI command */
2534 if ((status & ATA_DRQ) == 0) {
2535 ap->pio_task_state = PIO_ST_IDLE;
2536
2537 ata_irq_on(ap);
2538
2539 ata_qc_complete(qc, status);
2540 return;
2541 }
2542
2543 atapi_pio_bytes(qc);
2544 } else {
2545 /* handle BSY=0, DRQ=0 as error */
2546 if ((status & ATA_DRQ) == 0) {
2547 ap->pio_task_state = PIO_ST_ERR;
2548 return;
2549 }
2550
2551 ata_pio_sector(qc);
2552 }
2553}
2554
2555static void ata_pio_error(struct ata_port *ap)
2556{
2557 struct ata_queued_cmd *qc;
2558 u8 drv_stat;
2559
2560 qc = ata_qc_from_tag(ap, ap->active_tag);
2561 assert(qc != NULL);
2562
2563 drv_stat = ata_chk_status(ap);
2564 printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n",
2565 ap->id, drv_stat);
2566
2567 ap->pio_task_state = PIO_ST_IDLE;
2568
2569 ata_irq_on(ap);
2570
2571 ata_qc_complete(qc, drv_stat | ATA_ERR);
2572}
2573
2574static void ata_pio_task(void *_data)
2575{
2576 struct ata_port *ap = _data;
2577 unsigned long timeout = 0;
2578
2579 switch (ap->pio_task_state) {
2580 case PIO_ST_IDLE:
2581 return;
2582
2583 case PIO_ST:
2584 ata_pio_block(ap);
2585 break;
2586
2587 case PIO_ST_LAST:
2588 ata_pio_complete(ap);
2589 break;
2590
2591 case PIO_ST_POLL:
2592 case PIO_ST_LAST_POLL:
2593 timeout = ata_pio_poll(ap);
2594 break;
2595
2596 case PIO_ST_TMOUT:
2597 case PIO_ST_ERR:
2598 ata_pio_error(ap);
2599 return;
2600 }
2601
2602 if (timeout)
2603 queue_delayed_work(ata_wq, &ap->pio_task,
2604 timeout);
2605 else
2606 queue_work(ata_wq, &ap->pio_task);
2607}
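
/*
 * Summary of the polled PIO state machine driven by ata_pio_task()
 * (derived from the code above):
 *
 *	PIO_ST			-> transfer a data block (ata_pio_block)
 *	PIO_ST_LAST		-> wait for BSY/DRQ to clear, complete the
 *				   command (ata_pio_complete)
 *	PIO_ST_POLL,
 *	PIO_ST_LAST_POLL	-> device still busy; re-queue the task
 *				   after the delay from ata_pio_poll()
 *	PIO_ST_TMOUT,
 *	PIO_ST_ERR		-> complete the command with an error
 *				   (ata_pio_error)
 *	PIO_ST_IDLE		-> nothing to do
 */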
2608
2609static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
2610 struct scsi_cmnd *cmd)
2611{
2612 DECLARE_COMPLETION(wait);
2613 struct ata_queued_cmd *qc;
2614 unsigned long flags;
2615 int rc;
2616
2617 DPRINTK("ATAPI request sense\n");
2618
2619 qc = ata_qc_new_init(ap, dev);
2620 BUG_ON(qc == NULL);
2621
2622 /* FIXME: is this needed? */
2623 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
2624
2625 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
2626 qc->dma_dir = DMA_FROM_DEVICE;
2627
2628 memset(&qc->cdb, 0, sizeof(qc->cdb)); /* zero the whole CDB buffer */
2629 qc->cdb[0] = REQUEST_SENSE;
2630 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2631
2632 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2633 qc->tf.command = ATA_CMD_PACKET;
2634
2635 qc->tf.protocol = ATA_PROT_ATAPI;
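 /* lbam/lbah carry the ATAPI byte count limit (8 KiB here) */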
2636 qc->tf.lbam = (8 * 1024) & 0xff;
2637 qc->tf.lbah = (8 * 1024) >> 8;
2638 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
2639
2640 qc->waiting = &wait;
2641 qc->complete_fn = ata_qc_complete_noop;
2642
2643 spin_lock_irqsave(&ap->host_set->lock, flags);
2644 rc = ata_qc_issue(qc);
2645 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2646
2647 if (rc)
2648 ata_port_disable(ap);
2649 else
2650 wait_for_completion(&wait);
2651
2652 DPRINTK("EXIT\n");
2653}
2654
2655/**
2656 * ata_qc_timeout - Handle timeout of queued command
2657 * @qc: Command that timed out
2658 *
2659 * Some part of the kernel (currently, only the SCSI layer)
2660 * has noticed that the active command on the port has not
2661 * completed after a specified length of time. Handle this
2662 * condition by disabling DMA (if necessary) and completing
2663 * transactions, with error if necessary.
2664 *
2665 * This also handles the case of the "lost interrupt", where
2666 * for some reason (possibly hardware bug, possibly driver bug)
2667 * an interrupt was not delivered to the driver, even though the
2668 * transaction completed successfully.
2669 *
2670 * LOCKING:
 * Inherited from SCSI layer (none, can sleep)
2671 */
2672
2673static void ata_qc_timeout(struct ata_queued_cmd *qc)
2674{
2675 struct ata_port *ap = qc->ap;
2676 struct ata_device *dev = qc->dev;
2677 u8 host_stat = 0, drv_stat;
2678
2679 DPRINTK("ENTER\n");
2680
2681 /* FIXME: doesn't this conflict with timeout handling? */
2682 if (qc->dev->class == ATA_DEV_ATAPI && qc->scsicmd) {
2683 struct scsi_cmnd *cmd = qc->scsicmd;
2684
2685 if (!scsi_eh_eflags_chk(cmd, SCSI_EH_CANCEL_CMD)) {
2686
2687 /* finish completing original command */
2688 __ata_qc_complete(qc);
2689
2690 atapi_request_sense(ap, dev, cmd);
2691
2692 cmd->result = (CHECK_CONDITION << 1) | (DID_OK << 16);
2693 scsi_finish_command(cmd);
2694
2695 goto out;
2696 }
2697 }
2698
2699 /* hack alert! We cannot use the supplied completion
2700 * function from inside the ->eh_strategy_handler() thread.
2701 * libata is the only user of ->eh_strategy_handler() in
2702 * any kernel, so the default scsi_done() assumes it is
2703 * not being called from the SCSI EH.
2704 */
2705 qc->scsidone = scsi_finish_command;
2706
2707 switch (qc->tf.protocol) {
2708
2709 case ATA_PROT_DMA:
2710 case ATA_PROT_ATAPI_DMA:
2711 host_stat = ap->ops->bmdma_status(ap);
2712
2713 /* before we do anything else, clear DMA-Start bit */
2714 ap->ops->bmdma_stop(ap);
2715
2716 /* fall through */
2717
2718 default:
2719 ata_altstatus(ap);
2720 drv_stat = ata_chk_status(ap);
2721
2722 /* ack bmdma irq events */
2723 ap->ops->irq_clear(ap);
2724
2725 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
2726 ap->id, qc->tf.command, drv_stat, host_stat);
2727
2728 /* complete taskfile transaction */
2729 ata_qc_complete(qc, drv_stat);
2730 break;
2731 }
2732out:
2733 DPRINTK("EXIT\n");
2734}
2735
2736/**
2737 * ata_eng_timeout - Handle timeout of queued command
2738 * @ap: Port on which timed-out command is active
2739 *
2740 * Some part of the kernel (currently, only the SCSI layer)
2741 * has noticed that the active command on port @ap has not
2742 * completed after a specified length of time. Handle this
2743 * condition by disabling DMA (if necessary) and completing
2744 * transactions, with error if necessary.
2745 *
2746 * This also handles the case of the "lost interrupt", where
2747 * for some reason (possibly hardware bug, possibly driver bug)
2748 * an interrupt was not delivered to the driver, even though the
2749 * transaction completed successfully.
2750 *
2751 * LOCKING:
2752 * Inherited from SCSI layer (none, can sleep)
2753 */
2754
2755void ata_eng_timeout(struct ata_port *ap)
2756{
2757 struct ata_queued_cmd *qc;
2758
2759 DPRINTK("ENTER\n");
2760
2761 qc = ata_qc_from_tag(ap, ap->active_tag);
2762 if (!qc) {
2763 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
2764 ap->id);
2765 goto out;
2766 }
2767
2768 ata_qc_timeout(qc);
2769
2770out:
2771 DPRINTK("EXIT\n");
2772}
2773
2774/**
2775 * ata_qc_new - Request an available ATA command, for queueing
2776 * @ap: Port from which we request an available command structure
2778 *
2779 * LOCKING:
 * None.
2780 */
2781
2782static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
2783{
2784 struct ata_queued_cmd *qc = NULL;
2785 unsigned int i;
2786
2787 for (i = 0; i < ATA_MAX_QUEUE; i++)
2788 if (!test_and_set_bit(i, &ap->qactive)) {
2789 qc = ata_qc_from_tag(ap, i);
2790 break;
2791 }
2792
2793 if (qc)
2794 qc->tag = i;
2795
2796 return qc;
2797}
2798
2799/**
2800 * ata_qc_new_init - Request an available ATA command, and initialize it
2801 * @ap: Port associated with device @dev
2802 * @dev: Device from whom we request an available command structure
2803 *
2804 * LOCKING:
 * None.
2805 */
2806
2807struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
2808 struct ata_device *dev)
2809{
2810 struct ata_queued_cmd *qc;
2811
2812 qc = ata_qc_new(ap);
2813 if (qc) {
2814 qc->sg = NULL;
2815 qc->flags = 0;
2816 qc->scsicmd = NULL;
2817 qc->ap = ap;
2818 qc->dev = dev;
2819 qc->cursect = qc->cursg = qc->cursg_ofs = 0;
2820 qc->nsect = 0;
2821 qc->nbytes = qc->curbytes = 0;
2822
2823 ata_tf_init(ap, &qc->tf, dev->devno);
2824
2825 if (dev->flags & ATA_DFLAG_LBA) {
2826 qc->tf.flags |= ATA_TFLAG_LBA;
2827
2828 if (dev->flags & ATA_DFLAG_LBA48)
2829 qc->tf.flags |= ATA_TFLAG_LBA48;
2830 }
2831 }
2832
2833 return qc;
2834}
2835
2836static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat)
2837{
2838 return 0;
2839}
2840
2841static void __ata_qc_complete(struct ata_queued_cmd *qc)
2842{
2843 struct ata_port *ap = qc->ap;
2844 unsigned int tag, do_clear = 0;
2845
2846 qc->flags = 0;
2847 tag = qc->tag;
2848 if (likely(ata_tag_valid(tag))) {
2849 if (tag == ap->active_tag)
2850 ap->active_tag = ATA_TAG_POISON;
2851 qc->tag = ATA_TAG_POISON;
2852 do_clear = 1;
2853 }
2854
2855 if (qc->waiting) {
2856 struct completion *waiting = qc->waiting;
2857 qc->waiting = NULL;
2858 complete(waiting);
2859 }
2860
2861 if (likely(do_clear))
2862 clear_bit(tag, &ap->qactive);
2863}
2864
2865/**
2866 * ata_qc_free - free unused ata_queued_cmd
2867 * @qc: Command to complete
2868 *
2869 * Designed to free unused ata_queued_cmd object
2870 * in case something prevents using it.
2871 *
2872 * LOCKING:
2873 * spin_lock_irqsave(host_set lock)
2874 */
2875void ata_qc_free(struct ata_queued_cmd *qc)
2876{
2877 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
2878 assert(qc->waiting == NULL); /* nothing should be waiting */
2879
2880 __ata_qc_complete(qc);
2881}
2882
2883/**
2884 * ata_qc_complete - Complete an active ATA command
2885 * @qc: Command to complete
2886 * @drv_stat: ATA status register contents
2887 *
2888 * LOCKING:
2889 * spin_lock_irqsave(host_set lock)
2890 */
2891
2892void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
2893{
2894 int rc;
2895
2896 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
2897 assert(qc->flags & ATA_QCFLAG_ACTIVE);
2898
2899 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
2900 ata_sg_clean(qc);
2901
2902 /* call completion callback */
2903 rc = qc->complete_fn(qc, drv_stat);
2904
2905 /* if callback indicates not to complete command (non-zero),
2906 * return immediately
2907 */
2908 if (rc != 0)
2909 return;
2910
2911 __ata_qc_complete(qc);
2912
2913 VPRINTK("EXIT\n");
2914}
2915
2916static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
2917{
2918 struct ata_port *ap = qc->ap;
2919
2920 switch (qc->tf.protocol) {
2921 case ATA_PROT_DMA:
2922 case ATA_PROT_ATAPI_DMA:
2923 return 1;
2924
2925 case ATA_PROT_ATAPI:
2926 case ATA_PROT_PIO:
2927 case ATA_PROT_PIO_MULT:
2928 if (ap->flags & ATA_FLAG_PIO_DMA)
2929 return 1;
2930
2931 /* fall through */
2932
2933 default:
2934 return 0;
2935 }
2936
2937 /* never reached */
2938}
2939
2940/**
2941 * ata_qc_issue - issue taskfile to device
2942 * @qc: command to issue to device
2943 *
2944 * Prepare an ATA command for submission to a device.
2945 * This includes mapping the data into a DMA-able
2946 * area, filling in the S/G table, and finally
2947 * writing the taskfile to hardware, starting the command.
2948 *
2949 * LOCKING:
2950 * spin_lock_irqsave(host_set lock)
2951 *
2952 * RETURNS:
2953 * Zero on success, negative on error.
2954 */
2955
2956int ata_qc_issue(struct ata_queued_cmd *qc)
2957{
2958 struct ata_port *ap = qc->ap;
2959
2960 if (ata_should_dma_map(qc)) {
2961 if (qc->flags & ATA_QCFLAG_SG) {
2962 if (ata_sg_setup(qc))
2963 goto err_out;
2964 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
2965 if (ata_sg_setup_one(qc))
2966 goto err_out;
2967 }
2968 } else {
2969 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2970 }
2971
2972 ap->ops->qc_prep(qc);
2973
2974 qc->ap->active_tag = qc->tag;
2975 qc->flags |= ATA_QCFLAG_ACTIVE;
2976
2977 return ap->ops->qc_issue(qc);
2978
2979err_out:
2980 return -1;
2981}
2982
2983/**
2984 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
2985 * @qc: command to issue to device
2986 *
2987 * Using various libata functions and hooks, this function
2988 * starts an ATA command. ATA commands are grouped into
2989 * classes called "protocols", and issuing each type of protocol
2990 * is slightly different.
2991 *
2992 * LOCKING:
2993 * spin_lock_irqsave(host_set lock)
2994 *
2995 * RETURNS:
2996 * Zero on success, negative on error.
2997 */
2998
2999int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3000{
3001 struct ata_port *ap = qc->ap;
3002
3003 ata_dev_select(ap, qc->dev->devno, 1, 0);
3004
3005 switch (qc->tf.protocol) {
3006 case ATA_PROT_NODATA:
3007 ata_tf_to_host_nolock(ap, &qc->tf);
3008 break;
3009
3010 case ATA_PROT_DMA:
3011 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
3012 ap->ops->bmdma_setup(qc); /* set up bmdma */
3013 ap->ops->bmdma_start(qc); /* initiate bmdma */
3014 break;
3015
3016 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
3017 ata_qc_set_polling(qc);
3018 ata_tf_to_host_nolock(ap, &qc->tf);
3019 ap->pio_task_state = PIO_ST;
3020 queue_work(ata_wq, &ap->pio_task);
3021 break;
3022
3023 case ATA_PROT_ATAPI:
3024 ata_qc_set_polling(qc);
3025 ata_tf_to_host_nolock(ap, &qc->tf);
3026 queue_work(ata_wq, &ap->packet_task);
3027 break;
3028
3029 case ATA_PROT_ATAPI_NODATA:
3030 ata_tf_to_host_nolock(ap, &qc->tf);
3031 queue_work(ata_wq, &ap->packet_task);
3032 break;
3033
3034 case ATA_PROT_ATAPI_DMA:
3035 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
3036 ap->ops->bmdma_setup(qc); /* set up bmdma */
3037 queue_work(ata_wq, &ap->packet_task);
3038 break;
3039
3040 default:
3041 WARN_ON(1);
3042 return -1;
3043 }
3044
3045 return 0;
3046}
3047
3048/**
3049 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
3050 * @qc: Info associated with this ATA transaction.
3051 *
3052 * LOCKING:
3053 * spin_lock_irqsave(host_set lock)
3054 */
3055
3056static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
3057{
3058 struct ata_port *ap = qc->ap;
3059 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
3060 u8 dmactl;
3061 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3062
3063 /* load PRD table addr. */
3064 mb(); /* make sure PRD table writes are visible to controller */
3065 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
3066
3067 /* specify data direction, triple-check start bit is clear */
3068 dmactl = readb(mmio + ATA_DMA_CMD);
3069 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
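 /* ATA_DMA_WR makes the DMA engine write to memory (a device-to-host
 * transfer), so it is set when the taskfile is not a write */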
3070 if (!rw)
3071 dmactl |= ATA_DMA_WR;
3072 writeb(dmactl, mmio + ATA_DMA_CMD);
3073
3074 /* issue r/w command */
3075 ap->ops->exec_command(ap, &qc->tf);
3076}
3077
3078/**
3079 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
3080 * @qc: Info associated with this ATA transaction.
3081 *
3082 * LOCKING:
3083 * spin_lock_irqsave(host_set lock)
3084 */
3085
3086static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
3087{
3088 struct ata_port *ap = qc->ap;
3089 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3090 u8 dmactl;
3091
3092 /* start host DMA transaction */
3093 dmactl = readb(mmio + ATA_DMA_CMD);
3094 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
3095
3096 /* Strictly, one may wish to issue a readb() here, to
3097 * flush the mmio write. However, control also passes
3098 * to the hardware at this point, and it will interrupt
3099 * us when we are to resume control. So, in effect,
3100 * we don't care when the mmio write flushes.
3101 * Further, a read of the DMA status register _immediately_
3102 * following the write may not be what certain flaky hardware
3103 * expects, so it seems best not to add a readb() without
3104 * first auditing all the MMIO ATA cards/mobos.
3105 * Or maybe I'm just being paranoid.
3106 */
3107}
3108
3109/**
3110 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
3111 * @qc: Info associated with this ATA transaction.
3112 *
3113 * LOCKING:
3114 * spin_lock_irqsave(host_set lock)
3115 */
3116
3117static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
3118{
3119 struct ata_port *ap = qc->ap;
3120 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
3121 u8 dmactl;
3122
3123 /* load PRD table addr. */
3124 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
3125
3126 /* specify data direction, triple-check start bit is clear */
3127 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3128 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
3129 if (!rw)
3130 dmactl |= ATA_DMA_WR;
3131 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3132
3133 /* issue r/w command */
3134 ap->ops->exec_command(ap, &qc->tf);
3135}
3136
3137/**
3138 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
3139 * @qc: Info associated with this ATA transaction.
3140 *
3141 * LOCKING:
3142 * spin_lock_irqsave(host_set lock)
3143 */
3144
3145static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
3146{
3147 struct ata_port *ap = qc->ap;
3148 u8 dmactl;
3149
3150 /* start host DMA transaction */
3151 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3152 outb(dmactl | ATA_DMA_START,
3153 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3154}
3155
3156void ata_bmdma_start(struct ata_queued_cmd *qc)
3157{
3158 if (qc->ap->flags & ATA_FLAG_MMIO)
3159 ata_bmdma_start_mmio(qc);
3160 else
3161 ata_bmdma_start_pio(qc);
3162}
3163
3164void ata_bmdma_setup(struct ata_queued_cmd *qc)
3165{
3166 if (qc->ap->flags & ATA_FLAG_MMIO)
3167 ata_bmdma_setup_mmio(qc);
3168 else
3169 ata_bmdma_setup_pio(qc);
3170}
3171
3172void ata_bmdma_irq_clear(struct ata_port *ap)
3173{
3174 if (ap->flags & ATA_FLAG_MMIO) {
3175 void __iomem *mmio = ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
3176 writeb(readb(mmio), mmio);
3177 } else {
3178 unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
3179 outb(inb(addr), addr);
3180 }
3181
3182}
3183
3184u8 ata_bmdma_status(struct ata_port *ap)
3185{
3186 u8 host_stat;
3187 if (ap->flags & ATA_FLAG_MMIO) {
3188 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3189 host_stat = readb(mmio + ATA_DMA_STATUS);
3190 } else
3191 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
3192 return host_stat;
3193}
3194
3195void ata_bmdma_stop(struct ata_port *ap)
3196{
3197 if (ap->flags & ATA_FLAG_MMIO) {
3198 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3199
3200 /* clear start/stop bit */
3201 writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
3202 mmio + ATA_DMA_CMD);
3203 } else {
3204 /* clear start/stop bit */
3205 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
3206 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3207 }
3208
3209 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
3210 ata_altstatus(ap); /* dummy read */
3211}
3212
3213/**
3214 * ata_host_intr - Handle host interrupt for given (port, task)
3215 * @ap: Port on which interrupt arrived (possibly...)
3216 * @qc: Taskfile currently active in engine
3217 *
3218 * Handle host interrupt for given queued command. Currently,
3219 * only DMA interrupts are handled. All other commands are
3220 * handled via polling with interrupts disabled (nIEN bit).
3221 *
3222 * LOCKING:
3223 * spin_lock_irqsave(host_set lock)
3224 *
3225 * RETURNS:
3226 * One if interrupt was handled, zero if not (shared irq).
3227 */
3228
3229inline unsigned int ata_host_intr (struct ata_port *ap,
3230 struct ata_queued_cmd *qc)
3231{
3232 u8 status, host_stat;
3233
3234 switch (qc->tf.protocol) {
3235
3236 case ATA_PROT_DMA:
3237 case ATA_PROT_ATAPI_DMA:
3238 case ATA_PROT_ATAPI:
3239 /* check status of DMA engine */
3240 host_stat = ap->ops->bmdma_status(ap);
3241 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
3242
3243 /* if it's not our irq... */
3244 if (!(host_stat & ATA_DMA_INTR))
3245 goto idle_irq;
3246
3247 /* before we do anything else, clear DMA-Start bit */
3248 ap->ops->bmdma_stop(ap);
3249
3250 /* fall through */
3251
3252 case ATA_PROT_ATAPI_NODATA:
3253 case ATA_PROT_NODATA:
3254 /* check altstatus */
3255 status = ata_altstatus(ap);
3256 if (status & ATA_BUSY)
3257 goto idle_irq;
3258
3259 /* check main status, clearing INTRQ */
3260 status = ata_chk_status(ap);
3261 if (unlikely(status & ATA_BUSY))
3262 goto idle_irq;
3263 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
3264 ap->id, qc->tf.protocol, status);
3265
3266 /* ack bmdma irq events */
3267 ap->ops->irq_clear(ap);
3268
3269 /* complete taskfile transaction */
3270 ata_qc_complete(qc, status);
3271 break;
3272
3273 default:
3274 goto idle_irq;
3275 }
3276
3277 return 1; /* irq handled */
3278
3279idle_irq:
3280 ap->stats.idle_irq++;
3281
3282#ifdef ATA_IRQ_TRAP
3283 if ((ap->stats.idle_irq % 1000) == 0) {
3285 ata_irq_ack(ap, 0); /* debug trap */
3286 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
3287 }
3288#endif
3289 return 0; /* irq not handled */
3290}
3291
3292/**
3293 * ata_interrupt - Default ATA host interrupt handler
3294 * @irq: irq line
3295 * @dev_instance: pointer to our host information structure
3296 * @regs: unused
3297 *
3298 * LOCKING:
 * Obtains host_set lock during operation.
3299 *
3300 * RETURNS:
 * IRQ_NONE or IRQ_HANDLED.
3302 */
3303
3304irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
3305{
3306 struct ata_host_set *host_set = dev_instance;
3307 unsigned int i;
3308 unsigned int handled = 0;
3309 unsigned long flags;
3310
3311 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
3312 spin_lock_irqsave(&host_set->lock, flags);
3313
3314 for (i = 0; i < host_set->n_ports; i++) {
3315 struct ata_port *ap;
3316
3317 ap = host_set->ports[i];
3318 if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
3319 struct ata_queued_cmd *qc;
3320
3321 qc = ata_qc_from_tag(ap, ap->active_tag);
3322 if (qc && (!(qc->tf.ctl & ATA_NIEN)))
3323 handled |= ata_host_intr(ap, qc);
3324 }
3325 }
3326
3327 spin_unlock_irqrestore(&host_set->lock, flags);
3328
3329 return IRQ_RETVAL(handled);
3330}
3331
3332/**
3333 * atapi_packet_task - Write CDB bytes to hardware
3334 * @_data: Port to which ATAPI device is attached.
3335 *
3336 * When device has indicated its readiness to accept
3337 * a CDB, this function is called. Send the CDB.
3338 * If DMA is to be performed, exit immediately.
3339 * Otherwise, we are in polling mode, so poll
3340 * status until the operation succeeds or fails.
3341 *
3342 * LOCKING:
3343 * Kernel thread context (may sleep)
3344 */
3345
3346static void atapi_packet_task(void *_data)
3347{
3348 struct ata_port *ap = _data;
3349 struct ata_queued_cmd *qc;
3350 u8 status;
3351
3352 qc = ata_qc_from_tag(ap, ap->active_tag);
3353 assert(qc != NULL);
3354 assert(qc->flags & ATA_QCFLAG_ACTIVE);
3355
3356 /* sleep-wait for BSY to clear */
3357 DPRINTK("busy wait\n");
3358 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB))
3359 goto err_out;
3360
3361 /* make sure DRQ is set */
3362 status = ata_chk_status(ap);
3363 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)
3364 goto err_out;
3365
3366 /* send SCSI cdb */
3367 DPRINTK("send cdb\n");
3368 assert(ap->cdb_len >= 12);
3369 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
3370
3371 /* if we are DMA'ing, irq handler takes over from here */
3372 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3373 ap->ops->bmdma_start(qc); /* initiate bmdma */
3374
3375 /* non-data commands are also handled via irq */
3376 else if (qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3377 /* do nothing */
3378 }
3379
3380 /* PIO commands are handled by polling */
3381 else {
3382 ap->pio_task_state = PIO_ST;
3383 queue_work(ata_wq, &ap->pio_task);
3384 }
3385
3386 return;
3387
3388err_out:
3389 ata_qc_complete(qc, ATA_ERR);
3390}
3391
3392int ata_port_start (struct ata_port *ap)
3393{
3394 struct device *dev = ap->host_set->dev;
3395
3396 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
3397 if (!ap->prd)
3398 return -ENOMEM;
3399
3400 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
3401
3402 return 0;
3403}
3404
3405void ata_port_stop (struct ata_port *ap)
3406{
3407 struct device *dev = ap->host_set->dev;
3408
3409 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
3410}
3411
3412/**
3413 * ata_host_remove - Unregister SCSI host structure with upper layers
3414 * @ap: Port to unregister
3415 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
3416 *
3417 * LOCKING:
 * Inherited from caller.
3418 */
3419
3420static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
3421{
3422 struct Scsi_Host *sh = ap->host;
3423
3424 DPRINTK("ENTER\n");
3425
3426 if (do_unregister)
3427 scsi_remove_host(sh);
3428
3429 ap->ops->port_stop(ap);
3430}
3431
3432/**
3433 * ata_host_init - Initialize an ata_port structure
3434 * @ap: Structure to initialize
3435 * @host: associated SCSI mid-layer structure
3436 * @host_set: Collection of hosts to which @ap belongs
3437 * @ent: Probe information provided by low-level driver
3438 * @port_no: Port number associated with this ata_port
3439 *
3440 * LOCKING:
3441 * Inherited from caller.
3442 */
3443
3444static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
3445 struct ata_host_set *host_set,
3446 struct ata_probe_ent *ent, unsigned int port_no)
3447{
3448 unsigned int i;
3449
3450 host->max_id = 16;
3451 host->max_lun = 1;
3452 host->max_channel = 1;
3453 host->unique_id = ata_unique_id++;
3454 host->max_cmd_len = 12;
3455 scsi_set_device(host, ent->dev);
3456 scsi_assign_lock(host, &host_set->lock);
3457
3458 ap->flags = ATA_FLAG_PORT_DISABLED;
3459 ap->id = host->unique_id;
3460 ap->host = host;
3461 ap->ctl = ATA_DEVCTL_OBS;
3462 ap->host_set = host_set;
3463 ap->port_no = port_no;
3464 ap->hard_port_no =
3465 ent->legacy_mode ? ent->hard_port_no : port_no;
3466 ap->pio_mask = ent->pio_mask;
3467 ap->mwdma_mask = ent->mwdma_mask;
3468 ap->udma_mask = ent->udma_mask;
3469 ap->flags |= ent->host_flags;
3470 ap->ops = ent->port_ops;
3471 ap->cbl = ATA_CBL_NONE;
3472 ap->active_tag = ATA_TAG_POISON;
3473 ap->last_ctl = 0xFF;
3474
3475 INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
3476 INIT_WORK(&ap->pio_task, ata_pio_task, ap);
3477
3478 for (i = 0; i < ATA_MAX_DEVICES; i++)
3479 ap->device[i].devno = i;
3480
3481#ifdef ATA_IRQ_TRAP
3482 ap->stats.unhandled_irq = 1;
3483 ap->stats.idle_irq = 1;
3484#endif
3485
3486 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
3487}
3488
3489/**
3490 * ata_host_add - Attach low-level ATA driver to system
3491 * @ent: Information provided by low-level driver
3492 * @host_set: Collections of ports to which we add
3493 * @port_no: Port number associated with this host
3494 *
3495 * LOCKING:
3496 * Inherited from caller.
3497 *
3498 * RETURNS:
 * New ata_port on success, NULL on error.
3499 */
3500
3501static struct ata_port * ata_host_add(struct ata_probe_ent *ent,
3502 struct ata_host_set *host_set,
3503 unsigned int port_no)
3504{
3505 struct Scsi_Host *host;
3506 struct ata_port *ap;
3507 int rc;
3508
3509 DPRINTK("ENTER\n");
3510 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
3511 if (!host)
3512 return NULL;
3513
3514 ap = (struct ata_port *) &host->hostdata[0];
3515
3516 ata_host_init(ap, host, host_set, ent, port_no);
3517
3518 rc = ap->ops->port_start(ap);
3519 if (rc)
3520 goto err_out;
3521
3522 return ap;
3523
3524err_out:
3525 scsi_host_put(host);
3526 return NULL;
3527}
3528
3529/**
3530 * ata_device_add - Register hardware with upper layers, then probe
3531 * @ent: Probe information provided by the low-level driver
3532 *
3533 * LOCKING:
 * Inherited from calling layer (may sleep).
3534 *
3535 * RETURNS:
 * Number of ports registered.  Zero on error (no ports registered).
3537 */
3538
3539int ata_device_add(struct ata_probe_ent *ent)
3540{
3541 unsigned int count = 0, i;
3542 struct device *dev = ent->dev;
3543 struct ata_host_set *host_set;
3544
3545 DPRINTK("ENTER\n");
3546 /* alloc a container for our list of ATA ports (buses) */
3547 host_set = kmalloc(sizeof(struct ata_host_set) +
3548 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
3549 if (!host_set)
3550 return 0;
3551 memset(host_set, 0, sizeof(struct ata_host_set) + (ent->n_ports * sizeof(void *)));
3552 spin_lock_init(&host_set->lock);
3553
3554 host_set->dev = dev;
3555 host_set->n_ports = ent->n_ports;
3556 host_set->irq = ent->irq;
3557 host_set->mmio_base = ent->mmio_base;
3558 host_set->private_data = ent->private_data;
3559 host_set->ops = ent->port_ops;
3560
3561 /* register each port bound to this device */
3562 for (i = 0; i < ent->n_ports; i++) {
3563 struct ata_port *ap;
3564 unsigned long xfer_mode_mask;
3565
3566 ap = ata_host_add(ent, host_set, i);
3567 if (!ap)
3568 goto err_out;
3569
3570 host_set->ports[i] = ap;
3571 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
3572 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
3573 (ap->pio_mask << ATA_SHIFT_PIO);
3574
3575 /* print per-port info to dmesg */
3576 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
3577 "bmdma 0x%lX irq %lu\n",
3578 ap->id,
3579 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
3580 ata_mode_string(xfer_mode_mask),
3581 ap->ioaddr.cmd_addr,
3582 ap->ioaddr.ctl_addr,
3583 ap->ioaddr.bmdma_addr,
3584 ent->irq);
3585
3586 ata_chk_status(ap);
3587 host_set->ops->irq_clear(ap);
3588 count++;
3589 }
3590
3591 if (!count) {
3592 kfree(host_set);
3593 return 0;
3594 }
3595
3596 /* obtain irq, that is shared between channels */
3597 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
3598 DRV_NAME, host_set))
3599 goto err_out;
3600
3601 /* perform each probe synchronously */
3602 DPRINTK("probe begin\n");
3603 for (i = 0; i < count; i++) {
3604 struct ata_port *ap;
3605 int rc;
3606
3607 ap = host_set->ports[i];
3608
3609 DPRINTK("ata%u: probe begin\n", ap->id);
3610 rc = ata_bus_probe(ap);
3611 DPRINTK("ata%u: probe end\n", ap->id);
3612
3613 if (rc) {
3614 /* FIXME: do something useful here?
3615 * Current libata behavior will
3616 * tear down everything when
3617 * the module is removed
3618 * or the h/w is unplugged.
3619 */
3620 }
3621
3622 rc = scsi_add_host(ap->host, dev);
3623 if (rc) {
3624 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
3625 ap->id);
3626 /* FIXME: do something useful here */
3627 /* FIXME: handle unconditional calls to
3628 * scsi_scan_host and ata_host_remove, below,
3629 * at the very least
3630 */
3631 }
3632 }
3633
3634 /* probes are done, now scan each port's disk(s) */
3635 DPRINTK("probe begin\n");
3636 for (i = 0; i < count; i++) {
3637 struct ata_port *ap = host_set->ports[i];
3638
3639 scsi_scan_host(ap->host);
3640 }
3641
3642 dev_set_drvdata(dev, host_set);
3643
3644 VPRINTK("EXIT, returning %u\n", ent->n_ports);
3645 return ent->n_ports; /* success */
3646
3647err_out:
3648 for (i = 0; i < count; i++) {
3649 ata_host_remove(host_set->ports[i], 1);
3650 scsi_host_put(host_set->ports[i]->host);
3651 }
3652 kfree(host_set);
3653 VPRINTK("EXIT, returning 0\n");
3654 return 0;
3655}
3656
3657/**
3658 * ata_scsi_release - SCSI layer callback hook for host unload
3659 * @host: libata host to be unloaded
3660 *
3661 * Performs all duties necessary to shut down a libata port...
3662 * Kill port kthread, disable port, and release resources.
3663 *
3664 * LOCKING:
3665 * Inherited from SCSI layer.
3666 *
3667 * RETURNS:
3668 * One.
3669 */
3670
3671int ata_scsi_release(struct Scsi_Host *host)
3672{
3673 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
3674
3675 DPRINTK("ENTER\n");
3676
3677 ap->ops->port_disable(ap);
3678 ata_host_remove(ap, 0);
3679
3680 DPRINTK("EXIT\n");
3681 return 1;
3682}
3683
3684/**
3685 * ata_std_ports - initialize ioaddr with standard port offsets.
3686 * @ioaddr: IO address structure to be initialized
3687 */
3688void ata_std_ports(struct ata_ioports *ioaddr)
3689{
3690 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
3691 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
3692 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
3693 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
3694 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
3695 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
3696 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
3697 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
3698 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
3699 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
3700}
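
/*
 * Illustrative sketch (not part of the driver): a low-level driver fills
 * in the block bases and lets ata_std_ports() derive the rest.  The
 * legacy primary-channel ports below are purely example values.
 */
static void std_ports_example(struct ata_ioports *ioaddr)
{
	ioaddr->cmd_addr = 0x1f0;
	ioaddr->altstatus_addr = ioaddr->ctl_addr = 0x3f6;
	ata_std_ports(ioaddr);
	/* now ioaddr->data_addr == 0x1f0, ioaddr->status_addr == 0x1f7 */
}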
3701
3702static struct ata_probe_ent *
3703ata_probe_ent_alloc(struct device *dev, struct ata_port_info *port)
3704{
3705 struct ata_probe_ent *probe_ent;
3706
3707 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
3708 if (!probe_ent) {
3709 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
3710 kobject_name(&(dev->kobj)));
3711 return NULL;
3712 }
3713
3714 memset(probe_ent, 0, sizeof(*probe_ent));
3715
3716 INIT_LIST_HEAD(&probe_ent->node);
3717 probe_ent->dev = dev;
3718
3719 probe_ent->sht = port->sht;
3720 probe_ent->host_flags = port->host_flags;
3721 probe_ent->pio_mask = port->pio_mask;
3722 probe_ent->mwdma_mask = port->mwdma_mask;
3723 probe_ent->udma_mask = port->udma_mask;
3724 probe_ent->port_ops = port->port_ops;
3725
3726 return probe_ent;
3727}
3728
3729#ifdef CONFIG_PCI
3730struct ata_probe_ent *
3731ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port)
3732{
3733 struct ata_probe_ent *probe_ent =
3734 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
3735 if (!probe_ent)
3736 return NULL;
3737
3738 probe_ent->n_ports = 2;
3739 probe_ent->irq = pdev->irq;
3740 probe_ent->irq_flags = SA_SHIRQ;
3741
3742 probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0);
3743 probe_ent->port[0].altstatus_addr =
3744 probe_ent->port[0].ctl_addr =
3745 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
3746 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
3747
3748 probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2);
3749 probe_ent->port[1].altstatus_addr =
3750 probe_ent->port[1].ctl_addr =
3751 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
3752 probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8;
3753
3754 ata_std_ports(&probe_ent->port[0]);
3755 ata_std_ports(&probe_ent->port[1]);
3756
3757 return probe_ent;
3758}
3759
3760static struct ata_probe_ent *
3761ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port,
3762 struct ata_probe_ent **ppe2)
3763{
3764 struct ata_probe_ent *probe_ent, *probe_ent2;
3765
3766 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
3767 if (!probe_ent)
3768 return NULL;
3769 probe_ent2 = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[1]);
3770 if (!probe_ent2) {
3771 kfree(probe_ent);
3772 return NULL;
3773 }
3774
3775 probe_ent->n_ports = 1;
3776 probe_ent->irq = 14;
3777
3778 probe_ent->hard_port_no = 0;
3779 probe_ent->legacy_mode = 1;
3780
3781 probe_ent2->n_ports = 1;
3782 probe_ent2->irq = 15;
3783
3784 probe_ent2->hard_port_no = 1;
3785 probe_ent2->legacy_mode = 1;
3786
3787 probe_ent->port[0].cmd_addr = 0x1f0;
3788 probe_ent->port[0].altstatus_addr =
3789 probe_ent->port[0].ctl_addr = 0x3f6;
3790 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
3791
3792 probe_ent2->port[0].cmd_addr = 0x170;
3793 probe_ent2->port[0].altstatus_addr =
3794 probe_ent2->port[0].ctl_addr = 0x376;
3795 probe_ent2->port[0].bmdma_addr = pci_resource_start(pdev, 4)+8;
3796
3797 ata_std_ports(&probe_ent->port[0]);
3798 ata_std_ports(&probe_ent2->port[0]);
3799
3800 *ppe2 = probe_ent2;
3801 return probe_ent;
3802}
3803
3804/**
3805 * ata_pci_init_one - Initialize/register PCI IDE host controller
3806 * @pdev: Controller to be initialized
3807 * @port_info: Information from low-level host driver
3808 * @n_ports: Number of ports attached to host controller
3809 *
3810 * LOCKING:
3811 * Inherited from PCI layer (may sleep).
3812 *
3813 * RETURNS:
3814 * Zero on success, negative errno-based value on error.
3815 */
3816
3817int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
3818 unsigned int n_ports)
3819{
3820 struct ata_probe_ent *probe_ent, *probe_ent2 = NULL;
3821 struct ata_port_info *port[2];
3822 u8 tmp8, mask;
3823 unsigned int legacy_mode = 0;
3824 int disable_dev_on_err = 1;
3825 int rc;
3826
3827 DPRINTK("ENTER\n");
3828
3829 port[0] = port_info[0];
3830 if (n_ports > 1)
3831 port[1] = port_info[1];
3832 else
3833 port[1] = port[0];
3834
3835 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
3836 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
3837 /* TODO: support transitioning to native mode? */
3838 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
3839 mask = (1 << 2) | (1 << 0);
3840 if ((tmp8 & mask) != mask)
3841 legacy_mode = (1 << 3);
3842 }
3843
3844 /* FIXME... */
3845 if ((!legacy_mode) && (n_ports > 1)) {
3846 printk(KERN_ERR "ata: BUG: native mode, n_ports > 1\n");
3847 return -EINVAL;
3848 }
3849
3850 rc = pci_enable_device(pdev);
3851 if (rc)
3852 return rc;
3853
3854 rc = pci_request_regions(pdev, DRV_NAME);
3855 if (rc) {
3856 disable_dev_on_err = 0;
3857 goto err_out;
3858 }
3859
3860 if (legacy_mode) {
3861 if (!request_region(0x1f0, 8, "libata")) {
3862 struct resource *conflict, res;
3863 res.start = 0x1f0;
3864 res.end = 0x1f0 + 8 - 1;
3865 conflict = ____request_resource(&ioport_resource, &res);
3866 if (!strcmp(conflict->name, "libata"))
3867 legacy_mode |= (1 << 0);
3868 else {
3869 disable_dev_on_err = 0;
3870 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
3871 }
3872 } else
3873 legacy_mode |= (1 << 0);
3874
3875 if (!request_region(0x170, 8, "libata")) {
3876 struct resource *conflict, res;
3877 res.start = 0x170;
3878 res.end = 0x170 + 8 - 1;
3879 conflict = ____request_resource(&ioport_resource, &res);
3880 if (!strcmp(conflict->name, "libata"))
3881 legacy_mode |= (1 << 1);
3882 else {
3883 disable_dev_on_err = 0;
3884 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
3885 }
3886 } else
3887 legacy_mode |= (1 << 1);
3888 }
3889
3890 /* we have legacy mode, but all ports are unavailable */
3891 if (legacy_mode == (1 << 3)) {
3892 rc = -EBUSY;
3893 goto err_out_regions;
3894 }
3895
3896 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
3897 if (rc)
3898 goto err_out_regions;
3899 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
3900 if (rc)
3901 goto err_out_regions;
3902
3903 if (legacy_mode) {
3904 probe_ent = ata_pci_init_legacy_mode(pdev, port, &probe_ent2);
3905 } else
3906 probe_ent = ata_pci_init_native_mode(pdev, port);
3907 if (!probe_ent) {
3908 rc = -ENOMEM;
3909 goto err_out_regions;
3910 }
3911
3912 pci_set_master(pdev);
3913
3914 /* FIXME: check ata_device_add return */
3915 if (legacy_mode) {
3916 if (legacy_mode & (1 << 0))
3917 ata_device_add(probe_ent);
3918 if (legacy_mode & (1 << 1))
3919 ata_device_add(probe_ent2);
3920 } else
3921 ata_device_add(probe_ent);
3922
3923 kfree(probe_ent);
3924 kfree(probe_ent2);
3925
3926 return 0;
3927
3928err_out_regions:
3929 if (legacy_mode & (1 << 0))
3930 release_region(0x1f0, 8);
3931 if (legacy_mode & (1 << 1))
3932 release_region(0x170, 8);
3933 pci_release_regions(pdev);
3934err_out:
3935 if (disable_dev_on_err)
3936 pci_disable_device(pdev);
3937 return rc;
3938}
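
/*
 * Illustrative sketch (not part of the driver): the PCI probe hook of a
 * hypothetical low-level driver layered on ata_pci_init_one().
 * example_sht and example_ops stand in for that driver's
 * scsi_host_template and ata_port_operations; they are assumptions,
 * not defined here.
 */
static int example_init_one(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	static struct ata_port_info example_info = {
		.sht		= &example_sht,		/* hypothetical */
		.host_flags	= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
		.pio_mask	= 0x1f,	/* PIO modes 0-4 */
		.mwdma_mask	= 0x07,	/* MWDMA modes 0-2 */
		.udma_mask	= 0x3f,	/* UDMA modes 0-5 */
		.port_ops	= &example_ops,		/* hypothetical */
	};
	static struct ata_port_info *port_info[2] =
		{ &example_info, &example_info };

	return ata_pci_init_one(pdev, port_info, 2);
}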
3939
3940/**
3941 * ata_pci_remove_one - PCI layer callback for device removal
3942 * @pdev: PCI device that was removed
3943 *
3944 * PCI layer indicates to libata via this hook that
3945 * hot-unplug or module unload event has occurred.
3946 * Handle this by unregistering all objects associated
3947 * with this PCI device. Free those objects. Then finally
3948 * release PCI resources and disable device.
3949 *
3950 * LOCKING:
3951 * Inherited from PCI layer (may sleep).
3952 */
3953
3954void ata_pci_remove_one (struct pci_dev *pdev)
3955{
3956 struct device *dev = pci_dev_to_dev(pdev);
3957 struct ata_host_set *host_set = dev_get_drvdata(dev);
3958 struct ata_port *ap;
3959 unsigned int i;
3960
3961 for (i = 0; i < host_set->n_ports; i++) {
3962 ap = host_set->ports[i];
3963
3964 scsi_remove_host(ap->host);
3965 }
3966
3967 free_irq(host_set->irq, host_set);
3968 if (host_set->ops->host_stop)
3969 host_set->ops->host_stop(host_set);
3970 if (host_set->mmio_base)
3971 iounmap(host_set->mmio_base);
3972
3973 for (i = 0; i < host_set->n_ports; i++) {
3974 ap = host_set->ports[i];
3975
3976 ata_scsi_release(ap->host);
3977
3978 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
3979 struct ata_ioports *ioaddr = &ap->ioaddr;
3980
3981 if (ioaddr->cmd_addr == 0x1f0)
3982 release_region(0x1f0, 8);
3983 else if (ioaddr->cmd_addr == 0x170)
3984 release_region(0x170, 8);
3985 }
3986
3987 scsi_host_put(ap->host);
3988 }
3989
3990 kfree(host_set);
3991
3992 pci_release_regions(pdev);
3993 pci_disable_device(pdev);
3994 dev_set_drvdata(dev, NULL);
3995}
3996
3997/* move to PCI subsystem */
3998int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits)
3999{
4000 unsigned long tmp = 0;
4001
4002 switch (bits->width) {
4003 case 1: {
4004 u8 tmp8 = 0;
4005 pci_read_config_byte(pdev, bits->reg, &tmp8);
4006 tmp = tmp8;
4007 break;
4008 }
4009 case 2: {
4010 u16 tmp16 = 0;
4011 pci_read_config_word(pdev, bits->reg, &tmp16);
4012 tmp = tmp16;
4013 break;
4014 }
4015 case 4: {
4016 u32 tmp32 = 0;
4017 pci_read_config_dword(pdev, bits->reg, &tmp32);
4018 tmp = tmp32;
4019 break;
4020 }
4021
4022 default:
4023 return -EINVAL;
4024 }
4025
4026 tmp &= bits->mask;
4027
4028 return (tmp == bits->val) ? 1 : 0;
4029}
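
/*
 * Illustrative sketch (not part of the driver): how a controller driver
 * might use pci_test_config_bits() to test a port-enable bit.  The
 * register offset, mask and value below are hypothetical.
 */
static int example_port_enabled(struct pci_dev *pdev)
{
	static struct pci_bits port_en = {
		.reg	= 0x41,		/* hypothetical config register */
		.width	= 1,		/* read one byte */
		.mask	= 0x80,
		.val	= 0x80,		/* bit set => port enabled */
	};

	return pci_test_config_bits(pdev, &port_en);
}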
4030#endif /* CONFIG_PCI */
4031
4032
4033/**
4034 * ata_init - libata module initialization
4035 *
4036 * LOCKING:
 * None (module load context).
4037 *
4038 * RETURNS:
 * Zero on success, -ENOMEM if the workqueue cannot be created.
4040 */
4041
4042static int __init ata_init(void)
4043{
4044 ata_wq = create_workqueue("ata");
4045 if (!ata_wq)
4046 return -ENOMEM;
4047
4048 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
4049 return 0;
4050}
4051
4052static void __exit ata_exit(void)
4053{
4054 destroy_workqueue(ata_wq);
4055}
4056
4057module_init(ata_init);
4058module_exit(ata_exit);
4059
4060/*
4061 * libata is essentially a library of internal helper functions for
4062 * low-level ATA host controller drivers. As such, the API/ABI is
4063 * likely to change as new drivers are added and updated.
4064 * Do not depend on ABI/API stability.
4065 */
4066
4067EXPORT_SYMBOL_GPL(ata_std_bios_param);
4068EXPORT_SYMBOL_GPL(ata_std_ports);
4069EXPORT_SYMBOL_GPL(ata_device_add);
4070EXPORT_SYMBOL_GPL(ata_sg_init);
4071EXPORT_SYMBOL_GPL(ata_sg_init_one);
4072EXPORT_SYMBOL_GPL(ata_qc_complete);
4073EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
4074EXPORT_SYMBOL_GPL(ata_eng_timeout);
4075EXPORT_SYMBOL_GPL(ata_tf_load);
4076EXPORT_SYMBOL_GPL(ata_tf_read);
4077EXPORT_SYMBOL_GPL(ata_noop_dev_select);
4078EXPORT_SYMBOL_GPL(ata_std_dev_select);
4079EXPORT_SYMBOL_GPL(ata_tf_to_fis);
4080EXPORT_SYMBOL_GPL(ata_tf_from_fis);
4081EXPORT_SYMBOL_GPL(ata_check_status);
4082EXPORT_SYMBOL_GPL(ata_altstatus);
4083EXPORT_SYMBOL_GPL(ata_chk_err);
4084EXPORT_SYMBOL_GPL(ata_exec_command);
4085EXPORT_SYMBOL_GPL(ata_port_start);
4086EXPORT_SYMBOL_GPL(ata_port_stop);
4087EXPORT_SYMBOL_GPL(ata_interrupt);
4088EXPORT_SYMBOL_GPL(ata_qc_prep);
4089EXPORT_SYMBOL_GPL(ata_bmdma_setup);
4090EXPORT_SYMBOL_GPL(ata_bmdma_start);
4091EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
4092EXPORT_SYMBOL_GPL(ata_bmdma_status);
4093EXPORT_SYMBOL_GPL(ata_bmdma_stop);
4094EXPORT_SYMBOL_GPL(ata_port_probe);
4095EXPORT_SYMBOL_GPL(sata_phy_reset);
4096EXPORT_SYMBOL_GPL(__sata_phy_reset);
4097EXPORT_SYMBOL_GPL(ata_bus_reset);
4098EXPORT_SYMBOL_GPL(ata_port_disable);
4099EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
4100EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4101EXPORT_SYMBOL_GPL(ata_scsi_error);
4102EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
4103EXPORT_SYMBOL_GPL(ata_scsi_release);
4104EXPORT_SYMBOL_GPL(ata_host_intr);
4105EXPORT_SYMBOL_GPL(ata_dev_classify);
4106EXPORT_SYMBOL_GPL(ata_dev_id_string);
4107EXPORT_SYMBOL_GPL(ata_scsi_simulate);
4108
4109#ifdef CONFIG_PCI
4110EXPORT_SYMBOL_GPL(pci_test_config_bits);
4111EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
4112EXPORT_SYMBOL_GPL(ata_pci_init_one);
4113EXPORT_SYMBOL_GPL(ata_pci_remove_one);
4114#endif /* CONFIG_PCI */