#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>

/**
 * config_drive_for_dma - attempt to activate IDE DMA
 * @drive: the drive to place in DMA mode
 *
 * If the drive supports at least mode 2 DMA or UDMA of any kind
 * then attempt to place it into DMA mode. Drives that are known to
 * support DMA but predate the DMA properties or that are known
 * to have DMA handling bugs are also set up appropriately based
 * on the good/bad drive lists.
 */

int config_drive_for_dma(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u16 *id = drive->id;

	if (drive->media != ide_disk) {
		if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
			return 0;
	}

	/*
	 * Enable DMA on any drive that has
	 * UltraDMA (mode 0/1/2/3/4/5/6) enabled
	 */
	if ((id[ATA_ID_FIELD_VALID] & 4) &&
	    ((id[ATA_ID_UDMA_MODES] >> 8) & 0x7f))
		return 1;

	/*
	 * Enable DMA on any drive that has mode2 DMA
	 * (multi or single) enabled
	 */
	if (id[ATA_ID_FIELD_VALID] & 2)	/* regular DMA */
		if ((id[ATA_ID_MWDMA_MODES] & 0x404) == 0x404 ||
		    (id[ATA_ID_SWDMA_MODES] & 0x404) == 0x404)
			return 1;

	/* Consult the list of known "good" drives */
	if (ide_dma_good_drive(drive))
		return 1;

	return 0;
}

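/*
 * Illustrative note (not used by the code above): the 0x404 masks rely
 * on the ATA identify-data layout, where the low byte of word 63
 * (ATA_ID_MWDMA_MODES) reports which multiword DMA modes the drive
 * supports and the high byte reports which mode is currently selected.
 * 0x404 therefore checks "mode 2 supported AND mode 2 selected":
 *
 *	bit  2 (0x004) - multiword DMA mode 2 supported
 *	bit 10 (0x400) - multiword DMA mode 2 selected
 *
 * Word 88 (ATA_ID_UDMA_MODES) uses the same supported/selected split,
 * which is why the UDMA test shifts right by 8 and masks with 0x7f to
 * look only at the "selected" byte.
 */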

/**
 * ide_dma_host_set - Enable/disable DMA on a host
 * @drive: drive to control
 * @on: 1 to enable DMA for the drive, 0 to disable it
 *
 * Enable/disable DMA on an IDE controller following generic
 * bus-mastering IDE controller behaviour.
 */

void ide_dma_host_set(ide_drive_t *drive, int on)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 unit = drive->dn & 1;
	u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);

	if (on)
		dma_stat |= (1 << (5 + unit));
	else
		dma_stat &= ~(1 << (5 + unit));

	if (hwif->host_flags & IDE_HFLAG_MMIO)
		writeb(dma_stat,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
	else
		outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS);
}
EXPORT_SYMBOL_GPL(ide_dma_host_set);

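/*
 * For reference (per the SFF-8038i bus-master spec), the DMA status
 * byte read and rewritten above is laid out as:
 *
 *	bit 0 - DMA active
 *	bit 1 - DMA error
 *	bit 2 - interrupt status
 *	bit 5 - drive 0 DMA capable
 *	bit 6 - drive 1 DMA capable
 *
 * hence the (1 << (5 + unit)) computation for the per-drive
 * "DMA capable" latch.
 */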

/**
 * ide_build_dmatable - build IDE DMA table
 * @drive: the drive to build the DMA table for
 * @rq: the request holding the scatter/gather list
 *
 * ide_build_dmatable() prepares a dma request. We map the command
 * to get the pci bus addresses of the buffers and then build up
 * the PRD table that the IDE layer wants to be fed.
 *
 * Most chipsets correctly interpret a length of 0x0000 as 64KB,
 * but at least one (the CS5530) misinterprets it as zero (!).
 * So we break the 64KB entry into two 32KB entries instead.
 *
 * Returns the number of built PRD entries if all went okay,
 * returns 0 otherwise.
 *
 * May also be invoked from trm290.c
 */

int ide_build_dmatable(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	__le32 *table = (__le32 *)hwif->dmatable_cpu;
	unsigned int count = 0;
	int i;
	struct scatterlist *sg;
	u8 is_trm290 = !!(hwif->host_flags & IDE_HFLAG_TRM290);

	hwif->sg_nents = ide_build_sglist(drive, rq);
	if (hwif->sg_nents == 0)
		return 0;

	for_each_sg(hwif->sg_table, sg, hwif->sg_nents, i) {
		u32 cur_addr, cur_len, xcount, bcount;

		cur_addr = sg_dma_address(sg);
		cur_len = sg_dma_len(sg);

		/*
		 * Fill in the DMA table, without crossing any 64kB boundaries.
		 * Most hardware requires 16-bit alignment of all blocks,
		 * but the trm290 requires 32-bit alignment.
		 */

		while (cur_len) {
			if (count++ >= PRD_ENTRIES)
				goto use_pio_instead;

			bcount = 0x10000 - (cur_addr & 0xffff);
			if (bcount > cur_len)
				bcount = cur_len;
			*table++ = cpu_to_le32(cur_addr);
			xcount = bcount & 0xffff;
			if (is_trm290)
				xcount = ((xcount >> 2) - 1) << 16;
			else if (xcount == 0x0000) {
				if (count++ >= PRD_ENTRIES)
					goto use_pio_instead;
				*table++ = cpu_to_le32(0x8000);
				*table++ = cpu_to_le32(cur_addr + 0x8000);
				xcount = 0x8000;
			}
			*table++ = cpu_to_le32(xcount);
			cur_addr += bcount;
			cur_len -= bcount;
		}
	}

	if (count) {
		if (!is_trm290)
			*--table |= cpu_to_le32(0x80000000);
		return count;
	}

use_pio_instead:
	printk(KERN_ERR "%s: %s\n", drive->name,
		count ? "DMA table too small" : "empty DMA table?");

	ide_destroy_dmatable(drive);

	return 0; /* revert to PIO for this request */
}
EXPORT_SYMBOL_GPL(ide_build_dmatable);

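/*
 * For reference, each PRD entry built above is a pair of little-endian
 * 32-bit words, per the SFF-8038i spec; an illustrative (unused) C
 * view of one entry:
 *
 *	struct prd_entry {
 *		__le32 addr;		physical base, bit 0 must be zero
 *		__le32 flags_len;	bit 31 = end-of-table (EOT),
 *					bits 15..0 = byte count
 *					(0x0000 nominally means 64KB)
 *	};
 *
 * The trm290 variant instead stores "(count / 4) - 1" in the upper
 * 16 bits and skips the EOT bit, matching the is_trm290 branches
 * above.
 */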

/**
 * ide_dma_setup - begin a DMA phase
 * @drive: target device
 *
 * Build an IDE DMA PRD (IDE speak for scatter/gather table)
 * and then set up the DMA transfer registers for a device
 * that follows generic IDE PCI DMA behaviour. Controllers can
 * override this function if they need to.
 *
 * Returns 0 on success. If a PIO fallback is required then 1
 * is returned.
 */

int ide_dma_setup(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = hwif->hwgroup->rq;
	unsigned int reading = rq_data_dir(rq) ? 0 : ATA_DMA_WR;
	u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
	u8 dma_stat;

	/* fall back to PIO! */
	if (!ide_build_dmatable(drive, rq)) {
		ide_map_sg(drive, rq);
		return 1;
	}

	/* PRD table */
	if (hwif->host_flags & IDE_HFLAG_MMIO)
		writel(hwif->dmatable_dma,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_TABLE_OFS));
	else
		outl(hwif->dmatable_dma, hwif->dma_base + ATA_DMA_TABLE_OFS);

	/* specify r/w */
	if (mmio)
		writeb(reading, (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
	else
		outb(reading, hwif->dma_base + ATA_DMA_CMD);

	/* read DMA status for INTR & ERROR flags */
	dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);

	/* clear INTR & ERROR flags */
	if (mmio)
		writeb(dma_stat | ATA_DMA_ERR | ATA_DMA_INTR,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
	else
		outb(dma_stat | ATA_DMA_ERR | ATA_DMA_INTR,
		     hwif->dma_base + ATA_DMA_STATUS);

	drive->waiting_for_dma = 1;
	return 0;
}
EXPORT_SYMBOL_GPL(ide_dma_setup);

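/*
 * For orientation, the generic bus-master register block programmed
 * above sits at hwif->dma_base and, matching the ATA_DMA_* offsets
 * used in this file, looks like:
 *
 *	dma_base + 0 (ATA_DMA_CMD)       - command: start/stop, direction
 *	dma_base + 2 (ATA_DMA_STATUS)    - status: active, error, interrupt
 *	dma_base + 4 (ATA_DMA_TABLE_OFS) - PRD table physical address
 */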

/**
 * dma_timer_expiry - handle a DMA timeout
 * @drive: Drive that timed out
 *
 * An IDE DMA transfer timed out. In the event of an error we ask
 * the driver to resolve the problem; if a DMA transfer is still
 * in progress we continue to wait (arguably we need to add a
 * secondary 'I don't care what the drive thinks' timeout here).
 * Finally, if we have an interrupt we let it complete the I/O,
 * but only once: we clear expiry and if it's still not completed
 * after WAIT_CMD, we error and retry in PIO.
 * This can occur if an interrupt is lost or due to a hang or bugs.
 */

static int dma_timer_expiry(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);

	printk(KERN_WARNING "%s: %s: DMA status (0x%02x)\n",
		drive->name, __func__, dma_stat);

	if ((dma_stat & 0x18) == 0x18)	/* BUSY Stupid Early Timer !! */
		return WAIT_CMD;

	hwif->hwgroup->expiry = NULL;	/* one free ride for now */

	if (dma_stat & ATA_DMA_ERR)	/* ERROR */
		return -1;

	if (dma_stat & ATA_DMA_ACTIVE)	/* DMAing */
		return WAIT_CMD;

	if (dma_stat & ATA_DMA_INTR)	/* Got an Interrupt */
		return WAIT_CMD;

	return 0;	/* Status is unknown -- reset the bus */
}

void ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
{
	/* issue cmd to drive */
	ide_execute_command(drive, command, &ide_dma_intr, 2 * WAIT_CMD,
			    dma_timer_expiry);
}
EXPORT_SYMBOL_GPL(ide_dma_exec_cmd);

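/*
 * Note (assuming WAIT_CMD is still defined as 10 * HZ in this kernel's
 * <linux/ide.h>): the 2 * WAIT_CMD above gives the transfer roughly
 * twenty seconds before dma_timer_expiry() is first consulted.
 */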

void ide_dma_start(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_cmd;

	/* Note that this is done *after* the cmd has
	 * been issued to the drive, as per the BM-IDE spec.
	 * The Promise Ultra33 doesn't work correctly when
	 * we do this part before issuing the drive cmd.
	 */
	if (hwif->host_flags & IDE_HFLAG_MMIO) {
		dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
		writeb(dma_cmd | ATA_DMA_START,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
	} else {
		dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
		outb(dma_cmd | ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD);
	}

	wmb();
}
EXPORT_SYMBOL_GPL(ide_dma_start);

/* returns non-zero on error, 0 otherwise */
int ide_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
	u8 dma_stat = 0, dma_cmd = 0, mask;

	drive->waiting_for_dma = 0;

	/* stop DMA */
	if (mmio) {
		dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
		writeb(dma_cmd & ~ATA_DMA_START,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
	} else {
		dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
		outb(dma_cmd & ~ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD);
	}

	/* get DMA status */
	dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);

	if (mmio)
		/* clear the INTR & ERROR bits */
		writeb(dma_stat | ATA_DMA_ERR | ATA_DMA_INTR,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
	else
		outb(dma_stat | ATA_DMA_ERR | ATA_DMA_INTR,
		     hwif->dma_base + ATA_DMA_STATUS);

	/* purge DMA mappings */
	ide_destroy_dmatable(drive);
	wmb();

	/* verify good DMA status */
	mask = ATA_DMA_ACTIVE | ATA_DMA_ERR | ATA_DMA_INTR;
	if ((dma_stat & mask) != ATA_DMA_INTR)
		return 0x10 | dma_stat;
	return 0;
}
EXPORT_SYMBOL_GPL(ide_dma_end);

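/*
 * A clean end of transfer leaves only ATA_DMA_INTR set, with
 * ATA_DMA_ACTIVE and ATA_DMA_ERR clear; any other combination (still
 * active, error latched, or no interrupt at all) is folded into the
 * 0x10 | dma_stat return value above, giving the caller a non-zero
 * failure code that still carries the raw status bits.
 */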

/* returns 1 if dma irq issued, 0 otherwise */
int ide_dma_test_irq(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);

	return (dma_stat & ATA_DMA_INTR) ? 1 : 0;
}
EXPORT_SYMBOL_GPL(ide_dma_test_irq);

const struct ide_dma_ops sff_dma_ops = {
	.dma_host_set		= ide_dma_host_set,
	.dma_setup		= ide_dma_setup,
	.dma_exec_cmd		= ide_dma_exec_cmd,
	.dma_start		= ide_dma_start,
	.dma_end		= ide_dma_end,
	.dma_test_irq		= ide_dma_test_irq,
	.dma_timeout		= ide_dma_timeout,
	.dma_lost_irq		= ide_dma_lost_irq,
};
EXPORT_SYMBOL_GPL(sff_dma_ops);