/*
 * dec_esp.c: Driver for SCSI chips on IOASIC based TURBOchannel DECstations
 *            and TURBOchannel PMAZ-A cards
 *
 * TURBOchannel changes by Harald Koerfgen
 * PMAZ-A support by David Airlie
 *
 * based on jazz_esp.c:
 * Copyright (C) 1997 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
 *
 * jazz_esp is based on David S. Miller's ESP driver and cyber_esp
 *
 * 20000819 - Small PMAZ-AA fixes by Florian Lohoff <flo@rfc822.org>
 *            Be warned: the PMAZ-AA currently works as a single card only.
 *            Don't try to put multiple cards in one machine - they are
 *            both detected, but it may crash under high load, garbling your
 *            data.
 * 20001005 - Initialization fixes for 2.4.0-test9
 *            Florian Lohoff <flo@rfc822.org>
 *
 * Copyright (C) 2002, 2003  Maciej W. Rozycki
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/stat.h>

#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/system.h>

#include <asm/dec/interrupts.h>
#include <asm/dec/ioasic.h>
#include <asm/dec/ioasic_addrs.h>
#include <asm/dec/ioasic_ints.h>
#include <asm/dec/machtype.h>
#include <asm/dec/tc.h>

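/*
 * Offsets into a PMAZ-A card's TURBOchannel slot address space: the ESP
 * (NCR53C94) registers, the card's DMA address register, its on-board
 * buffer SRAM and (presumably) a diagnostics area.
 */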
#define DEC_SCSI_SREG 0
#define DEC_SCSI_DMAREG 0x40000
#define DEC_SCSI_SRAM 0x80000
#define DEC_SCSI_DIAG 0xC0000

#include "scsi.h"
#include <scsi/scsi_host.h>
#include "NCR53C9x.h"

static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
static void dma_drain(struct NCR_ESP *esp);
static int dma_can_transfer(struct NCR_ESP *esp, struct scsi_cmnd *sp);
static void dma_dump_state(struct NCR_ESP *esp);
static void dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length);
static void dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length);
static void dma_ints_off(struct NCR_ESP *esp);
static void dma_ints_on(struct NCR_ESP *esp);
static int dma_irq_p(struct NCR_ESP *esp);
static int dma_ports_p(struct NCR_ESP *esp);
static void dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write);
static void dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd *sp);
static void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, struct scsi_cmnd *sp);
static void dma_advance_sg(struct scsi_cmnd *sp);

static void pmaz_dma_drain(struct NCR_ESP *esp);
static void pmaz_dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length);
static void pmaz_dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length);
static void pmaz_dma_ints_off(struct NCR_ESP *esp);
static void pmaz_dma_ints_on(struct NCR_ESP *esp);
static void pmaz_dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write);
static void pmaz_dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd *sp);

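/*
 * The PMAZ-A has 128KB (TC_ESP_RAM_SIZE) of on-board buffer RAM.  Dividing
 * it by ESP_NCMD and rounding down to int alignment looks intended to give
 * one DMA area per outstanding command, although the PMAZ-A code below only
 * ever bounces data through the single area at offset ESP_TGT_DMA_SIZE.
 */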
#define TC_ESP_RAM_SIZE 0x20000
#define ESP_TGT_DMA_SIZE ((TC_ESP_RAM_SIZE/7) & ~(sizeof(int)-1))
#define ESP_NCMD 7

#define TC_ESP_DMAR_MASK  0x1ffff
#define TC_ESP_DMAR_WRITE 0x80000000
#define TC_ESP_DMA_ADDR(x) ((unsigned)(x) & TC_ESP_DMAR_MASK)

u32 esp_virt_buffer;
int scsi_current_length;

volatile unsigned char cmd_buffer[16];
volatile unsigned char pmaz_cmd_buffer[16];
/* This is where all commands are put
 * before they are transferred to the ESP chip
 * via PIO.
 */

static irqreturn_t scsi_dma_merr_int(int, void *, struct pt_regs *);
static irqreturn_t scsi_dma_err_int(int, void *, struct pt_regs *);
static irqreturn_t scsi_dma_int(int, void *, struct pt_regs *);

static int dec_esp_detect(struct scsi_host_template *tpnt);

static int dec_esp_release(struct Scsi_Host *shost)
{
	if (shost->irq)
		free_irq(shost->irq, shost);
	if (shost->io_port && shost->n_io_port)
		release_region(shost->io_port, shost->n_io_port);
	scsi_unregister(shost);
	return 0;
}

static struct scsi_host_template driver_template = {
	.proc_name = "dec_esp",
	.proc_info = esp_proc_info,
	.name = "NCR53C94",
	.detect = dec_esp_detect,
	.slave_alloc = esp_slave_alloc,
	.slave_destroy = esp_slave_destroy,
	.release = dec_esp_release,
	.info = esp_info,
	.queuecommand = esp_queue,
	.eh_abort_handler = esp_abort,
	.eh_bus_reset_handler = esp_reset,
	.can_queue = 7,
	.this_id = 7,
	.sg_tablesize = SG_ALL,
	.cmd_per_lun = 1,
	.use_clustering = DISABLE_CLUSTERING,
};

#include "scsi_module.c"

/***************************************************************** Detection */
static int dec_esp_detect(struct scsi_host_template *tpnt)
{
	struct NCR_ESP *esp;
	struct ConfigDev *esp_dev;
	int slot;
	unsigned long mem_start;

	if (IOASIC) {
		esp_dev = 0;
		esp = esp_allocate(tpnt, (void *) esp_dev);

		/* Do command transfer with programmed I/O */
		esp->do_pio_cmds = 1;

		/* Required functions */
		esp->dma_bytes_sent = &dma_bytes_sent;
		esp->dma_can_transfer = &dma_can_transfer;
		esp->dma_dump_state = &dma_dump_state;
		esp->dma_init_read = &dma_init_read;
		esp->dma_init_write = &dma_init_write;
		esp->dma_ints_off = &dma_ints_off;
		esp->dma_ints_on = &dma_ints_on;
		esp->dma_irq_p = &dma_irq_p;
		esp->dma_ports_p = &dma_ports_p;
		esp->dma_setup = &dma_setup;

		/* Optional functions */
		esp->dma_barrier = 0;
		esp->dma_drain = &dma_drain;
		esp->dma_invalidate = 0;
		esp->dma_irq_entry = 0;
		esp->dma_irq_exit = 0;
		esp->dma_poll = 0;
		esp->dma_reset = 0;
		esp->dma_led_off = 0;
		esp->dma_led_on = 0;

		/* virtual DMA functions */
		esp->dma_mmu_get_scsi_one = &dma_mmu_get_scsi_one;
		esp->dma_mmu_get_scsi_sgl = &dma_mmu_get_scsi_sgl;
		esp->dma_mmu_release_scsi_one = 0;
		esp->dma_mmu_release_scsi_sgl = 0;
		esp->dma_advance_sg = &dma_advance_sg;

		/* SCSI chip speed */
		esp->cfreq = 25000000;

		esp->dregs = 0;

		/* ESP register base */
		esp->eregs = (struct ESP_regs *) (system_base + IOASIC_SCSI);

		/* Set the command buffer */
		esp->esp_command = (volatile unsigned char *) cmd_buffer;

		/* get virtual dma address for command buffer */
		esp->esp_command_dvma = virt_to_phys(cmd_buffer);

		esp->irq = dec_interrupt[DEC_IRQ_ASC];

		esp->scsi_id = 7;

		/* Check for differential SCSI-bus */
		esp->diff = 0;

		esp_initialize(esp);

		if (request_irq(esp->irq, esp_intr, SA_INTERRUPT,
				"ncr53c94", esp->ehost))
			goto err_dealloc;
		if (request_irq(dec_interrupt[DEC_IRQ_ASC_MERR],
				scsi_dma_merr_int, SA_INTERRUPT,
				"ncr53c94 error", esp->ehost))
			goto err_free_irq;
		if (request_irq(dec_interrupt[DEC_IRQ_ASC_ERR],
				scsi_dma_err_int, SA_INTERRUPT,
				"ncr53c94 overrun", esp->ehost))
			goto err_free_irq_merr;
		if (request_irq(dec_interrupt[DEC_IRQ_ASC_DMA],
				scsi_dma_int, SA_INTERRUPT,
				"ncr53c94 dma", esp->ehost))
			goto err_free_irq_err;
	}

	if (TURBOCHANNEL) {
		while ((slot = search_tc_card("PMAZ-AA")) >= 0) {
			claim_tc_card(slot);

			esp_dev = 0;
			esp = esp_allocate(tpnt, (void *) esp_dev);

			mem_start = get_tc_base_addr(slot);

			/* Store base addr into esp struct */
			esp->slot = PHYSADDR(mem_start);

			esp->dregs = 0;
			esp->eregs = (struct ESP_regs *) (mem_start + DEC_SCSI_SREG);
			esp->do_pio_cmds = 1;

			/* Set the command buffer */
			esp->esp_command = (volatile unsigned char *) pmaz_cmd_buffer;

			/* get virtual dma address for command buffer */
			esp->esp_command_dvma = virt_to_phys(pmaz_cmd_buffer);

			esp->cfreq = get_tc_speed();

			esp->irq = get_tc_irq_nr(slot);

			/* Required functions */
			esp->dma_bytes_sent = &dma_bytes_sent;
			esp->dma_can_transfer = &dma_can_transfer;
			esp->dma_dump_state = &dma_dump_state;
			esp->dma_init_read = &pmaz_dma_init_read;
			esp->dma_init_write = &pmaz_dma_init_write;
			esp->dma_ints_off = &pmaz_dma_ints_off;
			esp->dma_ints_on = &pmaz_dma_ints_on;
			esp->dma_irq_p = &dma_irq_p;
			esp->dma_ports_p = &dma_ports_p;
			esp->dma_setup = &pmaz_dma_setup;

			/* Optional functions */
			esp->dma_barrier = 0;
			esp->dma_drain = &pmaz_dma_drain;
			esp->dma_invalidate = 0;
			esp->dma_irq_entry = 0;
			esp->dma_irq_exit = 0;
			esp->dma_poll = 0;
			esp->dma_reset = 0;
			esp->dma_led_off = 0;
			esp->dma_led_on = 0;

			esp->dma_mmu_get_scsi_one = pmaz_dma_mmu_get_scsi_one;
			esp->dma_mmu_get_scsi_sgl = 0;
			esp->dma_mmu_release_scsi_one = 0;
			esp->dma_mmu_release_scsi_sgl = 0;
			esp->dma_advance_sg = 0;

			if (request_irq(esp->irq, esp_intr, SA_INTERRUPT,
					"PMAZ_AA", esp->ehost)) {
				esp_deallocate(esp);
				release_tc_card(slot);
				continue;
			}
			esp->scsi_id = 7;
			esp->diff = 0;
			esp_initialize(esp);
		}
	}

	if (nesps) {
		printk("ESP: Total of %d ESP hosts found, "
		       "%d actually in use.\n", nesps, esps_in_use);
		esps_running = esps_in_use;
		return esps_in_use;
	}
	return 0;

err_free_irq_err:
	free_irq(dec_interrupt[DEC_IRQ_ASC_ERR], esp->ehost);
err_free_irq_merr:
	free_irq(dec_interrupt[DEC_IRQ_ASC_MERR], esp->ehost);
err_free_irq:
	free_irq(esp->irq, esp->ehost);
err_dealloc:
	esp_deallocate(esp);
	return 0;
}

/************************************************************* DMA Functions */
static irqreturn_t scsi_dma_merr_int(int irq, void *dev_id, struct pt_regs *regs)
{
	printk("Got unexpected SCSI DMA Interrupt! < SCSI_DMA_MEMRDERR >\n");

	return IRQ_HANDLED;
}

static irqreturn_t scsi_dma_err_int(int irq, void *dev_id, struct pt_regs *regs)
{
	/* empty */

	return IRQ_HANDLED;
}

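/*
 * The IOASIC SCSI DMA pointer registers hold the buffer address shifted
 * left by 3 bits (hence the >> 3 and << 3 below and in dma_init_read() and
 * dma_init_write()).  On the page-crossing interrupt we simply advance the
 * next-page base pointer to the following page.
 */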
static irqreturn_t scsi_dma_int(int irq, void *dev_id, struct pt_regs *regs)
{
	u32 scsi_next_ptr;

	scsi_next_ptr = ioasic_read(IO_REG_SCSI_DMA_P);

	/* next page */
	scsi_next_ptr = (((scsi_next_ptr >> 3) + PAGE_SIZE) & PAGE_MASK) << 3;
	ioasic_write(IO_REG_SCSI_DMA_BP, scsi_next_ptr);
	fast_iob();

	return IRQ_HANDLED;
}

static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
{
	return fifo_count;
}

static void dma_drain(struct NCR_ESP *esp)
{
	u32 nw, data0, data1, scsi_data_ptr;
	u16 *p;

	nw = ioasic_read(IO_REG_SCSI_SCR);

	/*
	 * Is there anything left in the DMA buffers?
	 */
	if (nw) {
		scsi_data_ptr = ioasic_read(IO_REG_SCSI_DMA_P) >> 3;
		p = phys_to_virt(scsi_data_ptr);
		switch (nw) {
		case 1:
			data0 = ioasic_read(IO_REG_SCSI_SDR0);
			p[0] = data0 & 0xffff;
			break;
		case 2:
			data0 = ioasic_read(IO_REG_SCSI_SDR0);
			p[0] = data0 & 0xffff;
			p[1] = (data0 >> 16) & 0xffff;
			break;
		case 3:
			data0 = ioasic_read(IO_REG_SCSI_SDR0);
			data1 = ioasic_read(IO_REG_SCSI_SDR1);
			p[0] = data0 & 0xffff;
			p[1] = (data0 >> 16) & 0xffff;
			p[2] = data1 & 0xffff;
			break;
		default:
			printk("Strange: %d words in dma buffer left\n", nw);
			break;
		}
	}
}

static int dma_can_transfer(struct NCR_ESP *esp, struct scsi_cmnd *sp)
{
	return sp->SCp.this_residual;
}

static void dma_dump_state(struct NCR_ESP *esp)
{
}

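/*
 * Program the IOASIC for a SCSI DMA transfer: disable the SCSI DMA engine,
 * clear the halfword count register, load the (physical) buffer address
 * shifted left by 3 and the base pointer for the next page, then set the
 * direction bit (IO_SSR_SCSI_DMA_DIR is set for device-to-memory transfers,
 * i.e. dma_init_read()) and re-enable the engine.
 */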
static void dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length)
{
	u32 scsi_next_ptr, ioasic_ssr;
	unsigned long flags;

	if (vaddress & 3)
		panic("dec_esp.c: unable to handle partial word transfers, yet...");

	dma_cache_wback_inv((unsigned long) phys_to_virt(vaddress), length);

	spin_lock_irqsave(&ioasic_ssr_lock, flags);

	fast_mb();
	ioasic_ssr = ioasic_read(IO_REG_SSR);

	ioasic_ssr &= ~IO_SSR_SCSI_DMA_EN;
	ioasic_write(IO_REG_SSR, ioasic_ssr);

	fast_wmb();
	ioasic_write(IO_REG_SCSI_SCR, 0);
	ioasic_write(IO_REG_SCSI_DMA_P, vaddress << 3);

	/* prepare for next page */
	scsi_next_ptr = ((vaddress + PAGE_SIZE) & PAGE_MASK) << 3;
	ioasic_write(IO_REG_SCSI_DMA_BP, scsi_next_ptr);

	ioasic_ssr |= (IO_SSR_SCSI_DMA_DIR | IO_SSR_SCSI_DMA_EN);
	fast_wmb();
	ioasic_write(IO_REG_SSR, ioasic_ssr);

	fast_iob();
	spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
}

static void dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length)
{
	u32 scsi_next_ptr, ioasic_ssr;
	unsigned long flags;

	if (vaddress & 3)
		panic("dec_esp.c: unable to handle partial word transfers, yet...");

	dma_cache_wback_inv((unsigned long) phys_to_virt(vaddress), length);

	spin_lock_irqsave(&ioasic_ssr_lock, flags);

	fast_mb();
	ioasic_ssr = ioasic_read(IO_REG_SSR);

	ioasic_ssr &= ~(IO_SSR_SCSI_DMA_DIR | IO_SSR_SCSI_DMA_EN);
	ioasic_write(IO_REG_SSR, ioasic_ssr);

	fast_wmb();
	ioasic_write(IO_REG_SCSI_SCR, 0);
	ioasic_write(IO_REG_SCSI_DMA_P, vaddress << 3);

	/* prepare for next page */
	scsi_next_ptr = ((vaddress + PAGE_SIZE) & PAGE_MASK) << 3;
	ioasic_write(IO_REG_SCSI_DMA_BP, scsi_next_ptr);

	ioasic_ssr |= IO_SSR_SCSI_DMA_EN;
	fast_wmb();
	ioasic_write(IO_REG_SSR, ioasic_ssr);

	fast_iob();
	spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
}

static void dma_ints_off(struct NCR_ESP *esp)
{
	disable_irq(dec_interrupt[DEC_IRQ_ASC_DMA]);
}

static void dma_ints_on(struct NCR_ESP *esp)
{
	enable_irq(dec_interrupt[DEC_IRQ_ASC_DMA]);
}

static int dma_irq_p(struct NCR_ESP *esp)
{
	return (esp->eregs->esp_status & ESP_STAT_INTR);
}

static int dma_ports_p(struct NCR_ESP *esp)
{
	/*
	 * FIXME: what's this good for?
	 */
	return 1;
}

static void dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write)
{
	/*
	 * DMA_ST_WRITE means "move data from device to memory"
	 * so when (write) is true, it actually means READ!
	 */
	if (write)
		dma_init_read(esp, addr, count);
	else
		dma_init_write(esp, addr, count);
}

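/*
 * "Virtual DMA" hooks: the IOASIC DMA engine operates on physical
 * addresses, so the buffer and scatter-gather pointers are translated
 * with virt_to_phys()/page_to_phys() before the transfer is set up.
 */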
static void dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd *sp)
{
	sp->SCp.ptr = (char *)virt_to_phys(sp->request_buffer);
}

static void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, struct scsi_cmnd *sp)
{
	int sz = sp->SCp.buffers_residual;
	struct scatterlist *sg = sp->SCp.buffer;

	while (sz >= 0) {
		sg[sz].dma_address = page_to_phys(sg[sz].page) + sg[sz].offset;
		sz--;
	}
	sp->SCp.ptr = (char *)(sp->SCp.buffer->dma_address);
}

static void dma_advance_sg(struct scsi_cmnd *sp)
{
	sp->SCp.ptr = (char *)(sp->SCp.buffer->dma_address);
}

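/*
 * PMAZ-A transfers: the ESP on the card DMAs to and from the card's
 * on-board SRAM, so data is bounced through it.  For writes,
 * pmaz_dma_init_write() copies the data into the SRAM before starting the
 * transfer; for reads, pmaz_dma_drain() copies it back out to main memory
 * once the transfer has completed.
 */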
static void pmaz_dma_drain(struct NCR_ESP *esp)
{
	memcpy(phys_to_virt(esp_virt_buffer),
	       (void *)KSEG1ADDR(esp->slot + DEC_SCSI_SRAM + ESP_TGT_DMA_SIZE),
	       scsi_current_length);
}

static void pmaz_dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length)
{
	volatile u32 *dmareg =
		(volatile u32 *)KSEG1ADDR(esp->slot + DEC_SCSI_DMAREG);

	if (length > ESP_TGT_DMA_SIZE)
		length = ESP_TGT_DMA_SIZE;

	*dmareg = TC_ESP_DMA_ADDR(ESP_TGT_DMA_SIZE);

	iob();

	esp_virt_buffer = vaddress;
	scsi_current_length = length;
}

static void pmaz_dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length)
{
	volatile u32 *dmareg =
		(volatile u32 *)KSEG1ADDR(esp->slot + DEC_SCSI_DMAREG);

	memcpy((void *)KSEG1ADDR(esp->slot + DEC_SCSI_SRAM + ESP_TGT_DMA_SIZE),
	       phys_to_virt(vaddress), length);

	wmb();
	*dmareg = TC_ESP_DMAR_WRITE | TC_ESP_DMA_ADDR(ESP_TGT_DMA_SIZE);

	iob();
}

static void pmaz_dma_ints_off(struct NCR_ESP *esp)
{
}

static void pmaz_dma_ints_on(struct NCR_ESP *esp)
{
}

static void pmaz_dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write)
{
	/*
	 * DMA_ST_WRITE means "move data from device to memory"
	 * so when (write) is true, it actually means READ!
	 */
	if (write)
		pmaz_dma_init_read(esp, addr, count);
	else
		pmaz_dma_init_write(esp, addr, count);
}

static void pmaz_dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd *sp)
{
	sp->SCp.ptr = (char *)virt_to_phys(sp->request_buffer);