/*
 * pata_pdc202xx_old.c - Promise PDC202xx PATA for new ATA layer
 *                       (C) 2005 Red Hat Inc
 *                       Alan Cox <alan@redhat.com>
 *
 * Based in part on linux/drivers/ide/pci/pdc202xx_old.c
 *
 * First cut with LBA48/ATAPI
 *
 * TODO:
 *      Is a channel interlock/reset required on both channels?
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME "pata_pdc202xx_old"
#define DRV_VERSION "0.2.1"

/**
 * pdc2024x_pre_reset - probe begin
 * @ap: ATA port
 *
 * Set up cable type and use generic probe init
 */

static int pdc2024x_pre_reset(struct ata_port *ap)
{
        ap->cbl = ATA_CBL_PATA40;
        return ata_std_prereset(ap);
}

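/**
 * pdc2024x_error_handler - error handling hook
 * @ap: ATA port
 *
 * Run the BMDMA error handling sequence using our cable type aware
 * pre-reset and the standard soft reset and post reset helpers.
 */
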
static void pdc2024x_error_handler(struct ata_port *ap)
{
        ata_bmdma_drive_eh(ap, pdc2024x_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}

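/**
 * pdc2026x_pre_reset - probe begin
 * @ap: ATA port
 *
 * Read the cable detect bits from PCI config register 0x50 (bit 10 for
 * the first channel, bit 11 for the second); a set bit indicates an
 * 80 wire cable. Then use the generic probe init.
 */
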
static int pdc2026x_pre_reset(struct ata_port *ap)
{
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        u16 cis;

        pci_read_config_word(pdev, 0x50, &cis);
        if (cis & (1 << (10 + ap->port_no)))
                ap->cbl = ATA_CBL_PATA80;
        else
                ap->cbl = ATA_CBL_PATA40;

        return ata_std_prereset(ap);
}

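/**
 * pdc2026x_error_handler - error handling hook
 * @ap: ATA port
 *
 * Run the BMDMA error handling sequence using the PDC2026x cable
 * detecting pre-reset and the standard soft reset and post reset helpers.
 */
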
static void pdc2026x_error_handler(struct ata_port *ap)
{
        ata_bmdma_drive_eh(ap, pdc2026x_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}

/**
 * pdc_configure_piomode - set chip PIO timing
 * @ap: ATA interface
 * @adev: ATA device
 * @pio: PIO mode
 *
 * Called to do the PIO mode setup. Our timing registers are shared
 * so a set_dmamode call will undo any work we do here and vice
 * versa.
 */

static void pdc_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
{
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        int port = 0x60 + 4 * ap->port_no + 2 * adev->devno;
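        /* Each device has its own timing registers at 0x60 + 4 * channel
           + 2 * unit. One timing word per PIO mode 0-4: the high byte is
           merged into register A (port), the low byte into register B
           (port + 1). */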
        static u16 pio_timing[5] = {
                0x0913, 0x050C, 0x0308, 0x0206, 0x0104
        };
        u8 r_ap, r_bp;

        pci_read_config_byte(pdev, port, &r_ap);
        pci_read_config_byte(pdev, port + 1, &r_bp);
        r_ap &= ~0x3F;  /* Preserve ERRDY_EN, SYNC_IN */
        r_bp &= ~0x07;
        r_ap |= (pio_timing[pio] >> 8);
        r_bp |= (pio_timing[pio] & 0xFF);

        if (ata_pio_need_iordy(adev))
                r_ap |= 0x20;   /* IORDY enable */
        if (adev->class == ATA_DEV_ATA)
                r_ap |= 0x10;   /* FIFO enable */
        pci_write_config_byte(pdev, port, r_ap);
        pci_write_config_byte(pdev, port + 1, r_bp);
}

/**
 * pdc_set_piomode - set initial PIO mode data
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Called to do the PIO mode setup. Our timing registers are shared
 * but we want to set the PIO timing by default.
 */

static void pdc_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
        pdc_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
}

/**
 * pdc_set_dmamode - set DMA mode in chip
 * @ap: ATA interface
 * @adev: ATA device
 *
 * Load DMA cycle times into the chip ready for a DMA transfer
 * to occur.
 */

static void pdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        int port = 0x60 + 4 * ap->port_no + 2 * adev->devno;
        static u8 udma_timing[6][2] = {
                { 0x60, 0x03 }, /* 33 MHz clock */
                { 0x40, 0x02 },
                { 0x20, 0x01 },
                { 0x40, 0x02 }, /* 66 MHz clock */
                { 0x20, 0x01 },
                { 0x20, 0x01 }
        };
        u8 r_bp, r_cp;

        pci_read_config_byte(pdev, port + 1, &r_bp);
        pci_read_config_byte(pdev, port + 2, &r_cp);

        r_bp &= ~0xF0;
        r_cp &= ~0x0F;

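        /* For UDMA the table entry for the mode supplies the top nibble of
           register B and the bottom nibble of register C; for MWDMA modes
           0-2 the cycle field of register C is set to 5, 4 or 3. */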
        if (adev->dma_mode >= XFER_UDMA_0) {
                int speed = adev->dma_mode - XFER_UDMA_0;
                r_bp |= udma_timing[speed][0];
                r_cp |= udma_timing[speed][1];
        } else {
                int speed = adev->dma_mode - XFER_MW_DMA_0;
                r_bp |= 0x60;
                r_cp |= (5 - speed);
        }
        pci_write_config_byte(pdev, port + 1, r_bp);
        pci_write_config_byte(pdev, port + 2, r_cp);
}

/**
 * pdc2026x_bmdma_start - DMA engine begin
 * @qc: ATA command
 *
 * In UDMA3 or higher we have to clock switch for the duration of the
 * DMA transfer sequence.
 */

static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct ata_device *adev = qc->dev;
        struct ata_taskfile *tf = &qc->tf;
        int sel66 = ap->port_no ? 0x08 : 0x02;

        unsigned long master = ap->host->ports[0]->ioaddr.bmdma_addr;
        unsigned long clock = master + 0x11;
        unsigned long atapi_reg = master + 0x20 + (4 * ap->port_no);

        u32 len;

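        /* sel66 is the 66 MHz clock select bit for this channel in the
           clock register at master + 0x11: bit 1 for port 0, bit 3 for
           port 1. Both channels share this register, hence the host
           level locking note below. */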
        /* Check we keep host level locking here */
        if (adev->dma_mode >= XFER_UDMA_2)
                outb(inb(clock) | sel66, clock);
        else
                outb(inb(clock) & ~sel66, clock);

        /* The DMA clocks may have been trashed by a reset. FIXME: make conditional
           and move to qc_issue ? */
        pdc_set_dmamode(ap, qc->dev);

        /* Cases the state machine will not complete correctly without help */
        if ((tf->flags & ATA_TFLAG_LBA48) || tf->protocol == ATA_PROT_ATAPI_DMA) {
                if (tf->flags & ATA_TFLAG_LBA48)
                        len = qc->nsect * 512;
                else
                        len = qc->nbytes;

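                /* Program the transfer length in bytes into the per-channel
                   ATAPI register, tagged with the transfer direction in the
                   top byte (0x06 for writes, 0x05 for reads). */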
                if (tf->flags & ATA_TFLAG_WRITE)
                        len |= 0x06000000;
                else
                        len |= 0x05000000;

                outl(len, atapi_reg);
        }

        /* Activate DMA */
        ata_bmdma_start(qc);
}

/**
 * pdc2026x_bmdma_stop - DMA engine stop
 * @qc: ATA command
 *
 * After a DMA completes we need to put the clock back to 33 MHz for
 * PIO timings.
 */

static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct ata_device *adev = qc->dev;
        struct ata_taskfile *tf = &qc->tf;

        int sel66 = ap->port_no ? 0x08 : 0x02;
        /* The clock bits are in the same register for both channels */
        unsigned long master = ap->host->ports[0]->ioaddr.bmdma_addr;
        unsigned long clock = master + 0x11;
        unsigned long atapi_reg = master + 0x20 + (4 * ap->port_no);

        /* Cases the state machine will not complete correctly */
        if (tf->protocol == ATA_PROT_ATAPI_DMA || (tf->flags & ATA_TFLAG_LBA48)) {
                outl(0, atapi_reg);
                outb(inb(clock) & ~sel66, clock);
        }
        /* Check we keep host level locking here */
        /* Flip back to 33 MHz for PIO */
        if (adev->dma_mode >= XFER_UDMA_2)
                outb(inb(clock) & ~sel66, clock);

        ata_bmdma_stop(qc);
}

/**
 * pdc2026x_dev_config - device setup hook
 * @ap: ATA port
 * @adev: newly found device
 *
 * Perform chip specific early setup. We need to limit transfers to
 * 256 sectors (an 8-bit sector count) to avoid making the state
 * engine on the 2026x cards barf.
 */

static void pdc2026x_dev_config(struct ata_port *ap, struct ata_device *adev)
{
        adev->max_sectors = 256;
}

static struct scsi_host_template pdc_sht = {
        .module = THIS_MODULE,
        .name = DRV_NAME,
        .ioctl = ata_scsi_ioctl,
        .queuecommand = ata_scsi_queuecmd,
        .can_queue = ATA_DEF_QUEUE,
        .this_id = ATA_SHT_THIS_ID,
        .sg_tablesize = LIBATA_MAX_PRD,
        .max_sectors = ATA_MAX_SECTORS,
        .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
        .emulated = ATA_SHT_EMULATED,
        .use_clustering = ATA_SHT_USE_CLUSTERING,
        .proc_name = DRV_NAME,
        .dma_boundary = ATA_DMA_BOUNDARY,
        .slave_configure = ata_scsi_slave_config,
        .bios_param = ata_std_bios_param,
};

static struct ata_port_operations pdc2024x_port_ops = {
        .port_disable = ata_port_disable,
        .set_piomode = pdc_set_piomode,
        .set_dmamode = pdc_set_dmamode,
        .mode_filter = ata_pci_default_filter,
        .tf_load = ata_tf_load,
        .tf_read = ata_tf_read,
        .check_status = ata_check_status,
        .exec_command = ata_exec_command,
        .dev_select = ata_std_dev_select,

        .freeze = ata_bmdma_freeze,
        .thaw = ata_bmdma_thaw,
        .error_handler = pdc2024x_error_handler,
        .post_internal_cmd = ata_bmdma_post_internal_cmd,

        .bmdma_setup = ata_bmdma_setup,
        .bmdma_start = ata_bmdma_start,
        .bmdma_stop = ata_bmdma_stop,
        .bmdma_status = ata_bmdma_status,

        .qc_prep = ata_qc_prep,
        .qc_issue = ata_qc_issue_prot,
        .data_xfer = ata_pio_data_xfer,

        .irq_handler = ata_interrupt,
        .irq_clear = ata_bmdma_irq_clear,

        .port_start = ata_port_start,
        .port_stop = ata_port_stop,
        .host_stop = ata_host_stop,
};

static struct ata_port_operations pdc2026x_port_ops = {
        .port_disable = ata_port_disable,
        .set_piomode = pdc_set_piomode,
        .set_dmamode = pdc_set_dmamode,
        .mode_filter = ata_pci_default_filter,
        .tf_load = ata_tf_load,
        .tf_read = ata_tf_read,
        .check_status = ata_check_status,
        .exec_command = ata_exec_command,
        .dev_select = ata_std_dev_select,
        .dev_config = pdc2026x_dev_config,

        .freeze = ata_bmdma_freeze,
        .thaw = ata_bmdma_thaw,
        .error_handler = pdc2026x_error_handler,
        .post_internal_cmd = ata_bmdma_post_internal_cmd,

        .bmdma_setup = ata_bmdma_setup,
        .bmdma_start = pdc2026x_bmdma_start,
        .bmdma_stop = pdc2026x_bmdma_stop,
        .bmdma_status = ata_bmdma_status,

        .qc_prep = ata_qc_prep,
        .qc_issue = ata_qc_issue_prot,
        .data_xfer = ata_pio_data_xfer,

        .irq_handler = ata_interrupt,
        .irq_clear = ata_bmdma_irq_clear,

        .port_start = ata_port_start,
        .port_stop = ata_port_stop,
        .host_stop = ata_host_stop,
};

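/**
 * pdc_init_one - register a PDC202xx ATA controller
 * @dev: PCI device found
 * @id: matching entry in the pdc[] device table
 *
 * id->driver_data indexes info[]: 0 selects the PDC20246 (UDMA2)
 * setup, 1 the PDC20262/20263 (UDMA4) setup and 2 the
 * PDC20265/20267 (UDMA5) setup. Controllers sitting behind a
 * Promise I2O RAID i960 bridge are skipped.
 */
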
static int pdc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
        static struct ata_port_info info[3] = {
                {
                        .sht = &pdc_sht,
                        .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
                        .pio_mask = 0x1f,
                        .mwdma_mask = 0x07,
                        .udma_mask = ATA_UDMA2,
                        .port_ops = &pdc2024x_port_ops
                },
                {
                        .sht = &pdc_sht,
                        .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
                        .pio_mask = 0x1f,
                        .mwdma_mask = 0x07,
                        .udma_mask = ATA_UDMA4,
                        .port_ops = &pdc2026x_port_ops
                },
                {
                        .sht = &pdc_sht,
                        .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
                        .pio_mask = 0x1f,
                        .mwdma_mask = 0x07,
                        .udma_mask = ATA_UDMA5,
                        .port_ops = &pdc2026x_port_ops
                }
        };
        static struct ata_port_info *port_info[2];

        port_info[0] = port_info[1] = &info[id->driver_data];

        if (dev->device == PCI_DEVICE_ID_PROMISE_20265) {
                struct pci_dev *bridge = dev->bus->self;
                /* Don't grab anything behind a Promise I2O RAID */
                if (bridge && bridge->vendor == PCI_VENDOR_ID_INTEL) {
                        if (bridge->device == PCI_DEVICE_ID_INTEL_I960)
                                return -ENODEV;
                        if (bridge->device == PCI_DEVICE_ID_INTEL_I960RM)
                                return -ENODEV;
                }
        }
        return ata_pci_init_one(dev, port_info, 2);
}

static struct pci_device_id pdc[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20246), 0 },
        { PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20262), 1 },
        { PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20263), 1 },
        { PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20265), 2 },
        { PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20267), 2 },
        { 0, },
};

static struct pci_driver pdc_pci_driver = {
        .name = DRV_NAME,
        .id_table = pdc,
        .probe = pdc_init_one,
        .remove = ata_pci_remove_one,
};

static int __init pdc_init(void)
{
        return pci_register_driver(&pdc_pci_driver);
}

static void __exit pdc_exit(void)
{
        pci_unregister_driver(&pdc_pci_driver);
}

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Promise 2024x and 20262-20267");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pdc);
MODULE_VERSION(DRV_VERSION);

module_init(pdc_init);
module_exit(pdc_exit);