/*
 * sata_nv.c - NVIDIA nForce SATA
 *
 * Copyright 2004 NVIDIA Corp. All rights reserved.
 * Copyright 2004 Andrew Chew
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * No hardware documentation available outside of NVIDIA.
 * This driver programs the NVIDIA SATA controller in a similar
 * fashion as with other PCI IDE BMDMA controllers, with a few
 * NV-specific details such as register offsets, SATA phy location,
 * hotplug info, etc.
 *
 * CK804/MCP04 controllers support an alternate programming interface
 * similar to the ADMA specification (with some modifications).
 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 * sent through the legacy interface.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME "sata_nv"
#define DRV_VERSION "3.2"

#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL

enum {
	NV_PORTS = 2,
	NV_PIO_MASK = 0x1f,
	NV_MWDMA_MASK = 0x07,
	NV_UDMA_MASK = 0x7f,
	NV_PORT0_SCR_REG_OFFSET = 0x00,
	NV_PORT1_SCR_REG_OFFSET = 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS = 0x10,
	NV_INT_ENABLE = 0x11,
	NV_INT_STATUS_CK804 = 0x440,
	NV_INT_ENABLE_CK804 = 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV = 0x01,
	NV_INT_PM = 0x02,
	NV_INT_ADDED = 0x04,
	NV_INT_REMOVED = 0x08,

	NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */

	NV_INT_ALL = 0x0f,
	NV_INT_MASK = NV_INT_DEV |
		      NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG = 0x12,
	NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20 = 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),

	NV_ADMA_MAX_CPBS = 32,
	NV_ADMA_CPB_SZ = 128,
	NV_ADMA_APRD_SZ = 16,
	NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
			     NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
				   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN = 0x400,
	NV_ADMA_GEN_CTL = 0x00,
	NV_ADMA_NOTIFIER_CLEAR = 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT = 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE = 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL = 0x40,
	NV_ADMA_CPB_COUNT = 0x42,
	NV_ADMA_NEXT_CPB_IDX = 0x43,
	NV_ADMA_STAT = 0x44,
	NV_ADMA_CPB_BASE_LOW = 0x48,
	NV_ADMA_CPB_BASE_HIGH = 0x4C,
	NV_ADMA_APPEND = 0x50,
	NV_ADMA_NOTIFIER = 0x68,
	NV_ADMA_NOTIFIER_ERROR = 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
	NV_ADMA_CTL_GO = (1 << 7),
	NV_ADMA_CTL_AIEN = (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE = (1 << 0),
	NV_CPB_RESP_ATA_ERR = (1 << 3),
	NV_CPB_RESP_CMD_ERR = (1 << 4),
	NV_CPB_RESP_CPB_ERR = (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID = (1 << 0),
	NV_CPB_CTL_QUEUE = (1 << 1),
	NV_CPB_CTL_APRD_VALID = (1 << 2),
	NV_CPB_CTL_IEN = (1 << 3),
	NV_CPB_CTL_FPDMA = (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE = (1 << 1),
	NV_APRD_END = (1 << 2),
	NV_APRD_CONT = (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT = (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
	NV_ADMA_STAT_HOTPLUG = (1 << 2),
	NV_ADMA_STAT_CPBERR = (1 << 4),
	NV_ADMA_STAT_SERROR = (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
	NV_ADMA_STAT_IDLE = (1 << 8),
	NV_ADMA_STAT_LEGACY = (1 << 9),
	NV_ADMA_STAT_STOPPED = (1 << 10),
	NV_ADMA_STAT_DONE = (1 << 12),
	NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
			   NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),

};

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64 addr;
	__le32 len;
	u8 flags;
	u8 packet_len;
	__le16 reserved;
};

enum nv_adma_regbits {
	CMDEND = (1 << 15),	/* end of command list */
	WNB    = (1 << 14),	/* wait-not-BSY */
	IGN    = (1 << 13),	/* ignore this entry */
	CS1n   = (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2    = (1 << (2 + 8)),
	DA1    = (1 << (1 + 8)),
	DA0    = (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8 resp_flags;			/* 0 */
	u8 reserved1;			/* 1 */
	u8 ctl_flags;			/* 2 */
	/* len is length of taskfile in 64 bit words */
	u8 len;				/* 3 */
	u8 tag;				/* 4 */
	u8 next_cpb_idx;		/* 5 */
	__le16 reserved2;		/* 6-7 */
	__le16 tf[12];			/* 8-31 */
	struct nv_adma_prd aprd[5];	/* 32-111 */
	__le64 next_aprd;		/* 112-119 */
	__le64 reserved3;		/* 120-127 */
};


struct nv_adma_port_priv {
	struct nv_adma_cpb *cpb;
	dma_addr_t cpb_dma;
	struct nv_adma_prd *aprd;
	dma_addr_t aprd_dma;
	u8 flags;
};

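/* Test the per-port interrupt pending bit in the ADMA general control/status
   register; as programmed below, the two ports' status fields sit 12 bits
   apart starting at bit 19. */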
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))

static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
static u8 nv_adma_bmdma_status(struct ata_port *ap);

enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA
};

static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
	{ PCI_VDEVICE(NVIDIA, 0x045c), GENERIC },	/* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045d), GENERIC },	/* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045e), GENERIC },	/* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045f), GENERIC },	/* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x0550), GENERIC },	/* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0551), GENERIC },	/* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0552), GENERIC },	/* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0553), GENERIC },	/* MCP67 */
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },

	{ }	/* terminate list */
};

static struct pci_driver nv_pci_driver = {
	.name = DRV_NAME,
	.id_table = nv_pci_tbl,
	.probe = nv_init_one,
	.remove = ata_pci_remove_one,
};

static struct scsi_host_template nv_sht = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.ioctl = ata_scsi_ioctl,
	.queuecommand = ata_scsi_queuecmd,
	.can_queue = ATA_DEF_QUEUE,
	.this_id = ATA_SHT_THIS_ID,
	.sg_tablesize = LIBATA_MAX_PRD,
	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
	.emulated = ATA_SHT_EMULATED,
	.use_clustering = ATA_SHT_USE_CLUSTERING,
	.proc_name = DRV_NAME,
	.dma_boundary = ATA_DMA_BOUNDARY,
	.slave_configure = ata_scsi_slave_config,
	.slave_destroy = ata_scsi_slave_destroy,
	.bios_param = ata_std_bios_param,
};

static struct scsi_host_template nv_adma_sht = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.ioctl = ata_scsi_ioctl,
	.queuecommand = ata_scsi_queuecmd,
	.can_queue = NV_ADMA_MAX_CPBS,
	.this_id = ATA_SHT_THIS_ID,
	.sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
	.emulated = ATA_SHT_EMULATED,
	.use_clustering = ATA_SHT_USE_CLUSTERING,
	.proc_name = DRV_NAME,
	.dma_boundary = NV_ADMA_DMA_BOUNDARY,
	.slave_configure = nv_adma_slave_config,
	.slave_destroy = ata_scsi_slave_destroy,
	.bios_param = ata_std_bios_param,
};

static const struct ata_port_operations nv_generic_ops = {
	.port_disable = ata_port_disable,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.exec_command = ata_exec_command,
	.check_status = ata_check_status,
	.dev_select = ata_std_dev_select,
	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,
	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,
	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.data_xfer = ata_pio_data_xfer,
	.irq_handler = nv_generic_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.scr_read = nv_scr_read,
	.scr_write = nv_scr_write,
	.port_start = ata_port_start,
	.port_stop = ata_port_stop,
	.host_stop = ata_pci_host_stop,
};

static const struct ata_port_operations nv_nf2_ops = {
	.port_disable = ata_port_disable,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.exec_command = ata_exec_command,
	.check_status = ata_check_status,
	.dev_select = ata_std_dev_select,
	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,
	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,
	.freeze = nv_nf2_freeze,
	.thaw = nv_nf2_thaw,
	.error_handler = nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.data_xfer = ata_pio_data_xfer,
	.irq_handler = nv_nf2_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.scr_read = nv_scr_read,
	.scr_write = nv_scr_write,
	.port_start = ata_port_start,
	.port_stop = ata_port_stop,
	.host_stop = ata_pci_host_stop,
};

static const struct ata_port_operations nv_ck804_ops = {
	.port_disable = ata_port_disable,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.exec_command = ata_exec_command,
	.check_status = ata_check_status,
	.dev_select = ata_std_dev_select,
	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,
	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,
	.freeze = nv_ck804_freeze,
	.thaw = nv_ck804_thaw,
	.error_handler = nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.data_xfer = ata_pio_data_xfer,
	.irq_handler = nv_ck804_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.scr_read = nv_scr_read,
	.scr_write = nv_scr_write,
	.port_start = ata_port_start,
	.port_stop = ata_port_stop,
	.host_stop = nv_ck804_host_stop,
};

static const struct ata_port_operations nv_adma_ops = {
	.port_disable = ata_port_disable,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_atapi_dma = nv_adma_check_atapi_dma,
	.exec_command = ata_exec_command,
	.check_status = ata_check_status,
	.dev_select = ata_std_dev_select,
	.bmdma_setup = nv_adma_bmdma_setup,
	.bmdma_start = nv_adma_bmdma_start,
	.bmdma_stop = nv_adma_bmdma_stop,
	.bmdma_status = nv_adma_bmdma_status,
	.qc_prep = nv_adma_qc_prep,
	.qc_issue = nv_adma_qc_issue,
	.freeze = nv_ck804_freeze,
	.thaw = nv_ck804_thaw,
	.error_handler = nv_adma_error_handler,
	.post_internal_cmd = nv_adma_bmdma_stop,
	.data_xfer = ata_mmio_data_xfer,
	.irq_handler = nv_adma_interrupt,
	.irq_clear = nv_adma_irq_clear,
	.scr_read = nv_scr_read,
	.scr_write = nv_scr_write,
	.port_start = nv_adma_port_start,
	.port_stop = nv_adma_port_stop,
	.host_stop = nv_adma_host_stop,
};

static struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.sht = &nv_sht,
		.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			 ATA_FLAG_HRST_TO_RESUME,
		.pio_mask = NV_PIO_MASK,
		.mwdma_mask = NV_MWDMA_MASK,
		.udma_mask = NV_UDMA_MASK,
		.port_ops = &nv_generic_ops,
	},
	/* nforce2/3 */
	{
		.sht = &nv_sht,
		.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			 ATA_FLAG_HRST_TO_RESUME,
		.pio_mask = NV_PIO_MASK,
		.mwdma_mask = NV_MWDMA_MASK,
		.udma_mask = NV_UDMA_MASK,
		.port_ops = &nv_nf2_ops,
	},
	/* ck804 */
	{
		.sht = &nv_sht,
		.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			 ATA_FLAG_HRST_TO_RESUME,
		.pio_mask = NV_PIO_MASK,
		.mwdma_mask = NV_MWDMA_MASK,
		.udma_mask = NV_UDMA_MASK,
		.port_ops = &nv_ck804_ops,
	},
	/* ADMA */
	{
		.sht = &nv_adma_sht,
		.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			 ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask = NV_PIO_MASK,
		.mwdma_mask = NV_MWDMA_MASK,
		.udma_mask = NV_UDMA_MASK,
		.port_ops = &nv_adma_ops,
	},
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled = 1;

static inline void __iomem *__nv_adma_ctl_block(void __iomem *mmio,
						unsigned int port_no)
{
	mmio += NV_ADMA_PORT + port_no * NV_ADMA_PORT_SIZE;
	return mmio;
}

static inline void __iomem *nv_adma_ctl_block(struct ata_port *ap)
{
	return __nv_adma_ctl_block(ap->host->mmio_base, ap->port_no);
}

static inline void __iomem *nv_adma_gen_block(struct ata_port *ap)
{
	return (ap->host->mmio_base + NV_ADMA_GEN);
}

static inline void __iomem *nv_adma_notifier_clear_block(struct ata_port *ap)
{
	return (nv_adma_gen_block(ap) + NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no));
}

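/* Switch the port between ADMA mode and legacy "register" mode by toggling
   the GO bit in NV_ADMA_CTL. pp->flags tracks which mode the port is in so
   that the switch is only performed when actually needed. */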
static void nv_adma_register_mode(struct ata_port *ap)
{
	void __iomem *mmio = nv_adma_ctl_block(ap);
	struct nv_adma_port_priv *pp = ap->private_data;
	u16 tmp;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
	void __iomem *mmio = nv_adma_ctl_block(ap);
	struct nv_adma_port_priv *pp = ap->private_data;
	u16 tmp;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

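/* Per-device SCSI slave configuration. ATAPI devices must use the legacy
   BMDMA interface, so for them the DMA limits are tightened and ADMA is
   disabled for the port in PCI config register 20; ATA disks get the full
   ADMA limits. */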
static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u64 bounce_limit;
	unsigned long segment_boundary;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		bounce_limit = ATA_DMA_MASK;
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	}
	else {
		bounce_limit = *ap->dev->dma_mask;
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if(ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if(adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	}
	else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if(current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
	return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

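/* Encode an ATA taskfile as register/value pairs in the CPB's tf[] area.
   WNB is set on the first entry and CMDEND on the last; for non-LBA48
   commands the HOB slots are written as IGN (ignore) entries. */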
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device | WNB);

	if ((tf->flags & ATA_TFLAG_LBA48) == 0) {
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
	}
	else {
		cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature);
		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
	}
	cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
	cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	return idx;
}

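/* Examine the response flags of one CPB and, if it has completed (or the
   caller is forcing an error), fill in the qc error mask and complete the
   corresponding libata command. */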
static void nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	int complete = 0, have_err = 0;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (flags & NV_CPB_RESP_DONE) {
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		complete = 1;
	}
	if (flags & NV_CPB_RESP_ATA_ERR) {
		ata_port_printk(ap, KERN_ERR, "CPB flags ATA err, flags=0x%x\n", flags);
		have_err = 1;
		complete = 1;
	}
	if (flags & NV_CPB_RESP_CMD_ERR) {
		ata_port_printk(ap, KERN_ERR, "CPB flags CMD err, flags=0x%x\n", flags);
		have_err = 1;
		complete = 1;
	}
	if (flags & NV_CPB_RESP_CPB_ERR) {
		ata_port_printk(ap, KERN_ERR, "CPB flags CPB err, flags=0x%x\n", flags);
		have_err = 1;
		complete = 1;
	}
	if(complete || force_err)
	{
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		if(likely(qc)) {
			u8 ata_status = 0;
			/* Only use the ATA port status for non-NCQ commands.
			   For NCQ commands the current status may have nothing to do with
			   the command just completed. */
			if(qc->tf.protocol != ATA_PROT_NCQ)
				ata_status = readb(nv_adma_ctl_block(ap) + (ATA_REG_STATUS * 4));

			if(have_err || force_err)
				ata_status |= ATA_ERR;

			qc->err_mask |= ac_err_mask(ata_status);
			DPRINTK("Completing qc from tag %d with err_mask %u\n",cpb_num,
				qc->err_mask);
			ata_qc_complete(qc);
		}
	}
}

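/* Handle one port's slice of the legacy interrupt status byte: freeze on
   hotplug events, ignore interrupts that are not ours, and otherwise hand
   the active command to ata_host_intr(). */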
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
	int handled;

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	handled = ata_host_intr(ap, qc);
	if (unlikely(!handled)) {
		/* spurious, clear it */
		ata_check_status(ap);
	}

	return 1;
}

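/* ADMA interrupt handler. Ports left in register mode are serviced via the
   legacy CK804 status byte; ports in ADMA mode have their notifier and
   status registers read and each indicated CPB checked for completion. */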
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = nv_adma_ctl_block(ap);
			u16 status;
			u32 gen_ctl;
			int have_global_err = 0;
			u32 notifier, notifier_error;

			/* if in ATA register mode, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);

			if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();

			/* freeze if hotplugged */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | NV_ADMA_STAT_HOTUNPLUG))) {
				ata_port_printk(ap, KERN_NOTICE, "Hotplug event, freezing\n");
				ata_port_freeze(ap);
				handled++;
				continue;
			}

			if (status & NV_ADMA_STAT_TIMEOUT) {
				ata_port_printk(ap, KERN_ERR, "timeout, stat=0x%x\n", status);
				have_global_err = 1;
			}
			if (status & NV_ADMA_STAT_CPBERR) {
				ata_port_printk(ap, KERN_ERR, "CPB error, stat=0x%x\n", status);
				have_global_err = 1;
			}
			if ((status & NV_ADMA_STAT_DONE) || have_global_err) {
				/** Check CPBs for completed commands */

				if(ata_tag_valid(ap->active_tag))
					/* Non-NCQ command */
					nv_adma_check_cpb(ap, ap->active_tag, have_global_err ||
						(notifier_error & (1 << ap->active_tag)));
				else {
					int pos;
					u32 active = ap->sactive;
					while( (pos = ffs(active)) ) {
						pos--;
						nv_adma_check_cpb(ap, pos, have_global_err ||
							(notifier_error & (1 << pos)) );
						active &= ~(1 << pos );
					}
				}
			}

			handled++; /* irq handled if we got here */
		}
	}

	if(notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		writel(notifier_clears[0],
			nv_adma_notifier_clear_block(host->ports[0]));
		writel(notifier_clears[1],
			nv_adma_notifier_clear_block(host->ports[1]));
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
	void __iomem *mmio = nv_adma_ctl_block(ap);
	u16 status = readw(mmio + NV_ADMA_STAT);
	u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
	u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
	unsigned long dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;

	/* clear ADMA status */
	writew(status, mmio + NV_ADMA_STAT);
	writel(notifier | notifier_error,
	       nv_adma_notifier_clear_block(ap));

	/** clear legacy status */
	outb(inb(dma_stat_addr), dma_stat_addr);
}

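/* Legacy BMDMA helpers. These are only valid while the port is in register
   mode; the WARN_ON()s below flag any use while ADMA mode is active. */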
static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 dmactl;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		WARN_ON(1);
		return;
	}

	/* load PRD table addr. */
	outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;

	outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ata_exec_command(ap, &qc->tf);
}

static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 dmactl;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		WARN_ON(1);
		return;
	}

	/* start host DMA transaction */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	outb(dmactl | ATA_DMA_START,
	     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}

static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_adma_port_priv *pp = ap->private_data;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	/* clear start/stop bit */
	outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
		ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_altstatus(ap);	/* dummy read */
}

static u8 nv_adma_bmdma_status(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;

	WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));

	return inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}

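/* Allocate and initialize per-port ADMA state: one coherent DMA block
   holding 32 CPBs followed by the overflow scatter/gather tables, then
   program the CPB base registers and leave the port in register mode. */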
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio = nv_adma_ctl_block(ap);
	u16 tmp;

	VPRINTK("ENTER\n");

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp) {
		rc = -ENOMEM;
		goto err_out;
	}

	mem = dma_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				 &mem_dma, GFP_KERNEL);

	if (!mem) {
		rc = -ENOMEM;
		goto err_out_kfree;
	}
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16 ) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL );	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL );	/* flush posted write */

	return 0;

err_out_kfree:
	kfree(pp);
err_out:
	ata_port_stop(ap);
	return rc;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = nv_adma_ctl_block(ap);

	VPRINTK("ENTER\n");

	writew(0, mmio + NV_ADMA_CTL);

	ap->private_data = NULL;
	dma_free_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ, pp->cpb, pp->cpb_dma);
	kfree(pp);
	ata_port_stop(ap);
}

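/* Point the libata taskfile addresses at this port's ADMA register window
   in BAR5; as programmed below, the shadow registers are spaced four bytes
   apart there. */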
static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
{
	void __iomem *mmio = probe_ent->mmio_base;
	struct ata_ioports *ioport = &probe_ent->port[port];

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr = (unsigned long) mmio;
	ioport->data_addr = (unsigned long) mmio + (ATA_REG_DATA * 4);
	ioport->error_addr =
	ioport->feature_addr = (unsigned long) mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr = (unsigned long) mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr = (unsigned long) mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr = (unsigned long) mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr = (unsigned long) mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr = (unsigned long) mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr =
	ioport->command_addr = (unsigned long) mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr =
	ioport->ctl_addr = (unsigned long) mmio + 0x20;
}

static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < probe_ent->n_ports; i++)
		nv_adma_setup_port(probe_ent, i);

	for (i = 0; i < probe_ent->n_ports; i++) {
		void __iomem *mmio = __nv_adma_ctl_block(probe_ent->mmio_base, i);
		u16 tmp;

		/* enable interrupt, clear reset if not already clear */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
	}

	return 0;
}

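/* Build the ADMA physical region descriptors for a command. The first five
   segments live in the CPB itself; any remainder goes to this tag's slice of
   the external APRD table, linked in via next_aprd. */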
static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags;

	memset(aprd, 0, sizeof(struct nv_adma_prd));

	flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
}

static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	unsigned int idx;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;

	VPRINTK("ENTER\n");

	idx = 0;

	ata_for_each_sg(sg, qc) {
		aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
		nv_adma_fill_aprd(qc, sg, idx, aprd);
		idx++;
	}
	if (idx > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
}

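/* Prepare a command for issue. Non-DMA-mapped and ATAPI commands fall back
   to register mode and the standard ata_qc_prep(); everything else gets a
   CPB built for it, with the valid bit written only after a barrier. */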
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_APRD_VALID |
		       NV_CPB_CTL_IEN;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
	     (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		nv_adma_register_mode(qc->ap);
		ata_qc_prep(qc);
		return;
	}

	memset(cpb, 0, sizeof(struct nv_adma_cpb));

	cpb->len = 3;
	cpb->tag = qc->tag;
	cpb->next_cpb_idx = 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	nv_adma_fill_sg(qc, cpb);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
	   finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
}

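/* Issue a prepared command: either via the standard path in register mode,
   or by switching to ADMA mode and writing the tag to the APPEND register. */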
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = nv_adma_ctl_block(qc->ap);

	VPRINTK("ENTER\n");

	if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
	     (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/* use ATA register mode */
		VPRINTK("no dmamap or ATAPI, using ATA register mode: 0x%lx\n", qc->flags);
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();
	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n",qc->tag);

	return 0;
}

static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				// No request pending? Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->check_status(ap);
		}

	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled += nv_host_intr(ap, irq_stat);

		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = inb(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
	if (sc_reg > SCR_CONTROL)
		return 0xffffffffU;

	return ioread32((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return;

	iowrite32(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
}

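/* freeze/thaw callbacks: mask or unmask this port's 4-bit field in the
   interrupt enable register (I/O mapped on nForce2/3, MMIO on CK804). */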
static void nv_nf2_freeze(struct ata_port *ap)
{
	unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = inb(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	outb(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
	unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	outb(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = inb(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	outb(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->mmio_base;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->mmio_base;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static int nv_hardreset(struct ata_port *ap, unsigned int *class)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers. Don't classify on hardreset. For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &dummy);
}

static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

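/* ADMA error handler. If the error happened while in ADMA mode, dump the
   notifier/status state and any in-flight CPBs, drop back to register mode,
   invalidate all CPBs and reset the channel before running the normal
   BMDMA error handling path. */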
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = nv_adma_ctl_block(ap);
		int i;
		u16 tmp;

		u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
		u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
		u32 gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);
		u32 status = readw(mmio + NV_ADMA_STAT);

		ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
			"notifier_error 0x%X gen_ctl 0x%X status 0x%X\n",
			notifier, notifier_error, gen_ctl, status);

		for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
			struct nv_adma_cpb *cpb = &pp->cpb[i];
			if( cpb->ctl_flags || cpb->resp_flags )
				ata_port_printk(ap, KERN_ERR,
					"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
					i, cpb->ctl_flags, cpb->resp_flags);
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		ata_port_printk(ap, KERN_ERR, "Resetting port\n");

		/* Mark all of the CPBs as invalid to prevent them from being executed */
		for( i=0;i<NV_ADMA_MAX_CPBS;i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl( mmio + NV_ADMA_CTL );	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl( mmio + NV_ADMA_CTL );	/* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

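/* PCI probe. CK804/MCP04 parts are promoted to the ADMA port type when the
   'adma' module parameter is set (the default), in which case a 64-bit DMA
   mask is attempted before falling back to the 32-bit legacy mask. */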
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct ata_port_info *ppi[2];
	struct ata_probe_ent *probe_ent;
	int pci_dev_busy = 0;
	int rc;
	u32 bar;
	unsigned long base;
	unsigned long type = ent->driver_data;
	int mask_set = 0;

	// Make sure this is a SATA controller by counting the number of bars
	// (NVIDIA SATA controllers will always have six bars). Otherwise,
	// it's an IDE controller and we ignore it.
	for (bar=0; bar<6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if ( !printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pci_dev_busy = 1;
		goto err_out_disable;
	}

	if(type >= CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
		if(!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
		   !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
			mask_set = 1;
	}

	if(!mask_set) {
		rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			goto err_out_regions;
		rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			goto err_out_regions;
	}

	rc = -ENOMEM;

	ppi[0] = ppi[1] = &nv_port_info[type];
	probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
	if (!probe_ent)
		goto err_out_regions;

	probe_ent->mmio_base = pci_iomap(pdev, 5, 0);
	if (!probe_ent->mmio_base) {
		rc = -EIO;
		goto err_out_free_ent;
	}

	base = (unsigned long)probe_ent->mmio_base;

	probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	pci_set_master(pdev);

	if (type == ADMA) {
		rc = nv_adma_host_init(probe_ent);
		if (rc)
			goto err_out_iounmap;
	}

	rc = ata_device_add(probe_ent);
	if (rc != NV_PORTS)
		goto err_out_iounmap;

	kfree(probe_ent);

	return 0;

err_out_iounmap:
	pci_iounmap(pdev, probe_ent->mmio_base);
err_out_free_ent:
	kfree(probe_ent);
err_out_regions:
	pci_release_regions(pdev);
err_out_disable:
	if (!pci_dev_busy)
		pci_disable_device(pdev);
err_out:
	return rc;
}

static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);

	ata_pci_host_stop(host);
}

static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int i;
	u32 tmp32;

	for (i = 0; i < host->n_ports; i++) {
		void __iomem *mmio = __nv_adma_ctl_block(host->mmio_base, i);
		u16 tmp;

		/* disable interrupt */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp & ~NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
	}

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}

static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}

static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}

module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");