blob: f6d498e1cf80b11f0dd23299415633d5d00e86f1 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * sata_nv.c - NVIDIA nForce SATA
3 *
4 * Copyright 2004 NVIDIA Corp. All rights reserved.
5 * Copyright 2004 Andrew Chew
6 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 *
Jeff Garzikaa7e16d2005-08-29 15:12:56 -04008 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
Linus Torvalds1da177e2005-04-16 15:20:36 -070021 *
Jeff Garzikaf36d7f2005-08-28 20:18:39 -040022 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 * No hardware documentation available outside of NVIDIA.
27 * This driver programs the NVIDIA SATA controller in a similar
28 * fashion as with other PCI IDE BMDMA controllers, with a few
29 * NV-specific details such as register offsets, SATA phy location,
30 * hotplug info, etc.
31 *
Robert Hancockfbbb2622006-10-27 19:08:41 -070032 * CK804/MCP04 controllers support an alternate programming interface
33 * similar to the ADMA specification (with some modifications).
34 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35 * sent through the legacy interface.
36 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070037 */
38
Linus Torvalds1da177e2005-04-16 15:20:36 -070039#include <linux/kernel.h>
40#include <linux/module.h>
41#include <linux/pci.h>
42#include <linux/init.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/interrupt.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050046#include <linux/device.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <scsi/scsi_host.h>
Robert Hancockfbbb2622006-10-27 19:08:41 -070048#include <scsi/scsi_device.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070049#include <linux/libata.h>
50
51#define DRV_NAME "sata_nv"
Robert Hancock2dec7552006-11-26 14:20:19 -060052#define DRV_VERSION "3.2"
Robert Hancockfbbb2622006-10-27 19:08:41 -070053
54#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
Linus Torvalds1da177e2005-04-16 15:20:36 -070055
Jeff Garzik10ad05d2006-03-22 23:50:50 -050056enum {
57 NV_PORTS = 2,
58 NV_PIO_MASK = 0x1f,
59 NV_MWDMA_MASK = 0x07,
60 NV_UDMA_MASK = 0x7f,
61 NV_PORT0_SCR_REG_OFFSET = 0x00,
62 NV_PORT1_SCR_REG_OFFSET = 0x40,
Linus Torvalds1da177e2005-04-16 15:20:36 -070063
Tejun Heo27e4b272006-06-17 15:49:55 +090064 /* INT_STATUS/ENABLE */
Jeff Garzik10ad05d2006-03-22 23:50:50 -050065 NV_INT_STATUS = 0x10,
Jeff Garzik10ad05d2006-03-22 23:50:50 -050066 NV_INT_ENABLE = 0x11,
Tejun Heo27e4b272006-06-17 15:49:55 +090067 NV_INT_STATUS_CK804 = 0x440,
Jeff Garzik10ad05d2006-03-22 23:50:50 -050068 NV_INT_ENABLE_CK804 = 0x441,
Linus Torvalds1da177e2005-04-16 15:20:36 -070069
Tejun Heo27e4b272006-06-17 15:49:55 +090070 /* INT_STATUS/ENABLE bits */
71 NV_INT_DEV = 0x01,
72 NV_INT_PM = 0x02,
73 NV_INT_ADDED = 0x04,
74 NV_INT_REMOVED = 0x08,
75
76 NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */
77
Tejun Heo39f87582006-06-17 15:49:56 +090078 NV_INT_ALL = 0x0f,
Tejun Heo5a44eff2006-06-17 15:49:56 +090079 NV_INT_MASK = NV_INT_DEV |
80 NV_INT_ADDED | NV_INT_REMOVED,
Tejun Heo39f87582006-06-17 15:49:56 +090081
Tejun Heo27e4b272006-06-17 15:49:55 +090082 /* INT_CONFIG */
Jeff Garzik10ad05d2006-03-22 23:50:50 -050083 NV_INT_CONFIG = 0x12,
84 NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI
Linus Torvalds1da177e2005-04-16 15:20:36 -070085
Jeff Garzik10ad05d2006-03-22 23:50:50 -050086 // For PCI config register 20
87 NV_MCP_SATA_CFG_20 = 0x50,
88 NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
Robert Hancockfbbb2622006-10-27 19:08:41 -070089 NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
90 NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
91 NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
92 NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),
93
94 NV_ADMA_MAX_CPBS = 32,
95 NV_ADMA_CPB_SZ = 128,
96 NV_ADMA_APRD_SZ = 16,
97 NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
98 NV_ADMA_APRD_SZ,
99 NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
100 NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
101 NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
102 (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
103
104 /* BAR5 offset to ADMA general registers */
105 NV_ADMA_GEN = 0x400,
106 NV_ADMA_GEN_CTL = 0x00,
107 NV_ADMA_NOTIFIER_CLEAR = 0x30,
108
109 /* BAR5 offset to ADMA ports */
110 NV_ADMA_PORT = 0x480,
111
112 /* size of ADMA port register space */
113 NV_ADMA_PORT_SIZE = 0x100,
114
115 /* ADMA port registers */
116 NV_ADMA_CTL = 0x40,
117 NV_ADMA_CPB_COUNT = 0x42,
118 NV_ADMA_NEXT_CPB_IDX = 0x43,
119 NV_ADMA_STAT = 0x44,
120 NV_ADMA_CPB_BASE_LOW = 0x48,
121 NV_ADMA_CPB_BASE_HIGH = 0x4C,
122 NV_ADMA_APPEND = 0x50,
123 NV_ADMA_NOTIFIER = 0x68,
124 NV_ADMA_NOTIFIER_ERROR = 0x6C,
125
126 /* NV_ADMA_CTL register bits */
127 NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
128 NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
129 NV_ADMA_CTL_GO = (1 << 7),
130 NV_ADMA_CTL_AIEN = (1 << 8),
131 NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
132 NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),
133
134 /* CPB response flag bits */
135 NV_CPB_RESP_DONE = (1 << 0),
136 NV_CPB_RESP_ATA_ERR = (1 << 3),
137 NV_CPB_RESP_CMD_ERR = (1 << 4),
138 NV_CPB_RESP_CPB_ERR = (1 << 7),
139
140 /* CPB control flag bits */
141 NV_CPB_CTL_CPB_VALID = (1 << 0),
142 NV_CPB_CTL_QUEUE = (1 << 1),
143 NV_CPB_CTL_APRD_VALID = (1 << 2),
144 NV_CPB_CTL_IEN = (1 << 3),
145 NV_CPB_CTL_FPDMA = (1 << 4),
146
147 /* APRD flags */
148 NV_APRD_WRITE = (1 << 1),
149 NV_APRD_END = (1 << 2),
150 NV_APRD_CONT = (1 << 3),
151
152 /* NV_ADMA_STAT flags */
153 NV_ADMA_STAT_TIMEOUT = (1 << 0),
154 NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
155 NV_ADMA_STAT_HOTPLUG = (1 << 2),
156 NV_ADMA_STAT_CPBERR = (1 << 4),
157 NV_ADMA_STAT_SERROR = (1 << 5),
158 NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
159 NV_ADMA_STAT_IDLE = (1 << 8),
160 NV_ADMA_STAT_LEGACY = (1 << 9),
161 NV_ADMA_STAT_STOPPED = (1 << 10),
162 NV_ADMA_STAT_DONE = (1 << 12),
163 NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
164 NV_ADMA_STAT_TIMEOUT,
165
166 /* port flags */
167 NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
Robert Hancock2dec7552006-11-26 14:20:19 -0600168 NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
Robert Hancockfbbb2622006-10-27 19:08:41 -0700169
Jeff Garzik10ad05d2006-03-22 23:50:50 -0500170};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700171
Robert Hancockfbbb2622006-10-27 19:08:41 -0700172/* ADMA Physical Region Descriptor - one SG segment */
173struct nv_adma_prd {
174 __le64 addr;
175 __le32 len;
176 u8 flags;
177 u8 packet_len;
178 __le16 reserved;
179};
180
/* Control bits encoded into the 16-bit taskfile entries of a CPB. */
enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};
190
/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0 - NV_CPB_RESP_* status from HW */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 - NV_CPB_CTL_* */
	/* len is length of taskfile in 64 bit words */
	u8			len;           /* 3 */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 - taskfile entries, see nv_adma_regbits */
	struct nv_adma_prd	aprd[5];       /* 32-111 - inline SG segments */
	__le64			next_aprd;     /* 112-119 - DMA addr of overflow SG table */
	__le64			reserved3;     /* 120-127 */
};
209
/* Per-port private state for the ADMA interface. */
struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;		/* CPB array (CPU view) */
	dma_addr_t		cpb_dma;	/* CPB array (device view) */
	struct nv_adma_prd	*aprd;		/* overflow SG tables (CPU view) */
	dma_addr_t		aprd_dma;	/* overflow SG tables (device view) */
	u8			flags;		/* NV_ADMA_PORT_REGISTER_MODE etc. */
};
218
/* Nonzero if the ADMA general control register GCTL flags an interrupt
   for port PORT (one bit per port, 12 bits apart starting at bit 19). */
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))
220
Linus Torvalds1da177e2005-04-16 15:20:36 -0700221static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
Jeff Garzikcca39742006-08-24 03:19:22 -0400222static void nv_ck804_host_stop(struct ata_host *host);
David Howells7d12e782006-10-05 14:55:46 +0100223static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
224static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
225static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700226static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
227static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700228
Tejun Heo39f87582006-06-17 15:49:56 +0900229static void nv_nf2_freeze(struct ata_port *ap);
230static void nv_nf2_thaw(struct ata_port *ap);
231static void nv_ck804_freeze(struct ata_port *ap);
232static void nv_ck804_thaw(struct ata_port *ap);
233static void nv_error_handler(struct ata_port *ap);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700234static int nv_adma_slave_config(struct scsi_device *sdev);
Robert Hancock2dec7552006-11-26 14:20:19 -0600235static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700236static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
237static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
238static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
239static void nv_adma_irq_clear(struct ata_port *ap);
240static int nv_adma_port_start(struct ata_port *ap);
241static void nv_adma_port_stop(struct ata_port *ap);
242static void nv_adma_error_handler(struct ata_port *ap);
243static void nv_adma_host_stop(struct ata_host *host);
244static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
245static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
246static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
247static u8 nv_adma_bmdma_status(struct ata_port *ap);
Tejun Heo39f87582006-06-17 15:49:56 +0900248
Linus Torvalds1da177e2005-04-16 15:20:36 -0700249enum nv_host_type
250{
251 GENERIC,
252 NFORCE2,
Tejun Heo27e4b272006-06-17 15:49:55 +0900253 NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
Robert Hancockfbbb2622006-10-27 19:08:41 -0700254 CK804,
255 ADMA
Linus Torvalds1da177e2005-04-16 15:20:36 -0700256};
257
Jeff Garzik3b7d6972005-11-10 11:04:11 -0500258static const struct pci_device_id nv_pci_tbl[] = {
Jeff Garzik54bb3a92006-09-27 22:20:11 -0400259 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
260 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
261 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
262 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
263 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
264 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
265 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
266 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
267 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
268 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
269 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
270 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
271 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
272 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
Linus Torvalds1da177e2005-04-16 15:20:36 -0700273 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
274 PCI_ANY_ID, PCI_ANY_ID,
275 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
Daniel Drake541134c2005-07-03 13:44:39 +0100276 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
277 PCI_ANY_ID, PCI_ANY_ID,
278 PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400279
280 { } /* terminate list */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700281};
282
Linus Torvalds1da177e2005-04-16 15:20:36 -0700283static struct pci_driver nv_pci_driver = {
284 .name = DRV_NAME,
285 .id_table = nv_pci_tbl,
286 .probe = nv_init_one,
287 .remove = ata_pci_remove_one,
288};
289
Jeff Garzik193515d2005-11-07 00:59:37 -0500290static struct scsi_host_template nv_sht = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700291 .module = THIS_MODULE,
292 .name = DRV_NAME,
293 .ioctl = ata_scsi_ioctl,
294 .queuecommand = ata_scsi_queuecmd,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700295 .can_queue = ATA_DEF_QUEUE,
296 .this_id = ATA_SHT_THIS_ID,
297 .sg_tablesize = LIBATA_MAX_PRD,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700298 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
299 .emulated = ATA_SHT_EMULATED,
300 .use_clustering = ATA_SHT_USE_CLUSTERING,
301 .proc_name = DRV_NAME,
302 .dma_boundary = ATA_DMA_BOUNDARY,
303 .slave_configure = ata_scsi_slave_config,
Tejun Heoccf68c32006-05-31 18:28:09 +0900304 .slave_destroy = ata_scsi_slave_destroy,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700305 .bios_param = ata_std_bios_param,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700306};
307
Robert Hancockfbbb2622006-10-27 19:08:41 -0700308static struct scsi_host_template nv_adma_sht = {
309 .module = THIS_MODULE,
310 .name = DRV_NAME,
311 .ioctl = ata_scsi_ioctl,
312 .queuecommand = ata_scsi_queuecmd,
313 .can_queue = NV_ADMA_MAX_CPBS,
314 .this_id = ATA_SHT_THIS_ID,
315 .sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
Robert Hancockfbbb2622006-10-27 19:08:41 -0700316 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
317 .emulated = ATA_SHT_EMULATED,
318 .use_clustering = ATA_SHT_USE_CLUSTERING,
319 .proc_name = DRV_NAME,
320 .dma_boundary = NV_ADMA_DMA_BOUNDARY,
321 .slave_configure = nv_adma_slave_config,
322 .slave_destroy = ata_scsi_slave_destroy,
323 .bios_param = ata_std_bios_param,
324};
325
Tejun Heoada364e2006-06-17 15:49:56 +0900326static const struct ata_port_operations nv_generic_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700327 .port_disable = ata_port_disable,
328 .tf_load = ata_tf_load,
329 .tf_read = ata_tf_read,
330 .exec_command = ata_exec_command,
331 .check_status = ata_check_status,
332 .dev_select = ata_std_dev_select,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700333 .bmdma_setup = ata_bmdma_setup,
334 .bmdma_start = ata_bmdma_start,
335 .bmdma_stop = ata_bmdma_stop,
336 .bmdma_status = ata_bmdma_status,
337 .qc_prep = ata_qc_prep,
338 .qc_issue = ata_qc_issue_prot,
Tejun Heo39f87582006-06-17 15:49:56 +0900339 .freeze = ata_bmdma_freeze,
340 .thaw = ata_bmdma_thaw,
341 .error_handler = nv_error_handler,
342 .post_internal_cmd = ata_bmdma_post_internal_cmd,
Alan Coxa6b2c5d2006-05-22 16:59:59 +0100343 .data_xfer = ata_pio_data_xfer,
Tejun Heoada364e2006-06-17 15:49:56 +0900344 .irq_handler = nv_generic_interrupt,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700345 .irq_clear = ata_bmdma_irq_clear,
346 .scr_read = nv_scr_read,
347 .scr_write = nv_scr_write,
348 .port_start = ata_port_start,
349 .port_stop = ata_port_stop,
Tejun Heoe6faf082006-06-17 15:49:55 +0900350 .host_stop = ata_pci_host_stop,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700351};
352
Tejun Heoada364e2006-06-17 15:49:56 +0900353static const struct ata_port_operations nv_nf2_ops = {
354 .port_disable = ata_port_disable,
355 .tf_load = ata_tf_load,
356 .tf_read = ata_tf_read,
357 .exec_command = ata_exec_command,
358 .check_status = ata_check_status,
359 .dev_select = ata_std_dev_select,
Tejun Heoada364e2006-06-17 15:49:56 +0900360 .bmdma_setup = ata_bmdma_setup,
361 .bmdma_start = ata_bmdma_start,
362 .bmdma_stop = ata_bmdma_stop,
363 .bmdma_status = ata_bmdma_status,
364 .qc_prep = ata_qc_prep,
365 .qc_issue = ata_qc_issue_prot,
Tejun Heo39f87582006-06-17 15:49:56 +0900366 .freeze = nv_nf2_freeze,
367 .thaw = nv_nf2_thaw,
368 .error_handler = nv_error_handler,
369 .post_internal_cmd = ata_bmdma_post_internal_cmd,
Tejun Heoada364e2006-06-17 15:49:56 +0900370 .data_xfer = ata_pio_data_xfer,
371 .irq_handler = nv_nf2_interrupt,
372 .irq_clear = ata_bmdma_irq_clear,
373 .scr_read = nv_scr_read,
374 .scr_write = nv_scr_write,
375 .port_start = ata_port_start,
376 .port_stop = ata_port_stop,
377 .host_stop = ata_pci_host_stop,
378};
379
/* Port ops for CK804/MCP04 in legacy mode: CK804 freeze/thaw/interrupt
 * and a custom host_stop. */
static const struct ata_port_operations nv_ck804_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_pio_data_xfer,
	.irq_handler		= nv_ck804_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= nv_ck804_host_stop,
};
406
Robert Hancockfbbb2622006-10-27 19:08:41 -0700407static const struct ata_port_operations nv_adma_ops = {
408 .port_disable = ata_port_disable,
409 .tf_load = ata_tf_load,
410 .tf_read = ata_tf_read,
Robert Hancock2dec7552006-11-26 14:20:19 -0600411 .check_atapi_dma = nv_adma_check_atapi_dma,
Robert Hancockfbbb2622006-10-27 19:08:41 -0700412 .exec_command = ata_exec_command,
413 .check_status = ata_check_status,
414 .dev_select = ata_std_dev_select,
415 .bmdma_setup = nv_adma_bmdma_setup,
416 .bmdma_start = nv_adma_bmdma_start,
417 .bmdma_stop = nv_adma_bmdma_stop,
418 .bmdma_status = nv_adma_bmdma_status,
419 .qc_prep = nv_adma_qc_prep,
420 .qc_issue = nv_adma_qc_issue,
421 .freeze = nv_ck804_freeze,
422 .thaw = nv_ck804_thaw,
423 .error_handler = nv_adma_error_handler,
424 .post_internal_cmd = nv_adma_bmdma_stop,
425 .data_xfer = ata_mmio_data_xfer,
426 .irq_handler = nv_adma_interrupt,
427 .irq_clear = nv_adma_irq_clear,
428 .scr_read = nv_scr_read,
429 .scr_write = nv_scr_write,
430 .port_start = nv_adma_port_start,
431 .port_stop = nv_adma_port_stop,
432 .host_stop = nv_adma_host_stop,
433};
434
Tejun Heoada364e2006-06-17 15:49:56 +0900435static struct ata_port_info nv_port_info[] = {
436 /* generic */
437 {
438 .sht = &nv_sht,
Tejun Heo722420f2006-09-28 17:49:22 +0900439 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
440 ATA_FLAG_HRST_TO_RESUME,
Tejun Heoada364e2006-06-17 15:49:56 +0900441 .pio_mask = NV_PIO_MASK,
442 .mwdma_mask = NV_MWDMA_MASK,
443 .udma_mask = NV_UDMA_MASK,
444 .port_ops = &nv_generic_ops,
445 },
446 /* nforce2/3 */
447 {
448 .sht = &nv_sht,
Tejun Heo722420f2006-09-28 17:49:22 +0900449 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
450 ATA_FLAG_HRST_TO_RESUME,
Tejun Heoada364e2006-06-17 15:49:56 +0900451 .pio_mask = NV_PIO_MASK,
452 .mwdma_mask = NV_MWDMA_MASK,
453 .udma_mask = NV_UDMA_MASK,
454 .port_ops = &nv_nf2_ops,
455 },
456 /* ck804 */
457 {
458 .sht = &nv_sht,
Tejun Heo722420f2006-09-28 17:49:22 +0900459 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
460 ATA_FLAG_HRST_TO_RESUME,
Tejun Heoada364e2006-06-17 15:49:56 +0900461 .pio_mask = NV_PIO_MASK,
462 .mwdma_mask = NV_MWDMA_MASK,
463 .udma_mask = NV_UDMA_MASK,
464 .port_ops = &nv_ck804_ops,
465 },
Robert Hancockfbbb2622006-10-27 19:08:41 -0700466 /* ADMA */
467 {
468 .sht = &nv_adma_sht,
469 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
470 ATA_FLAG_MMIO | ATA_FLAG_NCQ,
471 .pio_mask = NV_PIO_MASK,
472 .mwdma_mask = NV_MWDMA_MASK,
473 .udma_mask = NV_UDMA_MASK,
474 .port_ops = &nv_adma_ops,
475 },
Linus Torvalds1da177e2005-04-16 15:20:36 -0700476};
477
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* Nonzero selects the ADMA interface on capable hardware. */
static int adma_enabled = 1;
485
Robert Hancock2dec7552006-11-26 14:20:19 -0600486static inline void __iomem *__nv_adma_ctl_block(void __iomem *mmio,
487 unsigned int port_no)
488{
489 mmio += NV_ADMA_PORT + port_no * NV_ADMA_PORT_SIZE;
490 return mmio;
491}
492
/* ADMA register block for @ap, derived from the host's MMIO base. */
static inline void __iomem *nv_adma_ctl_block(struct ata_port *ap)
{
	return __nv_adma_ctl_block(ap->host->mmio_base, ap->port_no);
}
497
/* ADMA general (port-independent) register block for @ap's host. */
static inline void __iomem *nv_adma_gen_block(struct ata_port *ap)
{
	return (ap->host->mmio_base + NV_ADMA_GEN);
}
502
/* Notifier-clear register for @ap (one 32-bit register per port). */
static inline void __iomem *nv_adma_notifier_clear_block(struct ata_port *ap)
{
	return (nv_adma_gen_block(ap) + NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no));
}
507
508static void nv_adma_register_mode(struct ata_port *ap)
509{
510 void __iomem *mmio = nv_adma_ctl_block(ap);
511 struct nv_adma_port_priv *pp = ap->private_data;
512 u16 tmp;
513
514 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
515 return;
516
517 tmp = readw(mmio + NV_ADMA_CTL);
518 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
519
520 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
521}
522
523static void nv_adma_mode(struct ata_port *ap)
524{
525 void __iomem *mmio = nv_adma_ctl_block(ap);
526 struct nv_adma_port_priv *pp = ap->private_data;
527 u16 tmp;
528
529 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
530 return;
531
532 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
533
534 tmp = readw(mmio + NV_ADMA_CTL);
535 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
536
537 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
538}
539
Robert Hancockfbbb2622006-10-27 19:08:41 -0700540static int nv_adma_slave_config(struct scsi_device *sdev)
541{
542 struct ata_port *ap = ata_shost_to_port(sdev->host);
Robert Hancock2dec7552006-11-26 14:20:19 -0600543 struct nv_adma_port_priv *pp = ap->private_data;
544 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700545 u64 bounce_limit;
546 unsigned long segment_boundary;
547 unsigned short sg_tablesize;
548 int rc;
Robert Hancock2dec7552006-11-26 14:20:19 -0600549 int adma_enable;
550 u32 current_reg, new_reg, config_mask;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700551
552 rc = ata_scsi_slave_config(sdev);
553
554 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
555 /* Not a proper libata device, ignore */
556 return rc;
557
558 if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
559 /*
560 * NVIDIA reports that ADMA mode does not support ATAPI commands.
561 * Therefore ATAPI commands are sent through the legacy interface.
562 * However, the legacy interface only supports 32-bit DMA.
563 * Restrict DMA parameters as required by the legacy interface
564 * when an ATAPI device is connected.
565 */
566 bounce_limit = ATA_DMA_MASK;
567 segment_boundary = ATA_DMA_BOUNDARY;
568 /* Subtract 1 since an extra entry may be needed for padding, see
569 libata-scsi.c */
570 sg_tablesize = LIBATA_MAX_PRD - 1;
Robert Hancock2dec7552006-11-26 14:20:19 -0600571
572 /* Since the legacy DMA engine is in use, we need to disable ADMA
573 on the port. */
574 adma_enable = 0;
575 nv_adma_register_mode(ap);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700576 }
577 else {
578 bounce_limit = *ap->dev->dma_mask;
579 segment_boundary = NV_ADMA_DMA_BOUNDARY;
580 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
Robert Hancock2dec7552006-11-26 14:20:19 -0600581 adma_enable = 1;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700582 }
Robert Hancock2dec7552006-11-26 14:20:19 -0600583
584 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700585
Robert Hancock2dec7552006-11-26 14:20:19 -0600586 if(ap->port_no == 1)
587 config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
588 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
589 else
590 config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
591 NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
592
593 if(adma_enable) {
594 new_reg = current_reg | config_mask;
595 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
596 }
597 else {
598 new_reg = current_reg & ~config_mask;
599 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
600 }
601
602 if(current_reg != new_reg)
603 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
604
Robert Hancockfbbb2622006-10-27 19:08:41 -0700605 blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
606 blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
607 blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
608 ata_port_printk(ap, KERN_INFO,
609 "bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
610 (unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
611 return rc;
612}
613
Robert Hancock2dec7552006-11-26 14:20:19 -0600614static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
615{
616 struct nv_adma_port_priv *pp = qc->ap->private_data;
617 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
618}
619
620static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
Robert Hancockfbbb2622006-10-27 19:08:41 -0700621{
622 unsigned int idx = 0;
623
624 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device | WNB);
625
626 if ((tf->flags & ATA_TFLAG_LBA48) == 0) {
627 cpb[idx++] = cpu_to_le16(IGN);
628 cpb[idx++] = cpu_to_le16(IGN);
629 cpb[idx++] = cpu_to_le16(IGN);
630 cpb[idx++] = cpu_to_le16(IGN);
631 cpb[idx++] = cpu_to_le16(IGN);
632 }
633 else {
634 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature);
635 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
636 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
637 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
638 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
639 }
640 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
641 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
642 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
643 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
644 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
645
646 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
647
648 return idx;
649}
650
Robert Hancockfbbb2622006-10-27 19:08:41 -0700651static void nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
652{
653 struct nv_adma_port_priv *pp = ap->private_data;
654 int complete = 0, have_err = 0;
Robert Hancock2dec7552006-11-26 14:20:19 -0600655 u8 flags = pp->cpb[cpb_num].resp_flags;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700656
657 VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
658
659 if (flags & NV_CPB_RESP_DONE) {
660 VPRINTK("CPB flags done, flags=0x%x\n", flags);
661 complete = 1;
662 }
663 if (flags & NV_CPB_RESP_ATA_ERR) {
664 ata_port_printk(ap, KERN_ERR, "CPB flags ATA err, flags=0x%x\n", flags);
665 have_err = 1;
666 complete = 1;
667 }
668 if (flags & NV_CPB_RESP_CMD_ERR) {
669 ata_port_printk(ap, KERN_ERR, "CPB flags CMD err, flags=0x%x\n", flags);
670 have_err = 1;
671 complete = 1;
672 }
673 if (flags & NV_CPB_RESP_CPB_ERR) {
674 ata_port_printk(ap, KERN_ERR, "CPB flags CPB err, flags=0x%x\n", flags);
675 have_err = 1;
676 complete = 1;
677 }
678 if(complete || force_err)
679 {
680 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
681 if(likely(qc)) {
682 u8 ata_status = 0;
683 /* Only use the ATA port status for non-NCQ commands.
684 For NCQ commands the current status may have nothing to do with
685 the command just completed. */
686 if(qc->tf.protocol != ATA_PROT_NCQ)
687 ata_status = readb(nv_adma_ctl_block(ap) + (ATA_REG_STATUS * 4));
688
689 if(have_err || force_err)
690 ata_status |= ATA_ERR;
691
692 qc->err_mask |= ac_err_mask(ata_status);
693 DPRINTK("Completing qc from tag %d with err_mask %u\n",cpb_num,
694 qc->err_mask);
695 ata_qc_complete(qc);
696 }
697 }
698}
699
Robert Hancock2dec7552006-11-26 14:20:19 -0600700static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
701{
702 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
703 int handled;
704
705 /* freeze if hotplugged */
706 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
707 ata_port_freeze(ap);
708 return 1;
709 }
710
711 /* bail out if not our interrupt */
712 if (!(irq_stat & NV_INT_DEV))
713 return 0;
714
715 /* DEV interrupt w/ no active qc? */
716 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
717 ata_check_status(ap);
718 return 1;
719 }
720
721 /* handle interrupt */
722 handled = ata_host_intr(ap, qc);
723 if (unlikely(!handled)) {
724 /* spurious, clear it */
725 ata_check_status(ap);
726 }
727
728 return 1;
729}
730
Robert Hancockfbbb2622006-10-27 19:08:41 -0700731static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
732{
733 struct ata_host *host = dev_instance;
734 int i, handled = 0;
Robert Hancock2dec7552006-11-26 14:20:19 -0600735 u32 notifier_clears[2];
Robert Hancockfbbb2622006-10-27 19:08:41 -0700736
737 spin_lock(&host->lock);
738
739 for (i = 0; i < host->n_ports; i++) {
740 struct ata_port *ap = host->ports[i];
Robert Hancock2dec7552006-11-26 14:20:19 -0600741 notifier_clears[i] = 0;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700742
743 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
744 struct nv_adma_port_priv *pp = ap->private_data;
745 void __iomem *mmio = nv_adma_ctl_block(ap);
746 u16 status;
747 u32 gen_ctl;
748 int have_global_err = 0;
749 u32 notifier, notifier_error;
750
751 /* if in ATA register mode, use standard ata interrupt handler */
752 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
Robert Hancock2dec7552006-11-26 14:20:19 -0600753 u8 irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804)
754 >> (NV_INT_PORT_SHIFT * i);
755 handled += nv_host_intr(ap, irq_stat);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700756 continue;
757 }
758
759 notifier = readl(mmio + NV_ADMA_NOTIFIER);
760 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
Robert Hancock2dec7552006-11-26 14:20:19 -0600761 notifier_clears[i] = notifier | notifier_error;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700762
763 gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);
764
Robert Hancockfbbb2622006-10-27 19:08:41 -0700765 if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
766 !notifier_error)
767 /* Nothing to do */
768 continue;
769
770 status = readw(mmio + NV_ADMA_STAT);
771
772 /* Clear status. Ensure the controller sees the clearing before we start
773 looking at any of the CPB statuses, so that any CPB completions after
774 this point in the handler will raise another interrupt. */
775 writew(status, mmio + NV_ADMA_STAT);
776 readw(mmio + NV_ADMA_STAT); /* flush posted write */
777 rmb();
778
779 /* freeze if hotplugged */
780 if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | NV_ADMA_STAT_HOTUNPLUG))) {
781 ata_port_printk(ap, KERN_NOTICE, "Hotplug event, freezing\n");
782 ata_port_freeze(ap);
783 handled++;
784 continue;
785 }
786
787 if (status & NV_ADMA_STAT_TIMEOUT) {
788 ata_port_printk(ap, KERN_ERR, "timeout, stat=0x%x\n", status);
789 have_global_err = 1;
790 }
791 if (status & NV_ADMA_STAT_CPBERR) {
792 ata_port_printk(ap, KERN_ERR, "CPB error, stat=0x%x\n", status);
793 have_global_err = 1;
794 }
795 if ((status & NV_ADMA_STAT_DONE) || have_global_err) {
796 /** Check CPBs for completed commands */
797
798 if(ata_tag_valid(ap->active_tag))
799 /* Non-NCQ command */
800 nv_adma_check_cpb(ap, ap->active_tag, have_global_err ||
801 (notifier_error & (1 << ap->active_tag)));
802 else {
803 int pos;
804 u32 active = ap->sactive;
805 while( (pos = ffs(active)) ) {
806 pos--;
807 nv_adma_check_cpb(ap, pos, have_global_err ||
808 (notifier_error & (1 << pos)) );
809 active &= ~(1 << pos );
810 }
811 }
812 }
813
814 handled++; /* irq handled if we got here */
815 }
816 }
Robert Hancock2dec7552006-11-26 14:20:19 -0600817
818 if(notifier_clears[0] || notifier_clears[1]) {
819 /* Note: Both notifier clear registers must be written
820 if either is set, even if one is zero, according to NVIDIA. */
821 writel(notifier_clears[0],
822 nv_adma_notifier_clear_block(host->ports[0]));
823 writel(notifier_clears[1],
824 nv_adma_notifier_clear_block(host->ports[1]));
825 }
Robert Hancockfbbb2622006-10-27 19:08:41 -0700826
827 spin_unlock(&host->lock);
828
829 return IRQ_RETVAL(handled);
830}
831
/* Clear every pending interrupt condition for this port: ADMA status,
 * ADMA notifiers, and the legacy BMDMA status register. Called by the
 * libata core to quiesce interrupt state. */
static void nv_adma_irq_clear(struct ata_port *ap)
{
	void __iomem *mmio = nv_adma_ctl_block(ap);
	u16 status = readw(mmio + NV_ADMA_STAT);
	u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
	u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
	unsigned long dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;

	/* clear ADMA status (write-1-to-clear: writing back what we read
	   clears exactly the bits that were pending) */
	writew(status, mmio + NV_ADMA_STAT);
	writel(notifier | notifier_error,
	       nv_adma_notifier_clear_block(ap));

	/* clear legacy status the same way */
	outb(inb(dma_stat_addr), dma_stat_addr);
}
848
/* Program the legacy BMDMA engine for a transfer and issue the taskfile.
 * Only valid while the ADMA channel is parked in register mode; ADMA-mode
 * commands go through the CPB path instead. */
static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 dmactl;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		WARN_ON(1);
		return;
	}

	/* load PRD table addr. */
	outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		/* ATA_DMA_WR means the DMA engine writes to memory,
		   i.e. a device-to-host (read) transfer */
		dmactl |= ATA_DMA_WR;

	outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ata_exec_command(ap, &qc->tf);
}
875
876static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
877{
Robert Hancock2dec7552006-11-26 14:20:19 -0600878 struct ata_port *ap = qc->ap;
879 struct nv_adma_port_priv *pp = ap->private_data;
880 u8 dmactl;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700881
Robert Hancock2dec7552006-11-26 14:20:19 -0600882 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
Robert Hancockfbbb2622006-10-27 19:08:41 -0700883 WARN_ON(1);
884 return;
885 }
886
Robert Hancock2dec7552006-11-26 14:20:19 -0600887 /* start host DMA transaction */
888 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
889 outb(dmactl | ATA_DMA_START,
890 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700891}
892
893static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
894{
Robert Hancock2dec7552006-11-26 14:20:19 -0600895 struct ata_port *ap = qc->ap;
896 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700897
Robert Hancock2dec7552006-11-26 14:20:19 -0600898 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
Robert Hancockfbbb2622006-10-27 19:08:41 -0700899 return;
900
Robert Hancock2dec7552006-11-26 14:20:19 -0600901 /* clear start/stop bit */
902 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
903 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
904
905 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
906 ata_altstatus(ap); /* dummy read */
Robert Hancockfbbb2622006-10-27 19:08:41 -0700907}
908
909static u8 nv_adma_bmdma_status(struct ata_port *ap)
910{
Robert Hancockfbbb2622006-10-27 19:08:41 -0700911 struct nv_adma_port_priv *pp = ap->private_data;
912
Robert Hancock2dec7552006-11-26 14:20:19 -0600913 WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));
Robert Hancockfbbb2622006-10-27 19:08:41 -0700914
Robert Hancock2dec7552006-11-26 14:20:19 -0600915 return inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700916}
917
/**
 *	nv_adma_port_start - allocate and initialize per-port ADMA state
 *	@ap: port to start
 *
 *	Allocates one DMA-coherent block holding the command parameter
 *	blocks (CPBs) followed by the scatter/gather (APRD) tables,
 *	programs its bus address into the controller, clears stale
 *	interrupt state, and pulses the channel reset, leaving the port
 *	in register mode.
 *
 *	RETURNS: 0 on success, negative errno on failure.
 */
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio = nv_adma_ctl_block(ap);
	u16 tmp;

	VPRINTK("ENTER\n");

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp) {
		rc = -ENOMEM;
		goto err_out;
	}

	mem = dma_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				 &mem_dma, GFP_KERNEL);

	if (!mem) {
		rc = -ENOMEM;
		goto err_out_kfree;
	}
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
	/* two 16-bit shifts rather than >> 32, which would be undefined
	   behavior when dma_addr_t is 32 bits wide */
	writel((mem_dma >> 16 ) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	/* pulse CHANNEL_RESET: set, flush, settle, clear, flush */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL );	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL );	/* flush posted write */

	return 0;

err_out_kfree:
	kfree(pp);
err_out:
	ata_port_stop(ap);
	return rc;
}
999
/* Tear down per-port ADMA state: disable the channel, then release the
 * DMA-coherent CPB/APRD block and the private data allocated by
 * nv_adma_port_start(). */
static void nv_adma_port_stop(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = nv_adma_ctl_block(ap);

	VPRINTK("ENTER\n");

	/* zeroing CTL disables the ADMA channel and its interrupts */
	writew(0, mmio + NV_ADMA_CTL);

	ap->private_data = NULL;
	dma_free_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ, pp->cpb, pp->cpb_dma);
	kfree(pp);
	ata_port_stop(ap);
}
1015
1016
/* Point one port's taskfile registers at the MMIO ADMA register block.
 * In ADMA mode the shadow registers live in MMIO space at 4-byte strides
 * rather than in legacy I/O port space. */
static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
{
	void __iomem *mmio = probe_ent->mmio_base;
	struct ata_ioports *ioport = &probe_ent->port[port];

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= (unsigned long) mmio;
	ioport->data_addr	= (unsigned long) mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= (unsigned long) mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= (unsigned long) mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= (unsigned long) mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= (unsigned long) mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= (unsigned long) mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= (unsigned long) mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= (unsigned long) mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= (unsigned long) mmio + 0x20;	/* control/altstatus offset */
}
1040
/* Host-wide ADMA bring-up: enable ADMA on both ports via PCI config
 * space, map each port's registers into the ADMA MMIO block, and enable
 * ADMA interrupts. Always returns 0. */
static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < probe_ent->n_ports; i++)
		nv_adma_setup_port(probe_ent, i);

	for (i = 0; i < probe_ent->n_ports; i++) {
		void __iomem *mmio = __nv_adma_ctl_block(probe_ent->mmio_base, i);
		u16 tmp;

		/* enable ADMA interrupts on this port (read-modify-write
		   preserves the other CTL bits) */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
	}

	return 0;
}
1072
1073static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1074 struct scatterlist *sg,
1075 int idx,
1076 struct nv_adma_prd *aprd)
1077{
Robert Hancock2dec7552006-11-26 14:20:19 -06001078 u8 flags;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001079
1080 memset(aprd, 0, sizeof(struct nv_adma_prd));
1081
1082 flags = 0;
1083 if (qc->tf.flags & ATA_TFLAG_WRITE)
1084 flags |= NV_APRD_WRITE;
1085 if (idx == qc->n_elem - 1)
1086 flags |= NV_APRD_END;
1087 else if (idx != 4)
1088 flags |= NV_APRD_CONT;
1089
1090 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
1091 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
Robert Hancock2dec7552006-11-26 14:20:19 -06001092 aprd->flags = flags;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001093}
1094
/* Build the APRD list for a command. The first five entries live inline
 * in the CPB itself (cpb->aprd[0..4]); any further entries spill into
 * this command's slot of the external per-port APRD table, which the
 * controller reaches through cpb->next_aprd. */
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	unsigned int idx;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;

	VPRINTK("ENTER\n");

	idx = 0;

	ata_for_each_sg(sg, qc) {
		/* 5 = number of APRD slots embedded in the CPB */
		aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
		nv_adma_fill_aprd(qc, sg, idx, aprd);
		idx++;
	}
	if (idx > 5)
		/* chain to this tag's slice of the external APRD table */
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
}
1114
/* Prepare a queued command. DMA-mapped non-ATAPI commands get a CPB
 * built for ADMA execution; everything else falls back to register mode
 * and the generic libata prep path. */
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_APRD_VALID |
		       NV_CPB_CTL_IEN;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
	   (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/* non-DMA or ATAPI: run through the legacy interface */
		nv_adma_register_mode(qc->ap);
		ata_qc_prep(qc);
		return;
	}

	memset(cpb, 0, sizeof(struct nv_adma_cpb));

	cpb->len		= 3;	/* CPB length in 32-byte units */
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	nv_adma_fill_sg(qc, cpb);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
	   finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
}
1151
/* Issue a prepared command. ADMA-eligible commands are appended to the
 * controller's CPB queue; others are issued through the legacy register
 * interface. Returns 0 on success (per ->qc_issue contract). */
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = nv_adma_ctl_block(qc->ap);

	VPRINTK("ENTER\n");

	if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
	   (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/* use ATA register mode */
		VPRINTK("no dmamap or ATAPI, using ATA register mode: 0x%lx\n", qc->flags);
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits;
	   wmb() ensures the CPB contents are visible before the append */
	wmb();
	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n",qc->tag);

	return 0;
}
1177
/* Interrupt handler for generic (pre-nForce2) controllers, which have no
 * dedicated interrupt status register: poll each enabled port and let the
 * libata core dispatch, reading status to ack spurious interrupts. */
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	/* irqsave: this handler may also be called from non-IRQ context */
	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				// No request pending?  Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->check_status(ap);
		}

	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
1210
Jeff Garzikcca39742006-08-24 03:19:22 -04001211static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
Tejun Heoada364e2006-06-17 15:49:56 +09001212{
1213 int i, handled = 0;
1214
Jeff Garzikcca39742006-08-24 03:19:22 -04001215 for (i = 0; i < host->n_ports; i++) {
1216 struct ata_port *ap = host->ports[i];
Tejun Heoada364e2006-06-17 15:49:56 +09001217
1218 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1219 handled += nv_host_intr(ap, irq_stat);
1220
1221 irq_stat >>= NV_INT_PORT_SHIFT;
1222 }
1223
1224 return IRQ_RETVAL(handled);
1225}
1226
/* nForce2/3 interrupt handler: the packed interrupt status byte lives in
 * I/O port space relative to port 0's SCR base. */
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = inb(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
1240
/* CK804/MCP04 interrupt handler: same dispatch as nForce2/3, but the
 * packed interrupt status byte is read from MMIO space. */
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
1254
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
1256{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257 if (sc_reg > SCR_CONTROL)
1258 return 0xffffffffU;
1259
Jeff Garzik02cbd922006-03-22 23:59:46 -05001260 return ioread32((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261}
1262
1263static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
1264{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265 if (sc_reg > SCR_CONTROL)
1266 return;
1267
Jeff Garzik02cbd922006-03-22 23:59:46 -05001268 iowrite32(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001269}
1270
Tejun Heo39f87582006-06-17 15:49:56 +09001271static void nv_nf2_freeze(struct ata_port *ap)
1272{
Jeff Garzikcca39742006-08-24 03:19:22 -04001273 unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
Tejun Heo39f87582006-06-17 15:49:56 +09001274 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1275 u8 mask;
1276
1277 mask = inb(scr_addr + NV_INT_ENABLE);
1278 mask &= ~(NV_INT_ALL << shift);
1279 outb(mask, scr_addr + NV_INT_ENABLE);
1280}
1281
1282static void nv_nf2_thaw(struct ata_port *ap)
1283{
Jeff Garzikcca39742006-08-24 03:19:22 -04001284 unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
Tejun Heo39f87582006-06-17 15:49:56 +09001285 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1286 u8 mask;
1287
1288 outb(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1289
1290 mask = inb(scr_addr + NV_INT_ENABLE);
1291 mask |= (NV_INT_MASK << shift);
1292 outb(mask, scr_addr + NV_INT_ENABLE);
1293}
1294
1295static void nv_ck804_freeze(struct ata_port *ap)
1296{
Jeff Garzikcca39742006-08-24 03:19:22 -04001297 void __iomem *mmio_base = ap->host->mmio_base;
Tejun Heo39f87582006-06-17 15:49:56 +09001298 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1299 u8 mask;
1300
1301 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1302 mask &= ~(NV_INT_ALL << shift);
1303 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1304}
1305
1306static void nv_ck804_thaw(struct ata_port *ap)
1307{
Jeff Garzikcca39742006-08-24 03:19:22 -04001308 void __iomem *mmio_base = ap->host->mmio_base;
Tejun Heo39f87582006-06-17 15:49:56 +09001309 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1310 u8 mask;
1311
1312 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1313
1314 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1315 mask |= (NV_INT_MASK << shift);
1316 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1317}
1318
/* Hardreset that deliberately discards the device class. */
static int nv_hardreset(struct ata_port *ap, unsigned int *class)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &dummy);
}
1329
/* Standard BMDMA error handling, substituting the signature-agnostic
 * nv_hardreset for the stock hardreset. */
static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
1335
/* ADMA-mode error handler: dump diagnostic state, force the channel back
 * to register mode, invalidate all CPBs, and pulse a channel reset before
 * handing off to the standard BMDMA error-handling path. */
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = nv_adma_ctl_block(ap);
		int i;
		u16 tmp;

		u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
		u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
		u32 gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);
		u32 status = readw(mmio + NV_ADMA_STAT);

		ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
			"notifier_error 0x%X gen_ctl 0x%X status 0x%X\n",
			notifier, notifier_error, gen_ctl, status);

		/* log every CPB that is still marked active or completed */
		for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
			struct nv_adma_cpb *cpb = &pp->cpb[i];
			if( cpb->ctl_flags || cpb->resp_flags )
				ata_port_printk(ap, KERN_ERR,
					"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
					i, cpb->ctl_flags, cpb->resp_flags);
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		ata_port_printk(ap, KERN_ERR, "Resetting port\n");

		/* Mark all of the CPBs as invalid to prevent them from being executed */
		for( i=0;i<NV_ADMA_MAX_CPBS;i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel: set CHANNEL_RESET, flush, settle, clear, flush */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl( mmio + NV_ADMA_CTL );	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl( mmio + NV_ADMA_CTL );	/* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
1385
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1387{
1388 static int printed_version = 0;
Jeff Garzik29da9f62006-09-25 21:56:33 -04001389 struct ata_port_info *ppi[2];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390 struct ata_probe_ent *probe_ent;
1391 int pci_dev_busy = 0;
1392 int rc;
1393 u32 bar;
Jeff Garzik02cbd922006-03-22 23:59:46 -05001394 unsigned long base;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001395 unsigned long type = ent->driver_data;
1396 int mask_set = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001397
1398 // Make sure this is a SATA controller by counting the number of bars
1399 // (NVIDIA SATA controllers will always have six bars). Otherwise,
1400 // it's an IDE controller and we ignore it.
1401 for (bar=0; bar<6; bar++)
1402 if (pci_resource_start(pdev, bar) == 0)
1403 return -ENODEV;
1404
Robert Hancockfbbb2622006-10-27 19:08:41 -07001405 if ( !printed_version++)
Jeff Garzika9524a72005-10-30 14:39:11 -05001406 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407
1408 rc = pci_enable_device(pdev);
1409 if (rc)
1410 goto err_out;
1411
1412 rc = pci_request_regions(pdev, DRV_NAME);
1413 if (rc) {
1414 pci_dev_busy = 1;
1415 goto err_out_disable;
1416 }
1417
Robert Hancockfbbb2622006-10-27 19:08:41 -07001418 if(type >= CK804 && adma_enabled) {
1419 dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
1420 type = ADMA;
1421 if(!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
1422 !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
1423 mask_set = 1;
1424 }
1425
1426 if(!mask_set) {
1427 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1428 if (rc)
1429 goto err_out_regions;
1430 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1431 if (rc)
1432 goto err_out_regions;
1433 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001434
1435 rc = -ENOMEM;
1436
Robert Hancockfbbb2622006-10-27 19:08:41 -07001437 ppi[0] = ppi[1] = &nv_port_info[type];
Jeff Garzik29da9f62006-09-25 21:56:33 -04001438 probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439 if (!probe_ent)
1440 goto err_out_regions;
1441
Jeff Garzik02cbd922006-03-22 23:59:46 -05001442 probe_ent->mmio_base = pci_iomap(pdev, 5, 0);
1443 if (!probe_ent->mmio_base) {
1444 rc = -EIO;
Tejun Heoe6faf082006-06-17 15:49:55 +09001445 goto err_out_free_ent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446 }
1447
Jeff Garzik02cbd922006-03-22 23:59:46 -05001448 base = (unsigned long)probe_ent->mmio_base;
1449
1450 probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
1451 probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
1452
Tejun Heoada364e2006-06-17 15:49:56 +09001453 /* enable SATA space for CK804 */
Robert Hancockfbbb2622006-10-27 19:08:41 -07001454 if (type >= CK804) {
Tejun Heoada364e2006-06-17 15:49:56 +09001455 u8 regval;
1456
1457 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
1458 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
1459 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
1460 }
1461
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462 pci_set_master(pdev);
1463
Robert Hancockfbbb2622006-10-27 19:08:41 -07001464 if (type == ADMA) {
1465 rc = nv_adma_host_init(probe_ent);
1466 if (rc)
1467 goto err_out_iounmap;
1468 }
1469
Linus Torvalds1da177e2005-04-16 15:20:36 -07001470 rc = ata_device_add(probe_ent);
1471 if (rc != NV_PORTS)
1472 goto err_out_iounmap;
1473
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474 kfree(probe_ent);
1475
1476 return 0;
1477
1478err_out_iounmap:
Jeff Garzik02cbd922006-03-22 23:59:46 -05001479 pci_iounmap(pdev, probe_ent->mmio_base);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480err_out_free_ent:
1481 kfree(probe_ent);
1482err_out_regions:
1483 pci_release_regions(pdev);
1484err_out_disable:
1485 if (!pci_dev_busy)
1486 pci_disable_device(pdev);
1487err_out:
1488 return rc;
1489}
1490
/* Host teardown for CK804: undo the SATA-space enable done at probe
 * time, then perform the generic PCI host stop. */
static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);

	ata_pci_host_stop(host);
}
1503
/* Host teardown for ADMA mode: mask per-port ADMA interrupts, disable
 * ADMA in PCI config space (mirroring nv_adma_host_init), then chain to
 * the CK804 host stop. */
static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int i;
	u32 tmp32;

	for (i = 0; i < host->n_ports; i++) {
		void __iomem *mmio = __nv_adma_ctl_block(host->mmio_base, i);
		u16 tmp;

		/* disable interrupt */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp & ~NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
	}

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}
1530
/* Module entry point: register the PCI driver. */
static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}
1535
/* Module exit point: unregister the PCI driver. */
static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}
1540
module_init(nv_init);
module_exit(nv_exit);
/* "adma" parameter (read-only at runtime, 0444): set adma=0 at load time
 * to disable ADMA mode on CK804/MCP04 controllers */
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");