blob: a57710107619803dcbd78561b83846998eb5b1f8 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * sata_nv.c - NVIDIA nForce SATA
3 *
4 * Copyright 2004 NVIDIA Corp. All rights reserved.
5 * Copyright 2004 Andrew Chew
6 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 *
Jeff Garzikaa7e16d2005-08-29 15:12:56 -04008 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
Linus Torvalds1da177e2005-04-16 15:20:36 -070021 *
Jeff Garzikaf36d7f2005-08-28 20:18:39 -040022 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 * No hardware documentation available outside of NVIDIA.
27 * This driver programs the NVIDIA SATA controller in a similar
28 * fashion as with other PCI IDE BMDMA controllers, with a few
29 * NV-specific details such as register offsets, SATA phy location,
30 * hotplug info, etc.
31 *
Robert Hancockfbbb2622006-10-27 19:08:41 -070032 * CK804/MCP04 controllers support an alternate programming interface
33 * similar to the ADMA specification (with some modifications).
34 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35 * sent through the legacy interface.
36 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070037 */
38
Linus Torvalds1da177e2005-04-16 15:20:36 -070039#include <linux/kernel.h>
40#include <linux/module.h>
41#include <linux/pci.h>
42#include <linux/init.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/interrupt.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050046#include <linux/device.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <scsi/scsi_host.h>
Robert Hancockfbbb2622006-10-27 19:08:41 -070048#include <scsi/scsi_device.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070049#include <linux/libata.h>
50
51#define DRV_NAME "sata_nv"
Robert Hancockfbbb2622006-10-27 19:08:41 -070052#define DRV_VERSION "3.1"
53
54#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
Linus Torvalds1da177e2005-04-16 15:20:36 -070055
/* Controller constants: register offsets (BAR5 unless noted) and bit masks */
enum {
	NV_PORTS			= 2,
	NV_PIO_MASK			= 0x1f,
	NV_MWDMA_MASK			= 0x07,
	NV_UDMA_MASK			= 0x7f,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	/* ADMA geometry: 32 CPBs of 128 bytes; each CPB embeds 5 APRDs and
	   the rest of a 1 KB slot holds the external APRD (s/g) table */
	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					  (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),

};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700170
/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;		/* bus address of the segment */
	__le32			len;		/* byte count */
	u8			flags;		/* NV_APRD_* bits */
	u8			packet_len;
	__le16			reserved;
};
179
/* Bits OR'd into the 16-bit taskfile words of a CPB (see nv_adma_tf_to_cpb) */
enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};
189
/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0, NV_CPB_RESP_* bits */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2, NV_CPB_CTL_* bits */
	/* len is length of taskfile in 64 bit words */
	u8			len;           /* 3 */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31, taskfile register words */
	struct nv_adma_prd	aprd[5];       /* 32-111, embedded SG segments */
	__le64			next_aprd;     /* 112-119, bus addr of overflow SG */
	__le64			reserved3;     /* 120-127 */
};
208
209
/* Per-port ADMA state, hung off ata_port->private_data */
struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;		/* NV_ADMA_MAX_CPBS CPBs (coherent DMA) */
	dma_addr_t		cpb_dma;	/* bus address of cpb[] */
	struct nv_adma_prd	*aprd;		/* external APRD (s/g) tables */
	dma_addr_t		aprd_dma;	/* bus address of aprd[] */
	u8			flags;		/* NV_ADMA_PORT_REGISTER_MODE */
};
217
/* Test the per-port interrupt bit (bit 19 for port 0, 31 for port 1) in GEN_CTL */
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))
219
Linus Torvalds1da177e2005-04-16 15:20:36 -0700220static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
Jeff Garzikcca39742006-08-24 03:19:22 -0400221static void nv_ck804_host_stop(struct ata_host *host);
David Howells7d12e782006-10-05 14:55:46 +0100222static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
223static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
224static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700225static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
226static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700227
Tejun Heo39f87582006-06-17 15:49:56 +0900228static void nv_nf2_freeze(struct ata_port *ap);
229static void nv_nf2_thaw(struct ata_port *ap);
230static void nv_ck804_freeze(struct ata_port *ap);
231static void nv_ck804_thaw(struct ata_port *ap);
232static void nv_error_handler(struct ata_port *ap);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700233static int nv_adma_slave_config(struct scsi_device *sdev);
234static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
235static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
236static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
237static void nv_adma_irq_clear(struct ata_port *ap);
238static int nv_adma_port_start(struct ata_port *ap);
239static void nv_adma_port_stop(struct ata_port *ap);
240static void nv_adma_error_handler(struct ata_port *ap);
241static void nv_adma_host_stop(struct ata_host *host);
242static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
243static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
244static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
245static u8 nv_adma_bmdma_status(struct ata_port *ap);
Tejun Heo39f87582006-06-17 15:49:56 +0900246
/* Controller flavor; doubles as driver_data in nv_pci_tbl and index into
   nv_port_info[] */
enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA
};
255
/*
 * PCI IDs handled by this driver; driver_data is an nv_host_type.  The two
 * trailing class-match entries catch any NVIDIA IDE- or RAID-class device
 * not listed explicitly and drive it as GENERIC.
 */
static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
	{ PCI_VDEVICE(NVIDIA, 0x045c), GENERIC }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045d), GENERIC }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045e), GENERIC }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045f), GENERIC }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x0550), GENERIC }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0551), GENERIC }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0552), GENERIC }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0553), GENERIC }, /* MCP67 */
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },

	{ } /* terminate list */
};
288
/* PCI glue: probe via nv_init_one, removal via the generic libata helper */
static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
	.remove			= ata_pci_remove_one,
};
295
/* SCSI host template for the legacy (non-ADMA) interface */
static struct scsi_host_template nv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
313
/* SCSI host template for the ADMA interface: deeper queue (one per CPB),
   larger s/g table and per-device DMA limits via nv_adma_slave_config */
static struct scsi_host_template nv_adma_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= NV_ADMA_MAX_CPBS,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.max_sectors		= ATA_MAX_SECTORS,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
332
/* Stock BMDMA hooks plus NV-specific SCR access and interrupt handler */
static const struct ata_port_operations nv_generic_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_pio_data_xfer,
	.irq_handler		= nv_generic_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= ata_pci_host_stop,
};
359
/* As nv_generic_ops, but with nForce2/3-specific freeze/thaw and irq handler */
static const struct ata_port_operations nv_nf2_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_pio_data_xfer,
	.irq_handler		= nv_nf2_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= ata_pci_host_stop,
};
386
/* CK804/MCP04 legacy ops: own freeze/thaw/irq handler and host_stop */
static const struct ata_port_operations nv_ck804_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_pio_data_xfer,
	.irq_handler		= nv_ck804_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= nv_ck804_host_stop,
};
413
/* ADMA interface ops: custom qc_prep/issue, BMDMA wrappers, interrupt and
   port setup/teardown; reuses CK804 freeze/thaw */
static const struct ata_port_operations nv_adma_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= nv_adma_bmdma_setup,
	.bmdma_start		= nv_adma_bmdma_start,
	.bmdma_stop		= nv_adma_bmdma_stop,
	.bmdma_status		= nv_adma_bmdma_status,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_bmdma_stop,
	.data_xfer		= ata_mmio_data_xfer,
	.irq_handler		= nv_adma_interrupt,
	.irq_clear		= nv_adma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
	.host_stop		= nv_adma_host_stop,
};
440
/* Port templates, indexed by enum nv_host_type */
static struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
	},
	/* nforce2/3 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
	},
	/* ck804 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
	},
	/* ADMA */
	{
		.sht		= &nv_adma_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
	},
};
483
484MODULE_AUTHOR("NVIDIA");
485MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
486MODULE_LICENSE("GPL");
487MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
488MODULE_VERSION(DRV_VERSION);
489
/* Nonzero selects the ADMA interface where available (presumably consulted
   at probe time -- usage is outside this chunk; TODO confirm). */
static int adma_enabled = 1;
491
/*
 * slave_configure hook for the ADMA interface.  Runs the generic libata
 * configuration, then adjusts the block-layer DMA limits per device:
 * ATAPI devices are forced down to legacy 32-bit DMA limits, ATA disks
 * get the full ADMA bounce/segment limits.  Returns the result of
 * ata_scsi_slave_config().
 */
static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	u64 bounce_limit;
	unsigned long segment_boundary;
	unsigned short sg_tablesize;
	int rc;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		bounce_limit = ATA_DMA_MASK;
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;
	}
	else {
		bounce_limit = *ap->dev->dma_mask;
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
	}

	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
	return rc;
}
534
/*
 * Translate a libata taskfile into the CPB's sequence of 16-bit register
 * words (ATA register number in the high byte, value in the low byte,
 * plus nv_adma_regbits flags).  LBA48 commands emit the HOB values first;
 * otherwise those five slots are IGN-padded so the word count is the same
 * either way.  Returns the number of words written (used as cpb->len).
 */
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, u16 *cpb)
{
	unsigned int idx = 0;

	/* device select first, with wait-not-BSY */
	cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device | WNB);

	if ((tf->flags & ATA_TFLAG_LBA48) == 0) {
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
	}
	else {
		cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature);
		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
	}
	cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->feature);
	cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->lbal);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->lbam);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->lbah);

	/* command register last, terminating the list */
	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	return idx;
}
565
566static inline void __iomem *__nv_adma_ctl_block(void __iomem *mmio,
567 unsigned int port_no)
568{
569 mmio += NV_ADMA_PORT + port_no * NV_ADMA_PORT_SIZE;
570 return mmio;
571}
572
573static inline void __iomem *nv_adma_ctl_block(struct ata_port *ap)
574{
575 return __nv_adma_ctl_block(ap->host->mmio_base, ap->port_no);
576}
577
578static inline void __iomem *nv_adma_gen_block(struct ata_port *ap)
579{
580 return (ap->host->mmio_base + NV_ADMA_GEN);
581}
582
583static inline void __iomem *nv_adma_notifier_clear_block(struct ata_port *ap)
584{
585 return (nv_adma_gen_block(ap) + NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no));
586}
587
/*
 * Inspect CPB @cpb_num's response flags and, if it completed (or @force_err
 * is set), finish the matching queued command.  Any of the three error
 * response bits marks the command both complete and failed.  For non-NCQ
 * commands the live ATA status register is folded into the error mask; for
 * NCQ it is skipped, since the current status byte need not correspond to
 * the tag that just finished.
 */
static void nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	int complete = 0, have_err = 0;
	u16 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (flags & NV_CPB_RESP_DONE) {
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		complete = 1;
	}
	if (flags & NV_CPB_RESP_ATA_ERR) {
		ata_port_printk(ap, KERN_ERR, "CPB flags ATA err, flags=0x%x\n", flags);
		have_err = 1;
		complete = 1;
	}
	if (flags & NV_CPB_RESP_CMD_ERR) {
		ata_port_printk(ap, KERN_ERR, "CPB flags CMD err, flags=0x%x\n", flags);
		have_err = 1;
		complete = 1;
	}
	if (flags & NV_CPB_RESP_CPB_ERR) {
		ata_port_printk(ap, KERN_ERR, "CPB flags CPB err, flags=0x%x\n", flags);
		have_err = 1;
		complete = 1;
	}
	if(complete || force_err)
	{
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		if(likely(qc)) {
			u8 ata_status = 0;
			/* Only use the ATA port status for non-NCQ commands.
			   For NCQ commands the current status may have nothing to do with
			   the command just completed. */
			if(qc->tf.protocol != ATA_PROT_NCQ)
				ata_status = readb(nv_adma_ctl_block(ap) + (ATA_REG_STATUS * 4));

			if(have_err || force_err)
				ata_status |= ATA_ERR;

			qc->err_mask |= ac_err_mask(ata_status);
			DPRINTK("Completing qc from tag %d with err_mask %u\n",cpb_num,
				qc->err_mask);
			ata_qc_complete(qc);
		}
	}
}
636
/*
 * Shared interrupt handler for the ADMA interface.  For each enabled port:
 * if it is in register mode, defer to the standard libata interrupt path;
 * otherwise read and acknowledge the notifier and ADMA status registers,
 * freeze on hotplug events, note global (timeout/CPB) errors, and walk the
 * completed CPBs -- active_tag for a non-NCQ command, or every bit of
 * sactive for NCQ.  Called with interrupts disabled; takes host->lock.
 */
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = nv_adma_ctl_block(ap);
			u16 status;
			u32 gen_ctl;
			int have_global_err = 0;
			u32 notifier, notifier_error;

			/* if in ATA register mode, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				struct ata_queued_cmd *qc;
				VPRINTK("in ATA register mode\n");
				qc = ata_qc_from_tag(ap, ap->active_tag);
				if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
					handled += ata_host_intr(ap, qc);
				else {
					/* No request pending?  Clear interrupt status
					   anyway, in case there's one pending. */
					ap->ops->check_status(ap);
					handled++;
				}
				continue;
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);

			gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);

			/* Seems necessary to clear notifiers even when they were 0.
			   Otherwise we seem to stop receiving further interrupts.
			   Unsure why. */
			writel(notifier | notifier_error, nv_adma_notifier_clear_block(ap));

			if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();	/* order the status clear before CPB reads */

			/* freeze if hotplugged */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | NV_ADMA_STAT_HOTUNPLUG))) {
				ata_port_printk(ap, KERN_NOTICE, "Hotplug event, freezing\n");
				ata_port_freeze(ap);
				handled++;
				continue;
			}

			if (status & NV_ADMA_STAT_TIMEOUT) {
				ata_port_printk(ap, KERN_ERR, "timeout, stat=0x%x\n", status);
				have_global_err = 1;
			}
			if (status & NV_ADMA_STAT_CPBERR) {
				ata_port_printk(ap, KERN_ERR, "CPB error, stat=0x%x\n", status);
				have_global_err = 1;
			}
			if ((status & NV_ADMA_STAT_DONE) || have_global_err) {
				/** Check CPBs for completed commands */

				if(ata_tag_valid(ap->active_tag))
					/* Non-NCQ command */
					nv_adma_check_cpb(ap, ap->active_tag, have_global_err ||
						(notifier_error & (1 << ap->active_tag)));
				else {
					int pos;
					u32 active = ap->sactive;
					/* walk each outstanding NCQ tag */
					while( (pos = ffs(active)) ) {
						pos--;
						nv_adma_check_cpb(ap, pos, have_global_err ||
							(notifier_error & (1 << pos)) );
						active &= ~(1 << pos );
					}
				}
			}

			handled++; /* irq handled if we got here */
		}
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
738
/*
 * irq_clear hook: acknowledge everything pending on the port -- write back
 * the ADMA status and notifier bits, then clear the legacy BMDMA interrupt
 * state (ATA_FLAG_MMIO is dropped around the generic call so it uses the
 * legacy register accessors).
 */
static void nv_adma_irq_clear(struct ata_port *ap)
{
	void __iomem *mmio = nv_adma_ctl_block(ap);
	u16 status = readw(mmio + NV_ADMA_STAT);
	u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
	u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);

	/* clear ADMA status */
	writew(status, mmio + NV_ADMA_STAT);
	writel(notifier | notifier_error,
	       nv_adma_notifier_clear_block(ap));

	/** clear legacy status */
	ap->flags &= ~ATA_FLAG_MMIO;
	ata_bmdma_irq_clear(ap);
	ap->flags |= ATA_FLAG_MMIO;
}
756
757static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
758{
759 struct nv_adma_port_priv *pp = qc->ap->private_data;
760
761 if(pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
762 WARN_ON(1);
763 return;
764 }
765
766 qc->ap->flags &= ~ATA_FLAG_MMIO;
767 ata_bmdma_setup(qc);
768 qc->ap->flags |= ATA_FLAG_MMIO;
769}
770
771static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
772{
773 struct nv_adma_port_priv *pp = qc->ap->private_data;
774
775 if(pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
776 WARN_ON(1);
777 return;
778 }
779
780 qc->ap->flags &= ~ATA_FLAG_MMIO;
781 ata_bmdma_start(qc);
782 qc->ap->flags |= ATA_FLAG_MMIO;
783}
784
785static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
786{
787 struct nv_adma_port_priv *pp = qc->ap->private_data;
788
789 if(pp->flags & NV_ADMA_PORT_REGISTER_MODE)
790 return;
791
792 qc->ap->flags &= ~ATA_FLAG_MMIO;
793 ata_bmdma_stop(qc);
794 qc->ap->flags |= ATA_FLAG_MMIO;
795}
796
797static u8 nv_adma_bmdma_status(struct ata_port *ap)
798{
799 u8 status;
800 struct nv_adma_port_priv *pp = ap->private_data;
801
802 WARN_ON(pp->flags & NV_ADMA_PORT_REGISTER_MODE);
803
804 ap->flags &= ~ATA_FLAG_MMIO;
805 status = ata_bmdma_status(ap);
806 ap->flags |= ATA_FLAG_MMIO;
807 return status;
808}
809
810static void nv_adma_register_mode(struct ata_port *ap)
811{
812 void __iomem *mmio = nv_adma_ctl_block(ap);
813 struct nv_adma_port_priv *pp = ap->private_data;
814 u16 tmp;
815
816 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
817 return;
818
819 tmp = readw(mmio + NV_ADMA_CTL);
820 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
821
822 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
823}
824
825static void nv_adma_mode(struct ata_port *ap)
826{
827 void __iomem *mmio = nv_adma_ctl_block(ap);
828 struct nv_adma_port_priv *pp = ap->private_data;
829 u16 tmp;
830
831 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
832 return;
833
834 tmp = readw(mmio + NV_ADMA_CTL);
835 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
836
837 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
838}
839
/*
 * port_start hook for the ADMA interface.  Runs the generic port start,
 * allocates the private struct plus one coherent DMA chunk (32 CPBs
 * followed by the external APRD tables), programs the CPB base address,
 * clears stale interrupt status and the CPB fetch count, then pulses
 * CHANNEL_RESET to leave the engine stopped in register mode.
 * Returns 0 or -ENOMEM, unwinding on failure.
 */
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio = nv_adma_ctl_block(ap);
	u16 tmp;

	VPRINTK("ENTER\n");

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp) {
		rc = -ENOMEM;
		goto err_out;
	}

	mem = dma_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				 &mem_dma, GFP_KERNEL);

	if (!mem) {
		rc = -ENOMEM;
		goto err_out_kfree;
	}
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	/* split the 64-bit bus address across the two base registers;
	   the double 16-bit shift avoids UB on 32-bit dma_addr_t */
	writel(mem_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16 ) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL );	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL );	/* flush posted write */

	return 0;

err_out_kfree:
	kfree(pp);
err_out:
	ata_port_stop(ap);
	return rc;
}
921
/*
 * Per-port teardown: quiesce the ADMA engine by zeroing its control
 * register, then release the DMA-coherent CPB/APRD region, the private
 * struct, and finally the legacy port resources — the reverse of
 * nv_adma_port_start().
 */
static void nv_adma_port_stop(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = nv_adma_ctl_block(ap);

	VPRINTK("ENTER\n");

	/* stop the engine before freeing memory it may still reference */
	writew(0, mmio + NV_ADMA_CTL);

	ap->private_data = NULL;
	dma_free_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ, pp->cpb, pp->cpb_dma);
	kfree(pp);
	ata_port_stop(ap);
}
937
938
/*
 * Point one port's taskfile register addresses into the ADMA register
 * space instead of the legacy BMDMA I/O ports.  Each shadow register
 * occupies a 4-byte slot (hence the "* 4" scaling); the control/alt
 * status register sits at fixed offset 0x20 in the per-port window.
 */
static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
{
	void __iomem *mmio = probe_ent->mmio_base;
	struct ata_ioports *ioport = &probe_ent->port[port];

	VPRINTK("ENTER\n");

	/* advance to this port's window within the ADMA BAR */
	mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr = (unsigned long) mmio;
	ioport->data_addr = (unsigned long) mmio + (ATA_REG_DATA * 4);
	ioport->error_addr =
	ioport->feature_addr = (unsigned long) mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr = (unsigned long) mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr = (unsigned long) mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr = (unsigned long) mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr = (unsigned long) mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr = (unsigned long) mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr =
	ioport->command_addr = (unsigned long) mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr =
	ioport->ctl_addr = (unsigned long) mmio + 0x20;
}
962
/*
 * Host-wide ADMA bring-up: enable ADMA for both ports in PCI config
 * space, redirect each port's taskfile addresses into the ADMA
 * register space, and enable the ADMA interrupt on each port.
 * Always returns 0.
 */
static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < probe_ent->n_ports; i++)
		nv_adma_setup_port(probe_ent, i);

	for (i = 0; i < probe_ent->n_ports; i++) {
		void __iomem *mmio = __nv_adma_ctl_block(probe_ent->mmio_base, i);
		u16 tmp;

		/* enable interrupt, clear reset if not already clear */
		/* NOTE(review): only AIEN is set here; CHANNEL_RESET is not
		   touched, so the "clear reset" part of the comment above
		   does not match the code — confirm intent */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
	}

	return 0;
}
994
/*
 * Fill one ADMA physical-region descriptor (APRD) from a scatterlist
 * entry.  @idx is the 0-based position of @sg within the command's
 * s/g list; the last entry gets END, intermediate ones get CONT.
 */
static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u32 flags;

	memset(aprd, 0, sizeof(struct nv_adma_prd));

	flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;	/* last s/g entry of the command */
	else if (idx != 4)
		flags |= NV_APRD_CONT;	/* more entries follow */
	/* idx 4 is the fifth and last APRD embedded in the CPB itself;
	 * continuation into the external s/g table is expressed through
	 * cpb->next_aprd (see nv_adma_fill_sg()), not via CONT */

	aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = cpu_to_le32(flags);
}
1016
/*
 * Build the APRD list for a command: the first five s/g entries go
 * into the APRDs embedded in the CPB; any remaining entries spill
 * into this tag's slot of the external s/g table, which the CPB then
 * references through next_aprd.
 */
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	unsigned int idx;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;

	VPRINTK("ENTER\n");

	idx = 0;

	ata_for_each_sg(sg, qc) {
		/* entries 0-4 live in the CPB; 5+ go to the external table */
		aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
		nv_adma_fill_aprd(qc, sg, idx, aprd);
		idx++;
	}
	/* link the CPB to the external table only if it was used */
	if (idx > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
}
1036
/*
 * Prepare a command for issue.  Non-DMA-mapped and ATAPI DMA commands
 * fall back to the legacy ata_qc_prep() path; everything else gets a
 * CPB built in place, with the valid bit published only after the rest
 * of the CPB is visible to the device.
 */
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_APRD_VALID |
		       NV_CPB_CTL_IEN;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	/* legacy path for commands ADMA cannot carry */
	if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
	    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
		ata_qc_prep(qc);
		return;
	}

	memset(cpb, 0, sizeof(struct nv_adma_cpb));

	cpb->len = 3;		/* CPB length field per the NV ADMA layout */
	cpb->tag = qc->tag;
	cpb->next_cpb_idx = 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	nv_adma_fill_sg(qc, cpb);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
	   finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
}
1072
/*
 * Issue a prepared command.  Commands that took the legacy prep path
 * are issued in register mode via ata_qc_issue_prot(); ADMA commands
 * are handed to the engine by writing the tag to the APPEND register.
 * Returns 0 on ADMA issue, or ata_qc_issue_prot()'s result.
 */
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	void __iomem *mmio = nv_adma_ctl_block(qc->ap);

	VPRINTK("ENTER\n");

	if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
	    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
		/* use ATA register mode */
		VPRINTK("no dmamap or ATAPI, using ATA register mode: 0x%lx\n", qc->flags);
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();		/* CPB contents must be visible before the append */
	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n",qc->tag);

	return 0;
}
1097
/*
 * Interrupt handler for the generic (pre-NF2) flavor: walk every port
 * under the host lock, dispatch completions for active non-polled
 * commands via ata_host_intr(), and read the status register on idle
 * ports to ack any stray interrupt.
 */
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				// No request pending?  Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->check_status(ap);
		}

	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
1130
Tejun Heoada364e2006-06-17 15:49:56 +09001131static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
1132{
1133 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
1134 int handled;
1135
Tejun Heo5a44eff2006-06-17 15:49:56 +09001136 /* freeze if hotplugged */
1137 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
1138 ata_port_freeze(ap);
1139 return 1;
1140 }
1141
Tejun Heoada364e2006-06-17 15:49:56 +09001142 /* bail out if not our interrupt */
1143 if (!(irq_stat & NV_INT_DEV))
1144 return 0;
1145
1146 /* DEV interrupt w/ no active qc? */
1147 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
1148 ata_check_status(ap);
1149 return 1;
1150 }
1151
1152 /* handle interrupt */
1153 handled = ata_host_intr(ap, qc);
1154 if (unlikely(!handled)) {
1155 /* spurious, clear it */
1156 ata_check_status(ap);
1157 }
1158
1159 return 1;
1160}
1161
Jeff Garzikcca39742006-08-24 03:19:22 -04001162static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
Tejun Heoada364e2006-06-17 15:49:56 +09001163{
1164 int i, handled = 0;
1165
Jeff Garzikcca39742006-08-24 03:19:22 -04001166 for (i = 0; i < host->n_ports; i++) {
1167 struct ata_port *ap = host->ports[i];
Tejun Heoada364e2006-06-17 15:49:56 +09001168
1169 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1170 handled += nv_host_intr(ap, irq_stat);
1171
1172 irq_stat >>= NV_INT_PORT_SHIFT;
1173 }
1174
1175 return IRQ_RETVAL(handled);
1176}
1177
/*
 * nForce2/3 interrupt handler: the per-port interrupt status lives in
 * I/O space relative to port 0's SCR base; read it and dispatch via
 * the common nv_do_interrupt() demultiplexer.
 */
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = inb(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
1191
/*
 * CK804/MCP04 interrupt handler: like nv_nf2_interrupt(), but the
 * interrupt status register is memory-mapped (read via readb from the
 * host's MMIO base) rather than in I/O port space.
 */
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
1205
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
1207{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001208 if (sc_reg > SCR_CONTROL)
1209 return 0xffffffffU;
1210
Jeff Garzik02cbd922006-03-22 23:59:46 -05001211 return ioread32((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001212}
1213
1214static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
1215{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216 if (sc_reg > SCR_CONTROL)
1217 return;
1218
Jeff Garzik02cbd922006-03-22 23:59:46 -05001219 iowrite32(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001220}
1221
/*
 * EH freeze for nForce2/3: mask all of this port's interrupt sources
 * in the shared (I/O-space) interrupt enable register.
 */
static void nv_nf2_freeze(struct ata_port *ap)
{
	unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;	/* this port's bit group */
	u8 mask;

	mask = inb(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	outb(mask, scr_addr + NV_INT_ENABLE);
}
1232
/*
 * EH thaw for nForce2/3: ack any pending interrupt status for this
 * port, then re-enable its interrupt sources.  Note that only
 * NV_INT_MASK bits are re-enabled while freeze clears NV_INT_ALL —
 * presumably intentional (hotplug bits stay acked-only); confirm
 * against the enable-mask definitions.
 */
static void nv_nf2_thaw(struct ata_port *ap)
{
	unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	/* clear anything pending before unmasking */
	outb(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = inb(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	outb(mask, scr_addr + NV_INT_ENABLE);
}
1245
/*
 * EH freeze for CK804/MCP04: same as nv_nf2_freeze() but the enable
 * register is memory-mapped rather than in I/O port space.
 */
static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->mmio_base;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;	/* this port's bit group */
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
1256
/*
 * EH thaw for CK804/MCP04: ack pending status for this port, then
 * re-enable its NV_INT_MASK interrupt sources (MMIO variant of
 * nv_nf2_thaw()).
 */
static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->mmio_base;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	/* clear anything pending before unmasking */
	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
1269
/*
 * Hardreset wrapper: performs the standard SATA hardreset but throws
 * away the classification result (see comment below for why).
 * Returns sata_std_hardreset()'s result; *class is left untouched.
 */
static int nv_hardreset(struct ata_port *ap, unsigned int *class)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &dummy);
}
1280
/*
 * Standard BMDMA error handler wiring, using the driver's hardreset
 * wrapper (which suppresses classification) in place of the default.
 */
static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
1286
/*
 * ADMA error handler.  If the port is still in ADMA mode, dump the
 * engine state and per-tag CPB flags for diagnosis, drop back to
 * register mode, invalidate all CPBs, and pulse a channel reset —
 * then run the common BMDMA EH with this driver's reset methods.
 */
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = nv_adma_ctl_block(ap);
		int i;
		u16 tmp;

		/* snapshot engine state for the diagnostic dump below */
		u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
		u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
		u32 gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);
		u32 status = readw(mmio + NV_ADMA_STAT);

		ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
			"notifier_error 0x%X gen_ctl 0x%X status 0x%X\n",
			notifier, notifier_error, gen_ctl, status);

		/* report any CPB with control or response flags still set */
		for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
			struct nv_adma_cpb *cpb = &pp->cpb[i];
			if( cpb->ctl_flags || cpb->resp_flags )
				ata_port_printk(ap, KERN_ERR,
					"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
					i, cpb->ctl_flags, cpb->resp_flags);
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		ata_port_printk(ap, KERN_ERR, "Resetting port\n");

		/* Mark all of the CPBs as invalid to prevent them from being executed */
		for( i=0;i<NV_ADMA_MAX_CPBS;i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl( mmio + NV_ADMA_CTL );	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl( mmio + NV_ADMA_CTL );	/* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
1336
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1338{
1339 static int printed_version = 0;
Jeff Garzik29da9f62006-09-25 21:56:33 -04001340 struct ata_port_info *ppi[2];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001341 struct ata_probe_ent *probe_ent;
1342 int pci_dev_busy = 0;
1343 int rc;
1344 u32 bar;
Jeff Garzik02cbd922006-03-22 23:59:46 -05001345 unsigned long base;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001346 unsigned long type = ent->driver_data;
1347 int mask_set = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348
1349 // Make sure this is a SATA controller by counting the number of bars
1350 // (NVIDIA SATA controllers will always have six bars). Otherwise,
1351 // it's an IDE controller and we ignore it.
1352 for (bar=0; bar<6; bar++)
1353 if (pci_resource_start(pdev, bar) == 0)
1354 return -ENODEV;
1355
Robert Hancockfbbb2622006-10-27 19:08:41 -07001356 if ( !printed_version++)
Jeff Garzika9524a72005-10-30 14:39:11 -05001357 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001358
1359 rc = pci_enable_device(pdev);
1360 if (rc)
1361 goto err_out;
1362
1363 rc = pci_request_regions(pdev, DRV_NAME);
1364 if (rc) {
1365 pci_dev_busy = 1;
1366 goto err_out_disable;
1367 }
1368
Robert Hancockfbbb2622006-10-27 19:08:41 -07001369 if(type >= CK804 && adma_enabled) {
1370 dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
1371 type = ADMA;
1372 if(!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
1373 !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
1374 mask_set = 1;
1375 }
1376
1377 if(!mask_set) {
1378 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1379 if (rc)
1380 goto err_out_regions;
1381 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1382 if (rc)
1383 goto err_out_regions;
1384 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385
1386 rc = -ENOMEM;
1387
Robert Hancockfbbb2622006-10-27 19:08:41 -07001388 ppi[0] = ppi[1] = &nv_port_info[type];
Jeff Garzik29da9f62006-09-25 21:56:33 -04001389 probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390 if (!probe_ent)
1391 goto err_out_regions;
1392
Jeff Garzik02cbd922006-03-22 23:59:46 -05001393 probe_ent->mmio_base = pci_iomap(pdev, 5, 0);
1394 if (!probe_ent->mmio_base) {
1395 rc = -EIO;
Tejun Heoe6faf082006-06-17 15:49:55 +09001396 goto err_out_free_ent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001397 }
1398
Jeff Garzik02cbd922006-03-22 23:59:46 -05001399 base = (unsigned long)probe_ent->mmio_base;
1400
1401 probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
1402 probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
1403
Tejun Heoada364e2006-06-17 15:49:56 +09001404 /* enable SATA space for CK804 */
Robert Hancockfbbb2622006-10-27 19:08:41 -07001405 if (type >= CK804) {
Tejun Heoada364e2006-06-17 15:49:56 +09001406 u8 regval;
1407
1408 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
1409 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
1410 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
1411 }
1412
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413 pci_set_master(pdev);
1414
Robert Hancockfbbb2622006-10-27 19:08:41 -07001415 if (type == ADMA) {
1416 rc = nv_adma_host_init(probe_ent);
1417 if (rc)
1418 goto err_out_iounmap;
1419 }
1420
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421 rc = ata_device_add(probe_ent);
1422 if (rc != NV_PORTS)
1423 goto err_out_iounmap;
1424
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425 kfree(probe_ent);
1426
1427 return 0;
1428
1429err_out_iounmap:
Jeff Garzik02cbd922006-03-22 23:59:46 -05001430 pci_iounmap(pdev, probe_ent->mmio_base);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001431err_out_free_ent:
1432 kfree(probe_ent);
1433err_out_regions:
1434 pci_release_regions(pdev);
1435err_out_disable:
1436 if (!pci_dev_busy)
1437 pci_disable_device(pdev);
1438err_out:
1439 return rc;
1440}
1441
/*
 * Host teardown for CK804: disable the SATA register space that
 * nv_init_one() enabled in PCI config, then do the common PCI host
 * stop.
 */
static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);

	ata_pci_host_stop(host);
}
1454
/*
 * Host teardown for ADMA operation: disable the ADMA interrupt on
 * every port, turn off ADMA for both ports in PCI config space
 * (reversing nv_adma_host_init()), then finish with the CK804 host
 * stop to disable the SATA register space.
 */
static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int i;
	u32 tmp32;

	for (i = 0; i < host->n_ports; i++) {
		void __iomem *mmio = __nv_adma_ctl_block(host->mmio_base, i);
		u16 tmp;

		/* disable interrupt */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp & ~NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
	}

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}
1481
/* Module entry point: register the PCI driver with the PCI core. */
static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}
1486
/* Module exit point: unregister the PCI driver. */
static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}
1491
/* Module hookup and the "adma" parameter (world-readable, not writable
 * at runtime: mode 0444). */
module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");