blob: b93e64d3fcf1d32781a871404c9b66a1aeee37a7 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * sata_nv.c - NVIDIA nForce SATA
3 *
4 * Copyright 2004 NVIDIA Corp. All rights reserved.
5 * Copyright 2004 Andrew Chew
6 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 *
Jeff Garzikaa7e16d2005-08-29 15:12:56 -04008 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
Linus Torvalds1da177e2005-04-16 15:20:36 -070021 *
Jeff Garzikaf36d7f2005-08-28 20:18:39 -040022 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 * No hardware documentation available outside of NVIDIA.
27 * This driver programs the NVIDIA SATA controller in a similar
28 * fashion as with other PCI IDE BMDMA controllers, with a few
29 * NV-specific details such as register offsets, SATA phy location,
30 * hotplug info, etc.
31 *
Robert Hancockfbbb2622006-10-27 19:08:41 -070032 * CK804/MCP04 controllers support an alternate programming interface
33 * similar to the ADMA specification (with some modifications).
34 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35 * sent through the legacy interface.
36 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070037 */
38
Linus Torvalds1da177e2005-04-16 15:20:36 -070039#include <linux/kernel.h>
40#include <linux/module.h>
41#include <linux/pci.h>
42#include <linux/init.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/interrupt.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050046#include <linux/device.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <scsi/scsi_host.h>
Robert Hancockfbbb2622006-10-27 19:08:41 -070048#include <scsi/scsi_device.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070049#include <linux/libata.h>
50
51#define DRV_NAME "sata_nv"
Robert Hancockcdf56bc2007-01-03 18:13:57 -060052#define DRV_VERSION "3.3"
Robert Hancockfbbb2622006-10-27 19:08:41 -070053
54#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
Linus Torvalds1da177e2005-04-16 15:20:36 -070055
/* Register offsets, PCI config bits, and ADMA constants for the nForce SATA
   controllers.  Offsets are relative to BAR5 (MMIO) unless noted otherwise. */
enum {
	NV_MMIO_BAR			= 5,	/* PCI BAR holding the MMIO register space */

	NV_PORTS			= 2,
	NV_PIO_MASK			= 0x1f,
	NV_MWDMA_MASK			= 0x07,
	NV_UDMA_MASK			= 0x7f,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,	/* CK804/MCP04 use different locations */
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,	/* hotplug: device added */
	NV_INT_REMOVED			= 0x08,	/* hotplug: device removed */

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,	/* one CPB per NCQ tag */
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	/* 5 APRDs live inside the CPB; the rest fill out a 1 KiB slot */
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					  (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space  */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers (relative to the per-port ADMA block) */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),	/* engine in legacy (register) mode */
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags (nv_adma_port_priv.flags) */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700173
/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;		/* DMA address of the segment */
	__le32			len;		/* segment length in bytes */
	u8			flags;		/* NV_APRD_* bits */
	u8			packet_len;
	__le16			reserved;
};
182
/* Bits OR'ed into the 16-bit taskfile entries of a CPB (see nv_adma_tf_to_cpb). */
enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};
192
/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0: NV_CPB_RESP_* status bits */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2: NV_CPB_CTL_* control bits */
	/* len is length of taskfile in 64 bit words */
	u8			len;           /* 3  */
	u8			tag;           /* 4: libata command tag */
	u8			next_cpb_idx;  /* 5  */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31: taskfile entries (reg << 8 | value) */
	struct nv_adma_prd	aprd[5];       /* 32-111: inline SG segments */
	__le64			next_aprd;     /* 112-119: DMA address of overflow APRD table */
	__le64			reserved3;     /* 120-127 */
};
211
212
/* Per-port private data for the ADMA interface. */
struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;		/* CPB array (one per tag) */
	dma_addr_t		cpb_dma;	/* its bus address */
	struct nv_adma_prd	*aprd;		/* overflow APRD tables */
	dma_addr_t		aprd_dma;	/* their bus address */
	void __iomem *		ctl_block;	/* per-port ADMA registers */
	void __iomem *		gen_block;	/* shared ADMA general registers */
	void __iomem *		notifier_clear_block;
	u8			flags;		/* NV_ADMA_PORT_REGISTER_MODE etc. */
	int			last_issue_ncq;	/* was the last issued command NCQ? */
};
224
/* Per-host private data: remembers the chip flavour (enum nv_host_type). */
struct nv_host_priv {
	unsigned long		type;
};
228
Robert Hancockfbbb2622006-10-27 19:08:41 -0700229#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))
230
Linus Torvalds1da177e2005-04-16 15:20:36 -0700231static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600232static void nv_remove_one (struct pci_dev *pdev);
233static int nv_pci_device_resume(struct pci_dev *pdev);
Jeff Garzikcca39742006-08-24 03:19:22 -0400234static void nv_ck804_host_stop(struct ata_host *host);
David Howells7d12e782006-10-05 14:55:46 +0100235static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
236static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
237static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700238static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
239static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700240
Tejun Heo39f87582006-06-17 15:49:56 +0900241static void nv_nf2_freeze(struct ata_port *ap);
242static void nv_nf2_thaw(struct ata_port *ap);
243static void nv_ck804_freeze(struct ata_port *ap);
244static void nv_ck804_thaw(struct ata_port *ap);
245static void nv_error_handler(struct ata_port *ap);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700246static int nv_adma_slave_config(struct scsi_device *sdev);
Robert Hancock2dec7552006-11-26 14:20:19 -0600247static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700248static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
249static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
250static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
251static void nv_adma_irq_clear(struct ata_port *ap);
252static int nv_adma_port_start(struct ata_port *ap);
253static void nv_adma_port_stop(struct ata_port *ap);
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600254static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
255static int nv_adma_port_resume(struct ata_port *ap);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700256static void nv_adma_error_handler(struct ata_port *ap);
257static void nv_adma_host_stop(struct ata_host *host);
258static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
259static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
260static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
261static u8 nv_adma_bmdma_status(struct ata_port *ap);
Tejun Heo39f87582006-06-17 15:49:56 +0900262
/* Chip flavours; used to index nv_port_info[] from the PCI table. */
enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA
};
271
/* PCI IDs handled by this driver; driver_data selects the nv_host_type.
   The two catch-all entries pick up future NVIDIA IDE/RAID-class devices. */
static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },

	{ } /* terminate list */
};
296
/* PCI driver glue: probe/remove plus power-management entry points. */
static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
	.remove			= nv_remove_one,
};
305
/* SCSI host template for the legacy (non-ADMA) interface. */
static struct scsi_host_template nv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
};
325
/* SCSI host template for the ADMA interface: deeper queue (NCQ), larger SG
   table, wider DMA boundary, and a custom slave_configure for ATAPI fallback. */
static struct scsi_host_template nv_adma_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= NV_ADMA_MAX_CPBS,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
};
345
/* Port operations for the generic flavour: stock BMDMA with NV SCR access. */
static const struct ata_port_operations nv_generic_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_generic_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};
372
/* Port operations for nForce2/3: as generic, but with NF2-specific
   freeze/thaw and interrupt handler (hotplug interrupt registers differ). */
static const struct ata_port_operations nv_nf2_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_nf2_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};
399
/* Port operations for CK804/MCP04 in legacy mode: CK804 freeze/thaw and
   interrupt handler, plus a host_stop that restores the CK804 register state. */
static const struct ata_port_operations nv_ck804_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_ck804_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.host_stop		= nv_ck804_host_stop,
};
427
/* Port operations for the ADMA interface (CK804/MCP04 with NCQ): custom
   qc_prep/qc_issue, BMDMA wrappers, interrupt handling, and suspend/resume. */
static const struct ata_port_operations nv_adma_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= nv_adma_bmdma_setup,
	.bmdma_start		= nv_adma_bmdma_start,
	.bmdma_stop		= nv_adma_bmdma_stop,
	.bmdma_status		= nv_adma_bmdma_status,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_bmdma_stop,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_adma_interrupt,
	.irq_clear		= nv_adma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
	.host_stop		= nv_adma_host_stop,
};
459
/* Per-flavour port configuration, indexed by enum nv_host_type. */
static struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
	},
	/* nforce2/3 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
	},
	/* ck804 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
	},
	/* ADMA */
	{
		.sht		= &nv_adma_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
	},
};
503
504MODULE_AUTHOR("NVIDIA");
505MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
506MODULE_LICENSE("GPL");
507MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
508MODULE_VERSION(DRV_VERSION);
509
Robert Hancockfbbb2622006-10-27 19:08:41 -0700510static int adma_enabled = 1;
511
Robert Hancock2dec7552006-11-26 14:20:19 -0600512static void nv_adma_register_mode(struct ata_port *ap)
513{
Robert Hancock2dec7552006-11-26 14:20:19 -0600514 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600515 void __iomem *mmio = pp->ctl_block;
Robert Hancocka2cfe812007-02-05 16:26:03 -0800516 u16 tmp, status;
517 int count = 0;
Robert Hancock2dec7552006-11-26 14:20:19 -0600518
519 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
520 return;
521
Robert Hancocka2cfe812007-02-05 16:26:03 -0800522 status = readw(mmio + NV_ADMA_STAT);
523 while(!(status & NV_ADMA_STAT_IDLE) && count < 20) {
524 ndelay(50);
525 status = readw(mmio + NV_ADMA_STAT);
526 count++;
527 }
528 if(count == 20)
529 ata_port_printk(ap, KERN_WARNING,
530 "timeout waiting for ADMA IDLE, stat=0x%hx\n",
531 status);
532
Robert Hancock2dec7552006-11-26 14:20:19 -0600533 tmp = readw(mmio + NV_ADMA_CTL);
534 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
535
Robert Hancocka2cfe812007-02-05 16:26:03 -0800536 count = 0;
537 status = readw(mmio + NV_ADMA_STAT);
538 while(!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
539 ndelay(50);
540 status = readw(mmio + NV_ADMA_STAT);
541 count++;
542 }
543 if(count == 20)
544 ata_port_printk(ap, KERN_WARNING,
545 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
546 status);
547
Robert Hancock2dec7552006-11-26 14:20:19 -0600548 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
549}
550
551static void nv_adma_mode(struct ata_port *ap)
552{
Robert Hancock2dec7552006-11-26 14:20:19 -0600553 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600554 void __iomem *mmio = pp->ctl_block;
Robert Hancocka2cfe812007-02-05 16:26:03 -0800555 u16 tmp, status;
556 int count = 0;
Robert Hancock2dec7552006-11-26 14:20:19 -0600557
558 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
559 return;
Jeff Garzikf20b16f2006-12-11 11:14:06 -0500560
Robert Hancock2dec7552006-11-26 14:20:19 -0600561 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
562
563 tmp = readw(mmio + NV_ADMA_CTL);
564 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
565
Robert Hancocka2cfe812007-02-05 16:26:03 -0800566 status = readw(mmio + NV_ADMA_STAT);
567 while(((status & NV_ADMA_STAT_LEGACY) ||
568 !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
569 ndelay(50);
570 status = readw(mmio + NV_ADMA_STAT);
571 count++;
572 }
573 if(count == 20)
574 ata_port_printk(ap, KERN_WARNING,
575 "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
576 status);
577
Robert Hancock2dec7552006-11-26 14:20:19 -0600578 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
579}
580
/*
 * slave_configure hook for the ADMA interface.
 *
 * ATAPI devices cannot use ADMA (per NVIDIA), so when one is attached the
 * port falls back to the legacy interface: DMA limits are tightened to what
 * legacy BMDMA supports, ADMA is disabled in PCI config register 0x50, and
 * the port is switched to register mode.  ATA devices get the full ADMA
 * limits and ADMA is (re)enabled.  Returns the result of
 * ata_scsi_slave_config().
 */
static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u64 bounce_limit;
	unsigned long segment_boundary;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		bounce_limit = ATA_DMA_MASK;
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	}
	else {
		/* ATA device: use the full ADMA DMA limits */
		bounce_limit = *ap->dev->dma_mask;
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	/* each port has its own enable + posted-write-buffer enable bits */
	if(ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if(adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	}
	else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	/* avoid a config write when nothing changed */
	if(current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
	return rc;
}
654
Robert Hancock2dec7552006-11-26 14:20:19 -0600655static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
656{
657 struct nv_adma_port_priv *pp = qc->ap->private_data;
658 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
659}
660
661static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
Robert Hancockfbbb2622006-10-27 19:08:41 -0700662{
663 unsigned int idx = 0;
664
665 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device | WNB);
666
667 if ((tf->flags & ATA_TFLAG_LBA48) == 0) {
668 cpb[idx++] = cpu_to_le16(IGN);
669 cpb[idx++] = cpu_to_le16(IGN);
670 cpb[idx++] = cpu_to_le16(IGN);
671 cpb[idx++] = cpu_to_le16(IGN);
672 cpb[idx++] = cpu_to_le16(IGN);
673 }
674 else {
675 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature);
676 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
677 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
678 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
679 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
680 }
681 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
682 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
683 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
684 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
685 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
686
687 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
688
689 return idx;
690}
691
/*
 * Examine one CPB's response flags after a notifier fired.
 *
 * On error (forced by the caller or indicated in resp_flags) the EH info is
 * filled in and the port is frozen (CPB/unknown errors) or aborted (device
 * errors); returns 1.  On normal completion (NV_CPB_RESP_DONE) the matching
 * qc is completed; returns 0.
 */
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x", flags );
		if (flags & NV_CPB_RESP_ATA_ERR) {
			/* device-level error: abort, EH can recover */
			ata_ehi_push_desc(ehi, ": ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, ": CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			/* controller-level error: freeze the port */
			ata_ehi_push_desc(ehi, ": CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return 1;
	}

	if (flags & NV_CPB_RESP_DONE) {
		/* CPB tag == libata qc tag */
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		if (likely(qc)) {
			/* Grab the ATA port status for non-NCQ commands.
			   For NCQ commands the current status may have nothing to do with
			   the command just completed. */
			if (qc->tf.protocol != ATA_PROT_NCQ) {
				u8 ata_status = readb(pp->ctl_block + (ATA_REG_STATUS * 4));
				qc->err_mask |= ac_err_mask(ata_status);
			}
			DPRINTK("Completing qc from tag %d with err_mask %u\n",cpb_num,
				qc->err_mask);
			ata_qc_complete(qc);
		}
	}
	return 0;
}
749
Robert Hancock2dec7552006-11-26 14:20:19 -0600750static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
751{
752 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
Robert Hancock2dec7552006-11-26 14:20:19 -0600753
754 /* freeze if hotplugged */
755 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
756 ata_port_freeze(ap);
757 return 1;
758 }
759
760 /* bail out if not our interrupt */
761 if (!(irq_stat & NV_INT_DEV))
762 return 0;
763
764 /* DEV interrupt w/ no active qc? */
765 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
766 ata_check_status(ap);
767 return 1;
768 }
769
770 /* handle interrupt */
Robert Hancockf740d162007-01-23 20:09:02 -0600771 return ata_host_intr(ap, qc);
Robert Hancock2dec7552006-11-26 14:20:19 -0600772}
773
/* Top-level interrupt handler for ADMA-capable controllers.  Walks both
 * ports, dispatching register-mode ports to nv_host_intr() and scanning
 * CPBs for ports running in ADMA mode.  Notifier-clear writes are
 * deferred to the end because both registers must be written together.
 */
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = pp->ctl_block;
			u16 status;
			u32 gen_ctl;
			u32 notifier, notifier_error;

			/* if in ATA register mode, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				if(ata_tag_valid(ap->active_tag))
					/** NV_INT_DEV indication seems unreliable at times
					    at least in ADMA mode. Force it on always when a
					    command is active, to prevent losing interrupts. */
					irq_stat |= NV_INT_DEV;
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			/* latch completion and error notifier bitmaps */
			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

			if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();

			handled++; /* irq handled if we got here */

			/* freeze if hotplugged or controller error */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
					       NV_ADMA_STAT_HOTUNPLUG |
					       NV_ADMA_STAT_TIMEOUT |
					       NV_ADMA_STAT_SERROR))) {
				struct ata_eh_info *ehi = &ap->eh_info;

				ata_ehi_clear_desc(ehi);
				ata_ehi_push_desc(ehi, "ADMA status 0x%08x", status );
				if (status & NV_ADMA_STAT_TIMEOUT) {
					ehi->err_mask |= AC_ERR_SYSTEM;
					ata_ehi_push_desc(ehi, ": timeout");
				} else if (status & NV_ADMA_STAT_HOTPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, ": hotplug");
				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, ": hot unplug");
				} else if (status & NV_ADMA_STAT_SERROR) {
					/* let libata analyze SError and figure out the cause */
					ata_ehi_push_desc(ehi, ": SError");
				}
				ata_port_freeze(ap);
				continue;
			}

			if (status & (NV_ADMA_STAT_DONE |
				      NV_ADMA_STAT_CPBERR)) {
				/** Check CPBs for completed commands */

				if (ata_tag_valid(ap->active_tag)) {
					/* Non-NCQ command */
					nv_adma_check_cpb(ap, ap->active_tag,
						notifier_error & (1 << ap->active_tag));
				} else {
					/* NCQ: scan every outstanding tag, stopping
					   early if a CPB triggered error handling */
					int pos, error = 0;
					u32 active = ap->sactive;

					while ((pos = ffs(active)) && !error) {
						pos--;
						error = nv_adma_check_cpb(ap, pos,
							notifier_error & (1 << pos) );
						active &= ~(1 << pos );
					}
				}
			}
		}
	}

	if(notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
890
/* Clear all pending interrupt state for a port: ADMA status, both
 * notifier bitmaps, and the legacy BMDMA interrupt/status bits.
 * The read-then-write-back pattern acknowledges exactly the bits that
 * were set at the time of the read. */
static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 status = readw(mmio + NV_ADMA_STAT);
	u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
	u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
	void __iomem *dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;

	/* clear ADMA status */
	writew(status, mmio + NV_ADMA_STAT);
	writel(notifier | notifier_error,
	       pp->notifier_clear_block);

	/** clear legacy status */
	iowrite8(ioread8(dma_stat_addr), dma_stat_addr);
}
908
/* Program the legacy BMDMA engine for @qc (PRD table address and
 * transfer direction) and issue the taskfile command.  Only valid while
 * the port is in register mode; in ADMA mode this must never be called. */
static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 dmactl;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		WARN_ON(1);
		return;
	}

	/* load PRD table addr. */
	iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;	/* ATA_DMA_WR = DMA writes to memory, i.e. a device read */

	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ata_exec_command(ap, &qc->tf);
}
935
936static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
937{
Robert Hancock2dec7552006-11-26 14:20:19 -0600938 struct ata_port *ap = qc->ap;
939 struct nv_adma_port_priv *pp = ap->private_data;
940 u8 dmactl;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700941
Robert Hancock2dec7552006-11-26 14:20:19 -0600942 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
Robert Hancockfbbb2622006-10-27 19:08:41 -0700943 WARN_ON(1);
944 return;
945 }
946
Robert Hancock2dec7552006-11-26 14:20:19 -0600947 /* start host DMA transaction */
Tejun Heo0d5ff562007-02-01 15:06:36 +0900948 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
949 iowrite8(dmactl | ATA_DMA_START,
950 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700951}
952
/* Stop the BMDMA engine for @qc.  Silently a no-op when the port is in
 * ADMA mode, since BMDMA is only used in register mode. */
static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_adma_port_priv *pp = ap->private_data;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	/* clear start/stop bit */
	iowrite8(ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
		 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_altstatus(ap); /* dummy read */
}
968
969static u8 nv_adma_bmdma_status(struct ata_port *ap)
970{
Robert Hancockfbbb2622006-10-27 19:08:41 -0700971 struct nv_adma_port_priv *pp = ap->private_data;
972
Robert Hancock2dec7552006-11-26 14:20:19 -0600973 WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));
Robert Hancockfbbb2622006-10-27 19:08:41 -0700974
Tejun Heo0d5ff562007-02-01 15:06:36 +0900975 return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700976}
977
/* Per-port init for ADMA mode: allocate the port private struct and the
 * coherent DMA block holding the CPB array plus the external APRD
 * tables, program the hardware with their addresses, and bring the
 * channel up in register mode with interrupts enabled.
 * Returns 0 on success or a negative errno (allocations are devm/dmam
 * managed, so no manual cleanup is needed on failure). */
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	u16 tmp;

	VPRINTK("ENTER\n");

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* locate this port's ADMA register window and shared gen block */
	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16 ) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);

	/* pulse CHANNEL_RESET to put the engine in a known state */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL );	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL );	/* flush posted write */

	return 0;
}
1055
1056static void nv_adma_port_stop(struct ata_port *ap)
1057{
Robert Hancockfbbb2622006-10-27 19:08:41 -07001058 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001059 void __iomem *mmio = pp->ctl_block;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001060
1061 VPRINTK("ENTER\n");
Robert Hancockfbbb2622006-10-27 19:08:41 -07001062 writew(0, mmio + NV_ADMA_CTL);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001063}
1064
/* Power-management suspend hook: drop back to register mode (clears GO),
 * reset the CPB fetch count, and shut the channel down. */
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}
1081
/* Power-management resume hook: re-run the hardware-facing half of
 * nv_adma_port_start() (the CPB memory itself survived suspend).
 * Always returns 0. */
static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16 ) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);

	/* pulse CHANNEL_RESET to put the engine in a known state */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL );	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL );	/* flush posted write */

	return 0;
}
Robert Hancockfbbb2622006-10-27 19:08:41 -07001114
1115static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
1116{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001117 void __iomem *mmio = probe_ent->iomap[NV_MMIO_BAR];
Robert Hancockfbbb2622006-10-27 19:08:41 -07001118 struct ata_ioports *ioport = &probe_ent->port[port];
1119
1120 VPRINTK("ENTER\n");
1121
1122 mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;
1123
Tejun Heo0d5ff562007-02-01 15:06:36 +09001124 ioport->cmd_addr = mmio;
1125 ioport->data_addr = mmio + (ATA_REG_DATA * 4);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001126 ioport->error_addr =
Tejun Heo0d5ff562007-02-01 15:06:36 +09001127 ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
1128 ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
1129 ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
1130 ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
1131 ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
1132 ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001133 ioport->status_addr =
Tejun Heo0d5ff562007-02-01 15:06:36 +09001134 ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001135 ioport->altstatus_addr =
Tejun Heo0d5ff562007-02-01 15:06:36 +09001136 ioport->ctl_addr = mmio + 0x20;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001137}
1138
/* Host-wide ADMA init: enable ADMA on both ports via PCI config space
 * and set up each port's register addresses.  Always returns 0. */
static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < probe_ent->n_ports; i++)
		nv_adma_setup_port(probe_ent, i);

	return 0;
}
1161
/* Fill one ADMA PRD entry for scatter/gather element @sg at overall
 * index @idx.  END marks the final element of the command; CONT chains
 * to the following entry, except at idx 4 — the last of the five APRD
 * slots embedded in the CPB — where the chain continues via the CPB's
 * next_aprd pointer instead. */
static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags;

	memset(aprd, 0, sizeof(struct nv_adma_prd));

	flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
}
1183
/* Translate @qc's scatter/gather list into ADMA PRD entries.  The first
 * five entries live inside the CPB itself; any overflow goes into this
 * tag's slot of the external APRD table, linked via cpb->next_aprd. */
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	unsigned int idx;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;

	VPRINTK("ENTER\n");

	idx = 0;

	ata_for_each_sg(sg, qc) {
		aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
		nv_adma_fill_aprd(qc, sg, idx, aprd);
		idx++;
	}
	if (idx > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
}
1203
Robert Hancock382a6652007-02-05 16:26:02 -08001204static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1205{
1206 struct nv_adma_port_priv *pp = qc->ap->private_data;
1207
1208 /* ADMA engine can only be used for non-ATAPI DMA commands,
1209 or interrupt-driven no-data commands. */
1210 if((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1211 (qc->tf.flags & ATA_TFLAG_POLLING))
1212 return 1;
1213
1214 if((qc->flags & ATA_QCFLAG_DMAMAP) ||
1215 (qc->tf.protocol == ATA_PROT_NODATA))
1216 return 0;
1217
1218 return 1;
1219}
1220
/* Build the CPB for @qc (or fall back to standard prep when the command
 * must run in register mode).  The CPB_VALID flag is written last,
 * after a write barrier, so the controller never sees a half-built CPB. */
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		/* command can't go through ADMA: use legacy prep */
		nv_adma_register_mode(qc->ap);
		ata_qc_prep(qc);
		return;
	}

	memset(cpb, 0, sizeof(struct nv_adma_cpb));

	cpb->len		= 3;
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if(qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
	   finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
}
1259
/* Issue @qc: through the legacy path when register mode is required,
 * otherwise by appending its tag to the ADMA engine.  The udelay when
 * switching between NCQ and non-NCQ issue is an empirically-needed
 * hardware workaround.  Returns 0 or the legacy issue result. */
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if(curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and non-NCQ
		   commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n",qc->tag);

	return 0;
}
1293
/* Interrupt handler for the generic (pre-nForce2) flavor: dispatch any
 * active non-polled command on each enabled port to the standard libata
 * handler, or clear a possibly-pending interrupt by reading status. */
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				// No request pending?  Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->check_status(ap);
		}

	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
1326
Jeff Garzikcca39742006-08-24 03:19:22 -04001327static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
Tejun Heoada364e2006-06-17 15:49:56 +09001328{
1329 int i, handled = 0;
1330
Jeff Garzikcca39742006-08-24 03:19:22 -04001331 for (i = 0; i < host->n_ports; i++) {
1332 struct ata_port *ap = host->ports[i];
Tejun Heoada364e2006-06-17 15:49:56 +09001333
1334 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1335 handled += nv_host_intr(ap, irq_stat);
1336
1337 irq_stat >>= NV_INT_PORT_SHIFT;
1338 }
1339
1340 return IRQ_RETVAL(handled);
1341}
1342
/* nForce2/3 interrupt handler: the shared interrupt status byte lives
 * at an offset from port 0's SCR block in I/O space. */
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
1356
/* CK804/MCP04 interrupt handler: the shared interrupt status byte lives
 * in the MMIO BAR rather than the legacy SCR I/O block. */
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
1370
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
1372{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373 if (sc_reg > SCR_CONTROL)
1374 return 0xffffffffU;
1375
Tejun Heo0d5ff562007-02-01 15:06:36 +09001376 return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377}
1378
1379static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
1380{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381 if (sc_reg > SCR_CONTROL)
1382 return;
1383
Tejun Heo0d5ff562007-02-01 15:06:36 +09001384 iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385}
1386
/* EH freeze for nForce2/3: mask this port's bits out of the shared
 * interrupt-enable register (read-modify-write under host lock by EH). */
static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}
1397
/* EH thaw for nForce2/3: acknowledge anything pending for this port,
 * then re-enable its interrupt bits. */
static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	/* clear pending interrupt status first */
	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}
1410
/* EH freeze for CK804: same as nv_nf2_freeze() but the registers live
 * in the MMIO BAR. */
static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
1421
/* EH thaw for CK804: ack pending status, then re-enable this port's
 * interrupt bits in the MMIO enable register. */
static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	/* clear pending interrupt status first */
	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
1434
/* Hardreset wrapper that deliberately discards the device class. */
static int nv_hardreset(struct ata_port *ap, unsigned int *class)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &dummy);
}
1445
/* Standard (non-ADMA) error handler: generic BMDMA EH with the
 * NV-specific non-classifying hardreset. */
static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
1451
/* ADMA error handler: if the port is in ADMA mode, dump diagnostic
 * state for any outstanding commands, drop back to register mode,
 * invalidate all CPBs and reset the channel, then run the standard
 * BMDMA error-handling flow. */
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if(ata_tag_valid(ap->active_tag) || ap->sactive) {
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);

			ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X\n",
				notifier, notifier_error, gen_ctl, status);

			/* log every CPB that belongs to an outstanding command */
			for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if( (ata_tag_valid(ap->active_tag) && i == ap->active_tag) ||
				    ap->sactive & (1 << i) )
					ata_port_printk(ap, KERN_ERR,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from being executed */
		for( i=0;i<NV_ADMA_MAX_CPBS;i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl( mmio + NV_ADMA_CTL );	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl( mmio + NV_ADMA_CTL );	/* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
1502
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1504{
1505 static int printed_version = 0;
Jeff Garzik29da9f62006-09-25 21:56:33 -04001506 struct ata_port_info *ppi[2];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507 struct ata_probe_ent *probe_ent;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001508 struct nv_host_priv *hpriv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509 int rc;
1510 u32 bar;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001511 void __iomem *base;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001512 unsigned long type = ent->driver_data;
1513 int mask_set = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514
1515 // Make sure this is a SATA controller by counting the number of bars
1516 // (NVIDIA SATA controllers will always have six bars). Otherwise,
1517 // it's an IDE controller and we ignore it.
1518 for (bar=0; bar<6; bar++)
1519 if (pci_resource_start(pdev, bar) == 0)
1520 return -ENODEV;
1521
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001522 if (!printed_version++)
Jeff Garzika9524a72005-10-30 14:39:11 -05001523 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524
Tejun Heo24dc5f32007-01-20 16:00:28 +09001525 rc = pcim_enable_device(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 if (rc)
Tejun Heo24dc5f32007-01-20 16:00:28 +09001527 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528
1529 rc = pci_request_regions(pdev, DRV_NAME);
1530 if (rc) {
Tejun Heo24dc5f32007-01-20 16:00:28 +09001531 pcim_pin_device(pdev);
1532 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533 }
1534
Robert Hancockfbbb2622006-10-27 19:08:41 -07001535 if(type >= CK804 && adma_enabled) {
1536 dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
1537 type = ADMA;
1538 if(!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
1539 !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
1540 mask_set = 1;
1541 }
1542
1543 if(!mask_set) {
1544 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1545 if (rc)
Tejun Heo24dc5f32007-01-20 16:00:28 +09001546 return rc;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001547 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1548 if (rc)
Tejun Heo24dc5f32007-01-20 16:00:28 +09001549 return rc;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001550 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551
1552 rc = -ENOMEM;
1553
Tejun Heo24dc5f32007-01-20 16:00:28 +09001554 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001555 if (!hpriv)
Tejun Heo24dc5f32007-01-20 16:00:28 +09001556 return -ENOMEM;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001557
Robert Hancockfbbb2622006-10-27 19:08:41 -07001558 ppi[0] = ppi[1] = &nv_port_info[type];
Jeff Garzik29da9f62006-09-25 21:56:33 -04001559 probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560 if (!probe_ent)
Tejun Heo24dc5f32007-01-20 16:00:28 +09001561 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562
Tejun Heo0d5ff562007-02-01 15:06:36 +09001563 if (!pcim_iomap(pdev, NV_MMIO_BAR, 0))
Tejun Heo24dc5f32007-01-20 16:00:28 +09001564 return -EIO;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001565 probe_ent->iomap = pcim_iomap_table(pdev);
Tejun Heo24dc5f32007-01-20 16:00:28 +09001566
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001567 probe_ent->private_data = hpriv;
1568 hpriv->type = type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569
Tejun Heo0d5ff562007-02-01 15:06:36 +09001570 base = probe_ent->iomap[NV_MMIO_BAR];
Jeff Garzik02cbd922006-03-22 23:59:46 -05001571 probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
1572 probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
1573
Tejun Heoada364e2006-06-17 15:49:56 +09001574 /* enable SATA space for CK804 */
Robert Hancockfbbb2622006-10-27 19:08:41 -07001575 if (type >= CK804) {
Tejun Heoada364e2006-06-17 15:49:56 +09001576 u8 regval;
1577
1578 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
1579 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
1580 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
1581 }
1582
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583 pci_set_master(pdev);
1584
Robert Hancockfbbb2622006-10-27 19:08:41 -07001585 if (type == ADMA) {
1586 rc = nv_adma_host_init(probe_ent);
1587 if (rc)
Tejun Heo24dc5f32007-01-20 16:00:28 +09001588 return rc;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001589 }
1590
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591 rc = ata_device_add(probe_ent);
1592 if (rc != NV_PORTS)
Tejun Heo24dc5f32007-01-20 16:00:28 +09001593 return -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594
Tejun Heo24dc5f32007-01-20 16:00:28 +09001595 devm_kfree(&pdev->dev, probe_ent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001597}
1598
/* PCI remove callback: detach the libata host, then free the
 * driver-private host data.
 *
 * NOTE(review): hpriv is allocated with devm_kzalloc() in
 * nv_init_one(), so devres will also free it when the device is
 * released — this explicit kfree() looks like a possible double
 * free; verify against the devres lifetime rules before changing.
 */
static void nv_remove_one (struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;

	ata_pci_remove_one(pdev);
	kfree(hpriv);
}
1607
1608static int nv_pci_device_resume(struct pci_dev *pdev)
1609{
1610 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1611 struct nv_host_priv *hpriv = host->private_data;
Robert Hancockce053fa2007-02-05 16:26:04 -08001612 int rc;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001613
Robert Hancockce053fa2007-02-05 16:26:04 -08001614 rc = ata_pci_device_do_resume(pdev);
1615 if(rc)
1616 return rc;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001617
1618 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
1619 if(hpriv->type >= CK804) {
1620 u8 regval;
1621
1622 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
1623 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
1624 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
1625 }
1626 if(hpriv->type == ADMA) {
1627 u32 tmp32;
1628 struct nv_adma_port_priv *pp;
1629 /* enable/disable ADMA on the ports appropriately */
1630 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1631
1632 pp = host->ports[0]->private_data;
1633 if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1634 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
1635 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
1636 else
1637 tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
1638 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
1639 pp = host->ports[1]->private_data;
1640 if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1641 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
1642 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
1643 else
1644 tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
1645 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
1646
1647 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1648 }
1649 }
1650
1651 ata_host_resume(host);
1652
1653 return 0;
1654}
1655
Jeff Garzikcca39742006-08-24 03:19:22 -04001656static void nv_ck804_host_stop(struct ata_host *host)
Tejun Heoada364e2006-06-17 15:49:56 +09001657{
Jeff Garzikcca39742006-08-24 03:19:22 -04001658 struct pci_dev *pdev = to_pci_dev(host->dev);
Tejun Heoada364e2006-06-17 15:49:56 +09001659 u8 regval;
1660
1661 /* disable SATA space for CK804 */
1662 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
1663 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
1664 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
Tejun Heoada364e2006-06-17 15:49:56 +09001665}
1666
Robert Hancockfbbb2622006-10-27 19:08:41 -07001667static void nv_adma_host_stop(struct ata_host *host)
1668{
1669 struct pci_dev *pdev = to_pci_dev(host->dev);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001670 u32 tmp32;
1671
Robert Hancockfbbb2622006-10-27 19:08:41 -07001672 /* disable ADMA on the ports */
1673 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1674 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
1675 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1676 NV_MCP_SATA_CFG_20_PORT1_EN |
1677 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
1678
1679 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1680
1681 nv_ck804_host_stop(host);
1682}
1683
/* Module init: register the PCI driver with the PCI core. */
static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}
1688
/* Module exit: unregister the PCI driver. */
static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}
1693
module_init(nv_init);
module_exit(nv_exit);
/* "adma=0" on the module command line disables the ADMA interface
 * on CK804/MCP04 (read-only after load, hence mode 0444). */
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");