/*
 * sata_nv.c - NVIDIA nForce SATA
 *
 * Copyright 2004 NVIDIA Corp. All rights reserved.
 * Copyright 2004 Andrew Chew
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * No hardware documentation available outside of NVIDIA.
 * This driver programs the NVIDIA SATA controller in a similar
 * fashion as with other PCI IDE BMDMA controllers, with a few
 * NV-specific details such as register offsets, SATA phy location,
 * hotplug info, etc.
 *
 * CK804/MCP04 controllers support an alternate programming interface
 * similar to the ADMA specification (with some modifications).
 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 * sent through the legacy interface.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.3"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL

enum {
	NV_MMIO_BAR			= 5,

	NV_PORTS			= 2,
	NV_PIO_MASK			= 0x1f,
	NV_MWDMA_MASK			= 0x07,
	NV_UDMA_MASK			= 0x7f,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					  (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

};

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;
	__le32			len;
	u8			flags;
	u8			packet_len;
	__le16			reserved;
};

enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0 */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 */
	/* len is length of taskfile in 64 bit words */
	u8			len;           /* 3 */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 */
	struct nv_adma_prd	aprd[5];       /* 32-111 */
	__le64			next_aprd;     /* 112-119 */
	__le64			reserved3;     /* 120-127 */
};


struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	dma_addr_t		cpb_dma;
	struct nv_adma_prd	*aprd;
	dma_addr_t		aprd_dma;
	void __iomem		*ctl_block;
	void __iomem		*gen_block;
	void __iomem		*notifier_clear_block;
	u8			flags;
	int			last_issue_ncq;
};

struct nv_host_priv {
	unsigned long		type;
};

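/* Per-port interrupt check in the ADMA general control register: the bit
   tested is 19 + 12 * port (bit 19 for port 0, bit 31 for port 1). */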
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))

static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void nv_remove_one (struct pci_dev *pdev);
static int nv_pci_device_resume(struct pci_dev *pdev);
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
static u8 nv_adma_bmdma_status(struct ata_port *ap);

enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA
};

static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },

	{ } /* terminate list */
};

static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
	.remove			= nv_remove_one,
};

static struct scsi_host_template nv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
};

static struct scsi_host_template nv_adma_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= NV_ADMA_MAX_CPBS,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
};

static const struct ata_port_operations nv_generic_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_generic_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};

static const struct ata_port_operations nv_nf2_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_nf2_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};

static const struct ata_port_operations nv_ck804_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_ck804_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.host_stop		= nv_ck804_host_stop,
};

static const struct ata_port_operations nv_adma_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= nv_adma_bmdma_setup,
	.bmdma_start		= nv_adma_bmdma_start,
	.bmdma_stop		= nv_adma_bmdma_stop,
	.bmdma_status		= nv_adma_bmdma_status,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_bmdma_stop,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_adma_interrupt,
	.irq_clear		= nv_adma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
	.host_stop		= nv_adma_host_stop,
};

static struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
	},
	/* nforce2/3 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
	},
	/* ck804 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
	},
	/* ADMA */
	{
		.sht		= &nv_adma_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
	},
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled = 1;

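/* Each port runs in one of two modes: legacy register mode (standard
   taskfile/BMDMA access) or ADMA mode (CPB-based, needed for NCQ).  The two
   helpers below flip NV_ADMA_CTL_GO, wait for the status register to report
   the corresponding state, and track the result in pp->flags. */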
static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	status = readw(mmio + NV_ADMA_STAT);
	while(!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if(count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
			status);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while(!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if(count == 20)
		ata_port_printk(ap, KERN_WARNING,
			 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			 status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while(((status & NV_ADMA_STAT_LEGACY) ||
	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if(count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u64 bounce_limit;
	unsigned long segment_boundary;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		bounce_limit = ATA_DMA_MASK;
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	}
	else {
		bounce_limit = *ap->dev->dma_mask;
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if(ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if(adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	}
	else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if(current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
	return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

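/* Encode a taskfile into the CPB's tf[] array: each 16-bit entry is
   (shadow register address << 8) | value, optionally OR'd with the WNB,
   CMDEND or IGN control bits defined in enum nv_adma_regbits. */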
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	if(tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
		} else
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
	}

	if(tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	while(idx < 12)
		cpb[idx++] = cpu_to_le16(IGN);

	return idx;
}

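/* Examine one CPB's response flags; completes the corresponding qc on
   success and returns nonzero if an error was handled (port frozen or
   aborted), in which case the caller stops scanning further CPBs. */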
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x", flags );
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, ": ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, ": CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, ": CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return 1;
	}

	if (flags & NV_CPB_RESP_DONE) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		if (likely(qc)) {
			/* Grab the ATA port status for non-NCQ commands.
			   For NCQ commands the current status may have nothing to do with
			   the command just completed. */
			if (qc->tf.protocol != ATA_PROT_NCQ) {
				u8 ata_status = readb(pp->ctl_block + (ATA_REG_STATUS * 4));
				qc->err_mask |= ac_err_mask(ata_status);
			}
			DPRINTK("Completing qc from tag %d with err_mask %u\n",cpb_num,
				qc->err_mask);
			ata_qc_complete(qc);
		}
	}
	return 0;
}

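/* Legacy-mode interrupt handling for a single port, shared by the nf2/ck804
   paths and by the ADMA handler when a port is in register mode; returns
   nonzero if the interrupt was consumed. */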
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_host_intr(ap, qc);
}

static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = pp->ctl_block;
			u16 status;
			u32 gen_ctl;
			u32 notifier, notifier_error;

			/* if in ATA register mode, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				if(ata_tag_valid(ap->active_tag))
					/** NV_INT_DEV indication seems unreliable at times
					    at least in ADMA mode. Force it on always when a
					    command is active, to prevent losing interrupts. */
					irq_stat |= NV_INT_DEV;
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

			if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();

			handled++; /* irq handled if we got here */

			/* freeze if hotplugged or controller error */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
					       NV_ADMA_STAT_HOTUNPLUG |
					       NV_ADMA_STAT_TIMEOUT |
					       NV_ADMA_STAT_SERROR))) {
				struct ata_eh_info *ehi = &ap->eh_info;

				ata_ehi_clear_desc(ehi);
				ata_ehi_push_desc(ehi, "ADMA status 0x%08x", status );
				if (status & NV_ADMA_STAT_TIMEOUT) {
					ehi->err_mask |= AC_ERR_SYSTEM;
					ata_ehi_push_desc(ehi, ": timeout");
				} else if (status & NV_ADMA_STAT_HOTPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, ": hotplug");
				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, ": hot unplug");
				} else if (status & NV_ADMA_STAT_SERROR) {
					/* let libata analyze SError and figure out the cause */
					ata_ehi_push_desc(ehi, ": SError");
				}
				ata_port_freeze(ap);
				continue;
			}

			if (status & (NV_ADMA_STAT_DONE |
				      NV_ADMA_STAT_CPBERR)) {
				u32 check_commands = notifier | notifier_error;
				int pos, error = 0;
				/** Check CPBs for completed commands */
				while ((pos = ffs(check_commands)) && !error) {
					pos--;
					error = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos) );
					check_commands &= ~(1 << pos );
				}
			}
		}
	}

	if(notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 status = readw(mmio + NV_ADMA_STAT);
	u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
	u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
	void __iomem *dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;

	/* clear ADMA status */
	writew(status, mmio + NV_ADMA_STAT);
	writel(notifier | notifier_error,
	       pp->notifier_clear_block);

	/** clear legacy status */
	iowrite8(ioread8(dma_stat_addr), dma_stat_addr);
}

static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 dmactl;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		WARN_ON(1);
		return;
	}

	/* load PRD table addr. */
	iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;

	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ata_exec_command(ap, &qc->tf);
}

static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 dmactl;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		WARN_ON(1);
		return;
	}

	/* start host DMA transaction */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	iowrite8(dmactl | ATA_DMA_START,
		 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}

static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_adma_port_priv *pp = ap->private_data;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	/* clear start/stop bit */
	iowrite8(ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
		 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_altstatus(ap); /* dummy read */
}

static u8 nv_adma_bmdma_status(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;

	WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));

	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}

static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	u16 tmp;

	VPRINTK("ENTER\n");

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16 ) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL );	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL );	/* flush posted write */

	return 0;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");
	writew(0, mmio + NV_ADMA_CTL);
}

static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16 ) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL );	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL );	/* flush posted write */

	return 0;
}

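/* Point the taskfile I/O addresses at the port's ADMA MMIO window; the
   shadow registers are mapped 4 bytes apart starting at the port base,
   with the control/altstatus register at offset 0x20. */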
static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
{
	void __iomem *mmio = probe_ent->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &probe_ent->port[port];

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}

static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < probe_ent->n_ports; i++)
		nv_adma_setup_port(probe_ent, i);

	return 0;
}

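/* Build one APRD (scatter/gather entry).  NV_APRD_END marks the last
   segment; NV_APRD_CONT is set on all other segments except index 4, the
   last of the five APRDs embedded in the CPB (further segments are reached
   through next_aprd instead). */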
static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
	aprd->packet_len = 0;
}

static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	unsigned int idx;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;

	VPRINTK("ENTER\n");

	idx = 0;

	ata_for_each_sg(sg, qc) {
		aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
		nv_adma_fill_aprd(qc, sg, idx, aprd);
		idx++;
	}
	if (idx > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}

static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* ADMA engine can only be used for non-ATAPI DMA commands,
	   or interrupt-driven no-data commands. */
	if((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	   (qc->tf.flags & ATA_TFLAG_POLLING))
		return 1;

	if((qc->flags & ATA_QCFLAG_DMAMAP) ||
	   (qc->tf.protocol == ATA_PROT_NODATA))
		return 0;

	return 1;
}

static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		nv_adma_register_mode(qc->ap);
		ata_qc_prep(qc);
		return;
	}

	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len		= 3;
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if(qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
	   finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;
}

static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if(curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and non-NCQ
		   commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n",qc->tag);

	return 0;
}

static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				/* No request pending?  Clear interrupt status
				   anyway, in case there's one pending. */
				ap->ops->check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

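/* Shared by the NF2 and CK804 interrupt handlers: irq_stat packs the
 * per-port status bits NV_INT_PORT_SHIFT apart, so shift it down as each
 * port is serviced.
 */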
static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled += nv_host_intr(ap, irq_stat);

		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}

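/* NF2/NF3 flavour: the combined interrupt status byte is read from
 * offset NV_INT_STATUS in port 0's SCR register window.
 */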
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

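/* CK804/MCP04 flavour: the combined interrupt status byte lives at
 * NV_INT_STATUS_CK804 in the controller's MMIO BAR instead.
 */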
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

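/* SCR (SStatus/SError/SControl) accessors; the registers are laid out
 * as consecutive 32-bit words starting at each port's scr_addr.
 */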
static u32 nv_scr_read(struct ata_port *ap, unsigned int sc_reg)
{
	if (sc_reg > SCR_CONTROL)
		return 0xffffffffU;

	return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return;

	iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
}

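/* freeze/thaw for NF2/NF3: mask or unmask this port's bits in the shared
 * NV_INT_ENABLE register, clearing any stale interrupt status on thaw.
 */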
static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

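/* Same freeze/thaw scheme for CK804, using the CK804-specific interrupt
 * enable/status registers in MMIO space.
 */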
static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static int nv_hardreset(struct ata_port *ap, unsigned int *class)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &dummy);
}

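/* Standard BMDMA error handling, with nv_hardreset() substituted so that
 * hardreset never tries to classify the device.
 */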
static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

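/* Error handler used in ADMA mode.  If the port is still in ADMA mode,
 * dump some controller and CPB state for debugging, switch back to
 * register mode, invalidate every CPB and reset the ADMA channel before
 * handing off to the normal BMDMA error handler.
 */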
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if (ata_tag_valid(ap->active_tag) || ap->sactive) {
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_printk(ap, KERN_ERR,
				"EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];

				if ((ata_tag_valid(ap->active_tag) && i == ap->active_tag) ||
				    (ap->sactive & (1 << i)))
					ata_port_printk(ap, KERN_ERR,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

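/* PCI probe: map BAR 5, select the port flavour from the PCI ID, enable
 * the extended SATA register space on CK804 and newer parts, and switch
 * to ADMA when both the hardware and the 'adma' module parameter allow
 * it.
 */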
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct ata_port_info *ppi[2];
	struct ata_probe_ent *probe_ent;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;
	int mask_set = 0;

	/* Make sure this is a SATA controller by counting the number of bars
	 * (NVIDIA SATA controllers will always have six bars).  Otherwise,
	 * it's an IDE controller and we ignore it.
	 */
	for (bar = 0; bar < 6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pcim_pin_device(pdev);
		return rc;
	}

	if (type >= CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
		if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
		    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
			mask_set = 1;
	}

	if (!mask_set) {
		rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			return rc;
		rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			return rc;
	}

	rc = -ENOMEM;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;

	ppi[0] = ppi[1] = &nv_port_info[type];
	probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
	if (!probe_ent)
		return -ENOMEM;

	if (!pcim_iomap(pdev, NV_MMIO_BAR, 0))
		return -EIO;
	probe_ent->iomap = pcim_iomap_table(pdev);

	probe_ent->private_data = hpriv;
	hpriv->type = type;

	base = probe_ent->iomap[NV_MMIO_BAR];
	probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	pci_set_master(pdev);

	if (type == ADMA) {
		rc = nv_adma_host_init(probe_ent);
		if (rc)
			return rc;
	}

	rc = ata_device_add(probe_ent);
	if (rc != NV_PORTS)
		return -ENODEV;

	devm_kfree(&pdev->dev, probe_ent);
	return 0;
}

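/* Teardown on driver unbind; ata_pci_remove_one() detaches and frees the
 * libata host.
 */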
static void nv_remove_one(struct pci_dev *pdev)
{
	ata_pci_remove_one(pdev);

	/* The nv_host_priv hung off the host was allocated with
	 * devm_kzalloc() in nv_init_one(), so it is released together with
	 * the device by devres; an explicit kfree() here would be a double
	 * free.
	 */
}

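/* Resume: when coming back from a real suspend, re-enable the SATA
 * register space on CK804 and newer parts and restore the per-port ADMA
 * enable bits before letting libata resume the host.
 */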
static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if (hpriv->type >= CK804) {
			u8 regval;

			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if (hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;

			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
					  NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
					  NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}

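/* Host stop for CK804: turn the extended SATA register space back off,
 * undoing what probe enabled.
 */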
static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}

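/* Host stop for ADMA: disable ADMA on both ports, then do the common
 * CK804 teardown.
 */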
static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u32 tmp32;

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}

static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}

static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}

module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");