blob: 5637b082bc85ea6d7abeee17b11bcad67ff61336 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * sata_nv.c - NVIDIA nForce SATA
3 *
4 * Copyright 2004 NVIDIA Corp. All rights reserved.
5 * Copyright 2004 Andrew Chew
6 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 *
Jeff Garzikaa7e16d2005-08-29 15:12:56 -04008 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
Linus Torvalds1da177e2005-04-16 15:20:36 -070021 *
Jeff Garzikaf36d7f2005-08-28 20:18:39 -040022 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 * No hardware documentation available outside of NVIDIA.
27 * This driver programs the NVIDIA SATA controller in a similar
28 * fashion as with other PCI IDE BMDMA controllers, with a few
29 * NV-specific details such as register offsets, SATA phy location,
30 * hotplug info, etc.
31 *
Robert Hancockfbbb2622006-10-27 19:08:41 -070032 * CK804/MCP04 controllers support an alternate programming interface
33 * similar to the ADMA specification (with some modifications).
34 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35 * sent through the legacy interface.
36 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070037 */
38
Linus Torvalds1da177e2005-04-16 15:20:36 -070039#include <linux/kernel.h>
40#include <linux/module.h>
41#include <linux/pci.h>
42#include <linux/init.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/interrupt.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050046#include <linux/device.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <scsi/scsi_host.h>
Robert Hancockfbbb2622006-10-27 19:08:41 -070048#include <scsi/scsi_device.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070049#include <linux/libata.h>
50
/* Driver identity and the DMA boundary used when ADMA (NCQ) mode is active. */
#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.5"

/* ADMA engine can cross 32-bit boundaries; legacy BMDMA cannot. */
#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
Linus Torvalds1da177e2005-04-16 15:20:36 -070055
/*
 * Controller constants: BAR numbers, register offsets and bit definitions
 * for the legacy, CK804/MCP04 ADMA and MCP55 SWNCQ programming interfaces.
 * All values are dictated by the hardware; do not change them.
 */
enum {
	NV_MMIO_BAR			= 5,	/* BAR5 holds the SATA registers */

	NV_PORTS			= 2,
	NV_PIO_MASK			= 0x1f,
	NV_MWDMA_MASK			= 0x07,
	NV_UDMA_MASK			= 0x7f,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, /* 0 = INT, 1 = SMI */

	/* For PCI config register 20 */
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	/* ADMA command-parameter-block geometry */
	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					  (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space  */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

	/* MCP55 reg offset */
	NV_CTL_MCP55			= 0x400,
	NV_INT_STATUS_MCP55		= 0x440,
	NV_INT_ENABLE_MCP55		= 0x444,
	NV_NCQ_REG_MCP55		= 0x448,

	/* MCP55 */
	NV_INT_ALL_MCP55		= 0xffff,
	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,

	/* SWNCQ ENABLE BITS */
	NV_CTL_PRI_SWNCQ		= 0x02,
	NV_CTL_SEC_SWNCQ		= 0x04,

	/* SW NCQ status bits */
	NV_SWNCQ_IRQ_DEV		= (1 << 0),
	NV_SWNCQ_IRQ_PM			= (1 << 1),
	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),

	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),

	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
					  NV_SWNCQ_IRQ_REMOVED,

};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202
/* ADMA Physical Region Descriptor - one SG segment.
 * Hardware-defined layout (16 bytes); fields are little-endian as seen
 * by the controller. */
struct nv_adma_prd {
	__le64			addr;		/* DMA address of the segment */
	__le32			len;		/* segment length in bytes */
	u8			flags;		/* NV_APRD_* */
	u8			packet_len;
	__le16			reserved;
};
211
/* Bits OR'd into the 16-bit taskfile words of a CPB (struct nv_adma_cpb.tf). */
enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};
221
/* ADMA Command Parameter Block.
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd.
   Layout is hardware-defined (128 bytes); byte offsets are noted per field. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0, NV_CPB_RESP_* */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2, NV_CPB_CTL_* */
	/* len is length of taskfile in 64 bit words */
	u8			len;		/* 3  */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31, taskfile words (enum nv_adma_regbits) */
	struct nv_adma_prd	aprd[5];       /* 32-111, inline SG segments */
	__le64			next_aprd;     /* 112-119, DMA addr of overflow APRD table */
	__le64			reserved3;     /* 120-127 */
};
240
/* Per-port private state for the ADMA programming interface. */
struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;		/* CPB ring (CPU view) */
	dma_addr_t		cpb_dma;	/* CPB ring (device view) */
	struct nv_adma_prd	*aprd;		/* overflow APRD tables (CPU view) */
	dma_addr_t		aprd_dma;	/* overflow APRD tables (device view) */
	void __iomem		*ctl_block;	/* this port's ADMA registers */
	void __iomem		*gen_block;	/* ADMA general registers */
	void __iomem		*notifier_clear_block;
	u64			adma_dma_mask;	/* DMA mask to restore when leaving ATAPI mode */
	u8			flags;		/* NV_ADMA_PORT_REGISTER_MODE etc. */
	int			last_issue_ncq;
};
254
/* Host-wide private data: remembers which nv_host_type this controller is. */
struct nv_host_priv {
	unsigned long		type;
};
258
/* FIFO of deferred command tags for SWNCQ (circular queue). */
struct defer_queue {
	u32		defer_bits;	/* bitmap of tags currently queued */
	unsigned int	head;
	unsigned int	tail;
	unsigned int	tag[ATA_MAX_QUEUE];
};
265
/* Flags recording which FIS types have been observed during NCQ
 * interrupt analysis (stored in nv_swncq_port_priv.ncq_flags). */
enum ncq_saw_flag_list {
	ncq_saw_d2h	= (1U << 0),
	ncq_saw_dmas	= (1U << 1),
	ncq_saw_sdb	= (1U << 2),
	ncq_saw_backout	= (1U << 3),
};
272
/* Per-port private state for software-NCQ (MCP51/MCP55) operation. */
struct nv_swncq_port_priv {
	struct ata_prd	*prd;	 /* our SG list */
	dma_addr_t	prd_dma; /* and its DMA mapping */
	void __iomem	*sactive_block;
	void __iomem	*irq_block;
	void __iomem	*tag_block;
	u32		qc_active;	/* tags with commands outstanding */

	unsigned int	last_issue_tag;

	/* fifo circular queue to store deferral command */
	struct defer_queue defer_queue;

	/* for NCQ interrupt analysis */
	u32		dhfis_bits;	/* tags that have seen a D2H reg FIS */
	u32		dmafis_bits;	/* tags that have seen a DMA setup FIS */
	u32		sdbfis_bits;	/* tags completed via SDB FIS */

	unsigned int	ncq_flags;	/* enum ncq_saw_flag_list */
};
293
294
/* Test the per-port ADMA interrupt bit in the general control register:
 * bit 19 for port 0, bit 31 (19 + 12) for port 1. */
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
Robert Hancockfbbb2622006-10-27 19:08:41 -0700296
/* Forward declarations, grouped by interface: probe/PM, legacy interrupt
 * paths, freeze/thaw, ADMA, and SWNCQ. */
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif
346
/* Board types; also used as indices into nv_port_info[]. */
enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA,
	SWNCQ,
};
356
/* PCI IDs handled by this driver; driver_data selects the nv_host_type. */
static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ } /* terminate list */
};
375
/* PCI driver glue; suspend uses the generic libata helper, resume needs
 * a driver hook to re-enable the SATA register space. */
static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};
386
/* SCSI host template for the legacy (non-NCQ) interfaces. */
static struct scsi_host_template nv_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};
390
/* SCSI host template for ADMA: NCQ-capable, with ADMA-specific queue
 * depth, SG table size and DMA boundary. */
static struct scsi_host_template nv_adma_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= NV_ADMA_MAX_CPBS,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
};
398
/* SCSI host template for software NCQ (MCP51/MCP55). */
static struct scsi_host_template nv_swncq_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= ATA_MAX_QUEUE,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= nv_swncq_slave_config,
};
406
/* Base port ops: standard BMDMA plus nForce SCR access and error handler.
 * All other op tables in this driver inherit from this one. */
static struct ata_port_operations nv_generic_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.error_handler		= nv_error_handler,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
};
413
/* nForce2/3: generic ops with chip-specific freeze/thaw. */
static struct ata_port_operations nv_nf2_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
};
419
/* CK804/MCP04 in legacy mode: own freeze/thaw and host_stop. */
static struct ata_port_operations nv_ck804_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.host_stop		= nv_ck804_host_stop,
};
426
/* CK804/MCP04 in ADMA mode: overrides the command path (prep/issue),
 * interrupt clearing, EH and port lifecycle/PM hooks. */
static struct ata_port_operations nv_adma_ops = {
	.inherits		= &nv_generic_ops,

	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.tf_read		= nv_adma_tf_read,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.irq_clear		= nv_adma_irq_clear,

	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,

	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};
450
/* MCP51/MCP55 software NCQ: overrides the command path, freeze/thaw,
 * EH and (optionally) PM hooks. */
static struct ata_port_operations nv_swncq_ops = {
	.inherits		= &nv_generic_ops,

	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_swncq_qc_prep,
	.qc_issue		= nv_swncq_qc_issue,

	.freeze			= nv_mcp55_freeze,
	.thaw			= nv_mcp55_thaw,
	.error_handler		= nv_swncq_error_handler,

#ifdef CONFIG_PM
	.port_suspend		= nv_swncq_port_suspend,
	.port_resume		= nv_swncq_port_resume,
#endif
	.port_start		= nv_swncq_port_start,
};
468
/* Per-board-type probe data: which IRQ handler and SHT to register.
 * Stored in ata_port_info.private_data and read back in nv_init_one(). */
struct nv_pi_priv {
	irq_handler_t			irq_handler;
	struct scsi_host_template	*sht;
};

/* Build an anonymous, statically-allocated nv_pi_priv (compound literal). */
#define NV_PI_PRIV(_irq_handler, _sht) \
	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
476
/* Port info indexed by enum nv_host_type (GENERIC, NFORCE2/3, CK804,
 * ADMA, SWNCQ). All variants share the same transfer-mode masks. */
static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* nforce2/3 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
	},
	/* ck804 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
	},
	/* ADMA */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
	},
	/* SWNCQ */
	{
		.flags	        = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_swncq_ops,
		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
	},
};
526
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* Module knobs (presumably exposed via module_param further down — the
 * declarations are outside this view; TODO confirm).
 * NOTE(review): ADMA is enabled by default here; later upstream kernels
 * flipped adma_enabled to 0 by default due to reliability problems —
 * worth confirming against current mainline before relying on this. */
static int adma_enabled = 1;
static int swncq_enabled;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700535
/* Switch a port from ADMA mode back to legacy register mode.
 *
 * Sequence is hardware-mandated: wait (bounded poll, 20 x 50ns) for the
 * ADMA engine to go idle, clear the GO bit in NV_ADMA_CTL, then wait for
 * the LEGACY status bit to latch. Timeouts are logged but not fatal —
 * we proceed and mark the port as being in register mode regardless.
 * No-op if the port is already in register mode. */
static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	/* wait for the engine to report idle before touching GO */
	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
			status);

	/* drop the GO bit to leave ADMA mode */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	/* wait for the LEGACY status bit to confirm the switch */
	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			 status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}
574
/* Switch a port from legacy register mode into ADMA mode.
 *
 * Sets the GO bit in NV_ADMA_CTL, then polls (bounded, 20 x 50ns) until
 * the LEGACY status bit clears and IDLE is set. Must not be called while
 * the port is locked into legacy mode for ATAPI (WARN_ON guards this).
 * No-op if the port is already in ADMA mode. */
static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	/* ATAPI setup forces legacy DMA; switching to ADMA then is a bug */
	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while (((status & NV_ADMA_STAT_LEGACY) ||
	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
604
/* SCSI slave_configure hook for ADMA ports.
 *
 * Chooses DMA parameters based on device class: ATAPI devices must use
 * the legacy interface (32-bit DMA, ATA_DMA_BOUNDARY, shrunken SG table)
 * while ATA devices get full ADMA parameters. Also flips the per-port
 * ADMA enable bits in PCI config register 0x50 and adjusts the shared
 * PCI DMA mask / block-layer bounce limits for BOTH ports, since the two
 * ports share one PCI function for DMA mapping.
 * Runs under ap->lock to keep flags/DMA-mask updates atomic w.r.t. the
 * other port. Returns the result of ata_scsi_slave_config(). */
static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct nv_adma_port_priv *port0, *port1;
	struct scsi_device *sdev0, *sdev1;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	unsigned long segment_boundary, flags;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	spin_lock_irqsave(ap->lock, flags);

	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	/* avoid a config-space write when nothing changed */
	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	port0 = ap->host->ports[0]->private_data;
	port1 = ap->host->ports[1]->private_data;
	sdev0 = ap->host->ports[0]->link.device[0].sdev;
	sdev1 = ap->host->ports[1]->link.device[0].sdev;
	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/** We have to set the DMA mask to 32-bit if either port is in
		    ATAPI mode, since they are on the same PCI device which is
		    used for DMA mapping. If we set the mask we also need to set
		    the bounce limit on both ports to ensure that the block
		    layer doesn't feed addresses that cause DMA mapping to
		    choke. If either SCSI device is not allocated yet, it's OK
		    since that port will discover its correct setting when it
		    does get allocated.
		    Note: Setting 32-bit mask should not fail. */
		if (sdev0)
			blk_queue_bounce_limit(sdev0->request_queue,
					       ATA_DMA_MASK);
		if (sdev1)
			blk_queue_bounce_limit(sdev1->request_queue,
					       ATA_DMA_MASK);

		pci_set_dma_mask(pdev, ATA_DMA_MASK);
	} else {
		/** This shouldn't fail as it was set to this value before */
		pci_set_dma_mask(pdev, pp->adma_dma_mask);
		if (sdev0)
			blk_queue_bounce_limit(sdev0->request_queue,
					       pp->adma_dma_mask);
		if (sdev1)
			blk_queue_bounce_limit(sdev1->request_queue,
					       pp->adma_dma_mask);
	}

	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)*ap->host->dev->dma_mask,
		segment_boundary, sg_tablesize);

	spin_unlock_irqrestore(ap->lock, flags);

	return rc;
}
714
Robert Hancock2dec7552006-11-26 14:20:19 -0600715static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
716{
717 struct nv_adma_port_priv *pp = qc->ap->private_data;
718 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
719}
720
/* Read the shadow taskfile registers into @tf.  The port must be in
 * register mode for the shadow registers to be accessible, so we force
 * the switch first (see the note below on why this is safe). */
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Other than when internal or pass-through commands are executed,
	   the only time this function will be called in ADMA mode will be
	   if a command fails. In the failure case we don't care about going
	   into register mode with ADMA commands pending, as the commands will
	   all shortly be aborted anyway. We assume that NCQ commands are not
	   issued via passthrough, which is the only way that switching into
	   ADMA mode could abort outstanding commands. */
	nv_adma_register_mode(ap);

	ata_tf_read(ap, tf);
}
734
Robert Hancock2dec7552006-11-26 14:20:19 -0600735static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
Robert Hancockfbbb2622006-10-27 19:08:41 -0700736{
737 unsigned int idx = 0;
738
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400739 if (tf->flags & ATA_TFLAG_ISADDR) {
Robert Hancockac3d6b82007-02-19 19:02:46 -0600740 if (tf->flags & ATA_TFLAG_LBA48) {
741 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
742 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
743 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
744 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
745 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
746 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
747 } else
748 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);
Jeff Garzika84471f2007-02-26 05:51:33 -0500749
Robert Hancockac3d6b82007-02-19 19:02:46 -0600750 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
751 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
752 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
753 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700754 }
Jeff Garzika84471f2007-02-26 05:51:33 -0500755
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400756 if (tf->flags & ATA_TFLAG_DEVICE)
Robert Hancockac3d6b82007-02-19 19:02:46 -0600757 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700758
759 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
Jeff Garzika84471f2007-02-26 05:51:33 -0500760
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400761 while (idx < 12)
Robert Hancockac3d6b82007-02-19 19:02:46 -0600762 cpb[idx++] = cpu_to_le16(IGN);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700763
764 return idx;
765}
766
/* Inspect the response flags of CPB @cpb_num and complete or fail the
 * corresponding command.  @force_err forces the error path even when the
 * CPB itself shows no error bits (used when the notifier-error register
 * flagged this tag).  Returns nonzero if an error was raised (which also
 * kills all outstanding commands on the port), zero otherwise.
 * Called from the interrupt handler with ap->host->lock held. */
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			/* device reported an error via ATA status */
			ata_ehi_push_desc(ehi, "ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, "CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			/* controller-side CPB problem: treat as fatal, freeze */
			ata_ehi_push_desc(ehi, "CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ata_ehi_push_desc(ehi, "unknown");
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return 1;
	}

	if (likely(flags & NV_CPB_RESP_DONE)) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		if (likely(qc)) {
			DPRINTK("Completing qc from tag %d\n", cpb_num);
			ata_qc_complete(qc);
		} else {
			struct ata_eh_info *ehi = &ap->link.eh_info;
			/* Notifier bits set without a command may indicate the drive
			   is misbehaving. Raise host state machine violation on this
			   condition. */
			ata_port_printk(ap, KERN_ERR,
					"notifier for tag %d with no cmd?\n",
					cpb_num);
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			ata_port_freeze(ap);
			return 1;
		}
	}
	return 0;
}
829
/* Handle a legacy (non-ADMA) interrupt for @ap given the per-port slice
 * of the CK804 interrupt status register in @irq_stat.  Returns nonzero
 * if the interrupt was consumed, zero if it was not ours. */
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		/* reading status acknowledges the spurious interrupt */
		ata_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_host_intr(ap, qc);
}
853
/* Top-level interrupt handler for an ADMA-capable host.  Walks both
 * ports, dispatching to the legacy handler when a port is not in ADMA
 * mode, otherwise reading the notifier/status registers and checking
 * the notified CPBs for completions or errors.  The per-port notifier
 * bits are accumulated and cleared once at the end, as required by the
 * hardware (see note below). */
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = pp->ctl_block;
			u16 status;
			u32 gen_ctl;
			u32 notifier, notifier_error;

			/* if ADMA is disabled, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			/* if in ATA register mode, check for standard interrupts */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				if (ata_tag_valid(ap->link.active_tag))
					/** NV_INT_DEV indication seems unreliable at times
					    at least in ADMA mode. Force it on always when a
					    command is active, to prevent losing interrupts. */
					irq_stat |= NV_INT_DEV;
				handled += nv_host_intr(ap, irq_stat);
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			/* remember what to clear after the loop */
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

			if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();

			handled++; /* irq handled if we got here */

			/* freeze if hotplugged or controller error */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
					      NV_ADMA_STAT_HOTUNPLUG |
					      NV_ADMA_STAT_TIMEOUT |
					      NV_ADMA_STAT_SERROR))) {
				struct ata_eh_info *ehi = &ap->link.eh_info;

				ata_ehi_clear_desc(ehi);
				__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
				if (status & NV_ADMA_STAT_TIMEOUT) {
					ehi->err_mask |= AC_ERR_SYSTEM;
					ata_ehi_push_desc(ehi, "timeout");
				} else if (status & NV_ADMA_STAT_HOTPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, "hotplug");
				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, "hot unplug");
				} else if (status & NV_ADMA_STAT_SERROR) {
					/* let libata analyze SError and figure out the cause */
					ata_ehi_push_desc(ehi, "SError");
				} else
					ata_ehi_push_desc(ehi, "unknown");
				ata_port_freeze(ap);
				continue;
			}

			if (status & (NV_ADMA_STAT_DONE |
				      NV_ADMA_STAT_CPBERR |
				      NV_ADMA_STAT_CMD_COMPLETE)) {
				u32 check_commands = notifier_clears[i];
				int pos, error = 0;

				if (status & NV_ADMA_STAT_CPBERR) {
					/* Check all active commands */
					if (ata_tag_valid(ap->link.active_tag))
						check_commands = 1 <<
							ap->link.active_tag;
					else
						check_commands = ap->
							link.sactive;
				}

				/** Check CPBs for completed commands */
				while ((pos = ffs(check_commands)) && !error) {
					pos--;	/* ffs() is 1-based */
					error = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos));
					check_commands &= ~(1 << pos);
				}
			}
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
982
/* EH freeze hook: mask the port's interrupts.  On top of the generic
 * CK804 freeze, an ADMA-mode port also needs its ADMA interrupt enables
 * cleared and any pending CK804 notifications acknowledged. */
static void nv_adma_freeze(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_freeze(ap);

	/* legacy (ATAPI) setup: the CK804 freeze above is sufficient */
	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* Disable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}
1004
/* EH thaw hook: re-enable the port's interrupts, undoing nv_adma_freeze().
 * For an ADMA-mode port this means restoring the ADMA interrupt enables
 * on top of the generic CK804 thaw. */
static void nv_adma_thaw(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_thaw(ap);

	/* legacy (ATAPI) setup: no ADMA interrupt enables to restore */
	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* Enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}
1022
/* Clear all pending interrupt conditions for @ap.  In legacy (ATAPI)
 * setup this defers to the BMDMA helper; in ADMA mode it acknowledges
 * the CK804 notification bits, the ADMA status register, and both
 * ports' notifier-clear registers (the hardware requires both to be
 * written even when only one port is being cleared). */
static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u32 notifier_clears[2];

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
		ata_bmdma_irq_clear(ap);
		return;
	}

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* clear ADMA status */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* clear notifiers - note both ports need to be written with
	   something even though we are only clearing on one */
	if (ap->port_no == 0) {
		notifier_clears[0] = 0xFFFFFFFF;
		notifier_clears[1] = 0;
	} else {
		notifier_clears[0] = 0;
		notifier_clears[1] = 0xFFFFFFFF;
	}
	pp = ap->host->ports[0]->private_data;
	writel(notifier_clears[0], pp->notifier_clear_block);
	pp = ap->host->ports[1]->private_data;
	writel(notifier_clears[1], pp->notifier_clear_block);
}
1055
Robert Hancockf5ecac22007-02-20 21:49:10 -06001056static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
Robert Hancockfbbb2622006-10-27 19:08:41 -07001057{
Robert Hancockf5ecac22007-02-20 21:49:10 -06001058 struct nv_adma_port_priv *pp = qc->ap->private_data;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001059
Jeff Garzikb4479162007-10-25 20:47:30 -04001060 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
Robert Hancockf5ecac22007-02-20 21:49:10 -06001061 ata_bmdma_post_internal_cmd(qc);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001062}
1063
/* Per-port init: allocate private data and the coherent CPB/APRD block,
 * program the CPB base into the controller, and bring the ADMA engine
 * up in register mode via a channel reset.  The DMA mask is deliberately
 * set twice: 32-bit first so the legacy PRD/pad buffers land below 4GB,
 * then raised to 64-bit for the CPB/APRD allocation.
 * Returns 0 on success or a negative errno. */
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 tmp;

	VPRINTK("ENTER\n");

	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
	   pad buffers */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	/* allocates the legacy PRD table and pad buffer */
	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	/* Now that the legacy PRD and padding buffer are allocated we can
	   safely raise the DMA mask to allocate the CPB/APRD table.
	   These are allowed to fail since we store the value that ends up
	   being used to set as the bounce limit in slave_config later if
	   needed. */
	pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	pp->adma_dma_mask = *dev->dma_mask;

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb = mem;
	pp->cpb_dma = mem_dma;

	/* program the 64-bit CPB base address into the controller */
	writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	/* pulse the channel reset bit to put the engine in a known state */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
1161
1162static void nv_adma_port_stop(struct ata_port *ap)
1163{
Robert Hancockfbbb2622006-10-27 19:08:41 -07001164 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001165 void __iomem *mmio = pp->ctl_block;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001166
1167 VPRINTK("ENTER\n");
Robert Hancockfbbb2622006-10-27 19:08:41 -07001168 writew(0, mmio + NV_ADMA_CTL);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001169}
1170
Tejun Heo438ac6d2007-03-02 17:31:26 +09001171#ifdef CONFIG_PM
/* PM suspend hook: quiesce the ADMA engine so the port can be powered
 * down.  Always returns 0. */
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}
1188
/* PM resume hook: reprogram the CPB base and repeat the register-mode
 * init + channel-reset sequence from port_start (the controller loses
 * this state across suspend).  Always returns 0. */
static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	/* pulse channel reset to restore a known engine state */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
Tejun Heo438ac6d2007-03-02 17:31:26 +09001222#endif
Robert Hancockfbbb2622006-10-27 19:08:41 -07001223
Tejun Heo9a829cc2007-04-17 23:44:08 +09001224static void nv_adma_setup_port(struct ata_port *ap)
Robert Hancockfbbb2622006-10-27 19:08:41 -07001225{
Tejun Heo9a829cc2007-04-17 23:44:08 +09001226 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1227 struct ata_ioports *ioport = &ap->ioaddr;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001228
1229 VPRINTK("ENTER\n");
1230
Tejun Heo9a829cc2007-04-17 23:44:08 +09001231 mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001232
Tejun Heo0d5ff562007-02-01 15:06:36 +09001233 ioport->cmd_addr = mmio;
1234 ioport->data_addr = mmio + (ATA_REG_DATA * 4);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001235 ioport->error_addr =
Tejun Heo0d5ff562007-02-01 15:06:36 +09001236 ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
1237 ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
1238 ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
1239 ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
1240 ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
1241 ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001242 ioport->status_addr =
Tejun Heo0d5ff562007-02-01 15:06:36 +09001243 ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001244 ioport->altstatus_addr =
Tejun Heo0d5ff562007-02-01 15:06:36 +09001245 ioport->ctl_addr = mmio + 0x20;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001246}
1247
Tejun Heo9a829cc2007-04-17 23:44:08 +09001248static int nv_adma_host_init(struct ata_host *host)
Robert Hancockfbbb2622006-10-27 19:08:41 -07001249{
Tejun Heo9a829cc2007-04-17 23:44:08 +09001250 struct pci_dev *pdev = to_pci_dev(host->dev);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001251 unsigned int i;
1252 u32 tmp32;
1253
1254 VPRINTK("ENTER\n");
1255
1256 /* enable ADMA on the ports */
1257 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1258 tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1259 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1260 NV_MCP_SATA_CFG_20_PORT1_EN |
1261 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1262
1263 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1264
Tejun Heo9a829cc2007-04-17 23:44:08 +09001265 for (i = 0; i < host->n_ports; i++)
1266 nv_adma_setup_port(host->ports[i]);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001267
Robert Hancockfbbb2622006-10-27 19:08:41 -07001268 return 0;
1269}
1270
1271static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1272 struct scatterlist *sg,
1273 int idx,
1274 struct nv_adma_prd *aprd)
1275{
Robert Hancock41949ed2007-02-19 19:02:27 -06001276 u8 flags = 0;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001277 if (qc->tf.flags & ATA_TFLAG_WRITE)
1278 flags |= NV_APRD_WRITE;
1279 if (idx == qc->n_elem - 1)
1280 flags |= NV_APRD_END;
1281 else if (idx != 4)
1282 flags |= NV_APRD_CONT;
1283
1284 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
1285 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
Robert Hancock2dec7552006-11-26 14:20:19 -06001286 aprd->flags = flags;
Robert Hancock41949ed2007-02-19 19:02:27 -06001287 aprd->packet_len = 0;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001288}
1289
/* Build the APRD list for @qc: the first 5 scatterlist elements go into
 * the CPB's inline aprd[] array, the remainder into this tag's slice of
 * the external APRD table, linked via next_aprd. */
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;
	unsigned int si;

	VPRINTK("ENTER\n");

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		/* inline slots 0-4, then spill to the external table */
		aprd = (si < 5) ? &cpb->aprd[si] :
			       &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
		nv_adma_fill_aprd(qc, sg, si, aprd);
	}
	/* si holds the element count after the loop; link the external
	   table only if we actually spilled into it */
	if (si > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}
1309
Robert Hancock382a6652007-02-05 16:26:02 -08001310static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1311{
1312 struct nv_adma_port_priv *pp = qc->ap->private_data;
1313
1314 /* ADMA engine can only be used for non-ATAPI DMA commands,
Robert Hancock3f3debd2007-11-25 16:59:36 -06001315 or interrupt-driven no-data commands. */
Jeff Garzikb4479162007-10-25 20:47:30 -04001316 if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
Robert Hancock3f3debd2007-11-25 16:59:36 -06001317 (qc->tf.flags & ATA_TFLAG_POLLING))
Robert Hancock382a6652007-02-05 16:26:02 -08001318 return 1;
1319
Jeff Garzikb4479162007-10-25 20:47:30 -04001320 if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
Robert Hancock382a6652007-02-05 16:26:02 -08001321 (qc->tf.protocol == ATA_PROT_NODATA))
1322 return 0;
1323
1324 return 1;
1325}
1326
/* qc_prep hook: build the CPB for @qc, or fall back to the standard
 * prep path when the command must run in register mode.  The wmb()
 * ordering around ctl_flags/resp_flags is the publication protocol that
 * keeps the controller from seeing a half-built CPB — do not reorder. */
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		/* register mode must never carry a DMA-mapped command
		   unless we are in the ATAPI (legacy) setup */
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		ata_qc_prep(qc);
		return;
	}

	/* invalidate the CPB before rewriting it */
	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len		= 3;
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
	   until we are finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;
}
1372
/* Issue @qc either via the legacy register interface or by appending its
 * tag to the ADMA append register. Returns 0 on success or AC_ERR_SYSTEM
 * for the unsupported NCQ+RESULT_TF combination.
 */
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	/* We can't handle result taskfile with NCQ commands, since
	   retrieving the taskfile switches us out of ADMA mode and would abort
	   existing commands. */
	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
		ata_dev_printk(qc->dev, KERN_ERR,
			"NCQ w/ RESULT_TF not allowed\n");
		return AC_ERR_SYSTEM;
	}

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and
		   non-NCQ commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}
1418
/* Legacy interrupt handler for generic nForce SATA: under the host lock,
 * hand each enabled port's active non-polled command to ata_host_intr(),
 * or read the status register to acknowledge a possible stray interrupt.
 */
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				// No request pending?  Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->check_status(ap);
		}

	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
1451
Jeff Garzikcca39742006-08-24 03:19:22 -04001452static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
Tejun Heoada364e2006-06-17 15:49:56 +09001453{
1454 int i, handled = 0;
1455
Jeff Garzikcca39742006-08-24 03:19:22 -04001456 for (i = 0; i < host->n_ports; i++) {
1457 struct ata_port *ap = host->ports[i];
Tejun Heoada364e2006-06-17 15:49:56 +09001458
1459 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1460 handled += nv_host_intr(ap, irq_stat);
1461
1462 irq_stat >>= NV_INT_PORT_SHIFT;
1463 }
1464
1465 return IRQ_RETVAL(handled);
1466}
1467
David Howells7d12e782006-10-05 14:55:46 +01001468static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
Tejun Heoada364e2006-06-17 15:49:56 +09001469{
Jeff Garzikcca39742006-08-24 03:19:22 -04001470 struct ata_host *host = dev_instance;
Tejun Heoada364e2006-06-17 15:49:56 +09001471 u8 irq_stat;
1472 irqreturn_t ret;
1473
Jeff Garzikcca39742006-08-24 03:19:22 -04001474 spin_lock(&host->lock);
Tejun Heo0d5ff562007-02-01 15:06:36 +09001475 irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
Jeff Garzikcca39742006-08-24 03:19:22 -04001476 ret = nv_do_interrupt(host, irq_stat);
1477 spin_unlock(&host->lock);
Tejun Heoada364e2006-06-17 15:49:56 +09001478
1479 return ret;
1480}
1481
David Howells7d12e782006-10-05 14:55:46 +01001482static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
Tejun Heoada364e2006-06-17 15:49:56 +09001483{
Jeff Garzikcca39742006-08-24 03:19:22 -04001484 struct ata_host *host = dev_instance;
Tejun Heoada364e2006-06-17 15:49:56 +09001485 u8 irq_stat;
1486 irqreturn_t ret;
1487
Jeff Garzikcca39742006-08-24 03:19:22 -04001488 spin_lock(&host->lock);
Tejun Heo0d5ff562007-02-01 15:06:36 +09001489 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
Jeff Garzikcca39742006-08-24 03:19:22 -04001490 ret = nv_do_interrupt(host, irq_stat);
1491 spin_unlock(&host->lock);
Tejun Heoada364e2006-06-17 15:49:56 +09001492
1493 return ret;
1494}
1495
Tejun Heoda3dbb12007-07-16 14:29:40 +09001496static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498 if (sc_reg > SCR_CONTROL)
Tejun Heoda3dbb12007-07-16 14:29:40 +09001499 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500
Tejun Heoda3dbb12007-07-16 14:29:40 +09001501 *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
1502 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503}
1504
Tejun Heoda3dbb12007-07-16 14:29:40 +09001505static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507 if (sc_reg > SCR_CONTROL)
Tejun Heoda3dbb12007-07-16 14:29:40 +09001508 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509
Tejun Heo0d5ff562007-02-01 15:06:36 +09001510 iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
Tejun Heoda3dbb12007-07-16 14:29:40 +09001511 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512}
1513
Tejun Heo39f87582006-06-17 15:49:56 +09001514static void nv_nf2_freeze(struct ata_port *ap)
1515{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001516 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
Tejun Heo39f87582006-06-17 15:49:56 +09001517 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1518 u8 mask;
1519
Tejun Heo0d5ff562007-02-01 15:06:36 +09001520 mask = ioread8(scr_addr + NV_INT_ENABLE);
Tejun Heo39f87582006-06-17 15:49:56 +09001521 mask &= ~(NV_INT_ALL << shift);
Tejun Heo0d5ff562007-02-01 15:06:36 +09001522 iowrite8(mask, scr_addr + NV_INT_ENABLE);
Tejun Heo39f87582006-06-17 15:49:56 +09001523}
1524
1525static void nv_nf2_thaw(struct ata_port *ap)
1526{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001527 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
Tejun Heo39f87582006-06-17 15:49:56 +09001528 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1529 u8 mask;
1530
Tejun Heo0d5ff562007-02-01 15:06:36 +09001531 iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
Tejun Heo39f87582006-06-17 15:49:56 +09001532
Tejun Heo0d5ff562007-02-01 15:06:36 +09001533 mask = ioread8(scr_addr + NV_INT_ENABLE);
Tejun Heo39f87582006-06-17 15:49:56 +09001534 mask |= (NV_INT_MASK << shift);
Tejun Heo0d5ff562007-02-01 15:06:36 +09001535 iowrite8(mask, scr_addr + NV_INT_ENABLE);
Tejun Heo39f87582006-06-17 15:49:56 +09001536}
1537
1538static void nv_ck804_freeze(struct ata_port *ap)
1539{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001540 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
Tejun Heo39f87582006-06-17 15:49:56 +09001541 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1542 u8 mask;
1543
1544 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1545 mask &= ~(NV_INT_ALL << shift);
1546 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1547}
1548
1549static void nv_ck804_thaw(struct ata_port *ap)
1550{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001551 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
Tejun Heo39f87582006-06-17 15:49:56 +09001552 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1553 u8 mask;
1554
1555 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1556
1557 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1558 mask |= (NV_INT_MASK << shift);
1559 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1560}
1561
Kuan Luof140f0f2007-10-15 15:16:53 -04001562static void nv_mcp55_freeze(struct ata_port *ap)
1563{
1564 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1565 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1566 u32 mask;
1567
1568 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1569
1570 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1571 mask &= ~(NV_INT_ALL_MCP55 << shift);
1572 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1573 ata_bmdma_freeze(ap);
1574}
1575
1576static void nv_mcp55_thaw(struct ata_port *ap)
1577{
1578 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1579 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1580 u32 mask;
1581
1582 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1583
1584 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1585 mask |= (NV_INT_MASK_MCP55 << shift);
1586 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1587 ata_bmdma_thaw(ap);
1588}
1589
/* Hardreset that deliberately discards the detected device class: some
 * of these controllers return a bogus signature after SATA hardreset
 * (see http://bugzilla.kernel.org/show_bug.cgi?id=3352), so
 * classification is left to the softreset that follows.
 */
static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	unsigned int discard_class;

	return sata_std_hardreset(link, &discard_class, deadline);
}
1601
/* Standard BMDMA error handling, but with the NV-specific hardreset
 * that skips device classification.
 */
static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
1607
/* ADMA error handler: while still in ADMA mode, dump controller and CPB
 * state for any in-flight commands, then drop back to register mode,
 * invalidate all CPBs and reset the ADMA channel before running the
 * generic BMDMA EH.
 */
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
			/* Snapshot the ADMA registers before we disturb
			   anything, for the diagnostic dump below. */
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_printk(ap, KERN_ERR,
				"EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			/* dump only CPBs that were actually in flight */
			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
				    ap->link.sactive & (1 << i))
					ata_port_printk(ap, KERN_ERR,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from
		   being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel: pulse the reset bit with a posted-write
		   flush on either side of a short delay. */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
1664
Kuan Luof140f0f2007-10-15 15:16:53 -04001665static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1666{
1667 struct nv_swncq_port_priv *pp = ap->private_data;
1668 struct defer_queue *dq = &pp->defer_queue;
1669
1670 /* queue is full */
1671 WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1672 dq->defer_bits |= (1 << qc->tag);
1673 dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1674}
1675
1676static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1677{
1678 struct nv_swncq_port_priv *pp = ap->private_data;
1679 struct defer_queue *dq = &pp->defer_queue;
1680 unsigned int tag;
1681
1682 if (dq->head == dq->tail) /* null queue */
1683 return NULL;
1684
1685 tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1686 dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1687 WARN_ON(!(dq->defer_bits & (1 << tag)));
1688 dq->defer_bits &= ~(1 << tag);
1689
1690 return ata_qc_from_tag(ap, tag);
1691}
1692
1693static void nv_swncq_fis_reinit(struct ata_port *ap)
1694{
1695 struct nv_swncq_port_priv *pp = ap->private_data;
1696
1697 pp->dhfis_bits = 0;
1698 pp->dmafis_bits = 0;
1699 pp->sdbfis_bits = 0;
1700 pp->ncq_flags = 0;
1701}
1702
1703static void nv_swncq_pp_reinit(struct ata_port *ap)
1704{
1705 struct nv_swncq_port_priv *pp = ap->private_data;
1706 struct defer_queue *dq = &pp->defer_queue;
1707
1708 dq->head = 0;
1709 dq->tail = 0;
1710 dq->defer_bits = 0;
1711 pp->qc_active = 0;
1712 pp->last_issue_tag = ATA_TAG_POISON;
1713 nv_swncq_fis_reinit(ap);
1714}
1715
/* Acknowledge the given interrupt bits in this port's slice of the
 * MCP55 interrupt status register.
 */
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	writew(fis, pp->irq_block);
}
1722
/* Stop the BMDMA engine when no real qc is at hand. Only qc.ap is
 * initialized here, so this relies on ata_bmdma_stop() touching nothing
 * else in the qc — NOTE(review): confirmed by current libata usage, but
 * worth re-checking if libata changes.
 */
static void __ata_bmdma_stop(struct ata_port *ap)
{
	struct ata_queued_cmd qc;

	qc.ap = ap;
	ata_bmdma_stop(&qc);
}
1730
1731static void nv_swncq_ncq_stop(struct ata_port *ap)
1732{
1733 struct nv_swncq_port_priv *pp = ap->private_data;
1734 unsigned int i;
1735 u32 sactive;
1736 u32 done_mask;
1737
1738 ata_port_printk(ap, KERN_ERR,
1739 "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
1740 ap->qc_active, ap->link.sactive);
1741 ata_port_printk(ap, KERN_ERR,
1742 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
1743 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1744 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1745 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1746
1747 ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
1748 ap->ops->check_status(ap),
1749 ioread8(ap->ioaddr.error_addr));
1750
1751 sactive = readl(pp->sactive_block);
1752 done_mask = pp->qc_active ^ sactive;
1753
1754 ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sacitve\n");
1755 for (i = 0; i < ATA_MAX_QUEUE; i++) {
1756 u8 err = 0;
1757 if (pp->qc_active & (1 << i))
1758 err = 0;
1759 else if (done_mask & (1 << i))
1760 err = 1;
1761 else
1762 continue;
1763
1764 ata_port_printk(ap, KERN_ERR,
1765 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1766 (pp->dhfis_bits >> i) & 0x1,
1767 (pp->dmafis_bits >> i) & 0x1,
1768 (pp->sdbfis_bits >> i) & 0x1,
1769 (sactive >> i) & 0x1,
1770 (err ? "error! tag doesn't exit" : " "));
1771 }
1772
1773 nv_swncq_pp_reinit(ap);
1774 ap->ops->irq_clear(ap);
1775 __ata_bmdma_stop(ap);
1776 nv_swncq_irq_clear(ap, 0xffff);
1777}
1778
/* SWNCQ error handler: if NCQ commands were in flight, dump and reset
 * the software NCQ state and force a reset, then run the generic BMDMA
 * EH with the NV-specific hardreset.
 */
static void nv_swncq_error_handler(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->link.eh_context;

	if (ap->link.sactive) {
		nv_swncq_ncq_stop(ap);
		ehc->i.action |= ATA_EH_RESET;
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
1791
1792#ifdef CONFIG_PM
/* Suspend hook: ack and disable all MCP55 interrupts and switch SWNCQ
 * mode off for both ports. Always succeeds.
 */
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* disable irq */
	writel(0, mmio + NV_INT_ENABLE_MCP55);

	/* disable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
	writel(tmp, mmio + NV_CTL_MCP55);

	return 0;
}
1811
/* Resume hook: mirror of nv_swncq_port_suspend — ack stale interrupts,
 * re-enable the interrupt mask and turn SWNCQ mode back on. The 0x00fd00fd
 * mask matches the value programmed at host init. Always succeeds.
 */
static int nv_swncq_port_resume(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* enable irq */
	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	return 0;
}
1829#endif
1830
/* One-time SWNCQ host setup: tweak PCI config (disable ECO 398 via the
 * vendor-specific register at 0x7f — semantics per NVIDIA, not publicly
 * documented), enable SWNCQ on both ports, unmask interrupts, and ack
 * anything stale.
 */
static void nv_swncq_host_init(struct ata_host *host)
{
	u32 tmp;
	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable ECO 398 */
	pci_read_config_byte(pdev, 0x7f, &regval);
	regval &= ~(1 << 7);
	pci_write_config_byte(pdev, 0x7f, regval);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	VPRINTK("HOST_CTL:0x%X\n", tmp);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	/* enable irq intr */
	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* clear port irq */
	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}
1856
/* SCSI slave-configure hook: after standard libata setup, drop the queue
 * depth to 1 (effectively disabling SWNCQ) for Maxtor drives attached to
 * MCP51, or to MCP55 with PCI revision <= 0xa2 — combinations this
 * driver treats as NCQ-unsafe.
 */
static int nv_swncq_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *dev;
	int rc;
	u8 rev;
	u8 check_maxtor = 0;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	rc = ata_scsi_slave_config(sdev);
	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	dev = &ap->link.device[sdev->id];
	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
		return rc;

	/* if MCP51 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
		check_maxtor = 1;

	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
		pci_read_config_byte(pdev, 0x8, &rev);	/* PCI revision ID */
		if (rev <= 0xa2)
			check_maxtor = 1;
	}

	if (!check_maxtor)
		return rc;

	/* match on the ATA IDENTIFY model string */
	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	if (strncmp(model_num, "Maxtor", 6) == 0) {
		ata_scsi_change_queue_depth(sdev, 1);
		ata_dev_printk(dev, KERN_NOTICE,
			"Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
	}

	return rc;
}
1902
/* Per-port SWNCQ setup: allocate the port private data and a coherent
 * DMA area holding one PRD table per possible tag, and cache the MMIO
 * addresses for SActive, interrupt status and the tag register.
 * Returns 0 or -ENOMEM; devm/dmam manage the allocations' lifetime.
 */
static int nv_swncq_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct nv_swncq_port_priv *pp;
	int rc;

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* one PRD table per tag, in a single coherent allocation */
	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
				      &pp->prd_dma, GFP_KERNEL);
	if (!pp->prd)
		return -ENOMEM;
	memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);

	ap->private_data = pp;
	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;

	return 0;
}
1931
1932static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1933{
1934 if (qc->tf.protocol != ATA_PROT_NCQ) {
1935 ata_qc_prep(qc);
1936 return;
1937 }
1938
1939 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1940 return;
1941
1942 nv_swncq_fill_sg(qc);
1943}
1944
/* Build the BMDMA PRD table for an NCQ command in the per-tag slot of
 * the port's PRD area, splitting each scatterlist entry so that no PRD
 * crosses a 64KiB boundary. A full 0x10000 chunk is stored as 0 via the
 * `& 0xffff` mask — the standard IDE PRD encoding for 64KiB.
 */
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_prd *prd;
	unsigned int si, idx;

	/* each tag owns its own PRD table */
	prd = pp->prd + ATA_MAX_PRD * qc->tag;

	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		addr = (u32)sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			/* clamp each PRD so it ends at a 64KiB boundary */
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[idx].addr = cpu_to_le32(addr);
			prd[idx].flags_len = cpu_to_le32(len & 0xffff);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the final entry as end-of-table */
	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
1980
/* Actually send an NCQ command to the drive: set its SActive bit,
 * update the software bookkeeping (last tag, pending FIS bits, active
 * mask), then load and execute the taskfile. NULL @qc is a no-op.
 * Always returns 0.
 */
static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
					  struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	if (qc == NULL)
		return 0;

	DPRINTK("Enter\n");

	/* SActive must be set before the command is issued */
	writel((1 << qc->tag), pp->sactive_block);
	pp->last_issue_tag = qc->tag;
	pp->dhfis_bits &= ~(1 << qc->tag);
	pp->dmafis_bits &= ~(1 << qc->tag);
	pp->qc_active |= (0x1 << qc->tag);

	ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
	ap->ops->exec_command(ap, &qc->tf);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}
2004
2005static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2006{
2007 struct ata_port *ap = qc->ap;
2008 struct nv_swncq_port_priv *pp = ap->private_data;
2009
2010 if (qc->tf.protocol != ATA_PROT_NCQ)
2011 return ata_qc_issue_prot(qc);
2012
2013 DPRINTK("Enter\n");
2014
2015 if (!pp->qc_active)
2016 nv_swncq_issue_atacmd(ap, qc);
2017 else
2018 nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */
2019
2020 return 0;
2021}
2022
/* Handle a hotplug/unplug interrupt: clear SError, record what happened
 * in the EH descriptor, and freeze the port so EH runs.
 */
static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
{
	u32 serror;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	sata_scr_read(&ap->link, SCR_ERROR, &serror);
	sata_scr_write(&ap->link, SCR_ERROR, serror);

	/* analyze @irq_stat */
	if (fis & NV_SWNCQ_IRQ_ADDED)
		ata_ehi_push_desc(ehi, "hot plug");
	else if (fis & NV_SWNCQ_IRQ_REMOVED)
		ata_ehi_push_desc(ehi, "hot unplug");

	ata_ehi_hotplugged(ehi);

	/* okay, let's hand over to EH */
	ehi->serror |= serror;

	ata_port_freeze(ap);
}
2047
/* Process a Set-Device-Bits FIS: complete every command whose SActive
 * bit the drive cleared, then keep the queue moving — reissue the last
 * command if its D2H FIS never arrived, or launch the next deferred
 * command. Returns the number of completed commands, or -EINVAL after
 * flagging an EH reset on BMDMA error or an illegal SActive transition.
 */
static int nv_swncq_sdbfis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 sactive;
	int nr_done = 0;
	u32 done_mask;
	int i;
	u8 host_stat;
	u8 lack_dhfis = 0;

	host_stat = ap->ops->bmdma_status(ap);
	if (unlikely(host_stat & ATA_DMA_ERR)) {
		/* error when transfering data to/from memory */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
		ehi->err_mask |= AC_ERR_HOST_BUS;
		ehi->action |= ATA_EH_RESET;
		return -EINVAL;
	}

	ap->ops->irq_clear(ap);
	__ata_bmdma_stop(ap);

	/* bits that were active but are no longer set in SActive are done */
	sactive = readl(pp->sactive_block);
	done_mask = pp->qc_active ^ sactive;

	if (unlikely(done_mask & sactive)) {
		/* a bit set in SActive that we never issued — bail to EH */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition"
				  "(%08x->%08x)", pp->qc_active, sactive);
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		return -EINVAL;
	}
	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		if (!(done_mask & (1 << i)))
			continue;

		qc = ata_qc_from_tag(ap, i);
		if (qc) {
			ata_qc_complete(qc);
			pp->qc_active &= ~(1 << i);
			pp->dhfis_bits &= ~(1 << i);
			pp->dmafis_bits &= ~(1 << i);
			pp->sdbfis_bits |= (1 << i);
			nr_done++;
		}
	}

	if (!ap->qc_active) {
		/* everything finished — reset the software state */
		DPRINTK("over\n");
		nv_swncq_pp_reinit(ap);
		return nr_done;
	}

	if (pp->qc_active & pp->dhfis_bits)
		return nr_done;

	if ((pp->ncq_flags & ncq_saw_backout) ||
	    (pp->qc_active ^ pp->dhfis_bits))
		/* if the controller can't get a device to host register FIS,
		 * The driver needs to reissue the new command.
		 */
		lack_dhfis = 1;

	DPRINTK("id 0x%x QC: qc_active 0x%x,"
		"SWNCQ:qc_active 0x%X defer_bits %X "
		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
		ap->print_id, ap->qc_active, pp->qc_active,
		pp->defer_queue.defer_bits, pp->dhfis_bits,
		pp->dmafis_bits, pp->last_issue_tag);

	nv_swncq_fis_reinit(ap);

	if (lack_dhfis) {
		/* reissue the command whose D2H FIS went missing */
		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
		nv_swncq_issue_atacmd(ap, qc);
		return nr_done;
	}

	if (pp->defer_queue.defer_bits) {
		/* send deferral queue command */
		qc = nv_swncq_qc_from_dq(ap);
		WARN_ON(qc == NULL);
		nv_swncq_issue_atacmd(ap, qc);
	}

	return nr_done;
}
2139
2140static inline u32 nv_swncq_tag(struct ata_port *ap)
2141{
2142 struct nv_swncq_port_priv *pp = ap->private_data;
2143 u32 tag;
2144
2145 tag = readb(pp->tag_block) >> 2;
2146 return (tag & 0x1f);
2147}
2148
/* Program and start the BMDMA engine for the command the controller has
 * selected (read from the tag register) in response to a DMA Setup FIS.
 * Returns 1 when DMA was started, 0 if the tag had no matching qc.
 */
static int nv_swncq_dmafis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned int rw;
	u8 dmactl;
	u32 tag;
	struct nv_swncq_port_priv *pp = ap->private_data;

	__ata_bmdma_stop(ap);
	tag = nv_swncq_tag(ap);

	DPRINTK("dma setup tag 0x%x\n", tag);
	qc = ata_qc_from_tag(ap, tag);

	if (unlikely(!qc))
		return 0;

	rw = qc->tf.flags & ATA_TFLAG_WRITE;

	/* load PRD table addr. */
	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear.
	   ATA_DMA_WR means the controller writes to memory, i.e. the
	   command is an ATA read. */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~ATA_DMA_WR;
	if (!rw)
		dmactl |= ATA_DMA_WR;

	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	return 1;
}
2182
/* Per-port SWNCQ interrupt work: decode the FIS-type bits in @fis and
 * drive the NCQ state machine — hotplug, device error, SDB completion,
 * D2H register FIS acceptance (possibly issuing the next deferred
 * command), and DMA Setup FIS handling. Freezes the port on any
 * protocol inconsistency.
 */
static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 serror;
	u8 ata_stat;
	int rc = 0;

	ata_stat = ap->ops->check_status(ap);
	nv_swncq_irq_clear(ap, fis);	/* ack the bits we are handling */
	if (!fis)
		return;

	/* EH owns a frozen port; don't touch anything */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
		nv_swncq_hotplug(ap, fis);
		return;
	}

	if (!pp->qc_active)
		return;

	if (ap->ops->scr_read(ap, SCR_ERROR, &serror))
		return;
	ap->ops->scr_write(ap, SCR_ERROR, serror);

	if (ata_stat & ATA_ERR) {
		/* device reported an error — hand the port to EH */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
		ehi->err_mask |= AC_ERR_DEV;
		ehi->serror |= serror;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
		return;
	}

	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
		/* If the IRQ is backout, driver must issue
		 * the new command again some time later.
		 */
		pp->ncq_flags |= ncq_saw_backout;
	}

	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
		pp->ncq_flags |= ncq_saw_sdb;
		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
			ap->print_id, pp->qc_active, pp->dhfis_bits,
			pp->dmafis_bits, readl(pp->sactive_block));
		rc = nv_swncq_sdbfis(ap);
		if (rc < 0)
			goto irq_error;
	}

	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
		/* The interrupt indicates the new command
		 * was transmitted correctly to the drive.
		 */
		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
		pp->ncq_flags |= ncq_saw_d2h;
		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
			/* D2H after SDB/backout in one round is invalid */
			ata_ehi_push_desc(ehi, "illegal fis transaction");
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			goto irq_error;
		}

		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
		    !(pp->ncq_flags & ncq_saw_dmas)) {
			/* drive not busy and no DMA in progress:
			   safe to push the next deferred command */
			ata_stat = ap->ops->check_status(ap);
			if (ata_stat & ATA_BUSY)
				goto irq_exit;

			if (pp->defer_queue.defer_bits) {
				DPRINTK("send next command\n");
				qc = nv_swncq_qc_from_dq(ap);
				nv_swncq_issue_atacmd(ap, qc);
			}
		}
	}

	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
		/* program the dma controller with appropriate PRD buffers
		 * and start the DMA transfer for requested command.
		 */
		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
		pp->ncq_flags |= ncq_saw_dmas;
		rc = nv_swncq_dmafis(ap);
	}

irq_exit:
	return;
irq_error:
	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
	ata_port_freeze(ap);
	return;
}
2283
2284static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2285{
2286 struct ata_host *host = dev_instance;
2287 unsigned int i;
2288 unsigned int handled = 0;
2289 unsigned long flags;
2290 u32 irq_stat;
2291
2292 spin_lock_irqsave(&host->lock, flags);
2293
2294 irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2295
2296 for (i = 0; i < host->n_ports; i++) {
2297 struct ata_port *ap = host->ports[i];
2298
2299 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
2300 if (ap->link.sactive) {
2301 nv_swncq_host_interrupt(ap, (u16)irq_stat);
2302 handled = 1;
2303 } else {
2304 if (irq_stat) /* reserve Hotplug */
2305 nv_swncq_irq_clear(ap, 0xfff0);
2306
2307 handled += nv_host_intr(ap, (u8)irq_stat);
2308 }
2309 }
2310 irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2311 }
2312
2313 spin_unlock_irqrestore(&host->lock, flags);
2314
2315 return IRQ_RETVAL(handled);
2316}
2317
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002318static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319{
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002320 static int printed_version;
Tejun Heo1626aeb2007-05-04 12:43:58 +02002321 const struct ata_port_info *ppi[] = { NULL, NULL };
Tejun Heo95947192008-03-25 12:22:49 +09002322 struct nv_pi_priv *ipriv;
Tejun Heo9a829cc2007-04-17 23:44:08 +09002323 struct ata_host *host;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002324 struct nv_host_priv *hpriv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002325 int rc;
2326 u32 bar;
Tejun Heo0d5ff562007-02-01 15:06:36 +09002327 void __iomem *base;
Robert Hancockfbbb2622006-10-27 19:08:41 -07002328 unsigned long type = ent->driver_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329
2330 // Make sure this is a SATA controller by counting the number of bars
2331 // (NVIDIA SATA controllers will always have six bars). Otherwise,
2332 // it's an IDE controller and we ignore it.
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002333 for (bar = 0; bar < 6; bar++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002334 if (pci_resource_start(pdev, bar) == 0)
2335 return -ENODEV;
2336
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002337 if (!printed_version++)
Jeff Garzika9524a72005-10-30 14:39:11 -05002338 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339
Tejun Heo24dc5f32007-01-20 16:00:28 +09002340 rc = pcim_enable_device(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002341 if (rc)
Tejun Heo24dc5f32007-01-20 16:00:28 +09002342 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343
Tejun Heo9a829cc2007-04-17 23:44:08 +09002344 /* determine type and allocate host */
Kuan Luof140f0f2007-10-15 15:16:53 -04002345 if (type == CK804 && adma_enabled) {
Robert Hancockfbbb2622006-10-27 19:08:41 -07002346 dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
2347 type = ADMA;
Robert Hancockfbbb2622006-10-27 19:08:41 -07002348 }
2349
Jeff Garzik360737a2007-10-29 06:49:24 -04002350 if (type == SWNCQ) {
2351 if (swncq_enabled)
2352 dev_printk(KERN_NOTICE, &pdev->dev,
2353 "Using SWNCQ mode\n");
2354 else
2355 type = GENERIC;
2356 }
2357
Tejun Heo1626aeb2007-05-04 12:43:58 +02002358 ppi[0] = &nv_port_info[type];
Tejun Heo95947192008-03-25 12:22:49 +09002359 ipriv = ppi[0]->private_data;
Tejun Heod583bc12007-07-04 18:02:07 +09002360 rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
Tejun Heo9a829cc2007-04-17 23:44:08 +09002361 if (rc)
2362 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002363
Tejun Heo24dc5f32007-01-20 16:00:28 +09002364 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002365 if (!hpriv)
Tejun Heo24dc5f32007-01-20 16:00:28 +09002366 return -ENOMEM;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002367 hpriv->type = type;
Tejun Heo9a829cc2007-04-17 23:44:08 +09002368 host->private_data = hpriv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002369
Tejun Heo9a829cc2007-04-17 23:44:08 +09002370 /* request and iomap NV_MMIO_BAR */
2371 rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2372 if (rc)
2373 return rc;
2374
2375 /* configure SCR access */
2376 base = host->iomap[NV_MMIO_BAR];
2377 host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2378 host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
Jeff Garzik02cbd922006-03-22 23:59:46 -05002379
Tejun Heoada364e2006-06-17 15:49:56 +09002380 /* enable SATA space for CK804 */
Robert Hancockfbbb2622006-10-27 19:08:41 -07002381 if (type >= CK804) {
Tejun Heoada364e2006-06-17 15:49:56 +09002382 u8 regval;
2383
2384 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2385 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2386 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2387 }
2388
Tejun Heo9a829cc2007-04-17 23:44:08 +09002389 /* init ADMA */
Robert Hancockfbbb2622006-10-27 19:08:41 -07002390 if (type == ADMA) {
Tejun Heo9a829cc2007-04-17 23:44:08 +09002391 rc = nv_adma_host_init(host);
Robert Hancockfbbb2622006-10-27 19:08:41 -07002392 if (rc)
Tejun Heo24dc5f32007-01-20 16:00:28 +09002393 return rc;
Jeff Garzik360737a2007-10-29 06:49:24 -04002394 } else if (type == SWNCQ)
Kuan Luof140f0f2007-10-15 15:16:53 -04002395 nv_swncq_host_init(host);
Robert Hancockfbbb2622006-10-27 19:08:41 -07002396
Tejun Heo9a829cc2007-04-17 23:44:08 +09002397 pci_set_master(pdev);
Tejun Heo95947192008-03-25 12:22:49 +09002398 return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
2399 IRQF_SHARED, ipriv->sht);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002400}
2401
Tejun Heo438ac6d2007-03-02 17:31:26 +09002402#ifdef CONFIG_PM
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002403static int nv_pci_device_resume(struct pci_dev *pdev)
2404{
2405 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2406 struct nv_host_priv *hpriv = host->private_data;
Robert Hancockce053fa2007-02-05 16:26:04 -08002407 int rc;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002408
Robert Hancockce053fa2007-02-05 16:26:04 -08002409 rc = ata_pci_device_do_resume(pdev);
Jeff Garzikb4479162007-10-25 20:47:30 -04002410 if (rc)
Robert Hancockce053fa2007-02-05 16:26:04 -08002411 return rc;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002412
2413 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
Jeff Garzikb4479162007-10-25 20:47:30 -04002414 if (hpriv->type >= CK804) {
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002415 u8 regval;
2416
2417 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2418 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2419 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2420 }
Jeff Garzikb4479162007-10-25 20:47:30 -04002421 if (hpriv->type == ADMA) {
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002422 u32 tmp32;
2423 struct nv_adma_port_priv *pp;
2424 /* enable/disable ADMA on the ports appropriately */
2425 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2426
2427 pp = host->ports[0]->private_data;
Jeff Garzikb4479162007-10-25 20:47:30 -04002428 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002429 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002430 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002431 else
2432 tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002433 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002434 pp = host->ports[1]->private_data;
Jeff Garzikb4479162007-10-25 20:47:30 -04002435 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002436 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002437 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002438 else
2439 tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002440 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002441
2442 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2443 }
2444 }
2445
2446 ata_host_resume(host);
2447
2448 return 0;
2449}
Tejun Heo438ac6d2007-03-02 17:31:26 +09002450#endif
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002451
Jeff Garzikcca39742006-08-24 03:19:22 -04002452static void nv_ck804_host_stop(struct ata_host *host)
Tejun Heoada364e2006-06-17 15:49:56 +09002453{
Jeff Garzikcca39742006-08-24 03:19:22 -04002454 struct pci_dev *pdev = to_pci_dev(host->dev);
Tejun Heoada364e2006-06-17 15:49:56 +09002455 u8 regval;
2456
2457 /* disable SATA space for CK804 */
2458 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2459 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2460 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
Tejun Heoada364e2006-06-17 15:49:56 +09002461}
2462
Robert Hancockfbbb2622006-10-27 19:08:41 -07002463static void nv_adma_host_stop(struct ata_host *host)
2464{
2465 struct pci_dev *pdev = to_pci_dev(host->dev);
Robert Hancockfbbb2622006-10-27 19:08:41 -07002466 u32 tmp32;
2467
Robert Hancockfbbb2622006-10-27 19:08:41 -07002468 /* disable ADMA on the ports */
2469 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2470 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2471 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2472 NV_MCP_SATA_CFG_20_PORT1_EN |
2473 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2474
2475 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2476
2477 nv_ck804_host_stop(host);
2478}
2479
Linus Torvalds1da177e2005-04-16 15:20:36 -07002480static int __init nv_init(void)
2481{
Pavel Roskinb7887192006-08-10 18:13:18 +09002482 return pci_register_driver(&nv_pci_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483}
2484
/* Module exit point: unregister the PCI driver. */
static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}
2489
module_init(nv_init);
module_exit(nv_exit);
/* Module parameters (read-only after load, perm 0444): select the
 * optional ADMA and software-NCQ programming interfaces at probe time. */
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
module_param_named(swncq, swncq_enabled, bool, 0444);
MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: false)");
2496