blob: 88fd4aeacde0971066eb7b242033e5675602bd90 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * sata_nv.c - NVIDIA nForce SATA
3 *
4 * Copyright 2004 NVIDIA Corp. All rights reserved.
5 * Copyright 2004 Andrew Chew
6 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 *
Jeff Garzikaa7e16d2005-08-29 15:12:56 -04008 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
Linus Torvalds1da177e2005-04-16 15:20:36 -070021 *
Jeff Garzikaf36d7f2005-08-28 20:18:39 -040022 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 * No hardware documentation available outside of NVIDIA.
27 * This driver programs the NVIDIA SATA controller in a similar
28 * fashion as with other PCI IDE BMDMA controllers, with a few
29 * NV-specific details such as register offsets, SATA phy location,
30 * hotplug info, etc.
31 *
Robert Hancockfbbb2622006-10-27 19:08:41 -070032 * CK804/MCP04 controllers support an alternate programming interface
33 * similar to the ADMA specification (with some modifications).
34 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35 * sent through the legacy interface.
36 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070037 */
38
Linus Torvalds1da177e2005-04-16 15:20:36 -070039#include <linux/kernel.h>
40#include <linux/module.h>
41#include <linux/pci.h>
42#include <linux/init.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/interrupt.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050046#include <linux/device.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <scsi/scsi_host.h>
Robert Hancockfbbb2622006-10-27 19:08:41 -070048#include <scsi/scsi_device.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070049#include <linux/libata.h>
50
/* Driver identity and the DMA boundary used for the ADMA request queue. */
#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.5"

/* ADMA engine can cross 4GB-aligned boundaries (32-bit boundary mask). */
#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
Linus Torvalds1da177e2005-04-16 15:20:36 -070055
Jeff Garzik10ad05d2006-03-22 23:50:50 -050056enum {
Tejun Heo0d5ff562007-02-01 15:06:36 +090057 NV_MMIO_BAR = 5,
58
Jeff Garzik10ad05d2006-03-22 23:50:50 -050059 NV_PORTS = 2,
60 NV_PIO_MASK = 0x1f,
61 NV_MWDMA_MASK = 0x07,
62 NV_UDMA_MASK = 0x7f,
63 NV_PORT0_SCR_REG_OFFSET = 0x00,
64 NV_PORT1_SCR_REG_OFFSET = 0x40,
Linus Torvalds1da177e2005-04-16 15:20:36 -070065
Tejun Heo27e4b272006-06-17 15:49:55 +090066 /* INT_STATUS/ENABLE */
Jeff Garzik10ad05d2006-03-22 23:50:50 -050067 NV_INT_STATUS = 0x10,
Jeff Garzik10ad05d2006-03-22 23:50:50 -050068 NV_INT_ENABLE = 0x11,
Tejun Heo27e4b272006-06-17 15:49:55 +090069 NV_INT_STATUS_CK804 = 0x440,
Jeff Garzik10ad05d2006-03-22 23:50:50 -050070 NV_INT_ENABLE_CK804 = 0x441,
Linus Torvalds1da177e2005-04-16 15:20:36 -070071
Tejun Heo27e4b272006-06-17 15:49:55 +090072 /* INT_STATUS/ENABLE bits */
73 NV_INT_DEV = 0x01,
74 NV_INT_PM = 0x02,
75 NV_INT_ADDED = 0x04,
76 NV_INT_REMOVED = 0x08,
77
78 NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */
79
Tejun Heo39f87582006-06-17 15:49:56 +090080 NV_INT_ALL = 0x0f,
Tejun Heo5a44eff2006-06-17 15:49:56 +090081 NV_INT_MASK = NV_INT_DEV |
82 NV_INT_ADDED | NV_INT_REMOVED,
Tejun Heo39f87582006-06-17 15:49:56 +090083
Tejun Heo27e4b272006-06-17 15:49:55 +090084 /* INT_CONFIG */
Jeff Garzik10ad05d2006-03-22 23:50:50 -050085 NV_INT_CONFIG = 0x12,
86 NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI
Linus Torvalds1da177e2005-04-16 15:20:36 -070087
Jeff Garzik10ad05d2006-03-22 23:50:50 -050088 // For PCI config register 20
89 NV_MCP_SATA_CFG_20 = 0x50,
90 NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
Robert Hancockfbbb2622006-10-27 19:08:41 -070091 NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
92 NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
93 NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
94 NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),
95
96 NV_ADMA_MAX_CPBS = 32,
97 NV_ADMA_CPB_SZ = 128,
98 NV_ADMA_APRD_SZ = 16,
99 NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
100 NV_ADMA_APRD_SZ,
101 NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
102 NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
103 NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
104 (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
105
106 /* BAR5 offset to ADMA general registers */
107 NV_ADMA_GEN = 0x400,
108 NV_ADMA_GEN_CTL = 0x00,
109 NV_ADMA_NOTIFIER_CLEAR = 0x30,
110
111 /* BAR5 offset to ADMA ports */
112 NV_ADMA_PORT = 0x480,
113
114 /* size of ADMA port register space */
115 NV_ADMA_PORT_SIZE = 0x100,
116
117 /* ADMA port registers */
118 NV_ADMA_CTL = 0x40,
119 NV_ADMA_CPB_COUNT = 0x42,
120 NV_ADMA_NEXT_CPB_IDX = 0x43,
121 NV_ADMA_STAT = 0x44,
122 NV_ADMA_CPB_BASE_LOW = 0x48,
123 NV_ADMA_CPB_BASE_HIGH = 0x4C,
124 NV_ADMA_APPEND = 0x50,
125 NV_ADMA_NOTIFIER = 0x68,
126 NV_ADMA_NOTIFIER_ERROR = 0x6C,
127
128 /* NV_ADMA_CTL register bits */
129 NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
130 NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
131 NV_ADMA_CTL_GO = (1 << 7),
132 NV_ADMA_CTL_AIEN = (1 << 8),
133 NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
134 NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),
135
136 /* CPB response flag bits */
137 NV_CPB_RESP_DONE = (1 << 0),
138 NV_CPB_RESP_ATA_ERR = (1 << 3),
139 NV_CPB_RESP_CMD_ERR = (1 << 4),
140 NV_CPB_RESP_CPB_ERR = (1 << 7),
141
142 /* CPB control flag bits */
143 NV_CPB_CTL_CPB_VALID = (1 << 0),
144 NV_CPB_CTL_QUEUE = (1 << 1),
145 NV_CPB_CTL_APRD_VALID = (1 << 2),
146 NV_CPB_CTL_IEN = (1 << 3),
147 NV_CPB_CTL_FPDMA = (1 << 4),
148
149 /* APRD flags */
150 NV_APRD_WRITE = (1 << 1),
151 NV_APRD_END = (1 << 2),
152 NV_APRD_CONT = (1 << 3),
153
154 /* NV_ADMA_STAT flags */
155 NV_ADMA_STAT_TIMEOUT = (1 << 0),
156 NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
157 NV_ADMA_STAT_HOTPLUG = (1 << 2),
158 NV_ADMA_STAT_CPBERR = (1 << 4),
159 NV_ADMA_STAT_SERROR = (1 << 5),
160 NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
161 NV_ADMA_STAT_IDLE = (1 << 8),
162 NV_ADMA_STAT_LEGACY = (1 << 9),
163 NV_ADMA_STAT_STOPPED = (1 << 10),
164 NV_ADMA_STAT_DONE = (1 << 12),
165 NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400166 NV_ADMA_STAT_TIMEOUT,
Robert Hancockfbbb2622006-10-27 19:08:41 -0700167
168 /* port flags */
169 NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
Robert Hancock2dec7552006-11-26 14:20:19 -0600170 NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
Robert Hancockfbbb2622006-10-27 19:08:41 -0700171
Kuan Luof140f0f2007-10-15 15:16:53 -0400172 /* MCP55 reg offset */
173 NV_CTL_MCP55 = 0x400,
174 NV_INT_STATUS_MCP55 = 0x440,
175 NV_INT_ENABLE_MCP55 = 0x444,
176 NV_NCQ_REG_MCP55 = 0x448,
177
178 /* MCP55 */
179 NV_INT_ALL_MCP55 = 0xffff,
180 NV_INT_PORT_SHIFT_MCP55 = 16, /* each port occupies 16 bits */
181 NV_INT_MASK_MCP55 = NV_INT_ALL_MCP55 & 0xfffd,
182
183 /* SWNCQ ENABLE BITS*/
184 NV_CTL_PRI_SWNCQ = 0x02,
185 NV_CTL_SEC_SWNCQ = 0x04,
186
187 /* SW NCQ status bits*/
188 NV_SWNCQ_IRQ_DEV = (1 << 0),
189 NV_SWNCQ_IRQ_PM = (1 << 1),
190 NV_SWNCQ_IRQ_ADDED = (1 << 2),
191 NV_SWNCQ_IRQ_REMOVED = (1 << 3),
192
193 NV_SWNCQ_IRQ_BACKOUT = (1 << 4),
194 NV_SWNCQ_IRQ_SDBFIS = (1 << 5),
195 NV_SWNCQ_IRQ_DHREGFIS = (1 << 6),
196 NV_SWNCQ_IRQ_DMASETUP = (1 << 7),
197
198 NV_SWNCQ_IRQ_HOTPLUG = NV_SWNCQ_IRQ_ADDED |
199 NV_SWNCQ_IRQ_REMOVED,
200
Jeff Garzik10ad05d2006-03-22 23:50:50 -0500201};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202
/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;		/* bus address of segment */
	__le32			len;		/* segment length in bytes */
	u8			flags;		/* NV_APRD_* bits */
	u8			packet_len;
	__le16			reserved;
};
211
/* Control bits used when encoding taskfile entries into a CPB
   (presumably consumed by nv_adma_tf_to_cpb() below). */
enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};
221
/* ADMA Command Parameter Block.
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd.  Byte offsets are noted per field. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0  - NV_CPB_RESP_* bits */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2  - NV_CPB_CTL_* bits */
	/* len is length of taskfile in 64 bit words */
	u8			len;		/* 3  */
	u8			tag;           /* 4  - NCQ tag */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 - encoded taskfile */
	struct nv_adma_prd	aprd[5];       /* 32-111 - inline SG segments */
	__le64			next_aprd;     /* 112-119 - bus addr of overflow SG table */
	__le64			reserved3;     /* 120-127 */
};
240
241
/* Per-port private state for the ADMA (CK804/MCP04) interface. */
struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;		/* CPB array (CPU address) */
	dma_addr_t		cpb_dma;	/* CPB array bus address */
	struct nv_adma_prd	*aprd;		/* overflow SG tables (CPU address) */
	dma_addr_t		aprd_dma;	/* overflow SG tables bus address */
	void __iomem		*ctl_block;	/* per-port ADMA registers (NV_ADMA_CTL/STAT/...) */
	void __iomem		*gen_block;	/* ADMA general registers */
	void __iomem		*notifier_clear_block;
	u64			adma_dma_mask;	/* full mask restored when no port is in ATAPI mode */
	u8			flags;		/* NV_ADMA_PORT_REGISTER_MODE / NV_ADMA_ATAPI_SETUP_COMPLETE */
	int			last_issue_ncq;
};
254
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600255struct nv_host_priv {
256 unsigned long type;
257};
258
/* FIFO circular queue holding tags of deferred SWNCQ commands.
   defer_bits presumably has one bit set per queued tag -- confirm
   against the enqueue/dequeue helpers elsewhere in this file. */
struct defer_queue {
	u32		defer_bits;
	unsigned int	head;
	unsigned int	tail;
	unsigned int	tag[ATA_MAX_QUEUE];
};
265
/* Flags recording which FIS types have been observed during NCQ
   interrupt analysis (stored in nv_swncq_port_priv.ncq_flags). */
enum ncq_saw_flag_list {
	ncq_saw_d2h	= (1U << 0),
	ncq_saw_dmas	= (1U << 1),
	ncq_saw_sdb	= (1U << 2),
	ncq_saw_backout	= (1U << 3),
};
272
/* Per-port private state for the MCP51/MCP55 software-NCQ interface. */
struct nv_swncq_port_priv {
	struct ata_prd	*prd;	 /* our SG list */
	dma_addr_t	prd_dma; /* and its DMA mapping */
	void __iomem	*sactive_block;
	void __iomem	*irq_block;
	void __iomem	*tag_block;
	u32		qc_active;	/* bitmask of in-flight tags */

	unsigned int	last_issue_tag;

	/* fifo circular queue to store deferral command */
	struct defer_queue defer_queue;

	/* for NCQ interrupt analysis: one bit per tag for each FIS kind seen */
	u32		dhfis_bits;
	u32		dmafis_bits;
	u32		sdbfis_bits;

	unsigned int	ncq_flags;	/* enum ncq_saw_flag_list bits */
};
293
294
Jeff Garzik5796d1c2007-10-26 00:03:37 -0400295#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
Robert Hancockfbbb2622006-10-27 19:08:41 -0700296
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400297static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
Tejun Heo438ac6d2007-03-02 17:31:26 +0900298#ifdef CONFIG_PM
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600299static int nv_pci_device_resume(struct pci_dev *pdev);
Tejun Heo438ac6d2007-03-02 17:31:26 +0900300#endif
Jeff Garzikcca39742006-08-24 03:19:22 -0400301static void nv_ck804_host_stop(struct ata_host *host);
David Howells7d12e782006-10-05 14:55:46 +0100302static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
303static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
304static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
Tejun Heo82ef04f2008-07-31 17:02:40 +0900305static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
306static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700307
Tejun Heo39f87582006-06-17 15:49:56 +0900308static void nv_nf2_freeze(struct ata_port *ap);
309static void nv_nf2_thaw(struct ata_port *ap);
310static void nv_ck804_freeze(struct ata_port *ap);
311static void nv_ck804_thaw(struct ata_port *ap);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700312static int nv_adma_slave_config(struct scsi_device *sdev);
Robert Hancock2dec7552006-11-26 14:20:19 -0600313static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700314static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
315static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
316static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
317static void nv_adma_irq_clear(struct ata_port *ap);
318static int nv_adma_port_start(struct ata_port *ap);
319static void nv_adma_port_stop(struct ata_port *ap);
Tejun Heo438ac6d2007-03-02 17:31:26 +0900320#ifdef CONFIG_PM
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600321static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
322static int nv_adma_port_resume(struct ata_port *ap);
Tejun Heo438ac6d2007-03-02 17:31:26 +0900323#endif
Robert Hancock53014e22007-05-05 15:36:36 -0600324static void nv_adma_freeze(struct ata_port *ap);
325static void nv_adma_thaw(struct ata_port *ap);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700326static void nv_adma_error_handler(struct ata_port *ap);
327static void nv_adma_host_stop(struct ata_host *host);
Robert Hancockf5ecac22007-02-20 21:49:10 -0600328static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
Robert Hancockf2fb3442007-03-26 21:43:36 -0800329static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
Tejun Heo39f87582006-06-17 15:49:56 +0900330
Kuan Luof140f0f2007-10-15 15:16:53 -0400331static void nv_mcp55_thaw(struct ata_port *ap);
332static void nv_mcp55_freeze(struct ata_port *ap);
333static void nv_swncq_error_handler(struct ata_port *ap);
334static int nv_swncq_slave_config(struct scsi_device *sdev);
335static int nv_swncq_port_start(struct ata_port *ap);
336static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
337static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
338static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
339static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
340static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
341#ifdef CONFIG_PM
342static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
343static int nv_swncq_port_resume(struct ata_port *ap);
344#endif
345
/* Controller flavors; also used as indices into nv_port_info[]. */
enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA,
	SWNCQ,
};
355
/* PCI IDs supported by this driver; driver_data selects the nv_host_type. */
static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ }	/* terminate list */
};
374
/* PCI driver glue; suspend/resume only when power management is built in. */
static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};
385
/* SCSI host template for the legacy (non-NCQ) flavors. */
static struct scsi_host_template nv_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};
389
/* SCSI host template for ADMA: queue depth / SG limits sized to the
   CPB ring, and a slave_configure hook to handle ATAPI restrictions. */
static struct scsi_host_template nv_adma_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= NV_ADMA_MAX_CPBS,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
};
397
/* SCSI host template for software NCQ on MCP51/MCP55. */
static struct scsi_host_template nv_swncq_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= ATA_MAX_QUEUE,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= nv_swncq_slave_config,
};
405
/* Base port ops shared by all flavors: plain BMDMA plus NV SCR access.
   hardreset is stubbed out with ATA_OP_NULL. */
static struct ata_port_operations nv_generic_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.hardreset		= ATA_OP_NULL,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
};
412
/* nForce2/3: generic ops plus NF2-specific freeze/thaw (interrupt masking). */
static struct ata_port_operations nv_nf2_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
};
418
/* CK804/MCP04 (legacy mode): own freeze/thaw and host_stop. */
static struct ata_port_operations nv_ck804_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.host_stop		= nv_ck804_host_stop,
};
425
/* ADMA mode ops: override command preparation/issue, interrupt clearing,
   taskfile readback and error handling to go through the ADMA engine. */
static struct ata_port_operations nv_adma_ops = {
	.inherits		= &nv_generic_ops,

	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.sff_tf_read		= nv_adma_tf_read,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.sff_irq_clear		= nv_adma_irq_clear,

	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,

	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};
449
/* Software-NCQ ops for MCP51/MCP55: SWNCQ-specific prep/issue and
   MCP55 freeze/thaw. */
static struct ata_port_operations nv_swncq_ops = {
	.inherits		= &nv_generic_ops,

	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_swncq_qc_prep,
	.qc_issue		= nv_swncq_qc_issue,

	.freeze			= nv_mcp55_freeze,
	.thaw			= nv_mcp55_thaw,
	.error_handler		= nv_swncq_error_handler,

#ifdef CONFIG_PM
	.port_suspend		= nv_swncq_port_suspend,
	.port_resume		= nv_swncq_port_resume,
#endif
	.port_start		= nv_swncq_port_start,
};
467
/* Per-flavor probe info stashed in ata_port_info.private_data:
   the IRQ handler and SCSI host template to register with. */
struct nv_pi_priv {
	irq_handler_t			irq_handler;
	struct scsi_host_template	*sht;
};
472
/* Build an anonymous static nv_pi_priv via a compound literal. */
#define NV_PI_PRIV(_irq_handler, _sht) \
	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
475
/* Port info table, indexed by enum nv_host_type (GENERIC, NFORCE2/3,
   CK804, ADMA, SWNCQ).  Keep entry order in sync with that enum. */
static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* nforce2/3 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
	},
	/* ck804 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
	},
	/* ADMA */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
	},
	/* SWNCQ */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_swncq_ops,
		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
	},
};
525
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* Feature toggles: ADMA off by default, software NCQ on by default
   (presumably exposed as module parameters later in this file). */
static int adma_enabled;
static int swncq_enabled = 1;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700534
Robert Hancock2dec7552006-11-26 14:20:19 -0600535static void nv_adma_register_mode(struct ata_port *ap)
536{
Robert Hancock2dec7552006-11-26 14:20:19 -0600537 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600538 void __iomem *mmio = pp->ctl_block;
Robert Hancocka2cfe812007-02-05 16:26:03 -0800539 u16 tmp, status;
540 int count = 0;
Robert Hancock2dec7552006-11-26 14:20:19 -0600541
542 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
543 return;
544
Robert Hancocka2cfe812007-02-05 16:26:03 -0800545 status = readw(mmio + NV_ADMA_STAT);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400546 while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
Robert Hancocka2cfe812007-02-05 16:26:03 -0800547 ndelay(50);
548 status = readw(mmio + NV_ADMA_STAT);
549 count++;
550 }
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400551 if (count == 20)
Robert Hancocka2cfe812007-02-05 16:26:03 -0800552 ata_port_printk(ap, KERN_WARNING,
553 "timeout waiting for ADMA IDLE, stat=0x%hx\n",
554 status);
555
Robert Hancock2dec7552006-11-26 14:20:19 -0600556 tmp = readw(mmio + NV_ADMA_CTL);
557 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
558
Robert Hancocka2cfe812007-02-05 16:26:03 -0800559 count = 0;
560 status = readw(mmio + NV_ADMA_STAT);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400561 while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
Robert Hancocka2cfe812007-02-05 16:26:03 -0800562 ndelay(50);
563 status = readw(mmio + NV_ADMA_STAT);
564 count++;
565 }
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400566 if (count == 20)
Robert Hancocka2cfe812007-02-05 16:26:03 -0800567 ata_port_printk(ap, KERN_WARNING,
568 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
569 status);
570
Robert Hancock2dec7552006-11-26 14:20:19 -0600571 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
572}
573
574static void nv_adma_mode(struct ata_port *ap)
575{
Robert Hancock2dec7552006-11-26 14:20:19 -0600576 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600577 void __iomem *mmio = pp->ctl_block;
Robert Hancocka2cfe812007-02-05 16:26:03 -0800578 u16 tmp, status;
579 int count = 0;
Robert Hancock2dec7552006-11-26 14:20:19 -0600580
581 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
582 return;
Jeff Garzikf20b16f2006-12-11 11:14:06 -0500583
Robert Hancock2dec7552006-11-26 14:20:19 -0600584 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
585
586 tmp = readw(mmio + NV_ADMA_CTL);
587 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
588
Robert Hancocka2cfe812007-02-05 16:26:03 -0800589 status = readw(mmio + NV_ADMA_STAT);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400590 while (((status & NV_ADMA_STAT_LEGACY) ||
Robert Hancocka2cfe812007-02-05 16:26:03 -0800591 !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
592 ndelay(50);
593 status = readw(mmio + NV_ADMA_STAT);
594 count++;
595 }
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400596 if (count == 20)
Robert Hancocka2cfe812007-02-05 16:26:03 -0800597 ata_port_printk(ap, KERN_WARNING,
598 "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
599 status);
600
Robert Hancock2dec7552006-11-26 14:20:19 -0600601 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
602}
603
/*
 * nv_adma_slave_config - SCSI slave_configure hook for ADMA ports.
 *
 * ATAPI devices cannot use ADMA, so when one is attached the port is
 * forced into legacy register mode with 32-bit DMA restrictions; the
 * per-port enable bits in PCI config register 0x50 are updated to match.
 * Because both ports share one PCI function (and thus one DMA mask),
 * the mask and each port's block-layer bounce limit are re-evaluated
 * across BOTH ports whenever either port's ATAPI state changes.
 * Runs under ap->lock to keep flag/mask updates atomic w.r.t. the other
 * port.  Returns the value from ata_scsi_slave_config().
 */
static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct nv_adma_port_priv *port0, *port1;
	struct scsi_device *sdev0, *sdev1;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	unsigned long segment_boundary, flags;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	spin_lock_irqsave(ap->lock, flags);

	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	/* update the per-port ADMA enable bits in PCI config space */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	port0 = ap->host->ports[0]->private_data;
	port1 = ap->host->ports[1]->private_data;
	sdev0 = ap->host->ports[0]->link.device[0].sdev;
	sdev1 = ap->host->ports[1]->link.device[0].sdev;
	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/** We have to set the DMA mask to 32-bit if either port is in
		    ATAPI mode, since they are on the same PCI device which is
		    used for DMA mapping. If we set the mask we also need to set
		    the bounce limit on both ports to ensure that the block
		    layer doesn't feed addresses that cause DMA mapping to
		    choke. If either SCSI device is not allocated yet, it's OK
		    since that port will discover its correct setting when it
		    does get allocated.
		    Note: Setting 32-bit mask should not fail. */
		if (sdev0)
			blk_queue_bounce_limit(sdev0->request_queue,
					       ATA_DMA_MASK);
		if (sdev1)
			blk_queue_bounce_limit(sdev1->request_queue,
					       ATA_DMA_MASK);

		pci_set_dma_mask(pdev, ATA_DMA_MASK);
	} else {
		/** This shouldn't fail as it was set to this value before */
		pci_set_dma_mask(pdev, pp->adma_dma_mask);
		if (sdev0)
			blk_queue_bounce_limit(sdev0->request_queue,
					       pp->adma_dma_mask);
		if (sdev1)
			blk_queue_bounce_limit(sdev1->request_queue,
					       pp->adma_dma_mask);
	}

	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)*ap->host->dev->dma_mask,
		segment_boundary, sg_tablesize);

	spin_unlock_irqrestore(ap->lock, flags);

	return rc;
}
713
Robert Hancock2dec7552006-11-26 14:20:19 -0600714static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
715{
716 struct nv_adma_port_priv *pp = qc->ap->private_data;
717 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
718}
719
/* Read the current taskfile. The port must first be dropped back into
   ATA register mode, since the taskfile registers are only accessible
   through the legacy interface. */
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Other than when internal or pass-through commands are executed,
	   the only time this function will be called in ADMA mode will be
	   if a command fails. In the failure case we don't care about going
	   into register mode with ADMA commands pending, as the commands will
	   all shortly be aborted anyway. We assume that NCQ commands are not
	   issued via passthrough, which is the only way that switching into
	   ADMA mode could abort outstanding commands. */
	nv_adma_register_mode(ap);

	ata_sff_tf_read(ap, tf);
}
733
/* Encode an ATA taskfile into the register-write stream of a CPB.
   Each 16-bit entry is (register index << 8) | value, with WNB/CMDEND
   control bits on selected entries. Returns the number of entries
   written; the stream is always padded to 12 entries with IGN. */
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	if (tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			/* HOB (previous) register values go out first,
			   WNB set on the first entry of the sequence. */
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
		} else
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	/* The command register write is always last; CMDEND marks it. */
	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	/* Pad the remainder of the fixed-size stream with ignore entries. */
	while (idx < 12)
		cpb[idx++] = cpu_to_le16(IGN);

	return idx;
}
765
/* Examine one CPB's response flags and complete or fail the associated
   command. @force_err forces the error path even if the CPB itself shows
   no error bits (used when the notifier-error register flagged this tag).
   Returns 1 if error handling was invoked (caller must stop scanning),
   0 otherwise. */
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			/* device-reported error */
			ata_ehi_push_desc(ehi, "ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, "CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			/* controller-side failure: freeze for a full reset */
			ata_ehi_push_desc(ehi, "CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ata_ehi_push_desc(ehi, "unknown");
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return 1;
	}

	if (likely(flags & NV_CPB_RESP_DONE)) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		if (likely(qc)) {
			DPRINTK("Completing qc from tag %d\n", cpb_num);
			ata_qc_complete(qc);
		} else {
			struct ata_eh_info *ehi = &ap->link.eh_info;
			/* Notifier bits set without a command may indicate the drive
			   is misbehaving. Raise host state machine violation on this
			   condition. */
			ata_port_printk(ap, KERN_ERR,
					"notifier for tag %d with no cmd?\n",
					cpb_num);
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			ata_port_freeze(ap);
			return 1;
		}
	}
	return 0;
}
828
Robert Hancock2dec7552006-11-26 14:20:19 -0600829static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
830{
Tejun Heo9af5c9c2007-08-06 18:36:22 +0900831 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
Robert Hancock2dec7552006-11-26 14:20:19 -0600832
833 /* freeze if hotplugged */
834 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
835 ata_port_freeze(ap);
836 return 1;
837 }
838
839 /* bail out if not our interrupt */
840 if (!(irq_stat & NV_INT_DEV))
841 return 0;
842
843 /* DEV interrupt w/ no active qc? */
844 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
Tejun Heo9363c382008-04-07 22:47:16 +0900845 ata_sff_check_status(ap);
Robert Hancock2dec7552006-11-26 14:20:19 -0600846 return 1;
847 }
848
849 /* handle interrupt */
Tejun Heo9363c382008-04-07 22:47:16 +0900850 return ata_sff_host_intr(ap, qc);
Robert Hancock2dec7552006-11-26 14:20:19 -0600851}
852
/* Top-level interrupt handler for the ADMA-capable controller.
   Walks both ports, dispatching to the legacy handler when a port is in
   ATAPI or register mode, otherwise scanning the ADMA notifier/status
   registers and completing CPBs. Notifier clears for both ports are
   accumulated and written together at the end (hardware requirement). */
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = pp->ctl_block;
			u16 status;
			u32 gen_ctl;
			u32 notifier, notifier_error;

			/* if ADMA is disabled, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			/* if in ATA register mode, check for standard interrupts */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				if (ata_tag_valid(ap->link.active_tag))
					/** NV_INT_DEV indication seems unreliable at times
					    at least in ADMA mode. Force it on always when a
					    command is active, to prevent losing interrupts. */
					irq_stat |= NV_INT_DEV;
				handled += nv_host_intr(ap, irq_stat);
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

			if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();

			handled++; /* irq handled if we got here */

			/* freeze if hotplugged or controller error */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
					       NV_ADMA_STAT_HOTUNPLUG |
					       NV_ADMA_STAT_TIMEOUT |
					       NV_ADMA_STAT_SERROR))) {
				struct ata_eh_info *ehi = &ap->link.eh_info;

				ata_ehi_clear_desc(ehi);
				__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
				if (status & NV_ADMA_STAT_TIMEOUT) {
					ehi->err_mask |= AC_ERR_SYSTEM;
					ata_ehi_push_desc(ehi, "timeout");
				} else if (status & NV_ADMA_STAT_HOTPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, "hotplug");
				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, "hot unplug");
				} else if (status & NV_ADMA_STAT_SERROR) {
					/* let libata analyze SError and figure out the cause */
					ata_ehi_push_desc(ehi, "SError");
				} else
					ata_ehi_push_desc(ehi, "unknown");
				ata_port_freeze(ap);
				continue;
			}

			if (status & (NV_ADMA_STAT_DONE |
				      NV_ADMA_STAT_CPBERR |
				      NV_ADMA_STAT_CMD_COMPLETE)) {
				u32 check_commands = notifier_clears[i];
				int pos, error = 0;

				if (status & NV_ADMA_STAT_CPBERR) {
					/* Check all active commands */
					if (ata_tag_valid(ap->link.active_tag))
						check_commands = 1 <<
							ap->link.active_tag;
					else
						check_commands = ap->
							link.sactive;
				}

				/** Check CPBs for completed commands */
				while ((pos = ffs(check_commands)) && !error) {
					pos--;
					error = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos));
					check_commands &= ~(1 << pos);
				}
			}
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
981
/* Freeze the port: mask interrupts so EH can run undisturbed.
   In ATAPI (legacy) mode only the CK804-level freeze applies. */
static void nv_adma_freeze(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_freeze(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* Disable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}
1003
/* Thaw the port: re-enable interrupts after EH completes.
   Mirror image of nv_adma_freeze(). */
static void nv_adma_thaw(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_thaw(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* Enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}
1021
/* Clear all pending interrupt/notifier state for this port.
   Falls back to the SFF clear when the port is in ATAPI setup. */
static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u32 notifier_clears[2];

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
		ata_sff_irq_clear(ap);
		return;
	}

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* clear ADMA status */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* clear notifiers - note both ports need to be written with
	   something even though we are only clearing on one */
	if (ap->port_no == 0) {
		notifier_clears[0] = 0xFFFFFFFF;
		notifier_clears[1] = 0;
	} else {
		notifier_clears[0] = 0;
		notifier_clears[1] = 0xFFFFFFFF;
	}
	pp = ap->host->ports[0]->private_data;
	writel(notifier_clears[0], pp->notifier_clear_block);
	pp = ap->host->ports[1]->private_data;
	writel(notifier_clears[1], pp->notifier_clear_block);
}
1054
Robert Hancockf5ecac22007-02-20 21:49:10 -06001055static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
Robert Hancockfbbb2622006-10-27 19:08:41 -07001056{
Robert Hancockf5ecac22007-02-20 21:49:10 -06001057 struct nv_adma_port_priv *pp = qc->ap->private_data;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001058
Jeff Garzikb4479162007-10-25 20:47:30 -04001059 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
Tejun Heo9363c382008-04-07 22:47:16 +09001060 ata_sff_post_internal_cmd(qc);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001061}
1062
/* Per-port initialization: allocate private data and the CPB/APRD DMA
   area, program the CPB base registers, and reset the ADMA channel.
   The PCI DMA mask is temporarily lowered to 32-bit so the legacy PRD
   and pad buffers land in 32-bit space, then raised again for the
   CPB/APRD table. Returns 0 or a negative errno. */
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 tmp;

	VPRINTK("ENTER\n");

	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
	   pad buffers */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	/* devm allocation: freed automatically on device teardown */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	/* Now that the legacy PRD and padding buffer are allocated we can
	   safely raise the DMA mask to allocate the CPB/APRD table.
	   These are allowed to fail since we store the value that ends up
	   being used to set as the bounce limit in slave_config later if
	   needed. */
	pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	pp->adma_dma_mask = *dev->dma_mask;

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
	       NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	/* pulse CHANNEL_RESET to bring the ADMA engine to a known state */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
1160
1161static void nv_adma_port_stop(struct ata_port *ap)
1162{
Robert Hancockfbbb2622006-10-27 19:08:41 -07001163 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001164 void __iomem *mmio = pp->ctl_block;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001165
1166 VPRINTK("ENTER\n");
Robert Hancockfbbb2622006-10-27 19:08:41 -07001167 writew(0, mmio + NV_ADMA_CTL);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001168}
1169
Tejun Heo438ac6d2007-03-02 17:31:26 +09001170#ifdef CONFIG_PM
/* Suspend callback: quiesce the ADMA engine before power-down. */
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}
1187
/* Resume callback: reprogram the CPB base and re-initialize the ADMA
   engine, mirroring the setup done in nv_adma_port_start(). */
static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
	       NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	/* pulse CHANNEL_RESET to restart the engine cleanly */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
Tejun Heo438ac6d2007-03-02 17:31:26 +09001221#endif
Robert Hancockfbbb2622006-10-27 19:08:41 -07001222
/* Point the port's taskfile I/O addresses at the ADMA register window,
   where the shadow registers are exposed at 4-byte strides. */
static void nv_adma_setup_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &ap->ioaddr;

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}
1246
/* Host-level ADMA bring-up: enable ADMA on both ports via PCI config
   space, then wire up each port's I/O addresses. Always returns 0. */
static int nv_adma_host_init(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < host->n_ports; i++)
		nv_adma_setup_port(host->ports[i]);

	return 0;
}
1269
1270static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1271 struct scatterlist *sg,
1272 int idx,
1273 struct nv_adma_prd *aprd)
1274{
Robert Hancock41949ed2007-02-19 19:02:27 -06001275 u8 flags = 0;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001276 if (qc->tf.flags & ATA_TFLAG_WRITE)
1277 flags |= NV_APRD_WRITE;
1278 if (idx == qc->n_elem - 1)
1279 flags |= NV_APRD_END;
1280 else if (idx != 4)
1281 flags |= NV_APRD_CONT;
1282
1283 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
1284 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
Robert Hancock2dec7552006-11-26 14:20:19 -06001285 aprd->flags = flags;
Robert Hancock41949ed2007-02-19 19:02:27 -06001286 aprd->packet_len = 0;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001287}
1288
/* Build the PRD list for a command: the first five entries live inline
   in the CPB, any further entries go into this tag's slot of the
   external APRD table, which the CPB then points at via next_aprd. */
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;
	unsigned int si;

	VPRINTK("ENTER\n");

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		/* entries 0-4 inline, 5+ in the external table */
		aprd = (si < 5) ? &cpb->aprd[si] :
			       &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
		nv_adma_fill_aprd(qc, sg, si, aprd);
	}
	if (si > 5)
		/* link the CPB to this tag's external APRD table */
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}
1308
Robert Hancock382a6652007-02-05 16:26:02 -08001309static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1310{
1311 struct nv_adma_port_priv *pp = qc->ap->private_data;
1312
1313 /* ADMA engine can only be used for non-ATAPI DMA commands,
Robert Hancock3f3debd2007-11-25 16:59:36 -06001314 or interrupt-driven no-data commands. */
Jeff Garzikb4479162007-10-25 20:47:30 -04001315 if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
Robert Hancock3f3debd2007-11-25 16:59:36 -06001316 (qc->tf.flags & ATA_TFLAG_POLLING))
Robert Hancock382a6652007-02-05 16:26:02 -08001317 return 1;
1318
Jeff Garzikb4479162007-10-25 20:47:30 -04001319 if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
Robert Hancock382a6652007-02-05 16:26:02 -08001320 (qc->tf.protocol == ATA_PROT_NODATA))
1321 return 0;
1322
1323 return 1;
1324}
1325
/* Prepare a command: either fall back to SFF preparation in register
   mode, or build the CPB. The wmb() barrier ordering is critical: the
   device must never observe NV_CPB_CTL_CPB_VALID before the rest of
   the CPB contents are in place. */
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		/* DMA-mapped commands must not reach register mode unless
		   the port is in ATAPI setup */
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		ata_sff_qc_prep(qc);
		return;
	}

	/* invalidate the CPB before rewriting it */
	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len = 3;
	cpb->tag = qc->tag;
	cpb->next_cpb_idx = 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
	   until we are finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;
}
1371
/*
 * nv_adma_qc_issue - issue a prepared command to the controller
 *
 * Register-mode commands are deferred to ata_sff_qc_issue() after the
 * port is switched out of ADMA mode; everything else is kicked off by
 * writing the command's tag to the ADMA append register.
 *
 * Returns 0 on success or AC_ERR_SYSTEM for the unsupported
 * NCQ + RESULT_TF combination.
 */
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	/* We can't handle result taskfile with NCQ commands, since
	   retrieving the taskfile switches us out of ADMA mode and would abort
	   existing commands. */
	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
		ata_dev_printk(qc->dev, KERN_ERR,
			"NCQ w/ RESULT_TF not allowed\n");
		return AC_ERR_SYSTEM;
	}

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		return ata_sff_qc_issue(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and
		   non-NCQ commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}
1417
David Howells7d12e782006-10-05 14:55:46 +01001418static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419{
Jeff Garzikcca39742006-08-24 03:19:22 -04001420 struct ata_host *host = dev_instance;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421 unsigned int i;
1422 unsigned int handled = 0;
1423 unsigned long flags;
1424
Jeff Garzikcca39742006-08-24 03:19:22 -04001425 spin_lock_irqsave(&host->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426
Jeff Garzikcca39742006-08-24 03:19:22 -04001427 for (i = 0; i < host->n_ports; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001428 struct ata_port *ap;
1429
Jeff Garzikcca39742006-08-24 03:19:22 -04001430 ap = host->ports[i];
Tejun Heoc1389502005-08-22 14:59:24 +09001431 if (ap &&
Jeff Garzik029f5462006-04-02 10:30:40 -04001432 !(ap->flags & ATA_FLAG_DISABLED)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433 struct ata_queued_cmd *qc;
1434
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001435 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Albert Leee50362e2005-09-27 17:39:50 +08001436 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
Tejun Heo9363c382008-04-07 22:47:16 +09001437 handled += ata_sff_host_intr(ap, qc);
Andrew Chewb8870302006-01-04 19:13:04 -08001438 else
1439 // No request pending? Clear interrupt status
1440 // anyway, in case there's one pending.
Tejun Heo5682ed32008-04-07 22:47:16 +09001441 ap->ops->sff_check_status(ap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442 }
1443
1444 }
1445
Jeff Garzikcca39742006-08-24 03:19:22 -04001446 spin_unlock_irqrestore(&host->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447
1448 return IRQ_RETVAL(handled);
1449}
1450
Jeff Garzikcca39742006-08-24 03:19:22 -04001451static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
Tejun Heoada364e2006-06-17 15:49:56 +09001452{
1453 int i, handled = 0;
1454
Jeff Garzikcca39742006-08-24 03:19:22 -04001455 for (i = 0; i < host->n_ports; i++) {
1456 struct ata_port *ap = host->ports[i];
Tejun Heoada364e2006-06-17 15:49:56 +09001457
1458 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1459 handled += nv_host_intr(ap, irq_stat);
1460
1461 irq_stat >>= NV_INT_PORT_SHIFT;
1462 }
1463
1464 return IRQ_RETVAL(handled);
1465}
1466
David Howells7d12e782006-10-05 14:55:46 +01001467static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
Tejun Heoada364e2006-06-17 15:49:56 +09001468{
Jeff Garzikcca39742006-08-24 03:19:22 -04001469 struct ata_host *host = dev_instance;
Tejun Heoada364e2006-06-17 15:49:56 +09001470 u8 irq_stat;
1471 irqreturn_t ret;
1472
Jeff Garzikcca39742006-08-24 03:19:22 -04001473 spin_lock(&host->lock);
Tejun Heo0d5ff562007-02-01 15:06:36 +09001474 irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
Jeff Garzikcca39742006-08-24 03:19:22 -04001475 ret = nv_do_interrupt(host, irq_stat);
1476 spin_unlock(&host->lock);
Tejun Heoada364e2006-06-17 15:49:56 +09001477
1478 return ret;
1479}
1480
David Howells7d12e782006-10-05 14:55:46 +01001481static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
Tejun Heoada364e2006-06-17 15:49:56 +09001482{
Jeff Garzikcca39742006-08-24 03:19:22 -04001483 struct ata_host *host = dev_instance;
Tejun Heoada364e2006-06-17 15:49:56 +09001484 u8 irq_stat;
1485 irqreturn_t ret;
1486
Jeff Garzikcca39742006-08-24 03:19:22 -04001487 spin_lock(&host->lock);
Tejun Heo0d5ff562007-02-01 15:06:36 +09001488 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
Jeff Garzikcca39742006-08-24 03:19:22 -04001489 ret = nv_do_interrupt(host, irq_stat);
1490 spin_unlock(&host->lock);
Tejun Heoada364e2006-06-17 15:49:56 +09001491
1492 return ret;
1493}
1494
Tejun Heo82ef04f2008-07-31 17:02:40 +09001495static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497 if (sc_reg > SCR_CONTROL)
Tejun Heoda3dbb12007-07-16 14:29:40 +09001498 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499
Tejun Heo82ef04f2008-07-31 17:02:40 +09001500 *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
Tejun Heoda3dbb12007-07-16 14:29:40 +09001501 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502}
1503
Tejun Heo82ef04f2008-07-31 17:02:40 +09001504static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506 if (sc_reg > SCR_CONTROL)
Tejun Heoda3dbb12007-07-16 14:29:40 +09001507 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508
Tejun Heo82ef04f2008-07-31 17:02:40 +09001509 iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
Tejun Heoda3dbb12007-07-16 14:29:40 +09001510 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511}
1512
Tejun Heo39f87582006-06-17 15:49:56 +09001513static void nv_nf2_freeze(struct ata_port *ap)
1514{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001515 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
Tejun Heo39f87582006-06-17 15:49:56 +09001516 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1517 u8 mask;
1518
Tejun Heo0d5ff562007-02-01 15:06:36 +09001519 mask = ioread8(scr_addr + NV_INT_ENABLE);
Tejun Heo39f87582006-06-17 15:49:56 +09001520 mask &= ~(NV_INT_ALL << shift);
Tejun Heo0d5ff562007-02-01 15:06:36 +09001521 iowrite8(mask, scr_addr + NV_INT_ENABLE);
Tejun Heo39f87582006-06-17 15:49:56 +09001522}
1523
1524static void nv_nf2_thaw(struct ata_port *ap)
1525{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001526 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
Tejun Heo39f87582006-06-17 15:49:56 +09001527 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1528 u8 mask;
1529
Tejun Heo0d5ff562007-02-01 15:06:36 +09001530 iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
Tejun Heo39f87582006-06-17 15:49:56 +09001531
Tejun Heo0d5ff562007-02-01 15:06:36 +09001532 mask = ioread8(scr_addr + NV_INT_ENABLE);
Tejun Heo39f87582006-06-17 15:49:56 +09001533 mask |= (NV_INT_MASK << shift);
Tejun Heo0d5ff562007-02-01 15:06:36 +09001534 iowrite8(mask, scr_addr + NV_INT_ENABLE);
Tejun Heo39f87582006-06-17 15:49:56 +09001535}
1536
1537static void nv_ck804_freeze(struct ata_port *ap)
1538{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001539 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
Tejun Heo39f87582006-06-17 15:49:56 +09001540 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1541 u8 mask;
1542
1543 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1544 mask &= ~(NV_INT_ALL << shift);
1545 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1546}
1547
1548static void nv_ck804_thaw(struct ata_port *ap)
1549{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001550 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
Tejun Heo39f87582006-06-17 15:49:56 +09001551 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1552 u8 mask;
1553
1554 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1555
1556 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1557 mask |= (NV_INT_MASK << shift);
1558 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1559}
1560
Kuan Luof140f0f2007-10-15 15:16:53 -04001561static void nv_mcp55_freeze(struct ata_port *ap)
1562{
1563 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1564 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1565 u32 mask;
1566
1567 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1568
1569 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1570 mask &= ~(NV_INT_ALL_MCP55 << shift);
1571 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
Tejun Heo9363c382008-04-07 22:47:16 +09001572 ata_sff_freeze(ap);
Kuan Luof140f0f2007-10-15 15:16:53 -04001573}
1574
1575static void nv_mcp55_thaw(struct ata_port *ap)
1576{
1577 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1578 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1579 u32 mask;
1580
1581 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1582
1583 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1584 mask |= (NV_INT_MASK_MCP55 << shift);
1585 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
Tejun Heo9363c382008-04-07 22:47:16 +09001586 ata_sff_thaw(ap);
Kuan Luof140f0f2007-10-15 15:16:53 -04001587}
1588
Robert Hancockfbbb2622006-10-27 19:08:41 -07001589static void nv_adma_error_handler(struct ata_port *ap)
1590{
1591 struct nv_adma_port_priv *pp = ap->private_data;
Jeff Garzikb4479162007-10-25 20:47:30 -04001592 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001593 void __iomem *mmio = pp->ctl_block;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001594 int i;
1595 u16 tmp;
Jeff Garzika84471f2007-02-26 05:51:33 -05001596
Jeff Garzikb4479162007-10-25 20:47:30 -04001597 if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
Robert Hancock2cb27852007-02-11 18:34:44 -06001598 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1599 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1600 u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1601 u32 status = readw(mmio + NV_ADMA_STAT);
Robert Hancock08af7412007-02-19 19:01:59 -06001602 u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1603 u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
Robert Hancock2cb27852007-02-11 18:34:44 -06001604
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001605 ata_port_printk(ap, KERN_ERR,
1606 "EH in ADMA mode, notifier 0x%X "
Robert Hancock08af7412007-02-19 19:01:59 -06001607 "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1608 "next cpb count 0x%X next cpb idx 0x%x\n",
1609 notifier, notifier_error, gen_ctl, status,
1610 cpb_count, next_cpb_idx);
Robert Hancock2cb27852007-02-11 18:34:44 -06001611
Jeff Garzikb4479162007-10-25 20:47:30 -04001612 for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
Robert Hancock2cb27852007-02-11 18:34:44 -06001613 struct nv_adma_cpb *cpb = &pp->cpb[i];
Jeff Garzikb4479162007-10-25 20:47:30 -04001614 if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001615 ap->link.sactive & (1 << i))
Robert Hancock2cb27852007-02-11 18:34:44 -06001616 ata_port_printk(ap, KERN_ERR,
1617 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1618 i, cpb->ctl_flags, cpb->resp_flags);
1619 }
1620 }
Robert Hancockfbbb2622006-10-27 19:08:41 -07001621
Robert Hancockfbbb2622006-10-27 19:08:41 -07001622 /* Push us back into port register mode for error handling. */
1623 nv_adma_register_mode(ap);
1624
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001625 /* Mark all of the CPBs as invalid to prevent them from
1626 being executed */
Jeff Garzikb4479162007-10-25 20:47:30 -04001627 for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
Robert Hancockfbbb2622006-10-27 19:08:41 -07001628 pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1629
1630 /* clear CPB fetch count */
1631 writew(0, mmio + NV_ADMA_CPB_COUNT);
1632
1633 /* Reset channel */
1634 tmp = readw(mmio + NV_ADMA_CTL);
1635 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
Jeff Garzikb4479162007-10-25 20:47:30 -04001636 readw(mmio + NV_ADMA_CTL); /* flush posted write */
Robert Hancockfbbb2622006-10-27 19:08:41 -07001637 udelay(1);
1638 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
Jeff Garzikb4479162007-10-25 20:47:30 -04001639 readw(mmio + NV_ADMA_CTL); /* flush posted write */
Robert Hancockfbbb2622006-10-27 19:08:41 -07001640 }
1641
Tejun Heo9363c382008-04-07 22:47:16 +09001642 ata_sff_error_handler(ap);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001643}
1644
Kuan Luof140f0f2007-10-15 15:16:53 -04001645static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1646{
1647 struct nv_swncq_port_priv *pp = ap->private_data;
1648 struct defer_queue *dq = &pp->defer_queue;
1649
1650 /* queue is full */
1651 WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1652 dq->defer_bits |= (1 << qc->tag);
1653 dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1654}
1655
1656static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1657{
1658 struct nv_swncq_port_priv *pp = ap->private_data;
1659 struct defer_queue *dq = &pp->defer_queue;
1660 unsigned int tag;
1661
1662 if (dq->head == dq->tail) /* null queue */
1663 return NULL;
1664
1665 tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1666 dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1667 WARN_ON(!(dq->defer_bits & (1 << tag)));
1668 dq->defer_bits &= ~(1 << tag);
1669
1670 return ata_qc_from_tag(ap, tag);
1671}
1672
1673static void nv_swncq_fis_reinit(struct ata_port *ap)
1674{
1675 struct nv_swncq_port_priv *pp = ap->private_data;
1676
1677 pp->dhfis_bits = 0;
1678 pp->dmafis_bits = 0;
1679 pp->sdbfis_bits = 0;
1680 pp->ncq_flags = 0;
1681}
1682
1683static void nv_swncq_pp_reinit(struct ata_port *ap)
1684{
1685 struct nv_swncq_port_priv *pp = ap->private_data;
1686 struct defer_queue *dq = &pp->defer_queue;
1687
1688 dq->head = 0;
1689 dq->tail = 0;
1690 dq->defer_bits = 0;
1691 pp->qc_active = 0;
1692 pp->last_issue_tag = ATA_TAG_POISON;
1693 nv_swncq_fis_reinit(ap);
1694}
1695
1696static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1697{
1698 struct nv_swncq_port_priv *pp = ap->private_data;
1699
1700 writew(fis, pp->irq_block);
1701}
1702
1703static void __ata_bmdma_stop(struct ata_port *ap)
1704{
1705 struct ata_queued_cmd qc;
1706
1707 qc.ap = ap;
1708 ata_bmdma_stop(&qc);
1709}
1710
1711static void nv_swncq_ncq_stop(struct ata_port *ap)
1712{
1713 struct nv_swncq_port_priv *pp = ap->private_data;
1714 unsigned int i;
1715 u32 sactive;
1716 u32 done_mask;
1717
1718 ata_port_printk(ap, KERN_ERR,
1719 "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
1720 ap->qc_active, ap->link.sactive);
1721 ata_port_printk(ap, KERN_ERR,
1722 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
1723 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1724 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1725 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1726
1727 ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
Tejun Heo5682ed32008-04-07 22:47:16 +09001728 ap->ops->sff_check_status(ap),
Kuan Luof140f0f2007-10-15 15:16:53 -04001729 ioread8(ap->ioaddr.error_addr));
1730
1731 sactive = readl(pp->sactive_block);
1732 done_mask = pp->qc_active ^ sactive;
1733
1734 ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sacitve\n");
1735 for (i = 0; i < ATA_MAX_QUEUE; i++) {
1736 u8 err = 0;
1737 if (pp->qc_active & (1 << i))
1738 err = 0;
1739 else if (done_mask & (1 << i))
1740 err = 1;
1741 else
1742 continue;
1743
1744 ata_port_printk(ap, KERN_ERR,
1745 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1746 (pp->dhfis_bits >> i) & 0x1,
1747 (pp->dmafis_bits >> i) & 0x1,
1748 (pp->sdbfis_bits >> i) & 0x1,
1749 (sactive >> i) & 0x1,
1750 (err ? "error! tag doesn't exit" : " "));
1751 }
1752
1753 nv_swncq_pp_reinit(ap);
Tejun Heo5682ed32008-04-07 22:47:16 +09001754 ap->ops->sff_irq_clear(ap);
Kuan Luof140f0f2007-10-15 15:16:53 -04001755 __ata_bmdma_stop(ap);
1756 nv_swncq_irq_clear(ap, 0xffff);
1757}
1758
/*
 * nv_swncq_error_handler - EH entry point for SWNCQ ports
 *
 * If NCQ commands were outstanding, dump and quiesce the SWNCQ state
 * machine and request a reset before handing off to the generic SFF
 * error handler.
 */
static void nv_swncq_error_handler(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->link.eh_context;

	if (ap->link.sactive) {
		nv_swncq_ncq_stop(ap);
		ehc->i.action |= ATA_EH_RESET;
	}

	ata_sff_error_handler(ap);
}
1770
1771#ifdef CONFIG_PM
1772static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1773{
1774 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1775 u32 tmp;
1776
1777 /* clear irq */
1778 writel(~0, mmio + NV_INT_STATUS_MCP55);
1779
1780 /* disable irq */
1781 writel(0, mmio + NV_INT_ENABLE_MCP55);
1782
1783 /* disable swncq */
1784 tmp = readl(mmio + NV_CTL_MCP55);
1785 tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1786 writel(tmp, mmio + NV_CTL_MCP55);
1787
1788 return 0;
1789}
1790
1791static int nv_swncq_port_resume(struct ata_port *ap)
1792{
1793 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1794 u32 tmp;
1795
1796 /* clear irq */
1797 writel(~0, mmio + NV_INT_STATUS_MCP55);
1798
1799 /* enable irq */
1800 writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1801
1802 /* enable swncq */
1803 tmp = readl(mmio + NV_CTL_MCP55);
1804 writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1805
1806 return 0;
1807}
1808#endif
1809
/*
 * nv_swncq_host_init - one-time SWNCQ bring-up for the whole host
 *
 * Clears bit 7 of PCI config byte 0x7f ("ECO 398" workaround per the
 * original comment — vendor-specific, no public documentation), enables
 * SWNCQ on both channels, unmasks the MCP55 interrupt sources and acks
 * anything stale.
 */
static void nv_swncq_host_init(struct ata_host *host)
{
	u32 tmp;
	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable  ECO 398 */
	pci_read_config_byte(pdev, 0x7f, &regval);
	regval &= ~(1 << 7);
	pci_write_config_byte(pdev, 0x7f, regval);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	VPRINTK("HOST_CTL:0x%X\n", tmp);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	/* enable irq intr */
	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* clear port irq */
	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}
1835
1836static int nv_swncq_slave_config(struct scsi_device *sdev)
1837{
1838 struct ata_port *ap = ata_shost_to_port(sdev->host);
1839 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1840 struct ata_device *dev;
1841 int rc;
1842 u8 rev;
1843 u8 check_maxtor = 0;
1844 unsigned char model_num[ATA_ID_PROD_LEN + 1];
1845
1846 rc = ata_scsi_slave_config(sdev);
1847 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1848 /* Not a proper libata device, ignore */
1849 return rc;
1850
1851 dev = &ap->link.device[sdev->id];
1852 if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1853 return rc;
1854
1855 /* if MCP51 and Maxtor, then disable ncq */
1856 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1857 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1858 check_maxtor = 1;
1859
1860 /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1861 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1862 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1863 pci_read_config_byte(pdev, 0x8, &rev);
1864 if (rev <= 0xa2)
1865 check_maxtor = 1;
1866 }
1867
1868 if (!check_maxtor)
1869 return rc;
1870
1871 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1872
1873 if (strncmp(model_num, "Maxtor", 6) == 0) {
1874 ata_scsi_change_queue_depth(sdev, 1);
1875 ata_dev_printk(dev, KERN_NOTICE,
1876 "Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
1877 }
1878
1879 return rc;
1880}
1881
1882static int nv_swncq_port_start(struct ata_port *ap)
1883{
1884 struct device *dev = ap->host->dev;
1885 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1886 struct nv_swncq_port_priv *pp;
1887 int rc;
1888
1889 rc = ata_port_start(ap);
1890 if (rc)
1891 return rc;
1892
1893 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1894 if (!pp)
1895 return -ENOMEM;
1896
1897 pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1898 &pp->prd_dma, GFP_KERNEL);
1899 if (!pp->prd)
1900 return -ENOMEM;
1901 memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1902
1903 ap->private_data = pp;
1904 pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1905 pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1906 pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1907
1908 return 0;
1909}
1910
1911static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1912{
1913 if (qc->tf.protocol != ATA_PROT_NCQ) {
Tejun Heo9363c382008-04-07 22:47:16 +09001914 ata_sff_qc_prep(qc);
Kuan Luof140f0f2007-10-15 15:16:53 -04001915 return;
1916 }
1917
1918 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1919 return;
1920
1921 nv_swncq_fill_sg(qc);
1922}
1923
/*
 * nv_swncq_fill_sg - build the BMDMA PRD table for an NCQ command
 *
 * Each tag owns its own PRD table within pp->prd.  Scatterlist entries
 * are split so that no PRD entry crosses a 64KB boundary; a length of
 * exactly 0x10000 is encoded as 0 (len & 0xffff), the conventional
 * BMDMA encoding for 64KB.  The last entry is flagged ATA_PRD_EOT.
 */
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_prd *prd;
	unsigned int si, idx;

	/* this tag's slice of the per-port PRD area */
	prd = pp->prd + ATA_MAX_PRD * qc->tag;

	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		addr = (u32)sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			/* clamp each entry so it ends on a 64KB boundary */
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[idx].addr = cpu_to_le32(addr);
			prd[idx].flags_len = cpu_to_le32(len & 0xffff);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark end-of-table on the final entry written */
	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
1959
1960static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
1961 struct ata_queued_cmd *qc)
1962{
1963 struct nv_swncq_port_priv *pp = ap->private_data;
1964
1965 if (qc == NULL)
1966 return 0;
1967
1968 DPRINTK("Enter\n");
1969
1970 writel((1 << qc->tag), pp->sactive_block);
1971 pp->last_issue_tag = qc->tag;
1972 pp->dhfis_bits &= ~(1 << qc->tag);
1973 pp->dmafis_bits &= ~(1 << qc->tag);
1974 pp->qc_active |= (0x1 << qc->tag);
1975
Tejun Heo5682ed32008-04-07 22:47:16 +09001976 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
1977 ap->ops->sff_exec_command(ap, &qc->tf);
Kuan Luof140f0f2007-10-15 15:16:53 -04001978
1979 DPRINTK("Issued tag %u\n", qc->tag);
1980
1981 return 0;
1982}
1983
1984static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
1985{
1986 struct ata_port *ap = qc->ap;
1987 struct nv_swncq_port_priv *pp = ap->private_data;
1988
1989 if (qc->tf.protocol != ATA_PROT_NCQ)
Tejun Heo9363c382008-04-07 22:47:16 +09001990 return ata_sff_qc_issue(qc);
Kuan Luof140f0f2007-10-15 15:16:53 -04001991
1992 DPRINTK("Enter\n");
1993
1994 if (!pp->qc_active)
1995 nv_swncq_issue_atacmd(ap, qc);
1996 else
1997 nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */
1998
1999 return 0;
2000}
2001
/*
 * nv_swncq_hotplug - handle a hot plug/unplug interrupt
 *
 * Clears SError (read-then-write-back, same pattern AHCI needs), notes
 * whether a device was added or removed in the EH description, and
 * freezes the port so EH picks up the hotplug event.
 */
static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
{
	u32 serror;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	sata_scr_read(&ap->link, SCR_ERROR, &serror);
	sata_scr_write(&ap->link, SCR_ERROR, serror);

	/* analyze @irq_stat */
	if (fis & NV_SWNCQ_IRQ_ADDED)
		ata_ehi_push_desc(ehi, "hot plug");
	else if (fis & NV_SWNCQ_IRQ_REMOVED)
		ata_ehi_push_desc(ehi, "hot unplug");

	ata_ehi_hotplugged(ehi);

	/* okay, let's hand over to EH */
	ehi->serror |= serror;

	ata_port_freeze(ap);
}
2026
/*
 * nv_swncq_sdbfis - process a Set Device Bits FIS (NCQ completions)
 *
 * Compares the driver's in-flight mask against the hardware SActive
 * register to find tags the drive has completed, completes their qcs,
 * and then decides how to keep the queue moving: reissue the last
 * command if its D2H Register FIS appears to have been lost, or release
 * the next deferred command.
 *
 * Returns the number of commands completed, or -EINVAL after flagging
 * EH for a BMDMA error or an impossible SActive transition.
 */
static int nv_swncq_sdbfis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 sactive;
	int nr_done = 0;
	u32 done_mask;
	int i;
	u8 host_stat;
	u8 lack_dhfis = 0;

	host_stat = ap->ops->bmdma_status(ap);
	if (unlikely(host_stat & ATA_DMA_ERR)) {
		/* error when transferring data to/from memory */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
		ehi->err_mask |= AC_ERR_HOST_BUS;
		ehi->action |= ATA_EH_RESET;
		return -EINVAL;
	}

	ap->ops->sff_irq_clear(ap);
	__ata_bmdma_stop(ap);

	sactive = readl(pp->sactive_block);
	/* bits that were in flight but are no longer in SActive completed */
	done_mask = pp->qc_active ^ sactive;

	/* a bit set in SActive that we never issued means corruption */
	if (unlikely(done_mask & sactive)) {
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition"
				  "(%08x->%08x)", pp->qc_active, sactive);
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		return -EINVAL;
	}
	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		if (!(done_mask & (1 << i)))
			continue;

		qc = ata_qc_from_tag(ap, i);
		if (qc) {
			ata_qc_complete(qc);
			pp->qc_active &= ~(1 << i);
			pp->dhfis_bits &= ~(1 << i);
			pp->dmafis_bits &= ~(1 << i);
			pp->sdbfis_bits |= (1 << i);
			nr_done++;
		}
	}

	if (!ap->qc_active) {
		/* everything drained: reset the SWNCQ state machine */
		DPRINTK("over\n");
		nv_swncq_pp_reinit(ap);
		return nr_done;
	}

	/* some in-flight command has already seen its D2H FIS; the
	   drive is still making progress on its own */
	if (pp->qc_active & pp->dhfis_bits)
		return nr_done;

	if ((pp->ncq_flags & ncq_saw_backout) ||
	    (pp->qc_active ^ pp->dhfis_bits))
		/* if the controller can't get a device-to-host register FIS,
		 * the driver needs to reissue the command.
		 */
		lack_dhfis = 1;

	DPRINTK("id 0x%x QC: qc_active 0x%x,"
		"SWNCQ:qc_active 0x%X defer_bits %X "
		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
		ap->print_id, ap->qc_active, pp->qc_active,
		pp->defer_queue.defer_bits, pp->dhfis_bits,
		pp->dmafis_bits, pp->last_issue_tag);

	nv_swncq_fis_reinit(ap);

	if (lack_dhfis) {
		/* reissue the command whose D2H FIS went missing */
		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
		nv_swncq_issue_atacmd(ap, qc);
		return nr_done;
	}

	if (pp->defer_queue.defer_bits) {
		/* send deferral queue command */
		qc = nv_swncq_qc_from_dq(ap);
		WARN_ON(qc == NULL);
		nv_swncq_issue_atacmd(ap, qc);
	}

	return nr_done;
}
2118
2119static inline u32 nv_swncq_tag(struct ata_port *ap)
2120{
2121 struct nv_swncq_port_priv *pp = ap->private_data;
2122 u32 tag;
2123
2124 tag = readb(pp->tag_block) >> 2;
2125 return (tag & 0x1f);
2126}
2127
/*
 * nv_swncq_dmafis - program and start BMDMA for the tag the drive chose
 *
 * Called when a DMA Setup FIS arrives: the drive has picked which
 * queued command it will transfer next (tag read from the hardware),
 * so point the BMDMA engine at that tag's PRD table, set the transfer
 * direction and set the start bit.
 *
 * Returns 1 if DMA was started, 0 if the tag had no queued command.
 */
static int nv_swncq_dmafis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned int rw;
	u8 dmactl;
	u32 tag;
	struct nv_swncq_port_priv *pp = ap->private_data;

	__ata_bmdma_stop(ap);
	tag = nv_swncq_tag(ap);

	DPRINTK("dma setup tag 0x%x\n", tag);
	qc = ata_qc_from_tag(ap, tag);

	if (unlikely(!qc))
		return 0;

	rw = qc->tf.flags & ATA_TFLAG_WRITE;

	/* load PRD table addr. */
	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~ATA_DMA_WR;
	/* ATA_DMA_WR is set for ATA reads, i.e. DMA toward memory */
	if (!rw)
		dmactl |= ATA_DMA_WR;

	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	return 1;
}
2161
/*
 * nv_swncq_host_interrupt - per-port SWNCQ interrupt state machine
 *
 * @fis is the per-port slice of the MCP55 interrupt status word.  The
 * handler acks it, filters out hotplug and frozen-port cases, clears
 * SError, then advances the NCQ protocol: SDB FIS -> complete finished
 * tags (nv_swncq_sdbfis), D2H Register FIS -> note command acceptance
 * and possibly release the next deferred command, DMA Setup FIS ->
 * start BMDMA for the drive-selected tag (nv_swncq_dmafis).  Protocol
 * violations freeze the port for EH.
 */
static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 serror;
	u8 ata_stat;
	int rc = 0;

	/* reading status also clears the device's INTRQ */
	ata_stat = ap->ops->sff_check_status(ap);
	nv_swncq_irq_clear(ap, fis);
	if (!fis)
		return;

	/* EH owns the port; don't touch the state machine */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
		nv_swncq_hotplug(ap, fis);
		return;
	}

	if (!pp->qc_active)
		return;

	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
		return;
	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);

	if (ata_stat & ATA_ERR) {
		/* device reported an error: hand everything to EH */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
		ehi->err_mask |= AC_ERR_DEV;
		ehi->serror |= serror;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
		return;
	}

	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
		/* If the IRQ is backout, driver must issue
		 * the new command again some time later.
		 */
		pp->ncq_flags |= ncq_saw_backout;
	}

	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
		pp->ncq_flags |= ncq_saw_sdb;
		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
			ap->print_id, pp->qc_active, pp->dhfis_bits,
			pp->dmafis_bits, readl(pp->sactive_block));
		rc = nv_swncq_sdbfis(ap);
		if (rc < 0)
			goto irq_error;
	}

	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
		/* The interrupt indicates the new command
		 * was transmitted correctly to the drive.
		 */
		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
		pp->ncq_flags |= ncq_saw_d2h;
		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
			/* a D2H FIS after SDB/backout in the same window
			   violates the expected FIS ordering */
			ata_ehi_push_desc(ehi, "illegal fis transaction");
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			goto irq_error;
		}

		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
		    !(pp->ncq_flags & ncq_saw_dmas)) {
			/* drive accepted the command but isn't transferring
			   yet; if it's no longer busy, release the next
			   deferred command */
			ata_stat = ap->ops->sff_check_status(ap);
			if (ata_stat & ATA_BUSY)
				goto irq_exit;

			if (pp->defer_queue.defer_bits) {
				DPRINTK("send next command\n");
				qc = nv_swncq_qc_from_dq(ap);
				nv_swncq_issue_atacmd(ap, qc);
			}
		}
	}

	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
		/* program the dma controller with appropriate PRD buffers
		 * and start the DMA transfer for requested command.
		 */
		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
		pp->ncq_flags |= ncq_saw_dmas;
		rc = nv_swncq_dmafis(ap);
	}

irq_exit:
	return;
irq_error:
	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
	ata_port_freeze(ap);
	return;
}
2262
2263static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2264{
2265 struct ata_host *host = dev_instance;
2266 unsigned int i;
2267 unsigned int handled = 0;
2268 unsigned long flags;
2269 u32 irq_stat;
2270
2271 spin_lock_irqsave(&host->lock, flags);
2272
2273 irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2274
2275 for (i = 0; i < host->n_ports; i++) {
2276 struct ata_port *ap = host->ports[i];
2277
2278 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
2279 if (ap->link.sactive) {
2280 nv_swncq_host_interrupt(ap, (u16)irq_stat);
2281 handled = 1;
2282 } else {
2283 if (irq_stat) /* reserve Hotplug */
2284 nv_swncq_irq_clear(ap, 0xfff0);
2285
2286 handled += nv_host_intr(ap, (u8)irq_stat);
2287 }
2288 }
2289 irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2290 }
2291
2292 spin_unlock_irqrestore(&host->lock, flags);
2293
2294 return IRQ_RETVAL(handled);
2295}
2296
/* PCI probe callback: verify the device is a SATA (not IDE) controller,
 * pick the programming model (ADMA/SWNCQ/legacy) based on the PCI ID
 * table data and module parameters, map the MMIO register space, and
 * register/activate the ATA host.  Returns 0 or a -errno.
 */
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;	/* print version banner only once */
	const struct ata_port_info *ppi[] = { NULL, NULL };
	struct nv_pi_priv *ipriv;
	struct ata_host *host;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;	/* board type from PCI ID table */

	/* Make sure this is a SATA controller by counting the number of bars
	 * (NVIDIA SATA controllers will always have six bars).  Otherwise,
	 * it's an IDE controller and we ignore it.
	 */
	for (bar = 0; bar < 6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* managed enable: no explicit disable needed on error paths */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* determine type and allocate host */
	if (type == CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
	}

	if (type == SWNCQ) {
		if (swncq_enabled)
			dev_printk(KERN_NOTICE, &pdev->dev,
				   "Using SWNCQ mode\n");
		else
			type = GENERIC;	/* SWNCQ disabled by module param */
	}

	ppi[0] = &nv_port_info[type];
	ipriv = ppi[0]->private_data;
	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;

	/* devres allocation: freed automatically when the device detaches */
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	hpriv->type = type;	/* remembered for suspend/resume re-setup */
	host->private_data = hpriv;

	/* request and iomap NV_MMIO_BAR */
	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;

	/* configure SCR access */
	base = host->iomap[NV_MMIO_BAR];
	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	/* init ADMA */
	if (type == ADMA) {
		rc = nv_adma_host_init(host);
		if (rc)
			return rc;
	} else if (type == SWNCQ)
		nv_swncq_host_init(host);

	pci_set_master(pdev);
	/* irq handler and scsi_host_template come from the chosen port info */
	return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
				 IRQF_SHARED, ipriv->sht);
}
2380
Tejun Heo438ac6d2007-03-02 17:31:26 +09002381#ifdef CONFIG_PM
/* PCI resume callback: after the generic PCI/libata resume, redo the
 * chip-specific config-space setup that probe established (lost across a
 * real suspend), then resume the libata host.
 */
static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	/* Only a real suspend (not e.g. a freeze) requires re-programming
	 * the controller's config registers.
	 */
	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if (hpriv->type >= CK804) {
			u8 regval;

			/* re-enable the SATA register space (see probe) */
			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if (hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;
			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			/* NOTE(review): a port whose NV_ADMA_ATAPI_SETUP_COMPLETE
			 * flag is set gets its ADMA enable/posted-write bits
			 * cleared — presumably ATAPI devices must run in legacy
			 * mode; confirm against the port setup code.
			 */
			pp = host->ports[0]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
					  NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
					  NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}
Tejun Heo438ac6d2007-03-02 17:31:26 +09002429#endif
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002430
Jeff Garzikcca39742006-08-24 03:19:22 -04002431static void nv_ck804_host_stop(struct ata_host *host)
Tejun Heoada364e2006-06-17 15:49:56 +09002432{
Jeff Garzikcca39742006-08-24 03:19:22 -04002433 struct pci_dev *pdev = to_pci_dev(host->dev);
Tejun Heoada364e2006-06-17 15:49:56 +09002434 u8 regval;
2435
2436 /* disable SATA space for CK804 */
2437 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2438 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2439 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
Tejun Heoada364e2006-06-17 15:49:56 +09002440}
2441
Robert Hancockfbbb2622006-10-27 19:08:41 -07002442static void nv_adma_host_stop(struct ata_host *host)
2443{
2444 struct pci_dev *pdev = to_pci_dev(host->dev);
Robert Hancockfbbb2622006-10-27 19:08:41 -07002445 u32 tmp32;
2446
Robert Hancockfbbb2622006-10-27 19:08:41 -07002447 /* disable ADMA on the ports */
2448 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2449 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2450 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2451 NV_MCP_SATA_CFG_20_PORT1_EN |
2452 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2453
2454 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2455
2456 nv_ck804_host_stop(host);
2457}
2458
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459static int __init nv_init(void)
2460{
Pavel Roskinb7887192006-08-10 18:13:18 +09002461 return pci_register_driver(&nv_pci_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002462}
2463
/* Module exit point: drop our PCI driver registration. */
static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}
2468
module_init(nv_init);
module_exit(nv_exit);
/* Module parameters are read-only after load (mode 0444); both are
 * consulted once per device at probe time in nv_init_one().
 */
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
module_param_named(swncq, swncq_enabled, bool, 0444);
MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
Kuan Luof140f0f2007-10-15 15:16:53 -04002475